{ config, pkgs, lib, flakeRoot, ... }:

let
  # Path to the environment file containing OLLAMA_HOST and MISTRAL_API_KEY
  AiRepoEnv = flakeRoot + "/assets/conf/apps/ai/ai.env";

  # Path to the Zed configuration template
  AiRepoConf = flakeRoot + "/assets/conf/apps/ai/ai.conf";
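  # For example, with a hypothetical flakeRoot of /home/me/dotfiles, AiRepoEnv
  # resolves to /home/me/dotfiles/assets/conf/apps/ai/ai.env.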

  # Parse the environment file into an attribute set (key-value pairs)
  # Steps:
  #   1. Read the file as a string
  #   2. Split it into lines
  #   3. Drop empty lines and comment lines
  #   4. Split each line at '=' into a key and a value
  #   5. Build the attribute set with builtins.listToAttrs
  envVars = builtins.listToAttrs (
    builtins.map (
      line:
      let parts = lib.splitString "=" line;
      in {
        name = builtins.head parts;
        # Re-join the tail so values that contain '=' stay intact
        value = lib.concatStringsSep "=" (builtins.tail parts);
      }
    ) (builtins.filter (line: line != "" && !lib.hasPrefix "#" line)
        (lib.splitString "\n" (builtins.readFile AiRepoEnv)))
  );
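
  # For example, an ai.env containing the (hypothetical) lines
  #   OLLAMA_HOST=http://127.0.0.1:11434
  #   MISTRAL_API_KEY=example-key
  # evaluates to:
  #   { MISTRAL_API_KEY = "example-key"; OLLAMA_HOST = "http://127.0.0.1:11434"; }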

in
{
  # Install the required packages:
  # - ollama-vulkan: Ollama with Vulkan support (for AMD/CPU)
  # - zed-editor: the Zed code editor (packaged as `zed-editor` in nixpkgs)
  home.packages = [
    pkgs.ollama-vulkan
    pkgs.zed-editor
  ];

  # Set environment variables for the user session
  # These will be available to all user processes
  home.sessionVariables = {
    # Ollama server address; defaults to localhost if not set in ai.env
    OLLAMA_HOST = envVars.OLLAMA_HOST or "http://127.0.0.1:11434";

    # Mistral API key; empty string if not set in ai.env
    MISTRAL_API_KEY = envVars.MISTRAL_API_KEY or "";
  };
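  # home-manager exports these through its generated hm-session-vars.sh, so
  # they take effect on the next login; a quick sanity check afterwards:
  #   echo "$OLLAMA_HOST"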

  # Configure Ollama as a user service (runs when the user logs in)
  # Note: home-manager's systemd.user.services takes the raw INI sections
  # (Unit/Service/Install), not the NixOS-style lowercase options
  systemd.user.services.ollama = {
    Unit = {
      Description = "Ollama service for local AI models";
      # Ensure the network is available before starting
      After = [ "network.target" ];
    };

    Service = {
      # `ollama serve` stays in the foreground, so run it as a simple
      # service rather than a forking one
      Type = "simple";

      # Command to start the Ollama server
      ExecStart = "${pkgs.ollama-vulkan}/bin/ollama serve";

      # ExecStartPost takes a single command line, so wrap the follow-up
      # steps in a shell script
      ExecStartPost = toString (pkgs.writeShellScript "ollama-pull-models" ''
        # Wait for the server to initialize
        sleep 5

        # Pull the default models
        ${pkgs.ollama-vulkan}/bin/ollama pull codellama:70b # coding model
        ${pkgs.ollama-vulkan}/bin/ollama pull mixtral:8x7b  # chat model
      '');

      # Restart if the service crashes
      Restart = "on-failure";
    };

    # Start with the user session
    Install.WantedBy = [ "default.target" ];
  };
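
  # After a `home-manager switch`, the unit can be inspected with the usual
  # systemd user commands:
  #   systemctl --user status ollama
  #   journalctl --user -u ollama -f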

  # Generate Zed's settings.json with API keys and endpoints
  # Steps:
  #   1. Read the template JSON file
  #   2. Parse it from JSON into a Nix attribute set
  #   3. Override specific values with our environment variables
  #   4. Convert back to JSON and write it to the config file
  # Note: // is a shallow merge, so any mistral/ollama sections already in
  # ai.conf are replaced wholesale rather than merged key by key
  home.file.".config/zed/settings.json".text = lib.mkForce (
    builtins.toJSON (
      # Start with the base config from ai.conf
      (builtins.fromJSON (builtins.readFile AiRepoConf))
      # Override specific values
      // {
        mistral = {
          # Use the API key from ai.env, or an empty string if not set
          apiKey = envVars.MISTRAL_API_KEY or "";
          defaultModel = "mistral-pro"; # Default Mistral model
        };
        ollama = {
          # Use the host from ai.env, or the default localhost endpoint
          endpoint = envVars.OLLAMA_HOST or "http://127.0.0.1:11434";
          defaultModel = "codellama:70b"; # Default Ollama model for coding
        };
      }
    )
  );
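
  # As an illustration, a hypothetical ai.conf template such as
  #   { "theme": "One Dark", "mistral": {}, "ollama": {} }
  # yields a settings.json where the mistral/ollama sections above are filled
  # in and every other key passes through unchanged.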

  # --- Usage Notes ---
  # 1. Pulling additional models:
  #    To add more models later, run:
  #      ollama pull <model-name>
  #    Example: ollama pull llama3:8b
  #
  # 2. Switching GPU backends:
  #    - For NVIDIA: replace all `ollama-vulkan` with `ollama-cuda` (CUDA build)
  #    - For AMD: use `ollama-rocm` and ensure ROCm is installed
  #
  # 3. Zed plugin setup:
  #    - Install the Ollama and Mistral plugins in Zed via the plugin marketplace
  #    - The Ollama plugin will use the local models pulled above
  #    - The Mistral plugin will use MISTRAL_API_KEY for cloud access
  #
  # 4. Security:
  #    - Never commit ./assets/conf/apps/ai/ai.env to version control
  #    - For extra security, encrypt ai.env using sops-nix or age
  #
  # 5. Persistent service:
  #    To keep Ollama running after logout, enable lingering:
  #      loginctl enable-linger $(whoami)
}