{ config, pkgs, lib, flakeRoot, ... }:

let
  # Path to the environment file containing OLLAMA_HOST and MISTRAL_API_KEY
  aiRepoEnv = flakeRoot + "/assets/conf/apps/ai/ai.env";

  # Path to the Zed configuration template
  aiRepoConf = flakeRoot + "/assets/conf/apps/ai/ai.conf";

  # Environment-file parser (see the worked example after the module). It
  # handles:
  # - lines with multiple '=' characters (splits on the first '=' only)
  # - empty values
  # - whitespace around keys and values
  # - comments (everything after a '#')
  envVars = builtins.listToAttrs (
    builtins.map (
      line:
      let
        # Drop everything after the first '#', then trim whitespace
        cleanLine = lib.trim (builtins.head (lib.splitString "#" line));
        parts = lib.splitString "=" cleanLine;
        # The key is the first field; the value is everything after the
        # first '=', re-joined so that values may themselves contain '='
        key = lib.trim (builtins.head parts);
        value = lib.trim (builtins.concatStringsSep "=" (lib.drop 1 parts));
      in
      { name = key; value = value; }
    ) (
      builtins.filter (
        line:
        let t = lib.trim line; in
        t != "" && !lib.hasPrefix "#" t
      ) (lib.splitString "\n" (builtins.readFile aiRepoEnv))
    )
  );
in
{
  # Install required packages
  home.packages = [
    pkgs.ollama-vulkan
    pkgs.zed-editor
  ];

  # Set environment variables for the user session
  home.sessionVariables = {
    OLLAMA_HOST = envVars.OLLAMA_HOST or "http://127.0.0.1:11434";
    MISTRAL_API_KEY = envVars.MISTRAL_API_KEY or "";
  };

  # Configure Ollama as a user service (home-manager units use the
  # capitalized Unit/Service/Install sections)
  systemd.user.services.ollama = {
    Unit = {
      Description = "Ollama service for local AI models";
      After = [ "network.target" ];
    };
    Install.WantedBy = [ "default.target" ];
    Service = {
      # `ollama serve` stays in the foreground, so the service is simple,
      # not forking
      Type = "simple";
      ExecStart = "${pkgs.ollama-vulkan}/bin/ollama serve";
      # systemd does not run Exec* lines through a shell, so wrap the
      # multi-command pull step in a script
      ExecStartPost = "${pkgs.writeShellScript "ollama-pull-models" ''
        sleep 5
        ${pkgs.ollama-vulkan}/bin/ollama pull codellama:70b
        ${pkgs.ollama-vulkan}/bin/ollama pull mixtral:8x7b
      ''}";
      Restart = "on-failure";
    };
  };

  # Generate Zed's settings.json by extending the template with the AI
  # provider settings
  home.file.".config/zed/settings.json".text = lib.mkForce (
    builtins.toJSON (
      (builtins.fromJSON (builtins.readFile aiRepoConf))
      // {
        mistral = {
          apiKey = envVars.MISTRAL_API_KEY or "";
          defaultModel = "mistral-pro";
        };
        ollama = {
          endpoint = envVars.OLLAMA_HOST or "http://127.0.0.1:11434";
          defaultModel = "codellama:70b";
        };
      }
    )
  );

  # --- Usage Notes ---
  # 1. Pulling Additional Models:
  #    To add more models later, run:
  #      ollama pull <model-name>
  #    Example: ollama pull llama3:8b
  #
  # 2. Switching GPU Backends (see the sketch after the module):
  #    - For NVIDIA: replace every `ollama-vulkan` with `ollama-cuda`
  #    - For AMD: use `ollama-rocm` and ensure ROCm is installed
  #
  # 3. Zed Plugin Setup:
  #    - Install the Ollama and Mistral plugins in Zed via the plugin
  #      marketplace
  #    - The Ollama plugin will use the local models pulled above
  #    - The Mistral plugin will use MISTRAL_API_KEY for cloud access
  #
  # 4. Security (see the sops-nix sketch after the module):
  #    - Never commit assets/conf/apps/ai/ai.env to version control
  #    - For extra security, encrypt ai.env using sops-nix or age
  #
  # 5. Persistent Service:
  #    To keep Ollama running after logout, enable lingering:
  #      loginctl enable-linger $(whoami)
}
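
# --- Worked example: env-file parsing ---
# Given an ai.env containing (hypothetical values):
#
#   # local daemon
#   OLLAMA_HOST = http://127.0.0.1:11434
#   MISTRAL_API_KEY=abc=123   # '=' inside the value is preserved
#
# envVars evaluates to:
#
#   {
#     OLLAMA_HOST = "http://127.0.0.1:11434";
#     MISTRAL_API_KEY = "abc=123";
#   }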
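
# --- Sketch: switching GPU backends (Usage Note 2) ---
# A minimal sketch for changing the backend in one place rather than editing
# every reference. `pkgs.ollama-cuda` and `pkgs.ollama-rocm` are current
# nixpkgs attributes; `ollama-vulkan` is assumed to come from an overlay:
#
#   let
#     ollamaPkg = pkgs.ollama-vulkan;  # or pkgs.ollama-cuda / pkgs.ollama-rocm
#   in {
#     home.packages = [ ollamaPkg pkgs.zed-editor ];
#     systemd.user.services.ollama.Service.ExecStart =
#       "${ollamaPkg}/bin/ollama serve";
#     # ...and likewise for the ExecStartPost pull script
#   }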
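
# --- Sketch: encrypting ai.env with sops-nix (Usage Note 4) ---
# A minimal sketch, assuming the sops-nix home-manager module is imported and
# an age key already exists; the file name "ai.env.enc", the secret name
# "ai-env", and the key path are assumptions, and the option names follow the
# sops-nix documentation:
#
#   sops = {
#     age.keyFile = "${config.home.homeDirectory}/.config/sops/age/keys.txt";
#     defaultSopsFile = flakeRoot + "/secrets/ai.env.enc";
#     secrets."ai-env" = { };
#   };
#
# Note that the parser above reads ai.env at evaluation time, while sops-nix
# decrypts at activation time, so the decrypted file would instead be wired
# in by path, e.g. via EnvironmentFile = config.sops.secrets."ai-env".path
# in the ollama unit's Service section.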