# nixos/henrovnix_ok/home/apps/ai.nix
{ config, pkgs, lib, flakeRoot, ... }:
let
  # Path to the environment file containing OLLAMA_HOST and MISTRAL_API_KEY
  AiRepoEnv = flakeRoot + "/assets/conf/apps/ai/ai.env";
  # Path to the Zed configuration template
  AiRepoConf = flakeRoot + "/assets/conf/apps/ai/ai.conf";
  # Environment file parser: skips blanks and comments, splits each line on
  # the first '=' only (the value is rejoined, so it may itself contain '=')
  envVars = lib.listToAttrs (
    builtins.map (
      line: let
        parts = lib.splitString "=" line;
      in {
        # Key is everything before the first '='
        name = lib.trim (builtins.head parts);
        # Value is everything after the first '='
        value = lib.trim (builtins.concatStringsSep "=" (builtins.tail parts));
      }
    ) (builtins.filter (
      line: let trimmed = lib.trim line;
      in trimmed != "" && !lib.hasPrefix "#" trimmed
    ) (lib.splitString "\n" (builtins.readFile AiRepoEnv)))
  );
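  # The parser above expects ai.env in plain KEY=VALUE form, one pair per
  # line, e.g. (hypothetical values, not the real file contents):
  #   # comment lines and blank lines are skipped
  #   OLLAMA_HOST=http://127.0.0.1:11434
  #   MISTRAL_API_KEY=<your-api-key>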
in
{
  # Install required packages (the Zed editor lives under `zed-editor`
  # in nixpkgs; plain `zed` is a different package)
  home.packages = [
    pkgs.ollama-vulkan
    pkgs.zed-editor
  ];
  # Set environment variables for the user session
  home.sessionVariables = {
    OLLAMA_HOST = envVars.OLLAMA_HOST or "http://127.0.0.1:11434";
    MISTRAL_API_KEY = envVars.MISTRAL_API_KEY or "";
  };
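  # Note: home.sessionVariables only take effect in new login sessions; in an
  # already-open shell, source hm-session-vars.sh (for a standalone install,
  # typically ~/.nix-profile/etc/profile.d/hm-session-vars.sh) to pick them up.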
  # Configure Ollama as a user service. Home Manager uses capitalized
  # systemd section names (Unit / Service / Install), unlike NixOS modules.
  systemd.user.services.ollama = {
    Unit = {
      Description = "Ollama service for local AI models";
      After = [ "network-online.target" ];
    };
    Install = {
      WantedBy = [ "default.target" ];
    };
    Service = {
      # `ollama serve` stays in the foreground, so the default "simple"
      # type is correct; "forking" would make systemd wait indefinitely
      ExecStart = "${pkgs.ollama-vulkan}/bin/ollama serve";
      # Pull the default models once the server is up; commands in unit
      # files need absolute paths, hence the coreutils sleep
      ExecStartPost = [
        "${pkgs.coreutils}/bin/sleep 5"
        "${pkgs.ollama-vulkan}/bin/ollama pull codellama:70b"
        "${pkgs.ollama-vulkan}/bin/ollama pull mixtral:8x7b"
      ];
      Restart = "on-failure";
    };
  };
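  # After `home-manager switch`, the service can be inspected with:
  #   systemctl --user status ollama
  #   journalctl --user -u ollama -f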
  # Generate Zed's settings.json by merging AI settings into the template
  home.file.".config/zed/settings.json".text = lib.mkForce (
    builtins.toJSON (
      (builtins.fromJSON (builtins.readFile AiRepoConf))
      // {
        mistral = {
          apiKey = envVars.MISTRAL_API_KEY or "";
          defaultModel = "mistral-pro";
        };
        ollama = {
          endpoint = envVars.OLLAMA_HOST or "http://127.0.0.1:11434";
          defaultModel = "codellama:70b";
        };
      }
    )
  );
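  # AiRepoConf is expected to hold Zed's base settings as plain JSON, e.g.
  # (hypothetical template): { "theme": "One Dark", "vim_mode": false }
  # Note that `//` merges shallowly: any top-level "mistral" or "ollama" keys
  # already in the template are replaced wholesale, not merged key-by-key.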
  # Debug aid: dump the parsed env vars so the parser output can be inspected.
  # Remove once verified, since it writes the API key to disk in plain text.
  home.file.".config/ai-env-test".text = lib.mkForce (
    builtins.toJSON envVars
  );
  # --- Usage Notes ---
  # 1. Pulling additional models:
  #    To add more models later, run:
  #      ollama pull <model-name>
  #    Example: ollama pull llama3:8b
  #
  # 2. Switching GPU backends (see the sketch below):
  #    - For NVIDIA: replace `ollama-vulkan` with `ollama-cuda`
  #    - For AMD: use `ollama-rocm` and ensure ROCm is installed
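  #    A one-line swap (sketch, assuming you add this binding to the
  #    `let` block above): bind the package once,
  #      ollamaPkg = pkgs.ollama-vulkan;  # or pkgs.ollama-cuda / pkgs.ollama-rocm
  #    then reference ${ollamaPkg} instead of pkgs.ollama-vulkan throughout.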
  #
  # 3. Zed plugin setup:
  #    - Install the Ollama and Mistral plugins in Zed via the plugin marketplace
  #    - The Ollama plugin will use the local models pulled above
  #    - The Mistral plugin will use MISTRAL_API_KEY for cloud access
  #
  # 4. Security:
  #    - Never commit assets/conf/apps/ai/ai.env to version control
  #    - For extra security, encrypt ai.env using sops-nix or age
  #
  # 5. Persistent service:
  #    To keep Ollama running after logout, enable lingering:
  #      loginctl enable-linger $(whoami)
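  #    Verify with: loginctl show-user --property=Linger $(whoami)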
}