{ config, pkgs, lib, flakeRoot, ... }:

let
  # Path to the environment file, resolved relative to the flake root
  aiRepoEnv = flakeRoot + "/assets/conf/apps/ai/ai.env";
  # Parse KEY=VALUE lines from an env-style file into an attrset,
  # skipping blank lines and `#` comments.
  # Note: lib.trim requires nixpkgs 24.05 or newer.
  parseEnv = file:
    lib.foldl' (acc: line:
      let
        trimmed = lib.trim line;
      in
      if trimmed == "" || lib.hasPrefix "#" trimmed
      then acc
      else
        let parts = lib.splitString "=" trimmed;
        in
        if builtins.length parts >= 2
        # Re-join the tail so values that themselves contain '=' stay intact
        then acc // { ${builtins.head parts} = lib.concatStringsSep "=" (builtins.tail parts); }
        else acc
    ) { } (lib.splitString "\n" (builtins.readFile file));

  envVars = parseEnv aiRepoEnv;
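  # ai.env is expected to contain plain KEY=VALUE lines, e.g. (placeholder
  # values, not real ones):
  #   OLLAMA_HOST=http://127.0.0.1:11434
  #   MISTRAL_API_KEY=<your-key-here>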
in
{
  # Install required packages
  home.packages = [
    pkgs.ollama-vulkan
    pkgs.zed-editor
  ];
  # Set environment variables, falling back to defaults when ai.env lacks them
  home.sessionVariables = {
    OLLAMA_HOST = envVars.OLLAMA_HOST or "http://127.0.0.1:11434";
    MISTRAL_API_KEY = envVars.MISTRAL_API_KEY or "";
  };
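  # Note: session variables take effect on the next login and are not
  # inherited by systemd user units; the ollama service below would need
  # explicit Service.Environment entries to see them.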
  # Configure the Ollama service using Home Manager's systemd.user.services syntax
  systemd.user.services.ollama = {
    Unit = {
      Description = "Ollama service for local AI models";
      After = [ "network.target" ];
      Wants = [ "network.target" ];
    };

    Service = {
      # `ollama serve` runs in the foreground, so the type must be "simple";
      # with "forking", systemd would wait for a daemonization that never
      # happens and eventually kill the unit.
      Type = "simple";
      ExecStart = "${pkgs.ollama-vulkan}/bin/ollama serve";
      # ExecStartPost expects a single command line, not a shell snippet,
      # so the model pulls are wrapped in a script. The unit stays
      # "activating" until these (large) downloads finish.
      ExecStartPost = "${pkgs.writeShellScript "ollama-pull-models" ''
        sleep 5
        ${pkgs.ollama-vulkan}/bin/ollama pull codellama:70b
        ${pkgs.ollama-vulkan}/bin/ollama pull mixtral:8x7b
      ''}";
      Restart = "on-failure";
    };

    Install = {
      WantedBy = [ "default.target" ];
    };
  };
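  # After `home-manager switch`, the unit can be checked with:
  #   systemctl --user status ollama
  #   journalctl --user -u ollama -f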
  # Zed configuration
  home.file.".config/zed/settings.json".text = lib.mkForce (
    builtins.toJSON {
      mistral = {
        apiKey = envVars.MISTRAL_API_KEY or "";
        defaultModel = "mistral-pro";
      };
      ollama = {
        endpoint = envVars.OLLAMA_HOST or "http://127.0.0.1:11434";
        defaultModel = "codellama:70b";
      };
    }
  );
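  # With the defaults above, the rendered settings.json comes out roughly as:
  #   {"mistral":{"apiKey":"","defaultModel":"mistral-pro"},
  #    "ollama":{"defaultModel":"codellama:70b","endpoint":"http://127.0.0.1:11434"}}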
  # --- Usage Notes ---
  # 1. Pulling Additional Models:
  #    To add more models later, run:
  #      ollama pull <model-name>
  #    Example: ollama pull llama3:8b
  #
  # 2. Switching GPU Backends:
  #    - For NVIDIA: replace all `ollama-vulkan` references with `ollama-cuda`
  #    - For AMD: use `ollama-rocm` and ensure ROCm is installed
  #
  # 3. Zed Plugin Setup:
  #    - Install the Ollama and Mistral plugins in Zed via the plugin marketplace
  #    - The Ollama plugin will use the local models pulled above
  #    - The Mistral plugin will use MISTRAL_API_KEY for cloud access
  #
  # 4. Security:
  #    - Never commit ./assets/conf/apps/ai/ai.env to version control
  #    - Note that builtins.readFile copies its contents into the world-readable
  #      Nix store; for real secrets, encrypt ai.env with sops-nix or age
  #
  # 5. Persistent Service:
  #    To keep Ollama running after logout, enable lingering:
  #      loginctl enable-linger $(whoami)
}
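# A minimal sops-nix sketch for usage note 4, assuming the sops-nix Home
# Manager module is imported and an encrypted secrets/ai.yaml exists (both
# are assumptions, not part of this config):
#
#   sops.secrets.mistral_api_key = {
#     sopsFile = ./secrets/ai.yaml;  # hypothetical encrypted file
#   };
#
# The decrypted key is then available at runtime via
# config.sops.secrets.mistral_api_key.path, instead of being copied into
# the world-readable store by builtins.readFile.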