rebuilding nix files
+334 -362 (file diff suppressed because it is too large)

+28 -56
@@ -1647,8 +1,6 @@ It automatically pulls and prepares selected coding models (e.g., Qwen2.5-Coder
 
 let
   # Read environment variables (OLLAMA_HOST and MISTRAL_API_KEY) from a local file.
-  # This keeps sensitive values out of the Nix store and version control.
-
   AiRepoEnv = flakeRoot + "/assets/conf/apps/ai/ai.env";
   AiRepoConf = flakeRoot + "/assets/conf/apps/ai/ai.conf";
   envVars = lib.genAttrs (builtins.splitStrings "\n" (builtins.readFile (toString AiRepoEnv)))
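A note on the `envVars` line kept above: `builtins.splitStrings` is not a Nix builtin (the nixpkgs function is `lib.splitString`), and `lib.genAttrs` maps a list of names through a function rather than parsing `KEY=VALUE` pairs. A minimal sketch of one way to load such a file, assuming plain `KEY=VALUE` lines with no quoting or comment lines, with `lib` and `AiRepoEnv` in scope from the surrounding module; `parseEnvFile` is a hypothetical helper, not part of nixpkgs:

#+begin_src nix
let
  # Hypothetical helper: turn "KEY=VALUE" lines into { KEY = "VALUE"; ... }.
  # Assumption: one assignment per line, no quoting, no comment lines.
  parseEnvFile = path:
    let
      rawLines = lib.splitString "\n" (builtins.readFile path);
      # Keep only lines that actually contain an assignment.
      assignments = builtins.filter (l: lib.hasInfix "=" l) rawLines;
      toPair = line:
        let parts = lib.splitString "=" line; in {
          name = builtins.head parts;
          # Re-join the tail so values containing '=' survive intact.
          value = lib.concatStringsSep "=" (builtins.tail parts);
        };
    in builtins.listToAttrs (map toPair assignments);

  envVars = parseEnvFile AiRepoEnv;
in
envVars
#+end_src

Note also that if `flakeRoot` points into a flake, the file is copied into the Nix store along with the rest of the source, so this on its own does not keep the key out of the store; the sops-nix/age route mentioned in the notes below is the safer option for MISTRAL_API_KEY.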
@@ -1656,98 +1654,72 @@ let
 in
 {
   # Install ZED editor and Ollama with Vulkan support (for CPU/AMD).
-  # For NVIDIA GPUs, replace `ollama-vulkan` with `ollama`.
-  # For AMD ROCm, use `ollama-rocm` and ensure ROCm is installed.
   home.packages = [
     pkgs.ollama-vulkan
     pkgs.zed
   ];
 
   # --- Environment Variables ---
-  # Set OLLAMA_HOST and MISTRAL_API_KEY for ZED and other user applications.
-  # Values are read from AiRepoEnv.
 
   home.sessionVariables = {
-    OLLAMA_HOST = envVars.OLLAMA_HOST or "http://127.0.0.1:11434"; # Default Ollama endpoint
-    MISTRAL_API_KEY = envVars.MISTRAL_API_KEY or ""; # Mistral API key (required for cloud models)
+    OLLAMA_HOST = envVars.OLLAMA_HOST or "http://127.0.0.1:11434";
+    MISTRAL_API_KEY = envVars.MISTRAL_API_KEY or "";
   };
 
   # --- Ollama User Service ---
-  # Configure Ollama to run as a user service (starts on login).
-  # This avoids root privileges and allows per-user model management.
   systemd.user.services.ollama = {
     description = "Ollama service for local AI models";
-    wantedBy = [ "default.target" ]; # Start with user session
-    after = [ "network.target" ]; # Ensure network is ready
+    wantedBy = [ "default.target" ];
+    after = [ "network.target" ];
 
     serviceConfig = {
-      Type = "forking"; # Run as a background process
+      Type = "forking";
 
-      # Start Ollama server
       ExecStart = ''
         ${pkgs.ollama-vulkan}/bin/ollama serve
       '';
 
-      # Pull default models after server starts
       ExecStartPost = ''
-        sleep 5 # Wait for server to initialize
+        sleep 5
 
-        # Pull coding and chat models at startup
-        ${pkgs.ollama-vulkan}/bin/ollama pull codellama:70b # Best for coding tasks
-        ${pkgs.ollama-vulkan}/bin/ollama pull mixtral:8x7b # Best for chat/conversation
+        ${pkgs.ollama-vulkan}/bin/ollama pull codellama:70b
+        ${pkgs.ollama-vulkan}/bin/ollama pull mixtral:8x7b
 
-        # Uncomment to pull additional models:
-        # ${pkgs.ollama-vulkan}/bin/ollama pull llama3:8b # General-purpose model
-        # ${pkgs.ollama-vulkan}/bin/ollama pull qwen2.5-coder:7b # Multilingual coding
-        # ${pkgs.ollama-vulkan}/bin/ollama pull starcoder2:15b # Alternative for code
       '';
-      Restart = "on-failure"; # Restart if Ollama crashes
+      Restart = "on-failure";
     };
   };
 
   # --- ZED Configuration ---
-  # Generate ZED's settings.json with substituted API keys and endpoints.
-  # Base config is read from ./assets/conf/apps/ai.conf, with variables injected.
   home.file.".config/zed/settings.json".text = lib.mkForce (
-    builtins.readFile (toString AiRepoConf)
-    // ''
-    {
-      "mistral": {
-        "apiKey": "${envVars.MISTRAL_API_KEY}", # Inject Mistral API key
-        "defaultModel": "mistral-pro" # Default Mistral model
-      },
-      "ollama": {
-        "endpoint": "${envVars.OLLAMA_HOST}", # Inject Ollama endpoint
-        "defaultModel": "codellama:70b" # Default Ollama model for coding
-      }
+    builtins.toJSON (
+      (builtins.fromJSON (builtins.readFile (toString AiRepoConf)))
+      // {
+        mistral = {
+          apiKey = envVars.MISTRAL_API_KEY or "";
+          defaultModel = "mistral-pro";
+        };
+        ollama = {
+          endpoint = envVars.OLLAMA_HOST or "http://127.0.0.1:11434";
+          defaultModel = "codellama:70b";
+        };
       }
-    ''
+    )
   );
 
   # --- Usage Notes ---
   # 1. Pulling Additional Models:
-  # To add more models later, run:
-  # ollama pull <model-name>
-  # Example: ollama pull llama3:8b
+  # ollama pull <model-name>
 
   # 2. Switching GPU Backends:
-  # - For NVIDIA: Replace all `ollama-vulkan` with `ollama` (uses CUDA)
-  # - For AMD: Use `ollama-rocm` and ensure ROCm is installed
+  # - NVIDIA: Replace `ollama-vulkan` with `ollama`
+  # - AMD: Use `ollama-rocm` and ensure ROCm is installed
 
   # 3. ZED Plugin Setup:
-  # - Install the Ollama and Mistral plugins in ZED via the plugin marketplace
-  # - The Ollama plugin will use the local models pulled above
-  # - The Mistral plugin will use the MISTRAL_API_KEY for cloud access
+  # - Install Ollama and Mistral plugins in ZED
 
   # 4. Security:
-  # - Never commit ./assets/conf/apps/ai.env to version control
-  # - For extra security, encrypt ai.env using sops-nix or age
+  # - Never commit ai.env to version control
 
   # 5. Persistent Service:
-  # To keep Ollama running after logout, enable lingering:
-  # loginctl enable-linger $(whoami)
+  # loginctl enable-linger $(whoami)
 }
 
 
 #+end_src
 
 ** NCSway
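Two caveats on the service definition above, with a hedged sketch. First, `ollama serve` stays in the foreground rather than daemonizing, so `Type = "forking"` would leave systemd waiting for a fork that never comes; `simple` or `exec` matches the actual behavior. Second, if this module is evaluated by Home Manager rather than NixOS, `systemd.user.services.<name>` expects capitalized `Unit`/`Service`/`Install` sections instead of `description`/`wantedBy`/`serviceConfig`, and `ExecStartPost` takes a single command line rather than a shell snippet, so the multi-command post-start step needs to be wrapped in a script. A minimal Home Manager-style sketch under those assumptions, keeping the repo's `pkgs.ollama-vulkan` attribute as-is:

#+begin_src nix
systemd.user.services.ollama = {
  Unit = {
    Description = "Ollama service for local AI models";
    After = [ "network.target" ];
  };
  Service = {
    Type = "exec"; # `ollama serve` runs in the foreground; it never forks
    ExecStart = "${pkgs.ollama-vulkan}/bin/ollama serve";
    # Wrap the post-start commands in a script; ExecStartPost is a single
    # command line, not a shell.
    ExecStartPost = "${pkgs.writeShellScript "ollama-pull-models" ''
      sleep 5
      ${pkgs.ollama-vulkan}/bin/ollama pull codellama:70b
      ${pkgs.ollama-vulkan}/bin/ollama pull mixtral:8x7b
    ''}";
    Restart = "on-failure";
  };
  Install.WantedBy = [ "default.target" ];
};
#+end_src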
@@ -2,8 +2,6 @@
 
 let
   # Read environment variables (OLLAMA_HOST and MISTRAL_API_KEY) from a local file.
-  # This keeps sensitive values out of the Nix store and version control.
-
   AiRepoEnv = flakeRoot + "/assets/conf/apps/ai/ai.env";
   AiRepoConf = flakeRoot + "/assets/conf/apps/ai/ai.conf";
   envVars = lib.genAttrs (builtins.splitStrings "\n" (builtins.readFile (toString AiRepoEnv)))
@@ -11,94 +9,67 @@ let
 in
 {
   # Install ZED editor and Ollama with Vulkan support (for CPU/AMD).
-  # For NVIDIA GPUs, replace `ollama-vulkan` with `ollama`.
-  # For AMD ROCm, use `ollama-rocm` and ensure ROCm is installed.
   home.packages = [
     pkgs.ollama-vulkan
     pkgs.zed
   ];
 
   # --- Environment Variables ---
-  # Set OLLAMA_HOST and MISTRAL_API_KEY for ZED and other user applications.
-  # Values are read from AiRepoEnv.
 
   home.sessionVariables = {
-    OLLAMA_HOST = envVars.OLLAMA_HOST or "http://127.0.0.1:11434"; # Default Ollama endpoint
-    MISTRAL_API_KEY = envVars.MISTRAL_API_KEY or ""; # Mistral API key (required for cloud models)
+    OLLAMA_HOST = envVars.OLLAMA_HOST or "http://127.0.0.1:11434";
+    MISTRAL_API_KEY = envVars.MISTRAL_API_KEY or "";
   };
 
   # --- Ollama User Service ---
-  # Configure Ollama to run as a user service (starts on login).
-  # This avoids root privileges and allows per-user model management.
   systemd.user.services.ollama = {
     description = "Ollama service for local AI models";
-    wantedBy = [ "default.target" ]; # Start with user session
-    after = [ "network.target" ]; # Ensure network is ready
+    wantedBy = [ "default.target" ];
+    after = [ "network.target" ];
 
     serviceConfig = {
-      Type = "forking"; # Run as a background process
+      Type = "forking";
 
-      # Start Ollama server
       ExecStart = ''
         ${pkgs.ollama-vulkan}/bin/ollama serve
       '';
 
-      # Pull default models after server starts
      ExecStartPost = ''
-        sleep 5 # Wait for server to initialize
+        sleep 5
 
-        # Pull coding and chat models at startup
-        ${pkgs.ollama-vulkan}/bin/ollama pull codellama:70b # Best for coding tasks
-        ${pkgs.ollama-vulkan}/bin/ollama pull mixtral:8x7b # Best for chat/conversation
+        ${pkgs.ollama-vulkan}/bin/ollama pull codellama:70b
+        ${pkgs.ollama-vulkan}/bin/ollama pull mixtral:8x7b
 
-        # Uncomment to pull additional models:
-        # ${pkgs.ollama-vulkan}/bin/ollama pull llama3:8b # General-purpose model
-        # ${pkgs.ollama-vulkan}/bin/ollama pull qwen2.5-coder:7b # Multilingual coding
-        # ${pkgs.ollama-vulkan}/bin/ollama pull starcoder2:15b # Alternative for code
       '';
-      Restart = "on-failure"; # Restart if Ollama crashes
+      Restart = "on-failure";
     };
   };
 
   # --- ZED Configuration ---
-  # Generate ZED's settings.json with substituted API keys and endpoints.
-  # Base config is read from ./assets/conf/apps/ai.conf, with variables injected.
   home.file.".config/zed/settings.json".text = lib.mkForce (
-    builtins.readFile (toString AiRepoConf)
-    // ''
-    {
-      "mistral": {
-        "apiKey": "${envVars.MISTRAL_API_KEY}", # Inject Mistral API key
-        "defaultModel": "mistral-pro" # Default Mistral model
-      },
-      "ollama": {
-        "endpoint": "${envVars.OLLAMA_HOST}", # Inject Ollama endpoint
-        "defaultModel": "codellama:70b" # Default Ollama model for coding
-      }
+    builtins.toJSON (
+      (builtins.fromJSON (builtins.readFile (toString AiRepoConf)))
+      // {
+        mistral = {
+          apiKey = envVars.MISTRAL_API_KEY or "";
+          defaultModel = "mistral-pro";
+        };
+        ollama = {
+          endpoint = envVars.OLLAMA_HOST or "http://127.0.0.1:11434";
+          defaultModel = "codellama:70b";
+        };
       }
-    ''
+    )
   );
 
   # --- Usage Notes ---
   # 1. Pulling Additional Models:
-  # To add more models later, run:
-  # ollama pull <model-name>
-  # Example: ollama pull llama3:8b
+  # ollama pull <model-name>
 
   # 2. Switching GPU Backends:
-  # - For NVIDIA: Replace all `ollama-vulkan` with `ollama` (uses CUDA)
-  # - For AMD: Use `ollama-rocm` and ensure ROCm is installed
+  # - NVIDIA: Replace `ollama-vulkan` with `ollama`
+  # - AMD: Use `ollama-rocm` and ensure ROCm is installed
 
   # 3. ZED Plugin Setup:
-  # - Install the Ollama and Mistral plugins in ZED via the plugin marketplace
-  # - The Ollama plugin will use the local models pulled above
-  # - The Mistral plugin will use the MISTRAL_API_KEY for cloud access
+  # - Install Ollama and Mistral plugins in ZED
 
   # 4. Security:
-  # - Never commit ./assets/conf/apps/ai.env to version control
-  # - For extra security, encrypt ai.env using sops-nix or age
+  # - Never commit ai.env to version control
 
   # 5. Persistent Service:
-  # To keep Ollama running after logout, enable lingering:
-  # loginctl enable-linger $(whoami)
+  # loginctl enable-linger $(whoami)
 }
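A final note on the settings merge in both files: the rewrite from string concatenation to `fromJSON`/`toJSON` is sound, but Nix's `//` operator replaces top-level attributes wholesale, so a `mistral` or `ollama` section already present in ai.conf would be clobbered rather than extended. `lib.recursiveUpdate` merges attribute sets key by key instead; a sketch under the same names the diff already uses:

#+begin_src nix
home.file.".config/zed/settings.json".text = lib.mkForce (
  builtins.toJSON (
    # recursiveUpdate merges nested attrsets; the right operand wins
    # only on the leaves it actually defines.
    lib.recursiveUpdate
      (builtins.fromJSON (builtins.readFile (toString AiRepoConf)))
      {
        mistral = {
          apiKey = envVars.MISTRAL_API_KEY or "";
          defaultModel = "mistral-pro";
        };
        ollama = {
          endpoint = envVars.OLLAMA_HOST or "http://127.0.0.1:11434";
          defaultModel = "codellama:70b";
        };
      }
  )
);
#+end_src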