Had to redo headers of ai.nix

This commit is contained in:
2026-02-28 10:33:04 +01:00
parent 2a6c62e1ca
commit 4d2b087d16
3 changed files with 312 additions and 345 deletions
@@ -1655,21 +1655,18 @@ It ensures the Ollama CLI is available system-wide for interacting with local mo
It automatically pulls and prepares selected models at system activation (CodeLlama for coding and Mixtral for chat by default; others such as Qwen2.5-Coder and StarCoder2 can be enabled in the pull list).
#+begin_src nix :tangle home/apps/ai.nix :noweb tangle :mkdirp yes
-{ config, pkgs, ... }:
+{ config, pkgs, lib, flakeRoot, ... }:
{
# Install ZED and Ollama (Vulkan for CPU/AMD, use `ollama` for NVIDIA or `ollama-rocm` for AMD ROCm)
home.packages = [
pkgs.ollama-vulkan # For Vulkan (CPU/AMD). For NVIDIA: pkgs.ollama. For AMD ROCm: pkgs.ollama-rocm
pkgs.zed
];
# Environment variables for ZED and Ollama
home.sessionVariables = {
OLLAMA_HOST = "http://127.0.0.1:11434";
MISTRAL_API_KEY = "your-mistral-api-key"; # Replace with your actual Mistral API key (never commit a real key; see the sketch below)
};
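# Sketch (assumption: you use bash and keep the key in a file readable only by you):
# load the key at shell startup instead of baking it into the world-readable Nix store.
# programs.bash.initExtra = ''
#   export MISTRAL_API_KEY="$(cat ~/.config/secrets/mistral_api_key)"
# '';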
# Configure Ollama as a user service (starts with login)
services.ollama = {
enable = true;
@@ -1678,11 +1675,9 @@ It automatically pulls and prepares selected coding models (e.g., Qwen2.5-Coder
# Start Ollama server
${pkgs.ollama-vulkan}/bin/ollama serve > /dev/null 2>&1 &
sleep 5 # Wait for server to start
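# Sketch (assumption: /api/tags is Ollama's model-list endpoint and curl is available):
# poll until the API answers instead of relying on a fixed sleep —
# until ${pkgs.curl}/bin/curl -sf http://127.0.0.1:11434/api/tags > /dev/null; do sleep 1; done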
# Pull coding and chat models at startup
${pkgs.ollama-vulkan}/bin/ollama pull codellama:70b # Default coding model (large; needs substantial RAM/VRAM)
${pkgs.ollama-vulkan}/bin/ollama pull mixtral:8x7b # Default chat model
# To pull additional models, uncomment or add lines below:
# ${pkgs.ollama-vulkan}/bin/ollama pull llama3:8b # General-purpose
# ${pkgs.ollama-vulkan}/bin/ollama pull qwen2.5-coder:7b # Multilingual coding
@@ -1690,7 +1685,6 @@ It automatically pulls and prepares selected coding models (e.g., Qwen2.5-Coder
# ${pkgs.ollama-vulkan}/bin/ollama pull starcoder2:15b # Alternative for code
'';
};
# Configure ZED to use Ollama and Mistral API
home.file.".config/zed/settings.json".text = lib.mkForce ''
{
@@ -1705,26 +1699,21 @@ It automatically pulls and prepares selected coding models (e.g., Qwen2.5-Coder
// Add other ZED plugin configurations here if needed (Zed's settings.json accepts JSONC-style comments)
}
'';
# --- Notes ---
# 1. Pulling Additional Models:
# To pull more models later, run:
# ollama pull <model-name>
# Example: ollama pull llama3:8b
# 2. Switching GPU Backends:
# - For NVIDIA: Replace `ollama-vulkan` with `ollama` (uses CUDA)
# - For AMD: Use `ollama-rocm` and ensure ROCm is installed
# - See sketch A after these notes for a one-line way to switch backends
# 3. ZED Plugin Setup:
# - Install the Ollama and Mistral plugins in ZED via the plugin marketplace
# - The Ollama plugin will use the models pulled above
# - The Mistral plugin will use the MISTRAL_API_KEY for cloud access
# 4. Custom Prompts:
# To add custom prompts for Ollama, create a prompts.json file or
# configure prompts directly in the ZED Ollama plugin settings
# 5. Resource Management:
# Ollama already runs as a systemd user service and stops when you log out
# To keep it running after logout, enable lingering with `loginctl enable-linger $USER`
# (see sketch B after these notes)
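# --- Sketches (assumptions, untested; adapt before enabling) ---
# A. One-line backend switching (note 2): bind the package once and reuse it,
# so moving between Vulkan/CUDA/ROCm does not require editing every reference.
# Assumes these attribute names exist in your nixpkgs revision:
#
# let
#   ollamaPkg = pkgs.ollama-vulkan; # or pkgs.ollama (CUDA) / pkgs.ollama-rocm
# in {
#   home.packages = [ ollamaPkg pkgs.zed ];
#   # ...and reference "${ollamaPkg}/bin/ollama" in the service script above
# }
#
# B. Persistence across logouts (note 5): a plain systemd user unit plus
# lingering keeps the server alive. The unit name and description here are
# illustrative, not part of this config:
#
# systemd.user.services.ollama-server = {
#   Unit.Description = "Ollama model server";
#   Service.ExecStart = "${pkgs.ollama-vulkan}/bin/ollama serve";
#   Install.WantedBy = [ "default.target" ];
# };
#
# Then run once: loginctl enable-linger $USER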