Totally new ai.nix, integrating Mistral with Zed

This commit is contained in:
2026-02-28 10:29:56 +01:00
parent 33618c1b07
commit 6dbfc8be6c
44 changed files with 2855 additions and 444 deletions
+138
View File
@@ -0,0 +1,138 @@
{ config, lib, pkgs, ... }: # `lib` added: it is required by `lib.mkForce` below

let
  # Single point of truth for the Ollama package so the GPU backend can be
  # swapped in one place.
  # NOTE(review): nixpkgs has no `ollama-vulkan` attribute — the available
  # variants are `ollama` (CPU), `ollama-rocm` (AMD) and `ollama-cuda`
  # (NVIDIA). Confirm against your nixpkgs revision.
  ollamaPackage = pkgs.ollama;
in
{
  # Install Zed and Ollama.
  # NOTE(review): the editor is packaged as `zed-editor` in nixpkgs (plain
  # `zed` is a different package) — confirm against your nixpkgs revision.
  home.packages = [
    ollamaPackage
    pkgs.zed-editor
  ];

  # Environment variables for Zed and the Ollama CLI.
  home.sessionVariables = {
    OLLAMA_HOST = "http://127.0.0.1:11434";
    # SECURITY: never commit a real API key to the repository (the previous
    # revision hard-coded one here — revoke that key). Provide
    # MISTRAL_API_KEY via a secrets manager (sops-nix / agenix) or a file
    # outside version control; Zed reads it from the login environment.
  };

  # Run Ollama as a user service that starts with the session.
  # (The home-manager option path is `services.ollama`, not
  # `home.services.ollama`, and the module has no `onStart` hook.)
  services.ollama = {
    enable = true;
    package = ollamaPackage;
  };

  # Pull the default models with a oneshot user unit ordered after the
  # server, instead of racing `ollama serve` by hand with a `sleep`.
  systemd.user.services.ollama-pull-models = {
    Unit = {
      Description = "Pull default Ollama models";
      After = [ "ollama.service" ];
    };
    Service = {
      Type = "oneshot";
      ExecStart = pkgs.writeShellScript "ollama-pull-models" ''
        ${ollamaPackage}/bin/ollama pull codellama:70b  # best for coding
        ${ollamaPackage}/bin/ollama pull mixtral:8x7b   # best for chat
        # To pull additional models, uncomment or add lines below:
        # ${ollamaPackage}/bin/ollama pull llama3:8b          # general-purpose
        # ${ollamaPackage}/bin/ollama pull qwen2.5-coder:7b   # multilingual coding
        # ${ollamaPackage}/bin/ollama pull qwen2.5-coder:32b  # larger coding model
        # ${ollamaPackage}/bin/ollama pull starcoder2:15b     # alternative for code
      '';
    };
    Install.WantedBy = [ "default.target" ];
  };

  # Configure Zed to use the local Ollama server.
  # The file must be *valid JSON*: the previous revision embedded `#`
  # comments and shell-style `$VAR` references, which Zed neither parses
  # nor expands. `builtins.toJSON` guarantees well-formed output; the
  # Mistral key is taken from the environment, not from settings.
  home.file.".config/zed/settings.json".text = lib.mkForce (builtins.toJSON {
    language_models = {
      ollama = {
        api_url = "http://127.0.0.1:11434"; # local Ollama instance
      };
    };
  });

  # --- Notes ---
  # 1. Pulling Additional Models:
  #    To pull more models later, run:
  #      ollama pull <model-name>
  #    Example: ollama pull llama3:8b
  # 2. Switching GPU Backends:
  #    - For NVIDIA: set `ollamaPackage = pkgs.ollama-cuda`
  #    - For AMD:    set `ollamaPackage = pkgs.ollama-rocm` and ensure ROCm is installed
  # 3. Zed Setup:
  #    - Zed's built-in Ollama provider uses the models pulled above
  #    - Mistral cloud access reads MISTRAL_API_KEY from the environment
  # 4. Resource Management:
  #    Ollama runs as a user service and stops when you log out.
  #    Manage it with `systemctl --user status ollama`.
}
{ config, lib, pkgs, ... }:
let
  # Local Ollama server shared by every model entry below.
  ollamaEndpoint = "http://localhost:11434";

  # Build one Continue model entry backed by the local Ollama server.
  mkOllamaModel = title: model: {
    inherit title model;
    provider = "ollama";
    apiBase = ollamaEndpoint;
  };

  # Continue nowadays prefers config.yaml; config.json still works but is
  # marked deprecated in the docs. We deliberately write config.json here.
  continueSettings = {
    models = [
      (mkOllamaModel "Qwen2.5-Coder 7B" "qwen2.5-coder:7b")
      (mkOllamaModel "Qwen2.5-Coder 32B" "qwen2.5-coder:32b")
      (mkOllamaModel "StarCoder2 15B" "starcoder2:15b")
    ];
    # Tab-autocomplete model (adjust to taste / available VRAM).
    tabAutocompleteModel = mkOllamaModel "Qwen2.5-Coder 7B" "qwen2.5-coder:7b";
  };
in
{
  programs.zed-editor = {
    enable = true;

    # Zed extensions (language support, LSP, etc.). "Continue" does not
    # (yet) exist as a Zed extension; this is the official home-manager
    # interface for Zed extensions.
    extensions = [ "nix" "toml" "rust" "org-mode" ];

    # Zed AI: Ollama as provider. Zed can auto-discover the models you
    # have pulled with Ollama.
    userSettings.language_models.ollama = {
      api_url = "http://localhost:11434";
      auto_discover = true;
      # Optional: a larger context for all Ollama models
      # (Zed forwards this to Ollama as `num_ctx`).
      context_window = 8192;
    };
  };

  # Write the Continue config.json (for Continue in VS Code / JetBrains).
  # Path: ~/.config/continue/config.json
  xdg.configFile."continue/config.json".text = builtins.toJSON continueSettings;
}
+9
View File
@@ -0,0 +1,9 @@
{ config, pkgs, lib, ... }:
let
  # Zen Browser desktop entry, used as the handler for all web MIME types.
  zen = [ "app.zen_browser.zen.desktop" ];
in
{
  # Make Zen Browser the default application for web links and HTML files.
  xdg.mimeApps = {
    enable = true;
    defaultApplications = {
      "x-scheme-handler/http" = zen;
      "x-scheme-handler/https" = zen;
      "text/html" = zen;
    };
  };
}
+22
View File
@@ -0,0 +1,22 @@
{ pkgs, ... }:
{
  # GTK theming: Catppuccin grey/dark/compact variant with Papirus icons.
  gtk = {
    enable = true;
    # NOTE(review): `gtk.colorScheme` is not a standard home-manager option —
    # confirm an external module in this configuration declares it.
    colorScheme = "dark";
    iconTheme.name = "Papirus-Dark";
    theme = {
      name = "Catppuccin-GTK-Grey-Dark-Compact";
      package = pkgs.magnetic-catppuccin-gtk.override {
        accent = [ "grey" ];
        shade = "dark";
        size = "compact";
        tweaks = [ "black" ];
      };
    };
  };

  # Catppuccin module: Mocha flavor, blue accent, themed GTK icons and cursors.
  catppuccin = {
    enable = true;
    flavor = "mocha";
    accent = "blue";
    gtk.icon.enable = true;
    cursors.enable = true;
  };
}