Adding Ollama + models + ZED
This commit is contained in:
@@ -891,6 +891,7 @@ This section describes a way of installing packages, either through nixpkgs or
|
|||||||
This module enables and configures the Ollama system service on NixOS, including optional GPU acceleration (CUDA or ROCm).
|
This module enables and configures the Ollama system service on NixOS, including optional GPU acceleration (CUDA or ROCm).
|
||||||
It ensures the Ollama CLI is available system-wide for interacting with local models.
|
It ensures the Ollama CLI is available system-wide for interacting with local models.
|
||||||
It automatically pulls and prepares selected coding models (e.g., Qwen2.5-Coder and StarCoder2) at system activation.
|
It automatically pulls and prepares selected coding models (e.g., Qwen2.5-Coder and StarCoder2) at system activation.
|
||||||
|
|
||||||
#+begin_src nix :tangle configuration/apps/ai.nix :noweb tangle :mkdirp yes
|
#+begin_src nix :tangle configuration/apps/ai.nix :noweb tangle :mkdirp yes
|
||||||
{ config, lib, pkgs, ... }:
|
{ config, lib, pkgs, ... }:
|
||||||
{
|
{
|
||||||
@@ -2029,7 +2030,7 @@ You'll notice the color values in multiple places outside this as well.
|
|||||||
This Home-Manager module installs and configures the Zed editor in a user environment.
|
This Home-Manager module installs and configures the Zed editor in a user environment.
|
||||||
It integrates Ollama as a local LLM provider within Zed’s AI settings for code assistance.
|
It integrates Ollama as a local LLM provider within Zed’s AI settings for code assistance.
|
||||||
It also generates a Continue configuration file pointing to the local Ollama instance for compatible editors.
|
It also generates a Continue configuration file pointing to the local Ollama instance for compatible editors.
|
||||||
#+begin_src nix :tangle home/apps/theme.nix :noweb tangle :mkdirp yes
|
#+begin_src nix :tangle home/apps/ai.nix :noweb tangle :mkdirp yes
|
||||||
{ config, lib, pkgs, ... }:
|
{ config, lib, pkgs, ... }:
|
||||||
let
|
let
|
||||||
# Continue gebruikt tegenwoordig bij voorkeur config.yaml; config.json bestaat nog
|
# Continue gebruikt tegenwoordig bij voorkeur config.yaml; config.json bestaat nog
|
||||||
|
|||||||
@@ -0,0 +1,25 @@
|
|||||||
|
{ config, lib, pkgs, ... }:

{
  # Run the Ollama LLM server as a systemd service.
  services.ollama = {
    enable = true;

    # GPU acceleration backend: "cuda" (NVIDIA) or "rocm" (AMD).
    # Omit this (or set it to null/false) for CPU-only inference.
    acceleration = "cuda";
    # acceleration = "rocm";

    # Models that NixOS pulls automatically (via `ollama pull ...`)
    # once the service is up.
    loadModels = [
      "qwen2.5-coder:7b"
      "qwen2.5-coder:32b"
      "starcoder2:15b"
      # Alternatives:
      # "starcoder2:7b"
      # "starcoder2:latest"
    ];
  };

  # Install the CLI system-wide (handy for `ollama run`, `ollama list`, ...).
  # Use the service's own package instead of a bare pkgs.ollama so the CLI
  # and the running server can never drift apart in version.
  environment.systemPackages = [
    config.services.ollama.package
  ];
}
|
||||||
@@ -0,0 +1,63 @@
|
|||||||
|
{ config, lib, pkgs, ... }:

let
  # Local Ollama endpoint shared by every model entry below.
  ollamaUrl = "http://localhost:11434";

  # Build one Continue model entry backed by the local Ollama server.
  mkOllamaModel = title: model: {
    inherit title model;
    provider = "ollama";
    apiBase = ollamaUrl;
  };

  # Continue nowadays prefers config.yaml; config.json still exists but is
  # "deprecated" in the docs. We deliberately write config.json here.
  continueConfigJson = builtins.toJSON {
    models = [
      (mkOllamaModel "Qwen2.5-Coder 7B" "qwen2.5-coder:7b")
      (mkOllamaModel "Qwen2.5-Coder 32B" "qwen2.5-coder:32b")
      (mkOllamaModel "StarCoder2 15B" "starcoder2:15b")
    ];

    # Tab-autocomplete model (tune to taste / available VRAM).
    tabAutocompleteModel = mkOllamaModel "Qwen2.5-Coder 7B" "qwen2.5-coder:7b";
  };
in
{
  programs.zed-editor = {
    enable = true;

    # Zed extensions (languages/LSPs/etc.). "Continue" does not (yet) exist
    # as a Zed extension; this is the official HM interface for extensions.
    extensions = [ "nix" "toml" "rust" ];

    # Zed AI: Ollama as provider. Zed can auto-discover whatever models
    # have been pulled with Ollama.
    userSettings = {
      language_models.ollama = {
        api_url = ollamaUrl;
        auto_discover = true;

        # Optional: a larger context window for all Ollama models
        # (Zed forwards this to Ollama as `num_ctx`).
        context_window = 8192;
      };
    };
  };

  # Write the Continue config.json (for Continue in VS Code / JetBrains).
  # Path: ~/.config/continue/config.json
  xdg.configFile."continue/config.json".text = continueConfigJson;
}
|
||||||
@@ -20,67 +20,3 @@
|
|||||||
catppuccin.gtk.icon.enable = true;
|
catppuccin.gtk.icon.enable = true;
|
||||||
catppuccin.cursors.enable = true;
|
catppuccin.cursors.enable = true;
|
||||||
}
|
}
|
||||||
|
|
||||||
{ config, lib, pkgs, ... }:

let
  # Continue nowadays prefers config.yaml; config.json still exists
  # but is "deprecated" in the docs. config.json is written on purpose.
  continueConfigJson = builtins.toJSON {
    models = [
      {
        title = "Qwen2.5-Coder 7B";
        provider = "ollama";
        model = "qwen2.5-coder:7b";
        apiBase = "http://localhost:11434";
      }
      {
        title = "Qwen2.5-Coder 32B";
        provider = "ollama";
        model = "qwen2.5-coder:32b";
        apiBase = "http://localhost:11434";
      }
      {
        title = "StarCoder2 15B";
        provider = "ollama";
        model = "starcoder2:15b";
        apiBase = "http://localhost:11434";
      }
    ];

    # Tab-autocomplete model (adjust to taste / available VRAM).
    tabAutocompleteModel = {
      title = "Qwen2.5-Coder 7B";
      provider = "ollama";
      model = "qwen2.5-coder:7b";
      apiBase = "http://localhost:11434";
    };
  };
in
{
  programs.zed-editor = {
    enable = true;

    # Zed extensions (language/LS/etc.). "Continue" does not (yet) exist
    # as a Zed extension. This is the official HM interface for them.
    extensions = [
      "nix"
      "toml"
      "rust"
    ];

    # Zed AI: use Ollama as the language-model provider; Zed can
    # auto-discover the models that were pulled with Ollama.
    userSettings = {
      language_models = {
        ollama = {
          api_url = "http://localhost:11434";
          auto_discover = true;

          # Optional: a larger context window for all Ollama models
          # (Zed sends this to Ollama as `num_ctx`).
          context_window = 8192;
        };
      };
    };
  };

  # Place the Continue config.json (for Continue in VS Code / JetBrains).
  # Path: ~/.config/continue/config.json
  xdg.configFile."continue/config.json".text = continueConfigJson;
}
|
|
||||||
|
|||||||
Reference in New Issue
Block a user