Had to redo headers of ai.nix
@@ -1,18 +1,15 @@
-{ config, pkgs, ... }:
+{ config, pkgs, lib, flakeRoot, ... }:

{
  # Install ZED and Ollama (Vulkan for CPU/AMD, use `ollama` for NVIDIA or `ollama-rocm` for AMD ROCm)
  home.packages = [
    pkgs.ollama-vulkan # For Vulkan (CPU/AMD). For NVIDIA: pkgs.ollama. For AMD ROCm: pkgs.ollama-rocm
    pkgs.zed
  ];

  # Environment variables for ZED and Ollama
  home.sessionVariables = {
    OLLAMA_HOST = "http://127.0.0.1:11434";
    MISTRAL_API_KEY = "CWo91GHwIClzLj6bCLQ69IioSi54PpTZ"; # Replace with your actual Mistral API key
  };
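
  # A hedged alternative to hardcoding the key above (assumptions: the key
  # is stored at ~/.secrets/mistral_api_key, and the variable is consumed
  # via a login shell that expands the command substitution when sourcing
  # the generated session-variables script):
  #
  #   MISTRAL_API_KEY = "$(cat ~/.secrets/mistral_api_key)";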

  # Configure Ollama as a user service (starts at login)
  home.services.ollama = {
    enable = true;
@@ -21,11 +18,9 @@
    # Start Ollama server
    ${pkgs.ollama-vulkan}/bin/ollama serve > /dev/null 2>&1 &
    sleep 5 # Wait for server to start

    # Pull coding and chat models at startup
    ${pkgs.ollama-vulkan}/bin/ollama pull codellama:70b # Best for coding
    ${pkgs.ollama-vulkan}/bin/ollama pull mixtral:8x7b # Best for chat
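
    # Note: `ollama pull` only downloads layers that are missing locally, so
    # these startup pulls finish quickly once the models are present (the
    # first pull of codellama:70b is roughly 40 GB).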

    # To pull additional models, uncomment or add lines below:
    # ${pkgs.ollama-vulkan}/bin/ollama pull llama3:8b # General-purpose
    # ${pkgs.ollama-vulkan}/bin/ollama pull qwen2.5-coder:7b # Multilingual coding
@@ -33,7 +28,6 @@
    # ${pkgs.ollama-vulkan}/bin/ollama pull starcoder2:15b # Alternative for code
    '';
  };

  # Configure ZED to use Ollama and Mistral API
  home.file.".config/zed/settings.json".text = lib.mkForce ''
    {
@@ -48,26 +42,21 @@
      // Add other ZED plugin configurations here if needed
    }
  '';

  # --- Notes ---
  # 1. Pulling Additional Models:
  #    To pull more models later, run:
  #      ollama pull <model-name>
  #    Example: ollama pull llama3:8b
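  #    To list installed models or reclaim disk space, the standard ollama
  #    CLI also provides:
  #      ollama list
  #      ollama rm <model-name>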

  # 2. Switching GPU Backends:
  #    - For NVIDIA: Replace `ollama-vulkan` with `ollama` (uses CUDA)
  #    - For AMD: Use `ollama-rocm` and ensure ROCm is installed
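  #
  #    A minimal sketch of centralizing that choice (assumption: the
  #    pkgs.ollama and pkgs.ollama-rocm attributes exist in your nixpkgs;
  #    this file itself only references ollama-vulkan):
  #
  #      let
  #        ollamaPkg = pkgs.ollama-vulkan; # or pkgs.ollama / pkgs.ollama-rocm
  #      in {
  #        home.packages = [ ollamaPkg pkgs.zed ];
  #        # ...and reference ${ollamaPkg}/bin/ollama in the startup script
  #      }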

  # 3. ZED Plugin Setup:
  #    - Install the Ollama and Mistral plugins in ZED via the plugin marketplace
  #    - The Ollama plugin will use the models pulled above
  #    - The Mistral plugin will use the MISTRAL_API_KEY for cloud access

  # 4. Custom Prompts:
  #    To add custom prompts for Ollama, create a prompts.json file or
  #    configure prompts directly in the ZED Ollama plugin settings

  # 5. Resource Management:
  #    Ollama runs as a user service and stops when you log out.
  #    To run Ollama persistently, consider a systemd user service managed
  #    with `systemctl --user`; a sketch follows below.
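  #
  #    A hedged sketch of such a unit via home-manager's systemd.user.services
  #    options (assumptions: the ollama-vulkan package used above; the
  #    description and restart policy are illustrative):
  #
  #      systemd.user.services.ollama = {
  #        Unit.Description = "Ollama LLM server";
  #        Service = {
  #          ExecStart = "${pkgs.ollama-vulkan}/bin/ollama serve";
  #          Restart = "on-failure";
  #        };
  #        Install.WantedBy = [ "default.target" ];
  #      };
  #
  #    Pair this with `loginctl enable-linger $USER` if the service should
  #    keep running after logout.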