From a1e05b8f00416c05a86120aed9bd2a804603489a Mon Sep 17 00:00:00 2001
From: yomaq
Date: Mon, 17 Feb 2025 21:06:45 -0600
Subject: [PATCH] ollama

---
 hosts/wsl/wsl.nix                   |  17 +++--
 modules/containers/ollama-webui.nix | 104 ++++++++++++++++++++++++++++
 modules/containers/ollama.nix       |  98 ++++++++++++++++++++++++++
 modules/hosts/nvidia/nixos.nix      |   1 +
 4 files changed, 211 insertions(+), 9 deletions(-)
 create mode 100644 modules/containers/ollama-webui.nix
 create mode 100644 modules/containers/ollama.nix

diff --git a/hosts/wsl/wsl.nix b/hosts/wsl/wsl.nix
index 30516a6..10d1c7e 100644
--- a/hosts/wsl/wsl.nix
+++ b/hosts/wsl/wsl.nix
@@ -19,19 +19,14 @@
   networking.hostName = "wsl";
   system.stateVersion = "24.05";
 
+  # autostart WSL at startup with a Windows scheduled task that runs `wsl.exe dbus-launch true`
+  # based on https://guides.hakedev.com/wiki/windows/WSL/wsl-auto-start/
+
   wsl.enable = true;
   wsl.defaultUser = "admin";
   wsl.useWindowsDriver = true;
-
-
-  services.open-webui.enable = true;
-  services.ollama = {
-    enable = true;
-    acceleration = "cuda";
-    loadModels = [ "deepseek-r1:7b" ];
-  };
-
+  environment.systemPackages = [pkgs.dbus];
 
   yomaq = {
     tailscale = {
       enable = true;
@@ -47,6 +42,10 @@
       wsl = true;
     };
     docker.enable = true;
+    pods = {
+      ollama.enable = true;
+      open-webui.enable = true;
+    };
     autoUpgrade.enable = true;
     primaryUser.users = [ "admin" ];
     timezone.central = true;
diff --git a/modules/containers/ollama-webui.nix b/modules/containers/ollama-webui.nix
new file mode 100644
index 0000000..9e264d1
--- /dev/null
+++ b/modules/containers/ollama-webui.nix
@@ -0,0 +1,104 @@
+{
+  options,
+  config,
+  lib,
+  pkgs,
+  inputs,
+  ...
+}:
+let
+  ### Set container name and image
+  NAME = "open-webui";
+  IMAGE = "ghcr.io/open-webui/open-webui";
+
+  cfg = config.yomaq.pods.${NAME};
+  inherit (config.networking) hostName;
+  inherit (config.yomaq.impermanence) dontBackup;
+  inherit (config.yomaq.tailscale) tailnetName;
+in
+{
+  options.yomaq.pods.${NAME} = {
+    enable = lib.mkOption {
+      type = lib.types.bool;
+      default = false;
+      description = ''
+        enable custom ${NAME} container module
+      '';
+    };
+    volumeLocation = lib.mkOption {
+      type = lib.types.str;
+      default = "${dontBackup}/containers/${NAME}";
+      description = ''
+        path to store container volumes
+      '';
+    };
+    env = lib.mkOption {
+      type = lib.types.attrsOf lib.types.str;
+      default = {
+        "OLLAMA_BASE_URL" = "https://${hostName}-ollama.${tailnetName}.ts.net";
+      };
+      description = ''
+        env options
+      '';
+    };
+    imageVersion = lib.mkOption {
+      type = lib.types.str;
+      default = "latest";
+      description = ''
+        container image version
+      '';
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+
+    systemd.tmpfiles.rules = [ "d ${cfg.volumeLocation}/open-webui 0755 root root" ];
+
+    virtualisation.oci-containers.containers = {
+      "${NAME}" = {
+        image = "${IMAGE}:${cfg.imageVersion}";
+        autoStart = true;
+        environment = cfg.env;
+        volumes = [ "${cfg.volumeLocation}/open-webui:/app/backend/data" ];
+        extraOptions = [
+          "--pull=always"
+          "--network=container:TS${NAME}"
+        ];
+      };
+    };
+
+    yomaq.pods.tailscaled."TS${NAME}" = {
+      TSserve = {
+        "/" = "http://127.0.0.1:8080";
+      };
+      tags = [ "tag:ollama-server" "tag:ollama-access" ];
+    };
+
+    yomaq.homepage.groups.services.services = [
+      {
+        "${NAME}" = {
+          icon = "si-ollama";
+          href = "https://${hostName}-${NAME}.${tailnetName}.ts.net";
+          siteMonitor = "https://${hostName}-${NAME}.${tailnetName}.ts.net";
+        };
+      }
+    ];
+
+    yomaq.gatus.endpoints = [
+      {
+        name = "${hostName}-${NAME}";
+        group = "webapps";
url = "https://${hostName}-${NAME}.${tailnetName}.ts.net/"; + interval = "5m"; + conditions = [ "[STATUS] == 200" ]; + alerts = [ + { + type = "ntfy"; + failureThreshold = 3; + description = "healthcheck failed"; + } + ]; + } + ]; + }; +} diff --git a/modules/containers/ollama.nix b/modules/containers/ollama.nix new file mode 100644 index 0000000..bcf8f4d --- /dev/null +++ b/modules/containers/ollama.nix @@ -0,0 +1,98 @@ +{ + options, + config, + lib, + pkgs, + inputs, + ... +}: +let + ### Set container name and image + NAME = "ollama"; + IMAGE = "docker.io/ollama/ollama"; + + cfg = config.yomaq.pods.${NAME}; + inherit (config.networking) hostName; + inherit (config.yomaq.impermanence) dontBackup; + inherit (config.yomaq.tailscale) tailnetName; +in +{ + options.yomaq.pods.${NAME} = { + enable = lib.mkOption { + type = lib.types.bool; + default = false; + description = '' + enable custom ${NAME} container module + ''; + }; + volumeLocation = lib.mkOption { + type = lib.types.str; + default = "${dontBackup}/containers/${NAME}"; + description = '' + path to store container volumes + ''; + }; + imageVersion = lib.mkOption { + type = lib.types.str; + default = "latest"; + description = '' + container image version + ''; + }; + }; + + config = lib.mkIf cfg.enable { + + systemd.tmpfiles.rules = [ "d ${cfg.volumeLocation}/ollama 0755 root root" ]; + + virtualisation.oci-containers.containers = { + "${NAME}" = { + image = "${IMAGE}:${cfg.imageVersion}"; + autoStart = true; + environment = { + "OLLAMA_NUM_PARALLEL" = "1"; + }; + volumes = [ "${cfg.volumeLocation}/ollama:/root/.ollama" ]; + extraOptions = [ + "--pull=always" + "--network=container:TS${NAME}" + "--device=nvidia.com/gpu=all" + ]; + }; + }; + + yomaq.pods.tailscaled."TS${NAME}" = { + TSserve = { + "/" = "http://127.0.0.1:11434"; + }; + tags = [ "tag:ollama-server" ]; + }; + + # yomaq.homepage.groups.services.services = [ + # { + # "${NAME}" = { + # icon = "si-files"; + # href = "https://${hostName}-${NAME}.${tailnetName}.ts.net"; + # siteMonitor = "https://${hostName}-${NAME}.${tailnetName}.ts.net"; + # }; + # } + # ]; + + yomaq.gatus.endpoints = [ + { + name = "${hostName}-${NAME}"; + group = "webapps"; + url = "http://${hostName}-${NAME}.${tailnetName}.ts.net/:11434"; + interval = "5m"; + conditions = [ "[STATUS] == 200" ]; + alerts = [ + { + type = "ntfy"; + failureThreshold = 3; + description = "healthcheck failed"; + } + ]; + } + ]; + }; +} diff --git a/modules/hosts/nvidia/nixos.nix b/modules/hosts/nvidia/nixos.nix index 7b7b713..7b78af8 100644 --- a/modules/hosts/nvidia/nixos.nix +++ b/modules/hosts/nvidia/nixos.nix @@ -48,6 +48,7 @@ in mount-nvidia-executables = lib.mkIf cfg.wsl false; }; virtualisation.docker = lib.mkIf cfg.wsl { + # also must run `nvidia-ctk cdi generate --output=/etc/cdi/nvidia.yaml` daemon.settings.features.cdi = true; daemon.settings.cdi-spec-dirs = ["/etc/cdi"]; };