llm: fixed upstream image for big-agi. use ollama with GPU support
parent 8436e03e88
commit a30d0f2e68
@@ -1,11 +1,14 @@
-{ config, ... }:
+{ config, pkgs, ... }:
 
 let
   domain = "pino.giugl.io";
   backendPort = 3000;
   frontendPort = 3002;
+  llama-cpp = pkgs.unstablePkgs.llama-cpp.override { cudaSupport = true; };
+  ollama = pkgs.unstablePkgs.ollama.override { inherit llama-cpp; };
 in
 {
+  environment.systemPackages = [ ollama ];
   architect.vhost.${domain} = {
     dnsInterfaces = [ "tailscale" ];
 
@@ -20,7 +23,7 @@ in
   virtualisation.oci-containers = {
     containers = {
       big-agi = {
-        image = "ghcr.io/enricoros/big-agi:main";
+        image = "ghcr.io/enricoros/big-agi:latest";
         autoStart = true;
 
         ports = [
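For reference, a minimal sketch of how the same GPU-backed ollama could be run as a NixOS service instead of only being added to environment.systemPackages. This is only an illustration, not part of the commit: it assumes the upstream services.ollama module and its enable/acceleration options, which may not exist or may be named differently in the nixpkgs revision this config pins.

{ config, pkgs, ... }:

{
  # Sketch only (assumption): run ollama as a systemd service with CUDA
  # acceleration via the services.ollama module from a recent nixpkgs.
  # The acceleration option plays the same role as the cudaSupport
  # override on llama-cpp above.
  services.ollama = {
    enable = true;
    acceleration = "cuda";
  };
}

The package-only approach taken in the commit leaves starting ollama up to the user; the module variant would run it as a system service at boot.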