llm: fixed upstream image for big-agi. use ollama with GPU support
commit a30d0f2e68
parent 8436e03e88
@@ -1,11 +1,14 @@
-{ config, ... }:
+{ config, pkgs, ... }:
 
 let
   domain = "pino.giugl.io";
   backendPort = 3000;
   frontendPort = 3002;
+  llama-cpp = pkgs.unstablePkgs.llama-cpp.override { cudaSupport = true; };
+  ollama = pkgs.unstablePkgs.ollama.override { inherit llama-cpp; };
 in
 {
+  environment.systemPackages = [ ollama ];
   architect.vhost.${domain} = {
     dnsInterfaces = [ "tailscale" ];
 
@@ -20,7 +23,7 @@ in
   virtualisation.oci-containers = {
     containers = {
       big-agi = {
-        image = "ghcr.io/enricoros/big-agi:main";
+        image = "ghcr.io/enricoros/big-agi:latest";
         autoStart = true;
 
         ports = [
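
Note: pkgs.unstablePkgs is not a stock nixpkgs attribute, so this config presumably defines it in an overlay elsewhere in the repo. A minimal sketch of what such an overlay could look like, assuming a flake input named nixpkgs-unstable; the input name and the allowUnfree setting are assumptions, not part of this commit:

# Hypothetical overlay exposing a nixpkgs-unstable package set as
# pkgs.unstablePkgs, which the llama-cpp/ollama overrides above rely on.
# The flake input name nixpkgs-unstable is an assumption.
{ inputs, ... }:
{
  nixpkgs.overlays = [
    (final: prev: {
      unstablePkgs = import inputs.nixpkgs-unstable {
        inherit (prev) system;
        # CUDA-enabled builds pull in unfree NVIDIA components.
        config.allowUnfree = true;
      };
    })
  ];
}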
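
Since the commit only puts the GPU-enabled ollama binary on PATH via environment.systemPackages, serving models still means running "ollama serve" by hand. A sketch of wiring the same derivation into a systemd unit instead; the unit name, state paths, and OLLAMA_MODELS location are illustrative assumptions:

# Hypothetical systemd unit for the CUDA-enabled ollama bound in the
# let-block above; all names and paths here are illustrative.
systemd.services.ollama = {
  description = "ollama LLM server (CUDA build)";
  wantedBy = [ "multi-user.target" ];
  after = [ "network-online.target" ];
  environment.OLLAMA_MODELS = "/var/lib/ollama/models";
  serviceConfig = {
    ExecStart = "${ollama}/bin/ollama serve";
    DynamicUser = true;
    StateDirectory = "ollama";
    # GPU access under DynamicUser may need extra device permissions,
    # e.g. SupplementaryGroups or DeviceAllow for /dev/nvidia*.
  };
};

The big-agi container can then be pointed at the local ollama API (port 11434 by default) from its model settings.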