# pino.giugl.io: big-AGI web UI in a container behind the architect reverse
# proxy, with a CUDA-enabled ollama for local model inference.
{ config, pkgs, ... }:

let
  domain = "pino.giugl.io";
  backendPort = 3000;
  frontendPort = 3002;
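
  # llama-cpp from the unstable package set (overlaid elsewhere in this
  # configuration), rebuilt with CUDA support; ollama is then built against
  # it so inference runs on the GPU.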
  llama-cpp = pkgs.unstablePkgs.llama-cpp.override { cudaSupport = true; };
  ollama = pkgs.unstablePkgs.ollama.override { inherit llama-cpp; };
in
{
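  # Make the GPU-enabled ollama CLI available system-wide.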
  environment.systemPackages = [ ollama ];
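
  # Serve the UI at ${domain} via the custom architect vhost module; DNS for
  # the name is published on the tailscale interface.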
  architect.vhost.${domain} = {
    dnsInterfaces = [ "tailscale" ];

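    # Proxy the whole site to the container; only LAN and tailscale
    # clients are allowed.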
    locations."/" = {
      host = "172.17.0.1";
      port = frontendPort;
      allowLan = true;
      allow = [ config.architect.networks."tailscale".net ];
    };
  };

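  # big-AGI itself runs as an OCI container managed by the
  # virtualisation.oci-containers module.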
  virtualisation.oci-containers = {
    containers = {
      big-agi = {
        image = "ghcr.io/enricoros/big-agi:latest";
        autoStart = true;

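        # Publish the container's port on the Docker bridge address only, so
        # the UI is reachable by the reverse proxy but not exposed publicly.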
        ports = [
          "172.17.0.1:${toString frontendPort}:${toString backendPort}"
        ];
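        # Runtime configuration (e.g. API keys) is kept outside the
        # world-readable Nix store.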
        environmentFiles = [
          "/var/lib/llm/big-agi.env"
        ];
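        # Always re-pull the :latest tag on start to pick up upstream updates.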
        extraOptions = [
          "--pull=always"
        ];
      };
    };
  };
}