From 72167fd86145000b84231c317edcbb1bae027756 Mon Sep 17 00:00:00 2001
From: Giulio De Pasquale
Date: Sun, 17 Nov 2024 20:15:18 +0000
Subject: [PATCH] refactor(config.yaml): update client configuration for
 openai-compatible

- Updated `type` to `openai-compatible`
- Added `name` field for clarity
- Adjusted `api_base` URL to include `/v1`
- Simplified and updated model configurations for consistency
---
 roles/home/aichat/config.yaml | 29 +++++++++--------------------
 1 file changed, 9 insertions(+), 20 deletions(-)

diff --git a/roles/home/aichat/config.yaml b/roles/home/aichat/config.yaml
index dbf40de..fd70a29 100644
--- a/roles/home/aichat/config.yaml
+++ b/roles/home/aichat/config.yaml
@@ -1,23 +1,12 @@
 clients:
-  - type: ollama
-    api_base: https://ollama.giugl.io
+  - type: openai-compatible
+    name: ollama
+    api_base: https://ollama.giugl.io/v1
     models:
-      - name: mistral:7b-instruct-v0.3-fp16
-        max_input_tokens: 32000
-        max_output_tokens: 8192
+      - name: pino
+        max_input_tokens: 8192
+        max_output_tokens: 16000
 
-      - name: llama3:8b-instruct-fp16
-        max_input_tokens: 8192
-        max_output_tokens: 8192
-
-      - name: phi3:14b-medium-4k-instruct-q8_0
-        max_input_tokens: 128000
-        max_output_tokens: 8192
-
-      - name: pino-coder
-        max_input_tokens: 8192
-        max_output_tokens: 8192
-
-  - type: openai
-    api_key: null
-    api_base: https://api.openai.com/v1
+      - name: pino-coder
+        max_input_tokens: 8192
+        max_output_tokens: 16000
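
Note: for reference, the full roles/home/aichat/config.yaml after applying this patch should read roughly as below. This is reconstructed from the hunk above (context plus added lines), so treat it as a sketch rather than the canonical file; the inline comments are explanatory and are not part of the patch.

clients:
  # Ollama exposed through its OpenAI-compatible API; note the /v1 suffix on api_base
  - type: openai-compatible
    name: ollama
    api_base: https://ollama.giugl.io/v1
    models:
      - name: pino
        max_input_tokens: 8192
        max_output_tokens: 16000

      - name: pino-coder
        max_input_tokens: 8192
        max_output_tokens: 16000

With this layout the models would typically be addressed through the client name (e.g. ollama:pino or ollama:pino-coder), assuming aichat's usual <client>:<model> naming; the exact invocation is outside the scope of this patch.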