From 5869eaff9a0b0d9d8c8090ffb2f4d6b825b8dbfb Mon Sep 17 00:00:00 2001
From: perf3ct
Date: Wed, 26 Mar 2025 18:08:30 +0000
Subject: [PATCH] move more constants from files into centralized location

---
 .../llm/constants/provider_constants.ts      | 59 +++++++++++++++++++
 src/services/llm/providers/ollama_service.ts |  5 +-
 src/services/llm/providers/openai_service.ts |  5 +-
 3 files changed, 65 insertions(+), 4 deletions(-)

diff --git a/src/services/llm/constants/provider_constants.ts b/src/services/llm/constants/provider_constants.ts
index 43db2b1c0..1d43e8e46 100644
--- a/src/services/llm/constants/provider_constants.ts
+++ b/src/services/llm/constants/provider_constants.ts
@@ -31,5 +31,64 @@ export const PROVIDER_CONSTANTS = {
             maxTokens: 4096
         }
     ]
+    },
+
+    OPENAI: {
+        BASE_URL: 'https://api.openai.com/v1',
+        DEFAULT_MODEL: 'gpt-3.5-turbo',
+        DEFAULT_EMBEDDING_MODEL: 'text-embedding-ada-002',
+        CONTEXT_WINDOW: 16000,
+        EMBEDDING_DIMENSIONS: {
+            ADA: 1536,
+            DEFAULT: 1536
+        },
+        AVAILABLE_MODELS: [
+            {
+                id: 'gpt-4o',
+                name: 'GPT-4o',
+                description: 'Most capable multimodal model',
+                maxTokens: 8192
+            },
+            {
+                id: 'gpt-4-turbo',
+                name: 'GPT-4 Turbo',
+                description: 'Advanced capabilities with higher token limit',
+                maxTokens: 8192
+            },
+            {
+                id: 'gpt-4',
+                name: 'GPT-4',
+                description: 'Original GPT-4 model',
+                maxTokens: 8192
+            },
+            {
+                id: 'gpt-3.5-turbo',
+                name: 'GPT-3.5 Turbo',
+                description: 'Fast and efficient model for most tasks',
+                maxTokens: 4096
+            }
+        ]
+    },
+
+    OLLAMA: {
+        BASE_URL: 'http://localhost:11434',
+        DEFAULT_MODEL: 'llama2',
+        BATCH_SIZE: 100,
+        CHUNKING: {
+            SIZE: 4000,
+            OVERLAP: 200
+        },
+        MODEL_DIMENSIONS: {
+            default: 4096,
+            llama2: 4096,
+            mixtral: 4096,
+            'mistral': 4096
+        },
+        MODEL_CONTEXT_WINDOWS: {
+            default: 8192,
+            llama2: 4096,
+            mixtral: 8192,
+            'mistral': 8192
+        }
     }
 } as const;
diff --git a/src/services/llm/providers/ollama_service.ts b/src/services/llm/providers/ollama_service.ts
index 6776113f7..6a18fd51e 100644
--- a/src/services/llm/providers/ollama_service.ts
+++ b/src/services/llm/providers/ollama_service.ts
@@ -1,6 +1,7 @@
 import options from '../../options.js';
 import { BaseAIService } from '../base_ai_service.js';
 import type { ChatCompletionOptions, ChatResponse, Message } from '../ai_interface.js';
+import { PROVIDER_CONSTANTS } from '../constants/provider_constants.js';
 
 export class OllamaService extends BaseAIService {
     constructor() {
@@ -18,8 +19,8 @@ export class OllamaService extends BaseAIService {
             throw new Error('Ollama service is not available. Check Ollama settings.');
         }
 
-        const baseUrl = options.getOption('ollamaBaseUrl') || 'http://localhost:11434';
-        const model = opts.model || options.getOption('ollamaDefaultModel') || 'llama2';
+        const baseUrl = options.getOption('ollamaBaseUrl') || PROVIDER_CONSTANTS.OLLAMA.BASE_URL;
+        const model = opts.model || options.getOption('ollamaDefaultModel') || PROVIDER_CONSTANTS.OLLAMA.DEFAULT_MODEL;
         const temperature = opts.temperature !== undefined ?
             opts.temperature :
             parseFloat(options.getOption('aiTemperature') || '0.7');
diff --git a/src/services/llm/providers/openai_service.ts b/src/services/llm/providers/openai_service.ts
index 90ded4544..98073e6e7 100644
--- a/src/services/llm/providers/openai_service.ts
+++ b/src/services/llm/providers/openai_service.ts
@@ -1,6 +1,7 @@
 import options from '../../options.js';
 import { BaseAIService } from '../base_ai_service.js';
 import type { ChatCompletionOptions, ChatResponse, Message } from '../ai_interface.js';
+import { PROVIDER_CONSTANTS } from '../constants/provider_constants.js';
 
 export class OpenAIService extends BaseAIService {
     constructor() {
@@ -17,8 +18,8 @@ export class OpenAIService extends BaseAIService {
         }
 
         const apiKey = options.getOption('openaiApiKey');
-        const baseUrl = options.getOption('openaiBaseUrl') || 'https://api.openai.com/v1';
-        const model = opts.model || options.getOption('openaiDefaultModel') || 'gpt-3.5-turbo';
+        const baseUrl = options.getOption('openaiBaseUrl') || PROVIDER_CONSTANTS.OPENAI.BASE_URL;
+        const model = opts.model || options.getOption('openaiDefaultModel') || PROVIDER_CONSTANTS.OPENAI.DEFAULT_MODEL;
         const temperature = opts.temperature !== undefined ?
             opts.temperature :
             parseFloat(options.getOption('aiTemperature') || '0.7');
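
The pattern this patch standardizes is "user-configured option first, centralized constant as fallback". Below is a minimal self-contained sketch of that resolution order; the getOption stub and its return values are illustrative stand-ins for the real accessor in src/services/options.ts, not Trilium's actual implementation.

// Sketch of the fallback pattern introduced above. PROVIDER_CONSTANTS
// mirrors a slice of the shape added in provider_constants.ts; getOption
// is a hypothetical stub for the real options service.
const PROVIDER_CONSTANTS = {
    OLLAMA: {
        BASE_URL: 'http://localhost:11434',
        DEFAULT_MODEL: 'llama2'
    }
} as const;

function getOption(name: string): string | undefined {
    // The real implementation reads the user's saved setting; this stub
    // simulates a user who configured a base URL but no default model.
    return name === 'ollamaBaseUrl' ? 'http://my-ollama:11434' : undefined;
}

// The user setting wins when present; otherwise the centralized constant applies.
const baseUrl = getOption('ollamaBaseUrl') || PROVIDER_CONSTANTS.OLLAMA.BASE_URL;
const model = getOption('ollamaDefaultModel') || PROVIDER_CONSTANTS.OLLAMA.DEFAULT_MODEL;

console.log(baseUrl); // "http://my-ollama:11434" (user setting)
console.log(model);   // "llama2" (constant fallback)

One side effect of the `as const` assertion on the constants object is that every field is readonly and its type is narrowed to the literal value (e.g. 'llama2' rather than string), so typos in downstream lookups surface at compile time.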