From dcab4caee3de567c48a85e28a6e15bdae51b30d9 Mon Sep 17 00:00:00 2001
From: perf3ct
Date: Mon, 2 Jun 2025 15:12:08 +0000
Subject: [PATCH] feat(llm): redo chat storage, part 3

---
 .../src/services/llm/ai_service_manager.ts    | 31 +++++++++++++------
 .../stages/message_preparation_stage.ts       | 20 ++++++------
 .../pipeline/stages/model_selection_stage.ts  |  3 +-
 3 files changed, 33 insertions(+), 21 deletions(-)

diff --git a/apps/server/src/services/llm/ai_service_manager.ts b/apps/server/src/services/llm/ai_service_manager.ts
index fbbc12cb5..e76c13ae2 100644
--- a/apps/server/src/services/llm/ai_service_manager.ts
+++ b/apps/server/src/services/llm/ai_service_manager.ts
@@ -155,7 +155,7 @@ export class AIServiceManager implements IAIServiceManager {
         // Get precedence list from options
         let precedenceList: string[] = ['openai']; // Default to openai if not set
         const precedenceOption = await options.getOption('aiProviderPrecedence');
-        
+
         if (precedenceOption) {
             try {
                 if (precedenceOption.startsWith('[') && precedenceOption.endsWith(']')) {
@@ -171,10 +171,10 @@
                 log.error(`Error parsing precedence list: ${e}`);
             }
         }
-        
+
         // Check for configuration issues with providers in the precedence list
         const configIssues: string[] = [];
-        
+
         // Check each provider in the precedence list for proper configuration
         for (const provider of precedenceList) {
             if (provider === 'openai') {
@@ -198,20 +198,20 @@
             }
             // Add checks for other providers as needed
         }
-        
+
         // Return warning message if there are configuration issues
         if (configIssues.length > 0) {
             let message = 'There are issues with your AI provider configuration:';
-            
+
             for (const issue of configIssues) {
                 message += `\n• ${issue}`;
             }
-            
+
             message += '\n\nPlease check your AI settings.';
-            
+
             // Log warning to console
             log.error('AI Provider Configuration Warning: ' + message);
-            
+
             return message;
         }
 
@@ -279,9 +279,19 @@
 
         // If a specific provider is requested and available, use it
         if (options.model && options.model.includes(':')) {
-            const [providerName, modelName] = options.model.split(':');
+            // Check if this is a provider prefix (e.g., "ollama:qwen3:30b")
+            // vs a model name with version (e.g., "qwen3:30b")
+            const parts = options.model.split(':');
+
+            // Only treat as provider:model if the first part is a known provider
+            const knownProviders = ['openai', 'anthropic', 'ollama', 'local'];
+            const potentialProvider = parts[0];
+
+            if (knownProviders.includes(potentialProvider) && availableProviders.includes(potentialProvider as ServiceProviders)) {
+                // This is a provider:model format
+                const providerName = potentialProvider;
+                const modelName = parts.slice(1).join(':'); // Rejoin the rest as model name
 
-            if (availableProviders.includes(providerName as ServiceProviders)) {
                 try {
                     const modifiedOptions = { ...options, model: modelName };
                     log.info(`[AIServiceManager] Using provider ${providerName} from model prefix with modifiedOptions.stream: ${modifiedOptions.stream}`);
@@ -291,6 +301,7 @@ export class AIServiceManager implements IAIServiceManager {
                     // If the specified provider fails, continue with the fallback providers
                 }
             }
+            // If not a provider prefix, treat the entire string as a model name and continue with normal provider selection
         }
 
         // Try each provider in order until one succeeds
diff --git a/apps/server/src/services/llm/pipeline/stages/message_preparation_stage.ts b/apps/server/src/services/llm/pipeline/stages/message_preparation_stage.ts
index 753bc6a28..7f129b26d 100644
--- a/apps/server/src/services/llm/pipeline/stages/message_preparation_stage.ts
+++ b/apps/server/src/services/llm/pipeline/stages/message_preparation_stage.ts
@@ -20,44 +20,44 @@ export class MessagePreparationStage extends BasePipelineStage {
         const { messages, context, systemPrompt, options } = input;
-        
+
         // Determine provider from model string if available (format: "provider:model")
         let provider = 'default';
         if (options?.model && options.model.includes(':')) {
             const [providerName] = options.model.split(':');
             provider = providerName;
         }
-        
+
         // Check if tools are enabled
         const toolsEnabled = options?.enableTools === true;
-        
+
         log.info(`Preparing messages for provider: ${provider}, context: ${!!context}, system prompt: ${!!systemPrompt}, tools: ${toolsEnabled}`);
-        
+
         // Get appropriate formatter for this provider
         const formatter = MessageFormatterFactory.getFormatter(provider);
-        
+
         // Determine the system prompt to use
         let finalSystemPrompt = systemPrompt || SYSTEM_PROMPTS.DEFAULT_SYSTEM_PROMPT;
-        
+
         // If tools are enabled, enhance system prompt with tools guidance
         if (toolsEnabled) {
             const toolCount = toolRegistry.getAllTools().length;
             const toolsPrompt = `You have access to ${toolCount} tools to help you respond. When you need information that might be in the user's notes, use the search_notes tool to find relevant content or the read_note tool to read a specific note by ID. Use tools when specific information is required rather than making assumptions.`;
-            
+
             // Add tools guidance to system prompt
             finalSystemPrompt = finalSystemPrompt + '\n\n' + toolsPrompt;
 
             log.info(`Enhanced system prompt with tools guidance: ${toolCount} tools available`);
         }
-        
+
         // Format messages using provider-specific approach
         const formattedMessages = formatter.formatMessages(
             messages,
             finalSystemPrompt,
             context
         );
-        
+
         log.info(`Formatted ${messages.length} messages into ${formattedMessages.length} messages for provider: ${provider}`);
-        
+
         return { messages: formattedMessages };
     }
 }
diff --git a/apps/server/src/services/llm/pipeline/stages/model_selection_stage.ts b/apps/server/src/services/llm/pipeline/stages/model_selection_stage.ts
index e5406997d..0830b0bb8 100644
--- a/apps/server/src/services/llm/pipeline/stages/model_selection_stage.ts
+++ b/apps/server/src/services/llm/pipeline/stages/model_selection_stage.ts
@@ -234,7 +234,8 @@ export class ModelSelectionStage extends BasePipelineStage
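The substantive change in ai_service_manager.ts is a parsing fix: the old `const [providerName, modelName] = options.model.split(':')` destructuring kept only the first two segments, so a prefixed model like `ollama:qwen3:30b` lost its `:30b` tag, and a bare versioned model like `qwen3:30b` was misread as provider `qwen3`. The new logic honors the prefix only when the first segment names a known, available provider. Below is a minimal standalone sketch of that rule; `parseModelString`, `ParsedModel`, and `KNOWN_PROVIDERS` are illustrative names (the patch inlines this logic in `AIServiceManager` rather than exposing such a helper):

```ts
// Hypothetical helper mirroring the disambiguation rule from the patch.
// Names and the return shape are illustrative, not part of the codebase.
const KNOWN_PROVIDERS = ['openai', 'anthropic', 'ollama', 'local'] as const;

interface ParsedModel {
    provider: string | null; // null => the whole string is a model name
    model: string;
}

function parseModelString(raw: string, availableProviders: string[]): ParsedModel {
    if (!raw.includes(':')) {
        return { provider: null, model: raw };
    }

    const parts = raw.split(':');
    const potentialProvider = parts[0];

    // Treat the first segment as a provider only when it is both a known
    // provider name AND currently available; otherwise the colon is assumed
    // to be part of the model name (e.g. a version tag like ":30b").
    if ((KNOWN_PROVIDERS as readonly string[]).includes(potentialProvider)
            && availableProviders.includes(potentialProvider)) {
        return { provider: potentialProvider, model: parts.slice(1).join(':') };
    }

    return { provider: null, model: raw };
}

// The two cases the patch distinguishes:
console.log(parseModelString('ollama:qwen3:30b', ['ollama']));
// -> { provider: 'ollama', model: 'qwen3:30b' }
console.log(parseModelString('qwen3:30b', ['ollama']));
// -> { provider: null, model: 'qwen3:30b' }
```

Note that `parts.slice(1).join(':')` rather than destructuring is what preserves version tags after the provider prefix; the fall-through case leaves the string untouched so normal provider-precedence selection proceeds.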