diff --git a/apps/client/src/widgets/llm_chat/validation.ts b/apps/client/src/widgets/llm_chat/validation.ts
index e39b07012..d731bfb8e 100644
--- a/apps/client/src/widgets/llm_chat/validation.ts
+++ b/apps/client/src/widgets/llm_chat/validation.ts
@@ -16,50 +16,35 @@ export async function validateEmbeddingProviders(validationWarning: HTMLElement)
         return;
     }
 
-    // Get precedence list from options
-    const precedenceStr = options.get('aiProviderPrecedence') || 'openai,anthropic,ollama';
-    let precedenceList: string[] = [];
-
-    if (precedenceStr) {
-        if (precedenceStr.startsWith('[') && precedenceStr.endsWith(']')) {
-            try {
-                precedenceList = JSON.parse(precedenceStr);
-            } catch (e) {
-                console.error('Error parsing precedence list:', e);
-                precedenceList = ['openai']; // Default if parsing fails
-            }
-        } else if (precedenceStr.includes(',')) {
-            precedenceList = precedenceStr.split(',').map(p => p.trim());
-        } else {
-            precedenceList = [precedenceStr];
-        }
+    // Get selected chat provider
+    const selectedProvider = options.get('aiChatProvider');
+    if (!selectedProvider) {
+        // No provider configured, hide validation
+        validationWarning.style.display = 'none';
+        return;
     }
 
-    // Check for configuration issues with providers in the precedence list
+    // Check for configuration issues with the selected provider
    const configIssues: string[] = [];
 
-    // Check each provider in the precedence list for proper configuration
-    for (const provider of precedenceList) {
-        if (provider === 'openai') {
-            // Check OpenAI configuration
-            const apiKey = options.get('openaiApiKey');
-            if (!apiKey) {
-                configIssues.push(`OpenAI API key is missing`);
-            }
-        } else if (provider === 'anthropic') {
-            // Check Anthropic configuration
-            const apiKey = options.get('anthropicApiKey');
-            if (!apiKey) {
-                configIssues.push(`Anthropic API key is missing`);
-            }
-        } else if (provider === 'ollama') {
-            // Check Ollama configuration
-            const baseUrl = options.get('ollamaBaseUrl');
-            if (!baseUrl) {
-                configIssues.push(`Ollama Base URL is missing`);
-            }
+    if (selectedProvider === 'openai') {
+        // Check OpenAI configuration
+        const apiKey = options.get('openaiApiKey');
+        if (!apiKey) {
+            configIssues.push(`OpenAI API key is missing`);
+        }
+    } else if (selectedProvider === 'anthropic') {
+        // Check Anthropic configuration
+        const apiKey = options.get('anthropicApiKey');
+        if (!apiKey) {
+            configIssues.push(`Anthropic API key is missing`);
+        }
+    } else if (selectedProvider === 'ollama') {
+        // Check Ollama configuration
+        const baseUrl = options.get('ollamaBaseUrl');
+        if (!baseUrl) {
+            configIssues.push(`Ollama Base URL is missing`);
         }
-        // Add checks for other providers as needed
     }
 
     // Fetch embedding stats to check if there are any notes being processed
diff --git a/apps/server/src/services/llm/ai_service_manager.ts b/apps/server/src/services/llm/ai_service_manager.ts
index fbbc12cb5..50e085778 100644
--- a/apps/server/src/services/llm/ai_service_manager.ts
+++ b/apps/server/src/services/llm/ai_service_manager.ts
@@ -36,12 +36,14 @@ export class AIServiceManager implements IAIServiceManager {
         ollama: new OllamaService()
     };
 
-    private providerOrder: ServiceProviders[] = ['openai', 'anthropic', 'ollama']; // Default order
+    private currentChatProvider: ServiceProviders | null = null; // No default
+    private currentChatService: AIService | null = null; // Current active service
+    private currentEmbeddingProvider: string | null = null; // No default
     private initialized = false;
 
     constructor() {
-        // Initialize provider order immediately
-        this.updateProviderOrder();
+        // Initialize provider immediately
+        this.updateCurrentProvider();
 
         // Initialize tools immediately
         this.initializeTools().catch(error => {
@@ -71,68 +73,47 @@ export class AIServiceManager implements IAIServiceManager {
     }
 
     /**
-     * Update the provider precedence order from saved options
+     * Update the current provider from saved options
      * Returns true if successful, false if options not available yet
      */
-    updateProviderOrder(): boolean {
+    updateCurrentProvider(): boolean {
         if (this.initialized) {
             return true;
         }
 
         try {
-            // Default precedence: openai, anthropic, ollama
-            const defaultOrder: ServiceProviders[] = ['openai', 'anthropic', 'ollama'];
-
-            // Get custom order from options
-            const customOrder = options.getOption('aiProviderPrecedence');
-
-            if (customOrder) {
-                try {
-                    // Try to parse as JSON first
-                    let parsed;
-
-                    // Handle both array in JSON format and simple string format
-                    if (customOrder.startsWith('[') && customOrder.endsWith(']')) {
-                        parsed = JSON.parse(customOrder);
-                    } else if (typeof customOrder === 'string') {
-                        // If it's a string with commas, split it
-                        if (customOrder.includes(',')) {
-                            parsed = customOrder.split(',').map(p => p.trim());
-                        } else {
-                            // If it's a simple string (like "ollama"), convert to single-item array
-                            parsed = [customOrder];
-                        }
-                    } else {
-                        // Fallback to default
-                        parsed = defaultOrder;
-                    }
-
-                    // Validate that all providers are valid
-                    if (Array.isArray(parsed) &&
-                        parsed.every(p => Object.keys(this.services).includes(p))) {
-                        this.providerOrder = parsed as ServiceProviders[];
-                    } else {
-                        log.info('Invalid AI provider precedence format, using defaults');
-                        this.providerOrder = defaultOrder;
-                    }
-                } catch (e) {
-                    log.error(`Failed to parse AI provider precedence: ${e}`);
-                    this.providerOrder = defaultOrder;
-                }
-            } else {
-                this.providerOrder = defaultOrder;
+            // Always get selected chat provider from options
+            const selectedChatProvider = options.getOption('aiChatProvider');
+            if (!selectedChatProvider) {
+                throw new Error('No chat provider configured. Please set aiChatProvider option.');
             }
+
+            if (!Object.keys(this.services).includes(selectedChatProvider)) {
+                throw new Error(`Invalid chat provider '${selectedChatProvider}'. Valid providers are: ${Object.keys(this.services).join(', ')}`);
+            }
+
+            this.currentChatProvider = selectedChatProvider as ServiceProviders;
+            this.currentChatService = this.services[this.currentChatProvider];
+
+            // Always get selected embedding provider from options
+            const selectedEmbeddingProvider = options.getOption('aiEmbeddingProvider');
+            if (!selectedEmbeddingProvider) {
+                throw new Error('No embedding provider configured. Please set aiEmbeddingProvider option.');
+            }
+
+            this.currentEmbeddingProvider = selectedEmbeddingProvider;
 
             this.initialized = true;
-
-            // Remove the validateEmbeddingProviders call since we now do validation on the client
-            // this.validateEmbeddingProviders();
+            log.info(`AI Service Manager initialized with chat provider: ${this.currentChatProvider}, embedding provider: ${this.currentEmbeddingProvider}`);
 
             return true;
         } catch (error) {
-            // If options table doesn't exist yet, use defaults
+            // If options table doesn't exist yet or providers not configured
             // This happens during initial database creation
-            this.providerOrder = ['openai', 'anthropic', 'ollama'];
+            log.error(`Failed to initialize AI providers: ${error}`);
+            this.currentChatProvider = null;
+            this.currentChatService = null;
+            this.currentEmbeddingProvider = null;
             return false;
         }
     }
@@ -152,51 +133,34 @@ export class AIServiceManager implements IAIServiceManager {
             return null;
         }
 
-        // Get precedence list from options
-        let precedenceList: string[] = ['openai']; // Default to openai if not set
-        const precedenceOption = await options.getOption('aiProviderPrecedence');
-
-        if (precedenceOption) {
-            try {
-                if (precedenceOption.startsWith('[') && precedenceOption.endsWith(']')) {
-                    precedenceList = JSON.parse(precedenceOption);
-                } else if (typeof precedenceOption === 'string') {
-                    if (precedenceOption.includes(',')) {
-                        precedenceList = precedenceOption.split(',').map(p => p.trim());
-                    } else {
-                        precedenceList = [precedenceOption];
-                    }
-                }
-            } catch (e) {
-                log.error(`Error parsing precedence list: ${e}`);
-            }
+        // Get selected provider from options
+        const selectedProvider = await options.getOption('aiChatProvider');
+        if (!selectedProvider) {
+            throw new Error('No chat provider configured');
         }
 
-        // Check for configuration issues with providers in the precedence list
+        // Check for configuration issues with the selected provider
         const configIssues: string[] = [];
 
-        // Check each provider in the precedence list for proper configuration
-        for (const provider of precedenceList) {
-            if (provider === 'openai') {
-                // Check OpenAI configuration
-                const apiKey = await options.getOption('openaiApiKey');
-                if (!apiKey) {
-                    configIssues.push(`OpenAI API key is missing`);
-                }
-            } else if (provider === 'anthropic') {
-                // Check Anthropic configuration
-                const apiKey = await options.getOption('anthropicApiKey');
-                if (!apiKey) {
-                    configIssues.push(`Anthropic API key is missing`);
-                }
-            } else if (provider === 'ollama') {
-                // Check Ollama configuration
-                const baseUrl = await options.getOption('ollamaBaseUrl');
-                if (!baseUrl) {
-                    configIssues.push(`Ollama Base URL is missing`);
-                }
+        // Check the selected provider for proper configuration
+        if (selectedProvider === 'openai') {
+            // Check OpenAI configuration
+            const apiKey = await options.getOption('openaiApiKey');
+            if (!apiKey) {
+                configIssues.push(`OpenAI API key is missing`);
+            }
+        } else if (selectedProvider === 'anthropic') {
+            // Check Anthropic configuration
+            const apiKey = await options.getOption('anthropicApiKey');
+            if (!apiKey) {
+                configIssues.push(`Anthropic API key is missing`);
+            }
+        } else if (selectedProvider === 'ollama') {
+            // Check Ollama configuration
+            const baseUrl = await options.getOption('ollamaBaseUrl');
+            if (!baseUrl) {
+                configIssues.push(`Ollama Base URL is missing`);
            }
-            // Add checks for other providers as needed
        }
 
         // Return warning message if there are configuration issues
@@ -227,7 +191,7 @@ export class AIServiceManager implements IAIServiceManager {
      */
     private ensureInitialized() {
         if (!this.initialized) {
-            this.updateProviderOrder();
+            this.updateCurrentProvider();
         }
     }
@@ -249,8 +213,7 @@ export class AIServiceManager implements IAIServiceManager {
     }
 
     /**
-     * Generate a chat completion response using the first available AI service
-     * based on the configured precedence order
+     * Generate a chat completion response using the current AI service
      */
     async generateChatCompletion(messages: Message[], options: ChatCompletionOptions = {}): Promise {
         this.ensureInitialized();
@@ -266,49 +229,46 @@ export class AIServiceManager implements IAIServiceManager {
             throw new Error('No messages provided for chat completion');
         }
 
-        // Try providers in order of preference
-        const availableProviders = this.getAvailableProviders();
-
-        if (availableProviders.length === 0) {
-            throw new Error('No AI providers are available. Please check your AI settings.');
-        }
-
-        // Sort available providers by precedence
-        const sortedProviders = this.providerOrder
-            .filter(provider => availableProviders.includes(provider));
-
-        // If a specific provider is requested and available, use it
+        // If a specific provider is requested via model prefix, use it temporarily
         if (options.model && options.model.includes(':')) {
             const [providerName, modelName] = options.model.split(':');
 
-            if (availableProviders.includes(providerName as ServiceProviders)) {
+            if (this.services[providerName as ServiceProviders]?.isAvailable()) {
                 try {
                     const modifiedOptions = { ...options, model: modelName };
                     log.info(`[AIServiceManager] Using provider ${providerName} from model prefix with modifiedOptions.stream: ${modifiedOptions.stream}`);
                     return await this.services[providerName as ServiceProviders].generateChatCompletion(messages, modifiedOptions);
                 } catch (error) {
                     log.error(`Error with specified provider ${providerName}: ${error}`);
-                    // If the specified provider fails, continue with the fallback providers
+                    throw new Error(`Provider ${providerName} failed: ${error}`);
                 }
+            } else {
+                throw new Error(`Requested provider ${providerName} is not available`);
             }
         }
 
-        // Try each provider in order until one succeeds
-        let lastError: Error | null = null;
-
-        for (const provider of sortedProviders) {
-            try {
-                log.info(`[AIServiceManager] Trying provider ${provider} with options.stream: ${options.stream}`);
-                return await this.services[provider].generateChatCompletion(messages, options);
-            } catch (error) {
-                log.error(`Error with provider ${provider}: ${error}`);
-                lastError = error as Error;
-                // Continue to the next provider
+        // Ensure we have a configured service
+        if (!this.currentChatProvider || !this.currentChatService) {
+            // Try to initialize again in case options were updated
+            this.initialized = false;
+            this.updateCurrentProvider();
+
+            if (!this.currentChatProvider || !this.currentChatService) {
+                throw new Error('No chat provider configured. Please configure aiChatProvider in AI settings.');
             }
         }
+
+        if (!this.currentChatService.isAvailable()) {
+            throw new Error(`Configured chat provider '${this.currentChatProvider}' is not available. Please check your AI settings.`);
Please check your AI settings.`); + } - // If we get here, all providers failed - throw new Error(`All AI providers failed: ${lastError?.message || 'Unknown error'}`); + try { + log.info(`[AIServiceManager] Using current chat service (${this.currentChatProvider}) with options.stream: ${options.stream}`); + return await this.currentChatService.generateChatCompletion(messages, options); + } catch (error) { + log.error(`Error with provider ${this.currentChatProvider}: ${error}`); + throw new Error(`Chat provider ${this.currentChatProvider} failed: ${error}`); + } } setupEventListeners() { @@ -406,21 +366,8 @@ export class AIServiceManager implements IAIServiceManager { return; } - // Get provider precedence list - const precedenceOption = await options.getOption('embeddingProviderPrecedence'); - let precedenceList: string[] = []; - - if (precedenceOption) { - if (precedenceOption.startsWith('[') && precedenceOption.endsWith(']')) { - precedenceList = JSON.parse(precedenceOption); - } else if (typeof precedenceOption === 'string') { - if (precedenceOption.includes(',')) { - precedenceList = precedenceOption.split(',').map(p => p.trim()); - } else { - precedenceList = [precedenceOption]; - } - } - } + // Get selected embedding provider + const selectedProvider = await options.getOption('aiEmbeddingProvider') || 'openai'; // Check if we have enabled providers const enabledProviders = await getEnabledEmbeddingProviders(); @@ -572,17 +519,13 @@ export class AIServiceManager implements IAIServiceManager { return this.services[provider as ServiceProviders]; } - // Otherwise, use the first available provider in the configured order - for (const providerName of this.providerOrder) { - const service = this.services[providerName]; - if (service.isAvailable()) { - return service; - } + // Otherwise, use the current chat service + if (this.currentChatService && this.currentChatService.isAvailable()) { + return this.currentChatService; } - // If no provider is available, use first one anyway (it will throw an error) - // This allows us to show a proper error message rather than "provider not found" - return this.services[this.providerOrder[0]]; + // If current service is not available, throw an error + throw new Error(`Configured chat provider '${this.currentChatProvider}' is not available`); } /** @@ -590,16 +533,40 @@ export class AIServiceManager implements IAIServiceManager { */ getPreferredProvider(): string { this.ensureInitialized(); - - // Return the first available provider in the order - for (const providerName of this.providerOrder) { - if (this.services[providerName].isAvailable()) { - return providerName; - } + if (!this.currentChatProvider) { + throw new Error('No chat provider configured'); } - - // Return the first provider as fallback - return this.providerOrder[0]; + return this.currentChatProvider; + } + + /** + * Get the current chat service + */ + getCurrentChatService(): AIService | null { + this.ensureInitialized(); + return this.currentChatService; + } + + /** + * Get the current chat provider name + */ + getCurrentChatProvider(): string { + this.ensureInitialized(); + if (!this.currentChatProvider) { + throw new Error('No chat provider configured'); + } + return this.currentChatProvider; + } + + /** + * Get the current embedding provider name + */ + getCurrentEmbeddingProvider(): string { + this.ensureInitialized(); + if (!this.currentEmbeddingProvider) { + throw new Error('No embedding provider configured'); + } + return this.currentEmbeddingProvider; } /** @@ -609,6 +576,25 @@ 
         return this.services[provider as ServiceProviders]?.isAvailable() ?? false;
     }
 
+    /**
+     * Reinitialize the service manager when provider settings change
+     * This will update the current provider selection and service objects
+     */
+    async reinitialize(): Promise<void> {
+        log.info('Reinitializing AI Service Manager due to provider change');
+
+        // Reset initialization flag to force update
+        this.initialized = false;
+
+        // Update current provider and service objects from options
+        this.updateCurrentProvider();
+
+        // Re-validate providers if needed
+        await this.validateEmbeddingProviders();
+
+        log.info(`AI Service Manager reinitialized with chat provider: ${this.currentChatProvider}, embedding provider: ${this.currentEmbeddingProvider}`);
+    }
+
     /**
      * Get metadata about a provider
      */
@@ -723,6 +709,18 @@ export default {
     },
     getProviderMetadata(provider: string): ProviderMetadata | null {
         return getInstance().getProviderMetadata(provider);
+    },
+    async reinitialize(): Promise<void> {
+        return getInstance().reinitialize();
+    },
+    getCurrentChatService(): AIService | null {
+        return getInstance().getCurrentChatService();
+    },
+    getCurrentChatProvider(): string {
+        return getInstance().getCurrentChatProvider();
+    },
+    getCurrentEmbeddingProvider(): string {
+        return getInstance().getCurrentEmbeddingProvider();
     }
 };
diff --git a/apps/server/src/services/llm/context/modules/provider_manager.ts b/apps/server/src/services/llm/context/modules/provider_manager.ts
index 8030e3592..64bce4168 100644
--- a/apps/server/src/services/llm/context/modules/provider_manager.ts
+++ b/apps/server/src/services/llm/context/modules/provider_manager.ts
@@ -8,53 +8,26 @@ import { getEmbeddingProvider, getEnabledEmbeddingProviders } from '../../provid
 export class ProviderManager {
     /**
      * Get the preferred embedding provider based on user settings
-     * Tries to use the most appropriate provider in this order:
-     * 1. User's configured default provider
-     * 2. OpenAI if API key is set
-     * 3. Anthropic if API key is set
-     * 4. Ollama if configured
-     * 5. Any available provider
-     * 6. Local provider as fallback
      *
      * @returns The preferred embedding provider or null if none available
      */
     async getPreferredEmbeddingProvider(): Promise {
         try {
-            // Try to get providers based on precedence list
-            const precedenceOption = await options.getOption('embeddingProviderPrecedence');
-            let precedenceList: string[] = [];
-
-            if (precedenceOption) {
-                if (precedenceOption.startsWith('[') && precedenceOption.endsWith(']')) {
-                    precedenceList = JSON.parse(precedenceOption);
-                } else if (typeof precedenceOption === 'string') {
-                    if (precedenceOption.includes(',')) {
-                        precedenceList = precedenceOption.split(',').map(p => p.trim());
-                    } else {
-                        precedenceList = [precedenceOption];
-                    }
+            // Get the selected embedding provider
+            const selectedProvider = await options.getOption('aiEmbeddingProvider');
+            if (!selectedProvider) {
+                throw new Error('No embedding provider configured. Please set aiEmbeddingProvider option.');
+            }
+
+            // Try to get the selected provider
+            const provider = await getEmbeddingProvider(selectedProvider);
+            if (provider) {
+                log.info(`Using selected embedding provider: ${selectedProvider}`);
+                return provider;
             }
 
-            // Try each provider in the precedence list
-            for (const providerId of precedenceList) {
-                const provider = await getEmbeddingProvider(providerId);
-                if (provider) {
-                    log.info(`Using embedding provider from precedence list: ${providerId}`);
-                    return provider;
-                }
-            }
-
-            // If no provider from precedence list is available, try any enabled provider
-            const providers = await getEnabledEmbeddingProviders();
-            if (providers.length > 0) {
-                log.info(`Using available embedding provider: ${providers[0].name}`);
-                return providers[0];
-            }
-
-            // Last resort is local provider
-            log.info('Using local embedding provider as fallback');
-            return await getEmbeddingProvider('local');
+            // If selected provider is not available, throw error
+            throw new Error(`Selected embedding provider '${selectedProvider}' is not available. Please check your AI settings.`);
         } catch (error) {
             log.error(`Error getting preferred embedding provider: ${error}`);
             return null;
diff --git a/apps/server/src/services/llm/pipeline/stages/model_selection_stage.ts b/apps/server/src/services/llm/pipeline/stages/model_selection_stage.ts
index e5406997d..c4a27889d 100644
--- a/apps/server/src/services/llm/pipeline/stages/model_selection_stage.ts
+++ b/apps/server/src/services/llm/pipeline/stages/model_selection_stage.ts
@@ -86,52 +86,44 @@ export class ModelSelectionStage extends BasePipelineStage
-                    providers = providerPrecedence.split(',').map(p => p.trim());
-                } else if (providerPrecedence.startsWith('[') && providerPrecedence.endsWith(']')) {
-                    providers = JSON.parse(providerPrecedence);
-                } else {
-                    providers = [providerPrecedence];
-                }
-
-                // Check for first available provider
-                if (providers.length > 0) {
-                    const firstProvider = providers[0];
-                    defaultProvider = firstProvider;
-
-                    // Get provider-specific default model
-                    if (firstProvider === 'openai') {
-                        const model = await options.getOption('openaiDefaultModel');
-                        if (model) defaultModelName = model;
-                    } else if (firstProvider === 'anthropic') {
-                        const model = await options.getOption('anthropicDefaultModel');
-                        if (model) defaultModelName = model;
-                    } else if (firstProvider === 'ollama') {
-                        const model = await options.getOption('ollamaDefaultModel');
-                        if (model) {
-                            defaultModelName = model;
-
-                            // Enable tools for all Ollama models
-                            // The Ollama API will handle models that don't support tool calling
-                            log.info(`Using Ollama model ${model} with tool calling enabled`);
-                            updatedOptions.enableTools = true;
-                        }
-                    }
-                }
-            }
+            currentProvider = aiServiceManager.getCurrentChatProvider();
         } catch (error) {
-            // If any error occurs, use the fallback default
-            log.error(`Error determining default model: ${error}`);
+            // Provider not configured, try to get from options
+            const provider = await options.getOption('aiChatProvider');
+            if (!provider) {
+                throw new Error('No chat provider configured. Please configure AI settings.');
+            }
+            currentProvider = provider;
+        }
+
+        // Get provider-specific default model from options
+        if (currentProvider === 'openai') {
+            defaultModelName = await options.getOption('openaiDefaultModel');
+            if (!defaultModelName) {
+                throw new Error('OpenAI default model not configured. Please set openaiDefaultModel option.');
+            }
+        } else if (currentProvider === 'anthropic') {
+            defaultModelName = await options.getOption('anthropicDefaultModel');
+            if (!defaultModelName) {
+                throw new Error('Anthropic default model not configured. Please set anthropicDefaultModel option.');
+            }
+        } else if (currentProvider === 'ollama') {
+            defaultModelName = await options.getOption('ollamaDefaultModel');
+            if (!defaultModelName) {
+                throw new Error('Ollama default model not configured. Please set ollamaDefaultModel option.');
+            }
+
+            // Enable tools for all Ollama models
+            // The Ollama API will handle models that don't support tool calling
+            log.info(`Using Ollama model ${defaultModelName} with tool calling enabled`);
+            updatedOptions.enableTools = true;
+        } else {
+            throw new Error(`Unknown provider '${currentProvider}'. Cannot determine default model.`);
         }
 
         // Determine query complexity
@@ -162,13 +154,13 @@ export class ModelSelectionStage extends BasePipelineStage
-            aiServiceManager.isProviderAvailable(provider));
-
-        if (availableProviders.length === 0) {
-            throw new Error('No AI providers are available');
+    private async determineDefaultModel(input: ModelSelectionInput): Promise<string> {
+        let currentProvider: string;
+        try {
+            currentProvider = aiServiceManager.getCurrentChatProvider();
+        } catch (error) {
+            // Provider not initialized, get from options
+            const provider = await options.getOption('aiChatProvider');
+            if (!provider) {
+                throw new Error('No chat provider configured');
+            }
+            currentProvider = provider;
+        }
+
+        const service = aiServiceManager.getCurrentChatService();
+        if (!service || !service.isAvailable()) {
+            throw new Error(`Current AI provider '${currentProvider}' is not available`);
         }
 
-        // Get the first available provider and its default model
-        const defaultProvider = availableProviders[0] as 'openai' | 'anthropic' | 'ollama' | 'local';
-        let defaultModel = 'gpt-3.5-turbo'; // Use model from our constants
+        // Get the default model from options based on provider
+        let defaultModel: string | null = null;
+
+        if (currentProvider === 'openai') {
+            defaultModel = await options.getOption('openaiDefaultModel');
+        } else if (currentProvider === 'anthropic') {
+            defaultModel = await options.getOption('anthropicDefaultModel');
+        } else if (currentProvider === 'ollama') {
+            defaultModel = await options.getOption('ollamaDefaultModel');
+        }
+
+        if (!defaultModel) {
+            throw new Error(`No default model configured for provider '${currentProvider}'`);
+        }
 
         // Set provider metadata
         if (!input.options.providerMetadata) {
             input.options.providerMetadata = {
-                provider: defaultProvider,
+                provider: currentProvider as 'openai' | 'anthropic' | 'ollama' | 'local',
                 modelId: defaultModel
             };
         }
 
-        log.info(`Selected default model ${defaultModel} from provider ${defaultProvider}`);
+        log.info(`Selected default model ${defaultModel} from provider ${currentProvider}`);
         return defaultModel;
     }
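
Usage note (not part of the patch): with precedence lists removed, callers are expected to pick up provider changes by reinitializing the manager rather than re-sorting a list. Below is a minimal sketch using only the default export added to ai_service_manager.ts above; the option-change hook and the import path are hypothetical.

```typescript
import aiServiceManager from './services/llm/ai_service_manager.js'; // path is illustrative

// Hypothetical hook invoked after the user saves AI settings.
// Option names match the ones read in this diff: aiChatProvider / aiEmbeddingProvider.
async function onAiOptionsChanged(changedOptions: string[]): Promise<void> {
    const providerOptions = ['aiChatProvider', 'aiEmbeddingProvider'];

    if (changedOptions.some(name => providerOptions.includes(name))) {
        // Re-read the selected providers and rebuild the current service objects.
        await aiServiceManager.reinitialize();

        // Both getters throw if the corresponding option is missing, so report inside a try.
        try {
            console.log(`Chat provider: ${aiServiceManager.getCurrentChatProvider()}, `
                + `embedding provider: ${aiServiceManager.getCurrentEmbeddingProvider()}`);
        } catch (e) {
            console.error(`AI providers not fully configured: ${e}`);
        }
    }
}
```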