diff --git a/apps/server/src/services/llm/pipeline/stages/model_selection_stage.ts b/apps/server/src/services/llm/pipeline/stages/model_selection_stage.ts
index b51e59658..7b1276b91 100644
--- a/apps/server/src/services/llm/pipeline/stages/model_selection_stage.ts
+++ b/apps/server/src/services/llm/pipeline/stages/model_selection_stage.ts
@@ -111,19 +111,13 @@ export class ModelSelectionStage extends BasePipelineStage {
-        try {
-            // Use the same logic as the main process method
-            const { getValidModelConfig, getSelectedProvider } = await import('../../config/configuration_helpers.js');
-            const selectedProvider = await getSelectedProvider();
-            if (!selectedProvider) {
-                throw new Error('No AI provider is selected. Please select a provider in your AI settings.');
-            }
-
-            // Check if the provider is available through the service manager
-            if (!aiServiceManager.isProviderAvailable(selectedProvider)) {
-                throw new Error(`Selected provider ${selectedProvider} is not available`);
-            }
-
-            // Try to get a valid model config
-            const modelConfig = await getValidModelConfig(selectedProvider);
-
-            if (!modelConfig) {
-                throw new Error(`No default model configured for provider ${selectedProvider}. Please configure a default model in your AI settings.`);
-            }
-
-            // Set provider metadata
-            if (!input.options.providerMetadata) {
-                input.options.providerMetadata = {
-                    provider: selectedProvider as 'openai' | 'anthropic' | 'ollama' | 'local',
-                    modelId: modelConfig.model
-                };
-            }
-
-            log.info(`Selected default model ${modelConfig.model} from provider ${selectedProvider}`);
-            return modelConfig.model;
-        } catch (error) {
-            log.error(`Error determining default model: ${error}`);
-            throw error; // Don't provide fallback defaults, let the error propagate
-        }
-    }

     /**
      * Get estimated context window for Ollama models
@@ -283,48 +225,5 @@ export class ModelSelectionStage extends BasePipelineStage {
-        try {
-            log.info(`Getting default model for provider ${provider} using AI service manager`);
-
-            // Use the existing AI service manager instead of duplicating API calls
-            const service = await aiServiceManager.getInstance().getService(provider);
-
-            if (!service || !service.isAvailable()) {
-                log.info(`Provider ${provider} service is not available`);
-                return null;
-            }
-            // Check if the service has a method to get available models
-            if (typeof (service as any).getAvailableModels === 'function') {
-                try {
-                    const models = await (service as any).getAvailableModels();
-                    if (models && models.length > 0) {
-                        // Use the first available model - no hardcoded preferences
-                        const selectedModel = models[0];
-
-                        // Import server-side options to update the default model
-                        const optionService = (await import('../../../options.js')).default;
-                        const optionKey = `${provider}DefaultModel` as const;
-
-                        await optionService.setOption(optionKey, selectedModel);
-                        log.info(`Set default ${provider} model to: ${selectedModel}`);
-                        return selectedModel;
-                    }
-                } catch (modelError) {
-                    log.error(`Error fetching models from ${provider} service: ${modelError}`);
-                }
-            }
-
-            log.info(`Provider ${provider} does not support dynamic model fetching`);
-            return null;
-        } catch (error) {
-            log.error(`Error getting default model for provider ${provider}: ${error}`);
-            return null;
-        }
-    }
 }
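The deleted methods duplicated resolution logic that already lives in the configuration helpers. For reference, the fail-fast flow this change standardizes on looks roughly like the sketch below; resolveModelOrThrow is a hypothetical name, and only getSelectedProvider, getValidModelConfig, and the error messages come from the removed code.

// A minimal sketch of the consolidated, no-fallback selection path.
// Assumption: getSelectedProvider/getValidModelConfig behave as the removed code used them.
import { getSelectedProvider, getValidModelConfig } from '../../config/configuration_helpers.js';

async function resolveModelOrThrow(): Promise<string> {
    const selectedProvider = await getSelectedProvider();
    if (!selectedProvider) {
        throw new Error('No AI provider is selected. Please select a provider in your AI settings.');
    }

    const modelConfig = await getValidModelConfig(selectedProvider);
    if (!modelConfig) {
        throw new Error(`No default model configured for provider ${selectedProvider}.`);
    }

    // No fallback default: a missing configuration propagates as an error the UI can show.
    return modelConfig.model;
}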
diff --git a/apps/server/src/services/llm/providers/providers.ts b/apps/server/src/services/llm/providers/providers.ts
index 8575fa6aa..5416d9366 100644
--- a/apps/server/src/services/llm/providers/providers.ts
+++ b/apps/server/src/services/llm/providers/providers.ts
@@ -26,7 +26,11 @@ export function getOpenAIOptions(
     }

     const baseUrl = options.getOption('openaiBaseUrl') || PROVIDER_CONSTANTS.OPENAI.BASE_URL;
-    const modelName = opts.model || options.getOption('openaiDefaultModel') || PROVIDER_CONSTANTS.OPENAI.DEFAULT_MODEL;
+    const modelName = opts.model || options.getOption('openaiDefaultModel');
+
+    if (!modelName) {
+        throw new Error('No OpenAI model configured. Please set a default model in your AI settings.');
+    }

     // Create provider metadata
     const providerMetadata: ModelMetadata = {
@@ -87,7 +91,11 @@ export function getAnthropicOptions(
     }

     const baseUrl = options.getOption('anthropicBaseUrl') || PROVIDER_CONSTANTS.ANTHROPIC.BASE_URL;
-    const modelName = opts.model || options.getOption('anthropicDefaultModel') || PROVIDER_CONSTANTS.ANTHROPIC.DEFAULT_MODEL;
+    const modelName = opts.model || options.getOption('anthropicDefaultModel');
+
+    if (!modelName) {
+        throw new Error('No Anthropic model configured. Please set a default model in your AI settings.');
+    }

     // Create provider metadata
     const providerMetadata: ModelMetadata = {
@@ -150,8 +158,12 @@ export async function getOllamaOptions(
         throw new Error('Ollama API URL is not configured');
     }

-    // Get the model name - no prefix handling needed now
-    let modelName = opts.model || options.getOption('ollamaDefaultModel') || 'llama3';
+    // Get the model name - no defaults, must be configured by user
+    let modelName = opts.model || options.getOption('ollamaDefaultModel');
+
+    if (!modelName) {
+        throw new Error('No Ollama model configured. Please set a default model in your AI settings.');
+    }

     // Create provider metadata
     const providerMetadata: ModelMetadata = {
@@ -249,4 +261,4 @@ async function getOllamaModelContextWindow(modelName: string): Promise<number> {
         log.info(`Error getting context window for model ${modelName}: ${error}`);
         return MODEL_CAPABILITIES['default'].contextWindowTokens; // Default fallback
     }
-}
\ No newline at end of file
+}
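With the DEFAULT_MODEL and 'llama3' fallbacks gone, each option builder throws when no model is configured, so callers must be prepared to surface that error to the user. A hypothetical caller is sketched below; the builders' exact signatures are not shown in this diff, and opts stands in for whatever chat options the pipeline passes today.

try {
    // Assumed call shape for the Ollama option builder.
    const ollamaOptions = await getOllamaOptions(opts);
    // ... dispatch the request using ollamaOptions ...
} catch (error) {
    // A missing 'ollamaDefaultModel' option now fails here with an actionable message,
    // rather than silently routing the request to a hardcoded model.
    log.error(`LLM request aborted: ${error}`);
}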