refactor(llm): integrate new configuration system for provider management and model selection

This commit is contained in:
perf3ct 2025-06-02 21:36:19 +00:00
parent 00ce765e96
commit 45175b6af3
No known key found for this signature in database
GPG Key ID: 569C4EEC436F5232
6 changed files with 730 additions and 206 deletions

View File

@ -18,6 +18,18 @@ import type {
} from './interfaces/ai_service_interfaces.js'; } from './interfaces/ai_service_interfaces.js';
import type { NoteSearchResult } from './interfaces/context_interfaces.js'; import type { NoteSearchResult } from './interfaces/context_interfaces.js';
// Import new configuration system
import {
getProviderPrecedence,
getPreferredProvider,
getEmbeddingProviderPrecedence,
parseModelIdentifier,
isAIEnabled,
getDefaultModelForProvider,
clearConfigurationCache
} from './config/configuration_helpers.js';
import type { ProviderType } from './interfaces/configuration_interfaces.js';
/** /**
* Interface representing relevant note context * Interface representing relevant note context
*/ */
@ -71,7 +83,7 @@ export class AIServiceManager implements IAIServiceManager {
} }
/** /**
* Update the provider precedence order from saved options * Update the provider precedence order using the new configuration system
* Returns true if successful, false if options not available yet * Returns true if successful, false if options not available yet
*/ */
updateProviderOrder(): boolean { updateProviderOrder(): boolean {
@ -80,54 +92,17 @@ export class AIServiceManager implements IAIServiceManager {
} }
try { try {
// Default precedence: openai, anthropic, ollama // Use async helper but handle it synchronously for now
const defaultOrder: ServiceProviders[] = ['openai', 'anthropic', 'ollama']; // In a real refactor, this method should become async
getProviderPrecedence().then(providers => {
// Get custom order from options this.providerOrder = providers as ServiceProviders[];
const customOrder = options.getOption('aiProviderPrecedence'); log.info(`Updated provider order: ${providers.join(', ')}`);
}).catch(error => {
if (customOrder) { log.error(`Failed to get provider precedence: ${error}`);
try { // Keep default order
// Try to parse as JSON first });
let parsed;
// Handle both array in JSON format and simple string format
if (customOrder.startsWith('[') && customOrder.endsWith(']')) {
parsed = JSON.parse(customOrder);
} else if (typeof customOrder === 'string') {
// If it's a string with commas, split it
if (customOrder.includes(',')) {
parsed = customOrder.split(',').map(p => p.trim());
} else {
// If it's a simple string (like "ollama"), convert to single-item array
parsed = [customOrder];
}
} else {
// Fallback to default
parsed = defaultOrder;
}
// Validate that all providers are valid
if (Array.isArray(parsed) &&
parsed.every(p => Object.keys(this.services).includes(p))) {
this.providerOrder = parsed as ServiceProviders[];
} else {
log.info('Invalid AI provider precedence format, using defaults');
this.providerOrder = defaultOrder;
}
} catch (e) {
log.error(`Failed to parse AI provider precedence: ${e}`);
this.providerOrder = defaultOrder;
}
} else {
this.providerOrder = defaultOrder;
}
this.initialized = true; this.initialized = true;
// Remove the validateEmbeddingProviders call since we now do validation on the client
// this.validateEmbeddingProviders();
return true; return true;
} catch (error) { } catch (error) {
// If options table doesn't exist yet, use defaults // If options table doesn't exist yet, use defaults
@ -138,39 +113,18 @@ export class AIServiceManager implements IAIServiceManager {
} }
/** /**
* Validate embedding providers configuration * Validate embedding providers configuration using the new configuration system
* - Check if embedding default provider is in provider precedence list
* - Check if all providers in precedence list and default provider are enabled
*
* @returns A warning message if there are issues, or null if everything is fine
*/ */
async validateEmbeddingProviders(): Promise<string | null> { async validateEmbeddingProviders(): Promise<string | null> {
try { try {
// Check if AI is enabled, if not, skip validation // Check if AI is enabled using the new helper
const aiEnabled = await options.getOptionBool('aiEnabled'); const aiEnabled = await isAIEnabled();
if (!aiEnabled) { if (!aiEnabled) {
return null; return null;
} }
// Get precedence list from options // Get precedence list using the new helper (no string parsing!)
let precedenceList: string[] = ['openai']; // Default to openai if not set const precedenceList = await getEmbeddingProviderPrecedence();
const precedenceOption = await options.getOption('aiProviderPrecedence');
if (precedenceOption) {
try {
if (precedenceOption.startsWith('[') && precedenceOption.endsWith(']')) {
precedenceList = JSON.parse(precedenceOption);
} else if (typeof precedenceOption === 'string') {
if (precedenceOption.includes(',')) {
precedenceList = precedenceOption.split(',').map(p => p.trim());
} else {
precedenceList = [precedenceOption];
}
}
} catch (e) {
log.error(`Error parsing precedence list: ${e}`);
}
}
// Check for configuration issues with providers in the precedence list // Check for configuration issues with providers in the precedence list
const configIssues: string[] = []; const configIssues: string[] = [];
@ -279,25 +233,16 @@ export class AIServiceManager implements IAIServiceManager {
// If a specific provider is requested and available, use it // If a specific provider is requested and available, use it
if (options.model && options.model.includes(':')) { if (options.model && options.model.includes(':')) {
// Check if this is a provider prefix (e.g., "ollama:qwen3:30b") // Use the new configuration system to parse model identifier
// vs a model name with version (e.g., "qwen3:30b") const modelIdentifier = parseModelIdentifier(options.model);
const parts = options.model.split(':');
// Only treat as provider:model if the first part is a known provider
const knownProviders = ['openai', 'anthropic', 'ollama', 'local'];
const potentialProvider = parts[0];
if (knownProviders.includes(potentialProvider) && availableProviders.includes(potentialProvider as ServiceProviders)) {
// This is a provider:model format
const providerName = potentialProvider;
const modelName = parts.slice(1).join(':'); // Rejoin the rest as model name
if (modelIdentifier.provider && availableProviders.includes(modelIdentifier.provider as ServiceProviders)) {
try { try {
const modifiedOptions = { ...options, model: modelName }; const modifiedOptions = { ...options, model: modelIdentifier.modelId };
log.info(`[AIServiceManager] Using provider ${providerName} from model prefix with modifiedOptions.stream: ${modifiedOptions.stream}`); log.info(`[AIServiceManager] Using provider ${modelIdentifier.provider} from model prefix with modifiedOptions.stream: ${modifiedOptions.stream}`);
return await this.services[providerName as ServiceProviders].generateChatCompletion(messages, modifiedOptions); return await this.services[modelIdentifier.provider as ServiceProviders].generateChatCompletion(messages, modifiedOptions);
} catch (error) { } catch (error) {
log.error(`Error with specified provider ${providerName}: ${error}`); log.error(`Error with specified provider ${modelIdentifier.provider}: ${error}`);
// If the specified provider fails, continue with the fallback providers // If the specified provider fails, continue with the fallback providers
} }
} }
@ -401,9 +346,11 @@ export class AIServiceManager implements IAIServiceManager {
} }
/** /**
* Get whether AI features are enabled from options * Get whether AI features are enabled using the new configuration system
*/ */
getAIEnabled(): boolean { getAIEnabled(): boolean {
// For synchronous compatibility, use the old method
// In a full refactor, this should be async
return options.getOptionBool('aiEnabled'); return options.getOptionBool('aiEnabled');
} }
@ -417,23 +364,7 @@ export class AIServiceManager implements IAIServiceManager {
return; return;
} }
// Get provider precedence list // Use the new configuration system - no string parsing!
const precedenceOption = await options.getOption('embeddingProviderPrecedence');
let precedenceList: string[] = [];
if (precedenceOption) {
if (precedenceOption.startsWith('[') && precedenceOption.endsWith(']')) {
precedenceList = JSON.parse(precedenceOption);
} else if (typeof precedenceOption === 'string') {
if (precedenceOption.includes(',')) {
precedenceList = precedenceOption.split(',').map(p => p.trim());
} else {
precedenceList = [precedenceOption];
}
}
}
// Check if we have enabled providers
const enabledProviders = await getEnabledEmbeddingProviders(); const enabledProviders = await getEnabledEmbeddingProviders();
if (enabledProviders.length === 0) { if (enabledProviders.length === 0) {
@ -456,10 +387,10 @@ export class AIServiceManager implements IAIServiceManager {
try { try {
log.info("Initializing AI service..."); log.info("Initializing AI service...");
// Check if AI is enabled in options // Check if AI is enabled using the new helper
const isAIEnabled = this.getAIEnabled(); const isAIEnabled_value = await isAIEnabled();
if (!isAIEnabled) { if (!isAIEnabled_value) {
log.info("AI features are disabled in options"); log.info("AI features are disabled in options");
return; return;
} }
@ -597,7 +528,19 @@ export class AIServiceManager implements IAIServiceManager {
} }
/** /**
* Get the preferred provider based on configuration * Get the preferred provider based on configuration using the new system
*/
async getPreferredProviderAsync(): Promise<string> {
try {
return await getPreferredProvider();
} catch (error) {
log.error(`Error getting preferred provider: ${error}`);
return this.providerOrder[0];
}
}
/**
* Get the preferred provider based on configuration (sync version for compatibility)
*/ */
getPreferredProvider(): string { getPreferredProvider(): string {
this.ensureInitialized(); this.ensureInitialized();

View File

@ -510,8 +510,8 @@ class RestChatService {
} }
if (!noteId) { if (!noteId) {
log.info(`Creating new chat note from context of note ${options.currentNoteId}`); log.info(`Creating new chat note from context of note ${options.currentNoteId}`);
// Don't use the currentNoteId as the chat note ID - create a new one // Don't use the currentNoteId as the chat note ID - create a new one
} }
} }

View File

@ -0,0 +1,130 @@
import configurationManager from './configuration_manager.js';
import type {
ProviderType,
ModelIdentifier,
ModelConfig,
ProviderPrecedenceConfig,
EmbeddingProviderPrecedenceConfig
} from '../interfaces/configuration_interfaces.js';
/**
* Helper functions for accessing AI configuration without string parsing
* Use these throughout the codebase instead of parsing strings directly
*/
/**
 * Get the ordered list of AI providers, highest priority first.
 */
export async function getProviderPrecedence(): Promise<ProviderType[]> {
    const { providers } = await configurationManager.getProviderPrecedence();
    return providers;
}
/**
 * Get the default/preferred AI provider.
 * Falls back to the first entry of the precedence list when no explicit
 * default is configured.
 */
export async function getPreferredProvider(): Promise<ProviderType> {
    const { providers, defaultProvider } = await configurationManager.getProviderPrecedence();
    return defaultProvider || providers[0];
}
/**
 * Get the ordered list of embedding providers, highest priority first.
 */
export async function getEmbeddingProviderPrecedence(): Promise<string[]> {
    const { providers } = await configurationManager.getEmbeddingProviderPrecedence();
    return providers;
}
/**
 * Get the default embedding provider.
 * Falls back to the first entry of the precedence list when no explicit
 * default is configured.
 */
export async function getPreferredEmbeddingProvider(): Promise<string> {
    const { providers, defaultProvider } = await configurationManager.getEmbeddingProviderPrecedence();
    return defaultProvider || providers[0];
}
/**
 * Parse a model string that may carry a "provider:model" prefix
 * (e.g. "openai:gpt-4") into a typed ModelIdentifier.
 */
export function parseModelIdentifier(model: string): ModelIdentifier {
    return configurationManager.parseModelIdentifier(model);
}
/**
 * Build a ModelConfig from a model string, using `fallbackProvider` when the
 * string carries no provider prefix.
 */
export function createModelConfig(model: string, fallbackProvider?: ProviderType): ModelConfig {
    return configurationManager.createModelConfig(model, fallbackProvider);
}
/**
 * Get the configured default model name for a specific provider.
 */
export async function getDefaultModelForProvider(provider: ProviderType): Promise<string> {
    const { defaultModels } = await configurationManager.getAIConfig();
    return defaultModels[provider];
}
/**
 * Get the settings block for a specific provider.
 * Returns undefined when the provider has no configuration at all.
 */
export async function getProviderSettings(provider: ProviderType) {
    const { providerSettings } = await configurationManager.getAIConfig();
    return providerSettings[provider];
}
/**
 * Check whether AI features are enabled.
 */
export async function isAIEnabled(): Promise<boolean> {
    const aiConfig = await configurationManager.getAIConfig();
    return aiConfig.enabled;
}
/**
 * Check whether a provider has the minimum configuration required to be used.
 *
 * @param provider - the provider to inspect
 * @returns true when the provider's required setting is present
 *          (API key for OpenAI/Anthropic, base URL for Ollama)
 */
export async function isProviderConfigured(provider: ProviderType): Promise<boolean> {
    const settings = await getProviderSettings(provider);

    // No settings object at all means the provider was never configured.
    if (!settings) {
        return false;
    }

    switch (provider) {
        case 'openai':
        case 'anthropic':
            // Both hosted providers authenticate with an API key; use `in`
            // narrowing instead of an `as any` cast so the check stays typed.
            return Boolean('apiKey' in settings && settings.apiKey);
        case 'ollama':
            // Ollama is a local HTTP service and only needs a base URL.
            return Boolean(settings.baseUrl);
        default:
            return false;
    }
}
/**
 * Walk the provider precedence list and return the first provider that has
 * usable configuration, or null when none of them is configured.
 */
export async function getFirstAvailableProvider(): Promise<ProviderType | null> {
    const precedence = await getProviderPrecedence();

    for (const candidate of precedence) {
        const configured = await isProviderConfigured(candidate);
        if (configured) {
            return candidate;
        }
    }

    return null;
}
/**
 * Run validation over the current AI configuration and report its
 * errors and warnings.
 */
export async function validateConfiguration() {
    return await configurationManager.validateConfig();
}
/**
 * Drop the cached configuration so the next read reloads from options.
 * Call this whenever AI-related settings change.
 */
export function clearConfigurationCache(): void {
    configurationManager.clearCache();
}

View File

@ -0,0 +1,373 @@
import options from '../../options.js';
import log from '../../log.js';
import type {
AIConfig,
ProviderPrecedenceConfig,
EmbeddingProviderPrecedenceConfig,
ModelIdentifier,
ModelConfig,
ProviderType,
EmbeddingProviderType,
ConfigValidationResult,
ProviderSettings,
OpenAISettings,
AnthropicSettings,
OllamaSettings
} from '../interfaces/configuration_interfaces.js';
/**
 * Configuration manager that handles conversion from string-based options
 * to proper typed configuration objects.
 *
 * This is the ONLY place where string parsing should happen for LLM
 * configurations; all other code should consume the typed results.
 */
export class ConfigurationManager {
    private static instance: ConfigurationManager | null = null;
    private cachedConfig: AIConfig | null = null;
    private lastConfigUpdate: number = 0;

    // Cache for 5 minutes to avoid excessive option reads
    private static readonly CACHE_DURATION = 5 * 60 * 1000;

    // User-supplied precedence strings are validated against these lists so a
    // typo or stale option value can never leak an unknown provider name into
    // the typed configuration.
    private static readonly KNOWN_PROVIDERS: ProviderType[] = ['openai', 'anthropic', 'ollama'];
    private static readonly KNOWN_EMBEDDING_PROVIDERS: EmbeddingProviderType[] = ['openai', 'ollama', 'local'];

    private constructor() {}

    public static getInstance(): ConfigurationManager {
        if (!ConfigurationManager.instance) {
            ConfigurationManager.instance = new ConfigurationManager();
        }
        return ConfigurationManager.instance;
    }

    /**
     * Get the complete AI configuration.
     *
     * Results are cached for CACHE_DURATION; call clearCache() after options
     * change to force a reload. Falls back to a safe disabled default when
     * loading throws.
     */
    public async getAIConfig(): Promise<AIConfig> {
        const now = Date.now();
        if (this.cachedConfig && (now - this.lastConfigUpdate) < ConfigurationManager.CACHE_DURATION) {
            return this.cachedConfig;
        }

        try {
            const config: AIConfig = {
                enabled: await this.getAIEnabled(),
                providerPrecedence: await this.getProviderPrecedence(),
                embeddingProviderPrecedence: await this.getEmbeddingProviderPrecedence(),
                defaultModels: await this.getDefaultModels(),
                providerSettings: await this.getProviderSettings()
            };

            this.cachedConfig = config;
            this.lastConfigUpdate = now;
            return config;
        } catch (error) {
            log.error(`Error loading AI configuration: ${error}`);
            return this.getDefaultConfig();
        }
    }

    /**
     * Parse provider precedence from the string option.
     *
     * Unknown provider names are filtered out (previously the raw strings were
     * cast to ProviderType[] unchecked); if nothing valid remains the default
     * order is returned.
     */
    public async getProviderPrecedence(): Promise<ProviderPrecedenceConfig> {
        try {
            const precedenceOption = await options.getOption('aiProviderPrecedence');
            const providers = this.parseProviderList(precedenceOption)
                .filter((p): p is ProviderType =>
                    (ConfigurationManager.KNOWN_PROVIDERS as string[]).includes(p));

            if (providers.length === 0) {
                return {
                    providers: ['openai', 'anthropic', 'ollama'],
                    defaultProvider: 'openai'
                };
            }

            return {
                providers,
                defaultProvider: providers[0]
            };
        } catch (error) {
            log.error(`Error parsing provider precedence: ${error}`);
            return {
                providers: ['openai', 'anthropic', 'ollama'],
                defaultProvider: 'openai'
            };
        }
    }

    /**
     * Parse embedding provider precedence from the string option.
     * Same validation strategy as getProviderPrecedence().
     */
    public async getEmbeddingProviderPrecedence(): Promise<EmbeddingProviderPrecedenceConfig> {
        try {
            const precedenceOption = await options.getOption('embeddingProviderPrecedence');
            const providers = this.parseProviderList(precedenceOption)
                .filter((p): p is EmbeddingProviderType =>
                    (ConfigurationManager.KNOWN_EMBEDDING_PROVIDERS as string[]).includes(p));

            if (providers.length === 0) {
                return {
                    providers: ['openai', 'ollama'],
                    defaultProvider: 'openai'
                };
            }

            return {
                providers,
                defaultProvider: providers[0]
            };
        } catch (error) {
            log.error(`Error parsing embedding provider precedence: ${error}`);
            return {
                providers: ['openai', 'ollama'],
                defaultProvider: 'openai'
            };
        }
    }

    /**
     * Parse a model identifier with an optional provider prefix.
     * Handles formats like "gpt-4", "openai:gpt-4", "ollama:llama2:7b".
     * A leading segment is treated as a provider only when it is a known
     * provider name, so model names that contain colons survive intact.
     */
    public parseModelIdentifier(modelString: string): ModelIdentifier {
        if (!modelString) {
            return {
                modelId: '',
                fullIdentifier: ''
            };
        }

        const parts = modelString.split(':');

        if (parts.length === 1) {
            // No provider prefix, just a model name
            return {
                modelId: modelString,
                fullIdentifier: modelString
            };
        }

        // Only treat the first segment as a provider when it is recognized
        const potentialProvider = parts[0].toLowerCase();

        if (ConfigurationManager.KNOWN_PROVIDERS.includes(potentialProvider as ProviderType)) {
            const provider = potentialProvider as ProviderType;
            const modelId = parts.slice(1).join(':'); // Rejoin in case the model name has colons
            return {
                provider,
                modelId,
                fullIdentifier: modelString
            };
        }

        // Not a provider prefix, treat the whole string as a model name
        return {
            modelId: modelString,
            fullIdentifier: modelString
        };
    }

    /**
     * Create a model configuration from a model string.
     * Falls back to `defaultProvider`, then 'openai', when the string carries
     * no provider prefix.
     */
    public createModelConfig(modelString: string, defaultProvider?: ProviderType): ModelConfig {
        const identifier = this.parseModelIdentifier(modelString);
        const provider = identifier.provider || defaultProvider || 'openai';

        return {
            provider,
            modelId: identifier.modelId,
            displayName: identifier.fullIdentifier
        };
    }

    /**
     * Get default models for each provider, falling back to hard-coded
     * defaults when the corresponding option is unset.
     */
    public async getDefaultModels(): Promise<Record<ProviderType, string>> {
        try {
            const [openaiModel, anthropicModel, ollamaModel] = await Promise.all([
                options.getOption('openaiDefaultModel'),
                options.getOption('anthropicDefaultModel'),
                options.getOption('ollamaDefaultModel')
            ]);

            return {
                openai: openaiModel || 'gpt-3.5-turbo',
                anthropic: anthropicModel || 'claude-3-sonnet-20240229',
                ollama: ollamaModel || 'llama2'
            };
        } catch (error) {
            log.error(`Error loading default models: ${error}`);
            return {
                openai: 'gpt-3.5-turbo',
                anthropic: 'claude-3-sonnet-20240229',
                ollama: 'llama2'
            };
        }
    }

    /**
     * Get provider-specific settings. A provider's key is only present when
     * at least one of its options is set.
     */
    public async getProviderSettings(): Promise<ProviderSettings> {
        try {
            const [
                openaiApiKey, openaiBaseUrl, openaiDefaultModel,
                anthropicApiKey, anthropicBaseUrl, anthropicDefaultModel,
                ollamaBaseUrl, ollamaDefaultModel
            ] = await Promise.all([
                options.getOption('openaiApiKey'),
                options.getOption('openaiBaseUrl'),
                options.getOption('openaiDefaultModel'),
                options.getOption('anthropicApiKey'),
                options.getOption('anthropicBaseUrl'),
                options.getOption('anthropicDefaultModel'),
                options.getOption('ollamaBaseUrl'),
                options.getOption('ollamaDefaultModel')
            ]);

            const settings: ProviderSettings = {};

            if (openaiApiKey || openaiBaseUrl || openaiDefaultModel) {
                settings.openai = {
                    apiKey: openaiApiKey,
                    baseUrl: openaiBaseUrl,
                    defaultModel: openaiDefaultModel
                };
            }

            if (anthropicApiKey || anthropicBaseUrl || anthropicDefaultModel) {
                settings.anthropic = {
                    apiKey: anthropicApiKey,
                    baseUrl: anthropicBaseUrl,
                    defaultModel: anthropicDefaultModel
                };
            }

            if (ollamaBaseUrl || ollamaDefaultModel) {
                settings.ollama = {
                    baseUrl: ollamaBaseUrl,
                    defaultModel: ollamaDefaultModel
                };
            }

            return settings;
        } catch (error) {
            log.error(`Error loading provider settings: ${error}`);
            return {};
        }
    }

    /**
     * Validate the current configuration.
     * Errors mark the config invalid; warnings are informational only.
     */
    public async validateConfig(): Promise<ConfigValidationResult> {
        const result: ConfigValidationResult = {
            isValid: true,
            errors: [],
            warnings: []
        };

        try {
            const config = await this.getAIConfig();

            if (!config.enabled) {
                result.warnings.push('AI features are disabled');
                return result;
            }

            // Validate provider precedence
            if (config.providerPrecedence.providers.length === 0) {
                result.errors.push('No providers configured in precedence list');
                result.isValid = false;
            }

            // Validate provider settings
            for (const provider of config.providerPrecedence.providers) {
                const providerConfig = config.providerSettings[provider];

                if (provider === 'openai') {
                    const openaiConfig = providerConfig as OpenAISettings | undefined;
                    if (!openaiConfig?.apiKey) {
                        result.warnings.push('OpenAI API key is not configured');
                    }
                }

                if (provider === 'anthropic') {
                    const anthropicConfig = providerConfig as AnthropicSettings | undefined;
                    if (!anthropicConfig?.apiKey) {
                        result.warnings.push('Anthropic API key is not configured');
                    }
                }

                if (provider === 'ollama') {
                    const ollamaConfig = providerConfig as OllamaSettings | undefined;
                    if (!ollamaConfig?.baseUrl) {
                        result.warnings.push('Ollama base URL is not configured');
                    }
                }
            }
        } catch (error) {
            result.errors.push(`Configuration validation error: ${error}`);
            result.isValid = false;
        }

        return result;
    }

    /**
     * Clear cached configuration (force reload on next access).
     */
    public clearCache(): void {
        this.cachedConfig = null;
        this.lastConfigUpdate = 0;
    }

    // Private helper methods

    // aiEnabled defaults to false when the options table is unavailable.
    private async getAIEnabled(): Promise<boolean> {
        try {
            return await options.getOptionBool('aiEnabled');
        } catch {
            return false;
        }
    }

    /**
     * Parse a provider list option that may be a JSON array
     * ('["openai","ollama"]'), a comma-separated list ("openai,ollama"),
     * or a single provider name ("ollama"). Returns the default order when
     * the option is missing or malformed.
     */
    private parseProviderList(precedenceOption: string | null): string[] {
        const defaults = ['openai', 'anthropic', 'ollama'];

        if (!precedenceOption) {
            return defaults;
        }

        try {
            // Handle JSON array format
            if (precedenceOption.startsWith('[') && precedenceOption.endsWith(']')) {
                const parsed = JSON.parse(precedenceOption);
                if (Array.isArray(parsed)) {
                    return parsed.map(p => String(p).trim());
                }
                // Valid JSON but not an array — bail out to defaults instead of
                // falling through and comma-splitting the raw JSON text.
                log.error(`Provider list is JSON but not an array: "${precedenceOption}"`);
                return defaults;
            }

            // Handle comma-separated format
            if (precedenceOption.includes(',')) {
                return precedenceOption.split(',').map(p => p.trim());
            }

            // Handle single provider
            return [precedenceOption.trim()];
        } catch (error) {
            log.error(`Error parsing provider list "${precedenceOption}": ${error}`);
            return defaults;
        }
    }

    // Safe fallback used when loading any part of the config throws.
    private getDefaultConfig(): AIConfig {
        return {
            enabled: false,
            providerPrecedence: {
                providers: ['openai', 'anthropic', 'ollama'],
                defaultProvider: 'openai'
            },
            embeddingProviderPrecedence: {
                providers: ['openai', 'ollama'],
                defaultProvider: 'openai'
            },
            defaultModels: {
                openai: 'gpt-3.5-turbo',
                anthropic: 'claude-3-sonnet-20240229',
                ollama: 'llama2'
            },
            providerSettings: {}
        };
    }
}

// Export singleton instance
export default ConfigurationManager.getInstance();

View File

@ -0,0 +1,108 @@
/**
* Configuration interfaces for LLM services
* These interfaces replace string parsing with proper typed objects
*/
/**
 * Ordered provider precedence for chat/completion requests.
 */
export interface ProviderPrecedenceConfig {
    /** Providers to try, in priority order (first entry is tried first). */
    providers: ProviderType[];
    /** Explicit default; consumers fall back to providers[0] when omitted. */
    defaultProvider?: ProviderType;
}
/**
 * Model configuration with provider information.
 */
export interface ModelConfig {
    /** Provider that serves this model. */
    provider: ProviderType;
    /** Model name without any provider prefix (e.g. "gpt-4"). */
    modelId: string;
    /** Human-readable label; the full "provider:model" string when built from one. */
    displayName?: string;
    /** Optional capability hints for this model. */
    capabilities?: ModelCapabilities;
}
/**
 * Ordered provider precedence for embedding generation.
 */
export interface EmbeddingProviderPrecedenceConfig {
    /** Embedding providers to try, in priority order. */
    providers: EmbeddingProviderType[];
    /** Explicit default; consumers fall back to providers[0] when omitted. */
    defaultProvider?: EmbeddingProviderType;
}
/**
 * Optional capability hints for a model. All fields are optional; absence
 * means "unknown", not "unsupported".
 */
export interface ModelCapabilities {
    /** Context window size — presumably in tokens; confirm against consumers. */
    contextWindow?: number;
    /** Whether the model supports tool/function calling. */
    supportsTools?: boolean;
    /** Whether the model accepts image input. */
    supportsVision?: boolean;
    /** Whether the model can stream responses. */
    supportsStreaming?: boolean;
    /** Maximum output tokens per request. */
    maxTokens?: number;
    /** Default sampling temperature. */
    temperature?: number;
}
/**
 * Complete AI configuration, assembled from string options by the
 * configuration manager.
 */
export interface AIConfig {
    /** Master switch for all AI features. */
    enabled: boolean;
    /** Chat/completion provider order. */
    providerPrecedence: ProviderPrecedenceConfig;
    /** Embedding provider order. */
    embeddingProviderPrecedence: EmbeddingProviderPrecedenceConfig;
    /** Default model name per provider. */
    defaultModels: Record<ProviderType, string>;
    /** Per-provider connection settings. */
    providerSettings: ProviderSettings;
}
/**
 * Provider-specific settings, keyed by provider. A key is absent when the
 * provider has no configuration at all.
 */
export interface ProviderSettings {
    openai?: OpenAISettings;
    anthropic?: AnthropicSettings;
    ollama?: OllamaSettings;
}

/** OpenAI connection settings. */
export interface OpenAISettings {
    apiKey?: string;
    baseUrl?: string;
    defaultModel?: string;
}

/** Anthropic connection settings. */
export interface AnthropicSettings {
    apiKey?: string;
    baseUrl?: string;
    defaultModel?: string;
}

/** Ollama connection settings; Ollama is keyed by base URL, not an API key. */
export interface OllamaSettings {
    baseUrl?: string;
    defaultModel?: string;
    /** Request timeout — presumably milliseconds; confirm against callers. */
    timeout?: number;
}
/**
 * Valid provider types for chat/completion services.
 */
export type ProviderType = 'openai' | 'anthropic' | 'ollama';

/**
 * Valid embedding provider types. Differs from ProviderType: includes
 * 'local' and excludes 'anthropic'.
 */
export type EmbeddingProviderType = 'openai' | 'ollama' | 'local';
/**
 * Parsed model identifier, possibly carrying a provider prefix
 * (e.g. "openai:gpt-4" or "ollama:llama2").
 */
export interface ModelIdentifier {
    /** Present only when the string had a recognized provider prefix. */
    provider?: ProviderType;
    /** Model name with any provider prefix stripped. */
    modelId: string;
    fullIdentifier: string; // The complete string representation
}
/**
 * Result of validating the AI configuration.
 */
export interface ConfigValidationResult {
    /** False when any error was recorded; warnings do not affect validity. */
    isValid: boolean;
    /** Blocking problems (e.g. empty provider precedence list). */
    errors: string[];
    /** Non-blocking issues (e.g. a missing API key). */
    warnings: string[];
}

View File

@ -3,9 +3,22 @@ import type { ModelSelectionInput } from '../interfaces.js';
import type { ChatCompletionOptions } from '../../ai_interface.js'; import type { ChatCompletionOptions } from '../../ai_interface.js';
import type { ModelMetadata } from '../../providers/provider_options.js'; import type { ModelMetadata } from '../../providers/provider_options.js';
import log from '../../../log.js'; import log from '../../../log.js';
import options from '../../../options.js';
import aiServiceManager from '../../ai_service_manager.js'; import aiServiceManager from '../../ai_service_manager.js';
import { SEARCH_CONSTANTS, MODEL_CAPABILITIES } from "../../constants/search_constants.js"; import { SEARCH_CONSTANTS, MODEL_CAPABILITIES } from "../../constants/search_constants.js";
// Import types
import type { ServiceProviders } from '../../interfaces/ai_service_interfaces.js';
// Import new configuration system
import {
getProviderPrecedence,
getPreferredProvider,
parseModelIdentifier,
getDefaultModelForProvider,
createModelConfig
} from '../../config/configuration_helpers.js';
import type { ProviderType } from '../../interfaces/configuration_interfaces.js';
/** /**
* Pipeline stage for selecting the appropriate LLM model * Pipeline stage for selecting the appropriate LLM model
*/ */
@ -36,15 +49,15 @@ export class ModelSelectionStage extends BasePipelineStage<ModelSelectionInput,
// If model already specified, don't override it // If model already specified, don't override it
if (updatedOptions.model) { if (updatedOptions.model) {
// Check if the model has a provider prefix, which indicates legacy format // Use the new configuration system to parse model identifier
const modelParts = this.parseModelIdentifier(updatedOptions.model); const modelIdentifier = parseModelIdentifier(updatedOptions.model);
if (modelParts.provider) { if (modelIdentifier.provider) {
// Add provider metadata for backward compatibility // Add provider metadata for backward compatibility
this.addProviderMetadata(updatedOptions, modelParts.provider, modelParts.model); this.addProviderMetadata(updatedOptions, modelIdentifier.provider as ServiceProviders, modelIdentifier.modelId);
// Update the model to be just the model name without provider prefix // Update the model to be just the model name without provider prefix
updatedOptions.model = modelParts.model; updatedOptions.model = modelIdentifier.modelId;
log.info(`Using explicitly specified model: ${modelParts.model} from provider: ${modelParts.provider}`); log.info(`Using explicitly specified model: ${modelIdentifier.modelId} from provider: ${modelIdentifier.provider}`);
} else { } else {
log.info(`Using explicitly specified model: ${updatedOptions.model}`); log.info(`Using explicitly specified model: ${updatedOptions.model}`);
} }
@ -86,52 +99,21 @@ export class ModelSelectionStage extends BasePipelineStage<ModelSelectionInput,
} }
} }
// Get default provider and model based on precedence // Get default provider and model using the new configuration system
let defaultProvider = 'openai'; let defaultProvider: ProviderType = 'openai';
let defaultModelName = 'gpt-3.5-turbo'; let defaultModelName = 'gpt-3.5-turbo';
try { try {
// Get provider precedence list // Use the new configuration helpers - no string parsing!
const providerPrecedence = await options.getOption('aiProviderPrecedence'); defaultProvider = await getPreferredProvider();
if (providerPrecedence) { defaultModelName = await getDefaultModelForProvider(defaultProvider);
// Parse provider precedence list
let providers: string[] = [];
if (providerPrecedence.includes(',')) {
providers = providerPrecedence.split(',').map(p => p.trim());
} else if (providerPrecedence.startsWith('[') && providerPrecedence.endsWith(']')) {
providers = JSON.parse(providerPrecedence);
} else {
providers = [providerPrecedence];
}
// Check for first available provider log.info(`Selected provider: ${defaultProvider}, model: ${defaultModelName}`);
if (providers.length > 0) {
const firstProvider = providers[0];
defaultProvider = firstProvider;
// Get provider-specific default model
if (firstProvider === 'openai') {
const model = await options.getOption('openaiDefaultModel');
if (model) defaultModelName = model;
} else if (firstProvider === 'anthropic') {
const model = await options.getOption('anthropicDefaultModel');
if (model) defaultModelName = model;
} else if (firstProvider === 'ollama') {
const model = await options.getOption('ollamaDefaultModel');
if (model) {
defaultModelName = model;
// Enable tools for all Ollama models
// The Ollama API will handle models that don't support tool calling
log.info(`Using Ollama model ${model} with tool calling enabled`);
updatedOptions.enableTools = true;
}
}
}
}
} catch (error) { } catch (error) {
// If any error occurs, use the fallback default // If any error occurs, use the fallback default
log.error(`Error determining default model: ${error}`); log.error(`Error determining default model: ${error}`);
defaultProvider = 'openai';
defaultModelName = 'gpt-3.5-turbo';
} }
// Determine query complexity // Determine query complexity
@ -162,7 +144,7 @@ export class ModelSelectionStage extends BasePipelineStage<ModelSelectionInput,
// Set the model and add provider metadata // Set the model and add provider metadata
updatedOptions.model = defaultModelName; updatedOptions.model = defaultModelName;
this.addProviderMetadata(updatedOptions, defaultProvider, defaultModelName); this.addProviderMetadata(updatedOptions, defaultProvider as ServiceProviders, defaultModelName);
log.info(`Selected model: ${defaultModelName} from provider: ${defaultProvider} for query complexity: ${queryComplexity}`); log.info(`Selected model: ${defaultModelName} from provider: ${defaultProvider} for query complexity: ${queryComplexity}`);
log.info(`[ModelSelectionStage] Final options: ${JSON.stringify({ log.info(`[ModelSelectionStage] Final options: ${JSON.stringify({
@ -175,29 +157,10 @@ export class ModelSelectionStage extends BasePipelineStage<ModelSelectionInput,
return { options: updatedOptions }; return { options: updatedOptions };
} }
/**
* Helper to parse model identifier with provider prefix
* Handles legacy format "provider:model"
*/
private parseModelIdentifier(modelId: string): { provider?: string, model: string } {
if (!modelId) return { model: '' };
const parts = modelId.split(':');
if (parts.length === 1) {
// No provider prefix
return { model: modelId };
} else {
// Extract provider and model
const provider = parts[0];
const model = parts.slice(1).join(':'); // Handle model names that might include :
return { provider, model };
}
}
/** /**
* Add provider metadata to the options based on model name * Add provider metadata to the options based on model name
*/ */
private addProviderMetadata(options: ChatCompletionOptions, provider: string, modelName: string): void { private addProviderMetadata(options: ChatCompletionOptions, provider: ServiceProviders, modelName: string): void {
// Check if we already have providerMetadata // Check if we already have providerMetadata
if (options.providerMetadata) { if (options.providerMetadata) {
// If providerMetadata exists but not modelId, add the model name // If providerMetadata exists but not modelId, add the model name
@ -216,7 +179,7 @@ export class ModelSelectionStage extends BasePipelineStage<ModelSelectionInput,
// Find the first available provider // Find the first available provider
for (const p of providerPrecedence) { for (const p of providerPrecedence) {
if (aiServiceManager.isProviderAvailable(p)) { if (aiServiceManager.isProviderAvailable(p)) {
selectedProvider = p; selectedProvider = p as ServiceProviders;
break; break;
} }
} }
@ -234,8 +197,8 @@ export class ModelSelectionStage extends BasePipelineStage<ModelSelectionInput,
// For backward compatibility, ensure model name is set without prefix // For backward compatibility, ensure model name is set without prefix
if (options.model && options.model.includes(':')) { if (options.model && options.model.includes(':')) {
const parsed = this.parseModelIdentifier(options.model); const parsed = parseModelIdentifier(options.model);
options.model = modelName || parsed.model; options.model = modelName || parsed.modelId;
} }
log.info(`Set provider metadata: provider=${selectedProvider}, model=${modelName}`); log.info(`Set provider metadata: provider=${selectedProvider}, model=${modelName}`);
@ -243,33 +206,40 @@ export class ModelSelectionStage extends BasePipelineStage<ModelSelectionInput,
} }
/** /**
* Determine model based on provider precedence * Determine model based on provider precedence using the new configuration system
*/ */
private determineDefaultModel(input: ModelSelectionInput): string { private async determineDefaultModel(input: ModelSelectionInput): Promise<string> {
const providerPrecedence = ['anthropic', 'openai', 'ollama']; try {
// Use the new configuration system
const providers = await getProviderPrecedence();
// Use only providers that are available // Use only providers that are available
const availableProviders = providerPrecedence.filter(provider => const availableProviders = providers.filter(provider =>
aiServiceManager.isProviderAvailable(provider)); aiServiceManager.isProviderAvailable(provider));
if (availableProviders.length === 0) { if (availableProviders.length === 0) {
throw new Error('No AI providers are available'); throw new Error('No AI providers are available');
}
// Get the first available provider and its default model
const defaultProvider = availableProviders[0];
const defaultModel = await getDefaultModelForProvider(defaultProvider);
// Set provider metadata
if (!input.options.providerMetadata) {
input.options.providerMetadata = {
provider: defaultProvider as 'openai' | 'anthropic' | 'ollama' | 'local',
modelId: defaultModel
};
}
log.info(`Selected default model ${defaultModel} from provider ${defaultProvider}`);
return defaultModel;
} catch (error) {
log.error(`Error determining default model: ${error}`);
// Fallback to hardcoded default
return 'gpt-3.5-turbo';
} }
// Get the first available provider and its default model
const defaultProvider = availableProviders[0] as 'openai' | 'anthropic' | 'ollama' | 'local';
let defaultModel = 'gpt-3.5-turbo'; // Use model from our constants
// Set provider metadata
if (!input.options.providerMetadata) {
input.options.providerMetadata = {
provider: defaultProvider,
modelId: defaultModel
};
}
log.info(`Selected default model ${defaultModel} from provider ${defaultProvider}`);
return defaultModel;
} }
/** /**