Merge pull request #2208 from TriliumNext/fix/llm-chat-save-bug

fix(llm): save to the same note that the chat request was sent from
Elian Doran 2025-06-08 10:45:58 +03:00 committed by GitHub
commit e87789d92b
9 changed files with 409 additions and 301 deletions

View File

@ -350,6 +350,115 @@ export default class LlmChatPanel extends BasicWidget {
}
}
/**
* Save current chat data to a specific note ID
*/
async saveCurrentDataToSpecificNote(targetNoteId: string | null) {
if (!this.onSaveData || !targetNoteId) {
console.warn('Cannot save chat data: no saveData callback or no targetNoteId available');
return;
}
try {
// Extract current tool execution steps if any exist
const toolSteps = extractInChatToolSteps(this.noteContextChatMessages);
// Get tool executions from both UI and any cached executions in metadata
let toolExecutions: Array<{
id: string;
name: string;
arguments: any;
result: any;
error?: string;
timestamp: string;
}> = [];
// First include any tool executions already in metadata (from streaming events)
if (this.metadata?.toolExecutions && Array.isArray(this.metadata.toolExecutions)) {
toolExecutions = [...this.metadata.toolExecutions];
console.log(`Including ${toolExecutions.length} tool executions from metadata`);
}
// Also extract any visible tool steps from the UI
const extractedExecutions = toolSteps.map(step => {
// Parse tool execution information
if (step.type === 'tool-execution') {
try {
const content = JSON.parse(step.content);
return {
id: content.toolCallId || `tool-${Date.now()}-${Math.random().toString(36).substring(2, 7)}`,
name: content.tool || 'unknown',
arguments: content.args || {},
result: content.result || {},
error: content.error,
timestamp: new Date().toISOString()
};
} catch (e) {
// If we can't parse it, create a basic record
return {
id: `tool-${Date.now()}-${Math.random().toString(36).substring(2, 7)}`,
name: 'unknown',
arguments: {},
result: step.content,
timestamp: new Date().toISOString()
};
}
} else if (step.type === 'result' && step.name) {
// Handle result steps with a name
return {
id: `tool-${Date.now()}-${Math.random().toString(36).substring(2, 7)}`,
name: step.name,
arguments: {},
result: step.content,
timestamp: new Date().toISOString()
};
}
return {
id: `tool-${Date.now()}-${Math.random().toString(36).substring(2, 7)}`,
name: 'unknown',
arguments: {},
result: 'Unrecognized tool step',
timestamp: new Date().toISOString()
};
});
// Merge the tool executions, keeping only unique IDs
const existingIds = new Set(toolExecutions.map((t: {id: string}) => t.id));
for (const exec of extractedExecutions) {
if (!existingIds.has(exec.id)) {
toolExecutions.push(exec);
existingIds.add(exec.id);
}
}
const dataToSave = {
messages: this.messages,
noteId: targetNoteId,
chatNoteId: targetNoteId, // For backward compatibility
toolSteps: toolSteps,
// Add sources if we have them
sources: this.sources || [],
// Add metadata
metadata: {
model: this.metadata?.model || undefined,
provider: this.metadata?.provider || undefined,
temperature: this.metadata?.temperature || 0.7,
lastUpdated: new Date().toISOString(),
// Add tool executions
toolExecutions: toolExecutions
}
};
console.log(`Saving chat data to specific note ${targetNoteId}, ${toolSteps.length} tool steps, ${this.sources?.length || 0} sources, ${toolExecutions.length} tool executions`);
// Save the data to the note attribute via the callback
// This is the ONLY place we should save data, letting the container widget handle persistence
await this.onSaveData(dataToSave);
} catch (error) {
console.error('Error saving chat data to specific note:', error);
}
}
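// A minimal, hypothetical caller for the method above (sendChatRequest and the
// response shape are assumed, not part of this diff). The point of the fix: pin
// the chat note ID before any await, so a user switching notes while the request
// is in flight cannot redirect the save.
async sendMessageExample(): Promise<void> {
    const originalChatNoteId = this.noteId;                 // captured before the round-trip
    const response = await sendChatRequest(this.messages);  // this.noteId may change during this await
    this.addMessageToChat('assistant', response.content);
    await this.saveCurrentDataToSpecificNote(originalChatNoteId); // still lands on the right note
}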
/**
* Load saved chat data from the note attribute
*/
@ -867,8 +976,8 @@ export default class LlmChatPanel extends BasicWidget {
this.showSources(postResponse.sources);
}
// Process the assistant response with original chat note ID
this.processAssistantResponse(postResponse.content, postResponse, this.noteId);
hideLoadingIndicator(this.loadingIndicator);
return true;
@ -884,7 +993,7 @@ export default class LlmChatPanel extends BasicWidget {
/**
* Process an assistant response - add to UI and save
*/
private async processAssistantResponse(content: string, fullResponse?: any, originalChatNoteId?: string | null) {
// Add the response to the chat UI
this.addMessageToChat('assistant', content);
@ -910,8 +1019,8 @@ export default class LlmChatPanel extends BasicWidget {
];
}
// Save to note - use original chat note ID if provided
this.saveCurrentDataToSpecificNote(originalChatNoteId || this.noteId).catch(err => {
console.error("Failed to save assistant response to note:", err);
});
}
@ -936,12 +1045,15 @@ export default class LlmChatPanel extends BasicWidget {
timestamp: string;
}> = [];
// Store the original chat note ID so the save targets the correct note even if the user switches notes mid-stream
const originalChatNoteId = this.noteId;
return setupStreamingResponse(
this.noteId,
messageParams,
// Content update handler
(content: string, isDone: boolean = false) => {
this.updateStreamingUI(content, isDone, originalChatNoteId);
// Update session data with additional metadata when streaming is complete
if (isDone) {
@ -1067,13 +1179,13 @@ export default class LlmChatPanel extends BasicWidget {
/**
* Update the UI with streaming content
*/
private updateStreamingUI(assistantResponse: string, isDone: boolean = false, originalChatNoteId?: string | null) {
// Track if we have a streaming message in progress
const hasStreamingMessage = !!this.noteContextChatMessages.querySelector('.assistant-message.streaming');
// Create a new message element or use the existing streaming one
let assistantMessageEl: HTMLElement;
if (hasStreamingMessage) {
// Use the existing streaming message
assistantMessageEl = this.noteContextChatMessages.querySelector('.assistant-message.streaming')!;
@ -1103,7 +1215,7 @@ export default class LlmChatPanel extends BasicWidget {
if (isDone) {
// Remove the streaming class to mark this message as complete
assistantMessageEl.classList.remove('streaming');
// Apply syntax highlighting
formatCodeBlocks($(assistantMessageEl as HTMLElement));
@ -1118,8 +1230,8 @@ export default class LlmChatPanel extends BasicWidget {
timestamp: new Date()
});
// Save the updated message list to the original chat note
this.saveCurrentDataToSpecificNote(originalChatNoteId || this.noteId);
}
// Scroll to bottom

View File

@ -182,17 +182,30 @@ export default class AiChatTypeWidget extends TypeWidget {
// Save chat data to the note
async saveData(data: any) {
// If we have a noteId in the data, that's the AI Chat note we should save to
// This happens when the chat panel is saving its conversation
const targetNoteId = data.noteId;
// If no noteId in data, use the current note (for new chats)
const noteIdToUse = targetNoteId || this.note?.noteId;
if (!noteIdToUse) {
console.warn("Cannot save AI Chat data: no note ID available");
return;
}
try {
console.log(`AiChatTypeWidget: Saving data for note ${noteIdToUse} (current note: ${this.note?.noteId}, data.noteId: ${data.noteId})`);
// Safety check: if we have both IDs and they don't match, warn about it
if (targetNoteId && this.note?.noteId && targetNoteId !== this.note.noteId) {
console.warn(`Note ID mismatch: saving to ${targetNoteId} but current note is ${this.note.noteId}`);
}
// Format the data properly - this is the canonical format of the data
const formattedData = {
messages: data.messages || [],
noteId: noteIdToUse, // Always preserve the correct note ID
toolSteps: data.toolSteps || [],
sources: data.sources || [],
metadata: {
@ -201,8 +214,8 @@ export default class AiChatTypeWidget extends TypeWidget {
}
};
// Save the data to the correct note
await server.put(`notes/${noteIdToUse}/data`, {
content: JSON.stringify(formattedData, null, 2)
});
} catch (e) {
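// For reference, the canonical payload that both save paths write, read off the
// literals above (the interface name and the loose element types are assumptions,
// not part of this diff):
interface ChatNoteData {
    messages: unknown[];
    noteId: string;       // the AI Chat note the conversation belongs to
    chatNoteId?: string;  // same value, kept for backward compatibility
    toolSteps: unknown[];
    sources: unknown[];
    metadata: {
        model?: string;
        provider?: string;
        temperature?: number;
        lastUpdated: string; // ISO timestamp
        toolExecutions: Array<{
            id: string;
            name: string;
            arguments: any;
            result: any;
            error?: string;
            timestamp: string;
        }>;
    };
}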

View File

@ -48,7 +48,7 @@ export default class AiSettingsWidget extends OptionsWidget {
if (optionName === 'aiEnabled') {
try {
const isEnabled = value === 'true';
if (isEnabled) {
toastService.showMessage(t("ai_llm.ai_enabled") || "AI features enabled");
} else {

View File

@ -40,8 +40,8 @@ interface NoteContext {
}
export class AIServiceManager implements IAIServiceManager {
private currentService: AIService | null = null;
private currentProvider: ServiceProviders | null = null;
private initialized = false;
constructor() {
@ -50,9 +50,8 @@ export class AIServiceManager implements IAIServiceManager {
log.error(`Error initializing LLM tools during AIServiceManager construction: ${error.message || String(error)}`);
});
// Removed complex provider change listener - we'll read options fresh each time
this.initialized = true;
}
@ -140,15 +139,15 @@ export class AIServiceManager implements IAIServiceManager {
*/
async getOrCreateAnyService(): Promise<AIService> {
this.ensureInitialized();
// Get the selected provider using the new configuration system
const selectedProvider = await this.getSelectedProviderAsync();
if (!selectedProvider) {
throw new Error('No AI provider is selected. Please select a provider (OpenAI, Anthropic, or Ollama) in your AI settings.');
}
try {
const service = await this.getOrCreateChatProvider(selectedProvider);
if (service) {
@ -166,7 +165,7 @@ export class AIServiceManager implements IAIServiceManager {
*/
isAnyServiceAvailable(): boolean {
this.ensureInitialized();
// Check if we have the selected provider available
return this.getAvailableProviders().length > 0;
}
@ -174,43 +173,37 @@ export class AIServiceManager implements IAIServiceManager {
/**
* Get list of available providers
*/
getAvailableProviders(): ServiceProviders[] {
this.ensureInitialized();
const allProviders: ServiceProviders[] = ['openai', 'anthropic', 'ollama'];
const availableProviders: ServiceProviders[] = [];
for (const providerName of allProviders) {
// Check configuration to see if provider would be available
try {
switch (providerName) {
case 'openai':
if (options.getOption('openaiApiKey') || options.getOption('openaiBaseUrl')) {
availableProviders.push(providerName);
}
break;
case 'anthropic':
if (options.getOption('anthropicApiKey')) {
availableProviders.push(providerName);
}
break;
case 'ollama':
if (options.getOption('ollamaBaseUrl')) {
availableProviders.push(providerName);
}
break;
}
} catch (error) {
// Ignore configuration errors, provider just won't be available
}
}
return availableProviders;
}
@ -234,11 +227,11 @@ export class AIServiceManager implements IAIServiceManager {
// Get the selected provider
const selectedProvider = await this.getSelectedProviderAsync();
if (!selectedProvider) {
throw new Error('No AI provider is selected. Please select a provider in your AI settings.');
}
// Check if the selected provider is available
const availableProviders = this.getAvailableProviders();
if (!availableProviders.includes(selectedProvider)) {
@ -379,47 +372,68 @@ export class AIServiceManager implements IAIServiceManager {
}
/**
* Clear the current provider (forces recreation on next access)
*/
public clearCurrentProvider(): void {
this.currentService = null;
this.currentProvider = null;
log.info('Cleared current provider - will be recreated on next access');
}
/**
* Get or create the current provider instance - only one instance total
*/
private async getOrCreateChatProvider(providerName: ServiceProviders): Promise<AIService | null> {
// If provider type changed, clear the old one
if (this.currentProvider && this.currentProvider !== providerName) {
log.info(`Provider changed from ${this.currentProvider} to ${providerName}, clearing old service`);
this.currentService = null;
this.currentProvider = null;
}
// Return existing service if it matches and is available
if (this.currentService && this.currentProvider === providerName && this.currentService.isAvailable()) {
return this.currentService;
}
// Clear invalid service
if (this.currentService) {
this.currentService = null;
this.currentProvider = null;
}
// Create new service for the requested provider
try {
let service: AIService | null = null;
switch (providerName) {
case 'openai': {
const apiKey = options.getOption('openaiApiKey');
const baseUrl = options.getOption('openaiBaseUrl');
if (!apiKey && !baseUrl) return null;
service = new OpenAIService();
// Validate by checking if it's available
if (!service.isAvailable()) {
throw new Error('OpenAI service not available');
}
break;
}
case 'anthropic': {
const apiKey = options.getOption('anthropicApiKey');
if (!apiKey) return null;
service = new AnthropicService();
if (!service.isAvailable()) {
throw new Error('Anthropic service not available');
}
break;
}
case 'ollama': {
const baseUrl = options.getOption('ollamaBaseUrl');
if (!baseUrl) return null;
service = new OllamaService();
if (!service.isAvailable()) {
throw new Error('Ollama service not available');
@ -427,9 +441,12 @@ export class AIServiceManager implements IAIServiceManager {
break;
}
}
if (service) {
// Cache the new service
this.currentService = service;
this.currentProvider = providerName;
log.info(`Created and cached new ${providerName} service`);
return service;
}
} catch (error: any) {
@ -630,28 +647,47 @@ export class AIServiceManager implements IAIServiceManager {
* Check if a specific provider is available
*/
isProviderAvailable(provider: string): boolean {
// Check if this is the current provider and if it's available
if (this.currentProvider === provider && this.currentService) {
return this.currentService.isAvailable();
}
// For other providers, check configuration
try {
switch (provider) {
case 'openai':
return !!(options.getOption('openaiApiKey') || options.getOption('openaiBaseUrl'));
case 'anthropic':
return !!options.getOption('anthropicApiKey');
case 'ollama':
return !!options.getOption('ollamaBaseUrl');
default:
return false;
}
} catch {
return false;
}
}
/**
* Get metadata about a provider
*/
getProviderMetadata(provider: string): ProviderMetadata | null {
// Only return metadata if this is the current active provider
if (this.currentProvider === provider && this.currentService) {
return {
name: provider,
capabilities: {
chat: true,
streaming: true,
functionCalling: provider === 'openai' // Only OpenAI has function calling
},
models: ['default'], // Placeholder, could be populated from the service
defaultModel: 'default'
};
}
return null;
}
@ -665,67 +701,8 @@ export class AIServiceManager implements IAIServiceManager {
return String(error);
}
// Removed the complex event listener and cache invalidation logic;
// services are now created fresh when needed, reading current options
}

View File

@ -1,4 +1,3 @@
import optionService from '../../options.js';
import log from '../../log.js';
import type {
@ -13,7 +12,7 @@ import type {
*/
/**
* Get the selected AI provider - always fresh from options
*/
export async function getSelectedProvider(): Promise<ProviderType | null> {
const providerOption = optionService.getOption('aiSelectedProvider');
@ -25,38 +24,100 @@ export async function getSelectedProvider(): Promise<ProviderType | null> {
* Parse a model identifier (handles "provider:model" format)
*/
export function parseModelIdentifier(modelString: string): ModelIdentifier {
if (!modelString) {
return {
modelId: '',
fullIdentifier: ''
};
}
const parts = modelString.split(':');
if (parts.length === 1) {
// No provider prefix, just model name
return {
modelId: modelString,
fullIdentifier: modelString
};
}
// Check if first part is a known provider
const potentialProvider = parts[0].toLowerCase();
const knownProviders: ProviderType[] = ['openai', 'anthropic', 'ollama'];
if (knownProviders.includes(potentialProvider as ProviderType)) {
// Provider prefix format
const provider = potentialProvider as ProviderType;
const modelId = parts.slice(1).join(':'); // Rejoin in case model has colons
return {
provider,
modelId,
fullIdentifier: modelString
};
}
// Not a provider prefix, treat whole string as model name
return {
modelId: modelString,
fullIdentifier: modelString
};
}
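// Worked examples of the rules above (illustrative, not part of this diff); the
// colon rejoin matters for Ollama tags, which contain colons themselves:
parseModelIdentifier('gpt-4o');
// => { modelId: 'gpt-4o', fullIdentifier: 'gpt-4o' }
parseModelIdentifier('ollama:llama3:latest');
// => { provider: 'ollama', modelId: 'llama3:latest', fullIdentifier: 'ollama:llama3:latest' }
parseModelIdentifier('custom:model');
// => { modelId: 'custom:model', fullIdentifier: 'custom:model' } ('custom' is not a known provider)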
/**
* Create a model configuration from a model string
*/
export function createModelConfig(modelString: string, defaultProvider?: ProviderType): ModelConfig {
const identifier = parseModelIdentifier(modelString);
const provider = identifier.provider || defaultProvider || 'openai'; // fallback to openai if no provider specified
return {
provider,
modelId: identifier.modelId,
displayName: identifier.fullIdentifier
};
}
/**
* Get the default model for a specific provider - always fresh from options
*/
export async function getDefaultModelForProvider(provider: ProviderType): Promise<string | undefined> {
const optionKey = `${provider}DefaultModel` as const;
return optionService.getOption(optionKey) || undefined;
}
/**
* Get provider settings for a specific provider - always fresh from options
*/
export async function getProviderSettings(provider: ProviderType) {
switch (provider) {
case 'openai':
return {
apiKey: optionService.getOption('openaiApiKey'),
baseUrl: optionService.getOption('openaiBaseUrl'),
defaultModel: optionService.getOption('openaiDefaultModel')
};
case 'anthropic':
return {
apiKey: optionService.getOption('anthropicApiKey'),
baseUrl: optionService.getOption('anthropicBaseUrl'),
defaultModel: optionService.getOption('anthropicDefaultModel')
};
case 'ollama':
return {
baseUrl: optionService.getOption('ollamaBaseUrl'),
defaultModel: optionService.getOption('ollamaDefaultModel')
};
default:
return {};
}
}
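// Because each branch returns a differently shaped object, callers that need a
// provider-specific field should narrow first; a usage sketch (not part of this diff):
const settings = await getProviderSettings('ollama');
if ('baseUrl' in settings && settings.baseUrl) {
    // safe to contact the Ollama server at settings.baseUrl
}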
/**
* Check if AI is enabled - always fresh from options
*/
export async function isAIEnabled(): Promise<boolean> {
return optionService.getOptionBool('aiEnabled');
}
/**
@ -82,7 +143,7 @@ export async function isProviderConfigured(provider: ProviderType): Promise<bool
*/
export async function getAvailableSelectedProvider(): Promise<ProviderType | null> {
const selectedProvider = await getSelectedProvider();
if (!selectedProvider) {
return null; // No provider selected
}
@ -95,17 +156,51 @@ export async function getAvailableSelectedProvider(): Promise<ProviderType | nul
}
/**
* Validate the current AI configuration - simplified validation
*/
export async function validateConfiguration() {
const result = {
isValid: true,
errors: [] as string[],
warnings: [] as string[]
};
const aiEnabled = await isAIEnabled();
if (!aiEnabled) {
result.warnings.push('AI features are disabled');
return result;
}
const selectedProvider = await getSelectedProvider();
if (!selectedProvider) {
result.errors.push('No AI provider selected');
result.isValid = false;
return result;
}
// Validate provider-specific settings
const settings = await getProviderSettings(selectedProvider);
if (selectedProvider === 'openai' && !(settings as any)?.apiKey) {
result.warnings.push('OpenAI API key is not configured');
}
if (selectedProvider === 'anthropic' && !(settings as any)?.apiKey) {
result.warnings.push('Anthropic API key is not configured');
}
if (selectedProvider === 'ollama' && !(settings as any)?.baseUrl) {
result.warnings.push('Ollama base URL is not configured');
}
return result;
}
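// A sketch of how a caller might surface these results (not part of this diff;
// `log` is the logger already imported in this module):
const validation = await validateConfiguration();
if (!validation.isValid) {
    log.error(`AI configuration invalid: ${validation.errors.join('; ')}`);
}
for (const warning of validation.warnings) {
    log.info(`AI configuration warning: ${warning}`);
}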
/**
* Clear cached configuration (no-op since we removed caching)
*/
export function clearConfigurationCache(): void {
// No caching anymore, so nothing to clear
}
/**
@ -136,7 +231,7 @@ export async function getValidModelConfig(provider: ProviderType): Promise<{ mod
*/
export async function getSelectedModelConfig(): Promise<{ model: string; provider: ProviderType } | null> {
const selectedProvider = await getSelectedProvider();
if (!selectedProvider) {
return null; // No provider selected
}

View File

@ -21,11 +21,6 @@ import type {
*/
export class ConfigurationManager {
private static instance: ConfigurationManager | null = null;
private constructor() {}
@ -37,14 +32,9 @@ export class ConfigurationManager {
}
/**
* Get the complete AI configuration - always fresh, no caching
*/
public async getAIConfig(): Promise<AIConfig> {
try {
const config: AIConfig = {
enabled: await this.getAIEnabled(),
@ -53,8 +43,6 @@ export class ConfigurationManager {
providerSettings: await this.getProviderSettings()
};
return config;
} catch (error) {
log.error(`Error loading AI configuration: ${error}`);
@ -263,14 +251,6 @@ export class ConfigurationManager {
return result;
}
// Private helper methods
private async getAIEnabled(): Promise<boolean> {

View File

@ -111,19 +111,13 @@ export class ModelSelectionStage extends BasePipelineStage<ModelSelectionInput,
const { getValidModelConfig } = await import('../../config/configuration_helpers.js');
const modelConfig = await getValidModelConfig(selectedProvider);
if (!modelConfig) {
throw new Error(`No default model configured for provider ${selectedProvider}. Please set a default model in your AI settings.`);
}
// Use the configured model
updatedOptions.model = modelConfig.model;
log.info(`Selected provider: ${selectedProvider}, model: ${updatedOptions.model}`);
// Determine query complexity
@ -183,20 +177,8 @@ export class ModelSelectionStage extends BasePipelineStage<ModelSelectionInput,
return;
}
// Use the explicitly provided provider - no automatic fallbacks
let selectedProvider = provider;
// Set the provider metadata in the options
if (selectedProvider) {
@ -218,47 +200,7 @@ export class ModelSelectionStage extends BasePipelineStage<ModelSelectionInput,
}
}
/**
* Get estimated context window for Ollama models
@ -283,48 +225,5 @@ export class ModelSelectionStage extends BasePipelineStage<ModelSelectionInput,
}
}
}

View File

@ -26,7 +26,11 @@ export function getOpenAIOptions(
}
const baseUrl = options.getOption('openaiBaseUrl') || PROVIDER_CONSTANTS.OPENAI.BASE_URL;
const modelName = opts.model || options.getOption('openaiDefaultModel');
if (!modelName) {
throw new Error('No OpenAI model configured. Please set a default model in your AI settings.');
}
// Create provider metadata
const providerMetadata: ModelMetadata = {
@ -87,7 +91,11 @@ export function getAnthropicOptions(
}
const baseUrl = options.getOption('anthropicBaseUrl') || PROVIDER_CONSTANTS.ANTHROPIC.BASE_URL;
const modelName = opts.model || options.getOption('anthropicDefaultModel');
if (!modelName) {
throw new Error('No Anthropic model configured. Please set a default model in your AI settings.');
}
// Create provider metadata
const providerMetadata: ModelMetadata = {
@ -150,8 +158,12 @@ export async function getOllamaOptions(
throw new Error('Ollama API URL is not configured');
}
// Get the model name - no defaults, must be configured by user
let modelName = opts.model || options.getOption('ollamaDefaultModel');
if (!modelName) {
throw new Error('No Ollama model configured. Please set a default model in your AI settings.');
}
// Create provider metadata
const providerMetadata: ModelMetadata = {
@ -249,4 +261,4 @@ async function getOllamaModelContextWindow(modelName: string): Promise<number> {
log.info(`Error getting context window for model ${modelName}: ${error}`);
return MODEL_CAPABILITIES['default'].contextWindowTokens; // Default fallback
}
}
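// With the hardcoded fallbacks gone, these option builders now throw instead of
// silently picking a model; a hedged caller sketch (chatOpts is assumed, not part
// of this diff):
try {
    const ollamaOpts = await getOllamaOptions(chatOpts);
    // ...issue the request with ollamaOpts
} catch (e) {
    // e.g. "No Ollama model configured." - surface this to the user rather than
    // defaulting to 'llama3' as the old code did
}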

View File

@ -82,6 +82,26 @@ function setOption<T extends OptionNames>(name: T, value: string | OptionDefinit
} else {
createOption(name, value, false);
}
// Clear current AI provider when AI-related options change
const aiOptions = [
'aiSelectedProvider', 'openaiApiKey', 'openaiBaseUrl', 'openaiDefaultModel',
'anthropicApiKey', 'anthropicBaseUrl', 'anthropicDefaultModel',
'ollamaBaseUrl', 'ollamaDefaultModel'
];
if (aiOptions.includes(name)) {
// Import dynamically to avoid circular dependencies
setImmediate(async () => {
try {
const aiServiceManager = (await import('./llm/ai_service_manager.js')).default;
aiServiceManager.getInstance().clearCurrentProvider();
console.log(`Cleared AI provider after ${name} option changed`);
} catch (error) {
console.log(`Could not clear AI provider: ${error}`);
}
});
}
}
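// The deferred dynamic import above is the usual way to break a module cycle:
// presumably the AI service manager imports this options module (directly or
// transitively), so a top-level import here would be circular, and setImmediate
// keeps the option write itself synchronous. The same shape in isolation (module
// and function names hypothetical):
setImmediate(async () => {
    const { invalidateCaches } = await import('./consumer.js'); // resolved after the current tick
    invalidateCaches();
});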
/**