Mirror of https://github.com/TriliumNext/Notes.git (synced 2025-08-01 20:52:27 +08:00)

Merge pull request #2208 from TriliumNext/fix/llm-chat-save-bug

fix(llm): save to the same note that the chat request was sent from

commit e87789d92b
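The bug this merge fixes: the chat panel saved responses through `this.noteId`, which tracks whichever note is currently open, so a user who switched notes while a request was in flight got the response written into the wrong note. The fix captures the originating note ID when the request starts and threads it through every save path. A minimal sketch of the pattern, with hypothetical names (`Panel`, `runRequest`, `saveTo` are illustrations, not the widget's API):

// Capture-then-save: bind the target note before any await.
class Panel {
    noteId: string | null = null; // follows whichever note is open

    async sendChat(runRequest: () => Promise<string>) {
        // `this.noteId` may change while the request is in flight,
        // so remember the note the request was sent from.
        const originalChatNoteId = this.noteId;

        const response = await runRequest();

        // Save against the captured ID, not the live one.
        await this.saveTo(originalChatNoteId ?? this.noteId, response);
    }

    async saveTo(targetNoteId: string | null, content: string) {
        if (!targetNoteId) return;
        console.log(`saving ${content.length} chars to ${targetNoteId}`);
    }
}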
@@ -350,6 +350,115 @@ export default class LlmChatPanel extends BasicWidget {
         }
     }

+    /**
+     * Save current chat data to a specific note ID
+     */
+    async saveCurrentDataToSpecificNote(targetNoteId: string | null) {
+        if (!this.onSaveData || !targetNoteId) {
+            console.warn('Cannot save chat data: no saveData callback or no targetNoteId available');
+            return;
+        }
+
+        try {
+            // Extract current tool execution steps if any exist
+            const toolSteps = extractInChatToolSteps(this.noteContextChatMessages);
+
+            // Get tool executions from both UI and any cached executions in metadata
+            let toolExecutions: Array<{
+                id: string;
+                name: string;
+                arguments: any;
+                result: any;
+                error?: string;
+                timestamp: string;
+            }> = [];
+
+            // First include any tool executions already in metadata (from streaming events)
+            if (this.metadata?.toolExecutions && Array.isArray(this.metadata.toolExecutions)) {
+                toolExecutions = [...this.metadata.toolExecutions];
+                console.log(`Including ${toolExecutions.length} tool executions from metadata`);
+            }
+
+            // Also extract any visible tool steps from the UI
+            const extractedExecutions = toolSteps.map(step => {
+                // Parse tool execution information
+                if (step.type === 'tool-execution') {
+                    try {
+                        const content = JSON.parse(step.content);
+                        return {
+                            id: content.toolCallId || `tool-${Date.now()}-${Math.random().toString(36).substring(2, 7)}`,
+                            name: content.tool || 'unknown',
+                            arguments: content.args || {},
+                            result: content.result || {},
+                            error: content.error,
+                            timestamp: new Date().toISOString()
+                        };
+                    } catch (e) {
+                        // If we can't parse it, create a basic record
+                        return {
+                            id: `tool-${Date.now()}-${Math.random().toString(36).substring(2, 7)}`,
+                            name: 'unknown',
+                            arguments: {},
+                            result: step.content,
+                            timestamp: new Date().toISOString()
+                        };
+                    }
+                } else if (step.type === 'result' && step.name) {
+                    // Handle result steps with a name
+                    return {
+                        id: `tool-${Date.now()}-${Math.random().toString(36).substring(2, 7)}`,
+                        name: step.name,
+                        arguments: {},
+                        result: step.content,
+                        timestamp: new Date().toISOString()
+                    };
+                }
+                return {
+                    id: `tool-${Date.now()}-${Math.random().toString(36).substring(2, 7)}`,
+                    name: 'unknown',
+                    arguments: {},
+                    result: 'Unrecognized tool step',
+                    timestamp: new Date().toISOString()
+                };
+            });
+
+            // Merge the tool executions, keeping only unique IDs
+            const existingIds = new Set(toolExecutions.map((t: {id: string}) => t.id));
+            for (const exec of extractedExecutions) {
+                if (!existingIds.has(exec.id)) {
+                    toolExecutions.push(exec);
+                    existingIds.add(exec.id);
+                }
+            }
+
+            const dataToSave = {
+                messages: this.messages,
+                noteId: targetNoteId,
+                chatNoteId: targetNoteId, // For backward compatibility
+                toolSteps: toolSteps,
+                // Add sources if we have them
+                sources: this.sources || [],
+                // Add metadata
+                metadata: {
+                    model: this.metadata?.model || undefined,
+                    provider: this.metadata?.provider || undefined,
+                    temperature: this.metadata?.temperature || 0.7,
+                    lastUpdated: new Date().toISOString(),
+                    // Add tool executions
+                    toolExecutions: toolExecutions
+                }
+            };
+
+            console.log(`Saving chat data to specific note ${targetNoteId}, ${toolSteps.length} tool steps, ${this.sources?.length || 0} sources, ${toolExecutions.length} tool executions`);
+
+            // Save the data to the note attribute via the callback
+            // This is the ONLY place we should save data, letting the container widget handle persistence
+            await this.onSaveData(dataToSave);
+        } catch (error) {
+            console.error('Error saving chat data to specific note:', error);
+        }
+    }
+
     /**
      * Load saved chat data from the note attribute
      */
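The merge step in `saveCurrentDataToSpecificNote` combines tool executions from two sources (streaming metadata and the visible UI steps), keeping the first record seen per `id`. The same logic as a standalone helper, assuming a reduced record type:

type ToolExecution = { id: string; name: string; result: unknown };

// Keep metadata records first; add UI records only if their id is new.
function mergeExecutions(fromMetadata: ToolExecution[], fromUi: ToolExecution[]): ToolExecution[] {
    const merged = [...fromMetadata];
    const seen = new Set(merged.map(t => t.id));
    for (const exec of fromUi) {
        if (!seen.has(exec.id)) {
            merged.push(exec);
            seen.add(exec.id);
        }
    }
    return merged;
}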
@@ -867,8 +976,8 @@ export default class LlmChatPanel extends BasicWidget {
                 this.showSources(postResponse.sources);
             }

-            // Process the assistant response
-            this.processAssistantResponse(postResponse.content, postResponse);
+            // Process the assistant response with original chat note ID
+            this.processAssistantResponse(postResponse.content, postResponse, this.noteId);

             hideLoadingIndicator(this.loadingIndicator);
             return true;
@@ -884,7 +993,7 @@ export default class LlmChatPanel extends BasicWidget {
     /**
      * Process an assistant response - add to UI and save
      */
-    private async processAssistantResponse(content: string, fullResponse?: any) {
+    private async processAssistantResponse(content: string, fullResponse?: any, originalChatNoteId?: string | null) {
         // Add the response to the chat UI
         this.addMessageToChat('assistant', content);

@@ -910,8 +1019,8 @@ export default class LlmChatPanel extends BasicWidget {
             ];
         }

-        // Save to note
-        this.saveCurrentData().catch(err => {
+        // Save to note - use original chat note ID if provided
+        this.saveCurrentDataToSpecificNote(originalChatNoteId || this.noteId).catch(err => {
             console.error("Failed to save assistant response to note:", err);
         });
     }
@@ -936,12 +1045,15 @@ export default class LlmChatPanel extends BasicWidget {
             timestamp: string;
         }> = [];

+        // Store the original chat note ID to ensure we save to the correct note even if user switches
+        const originalChatNoteId = this.noteId;
+
         return setupStreamingResponse(
             this.noteId,
             messageParams,
             // Content update handler
             (content: string, isDone: boolean = false) => {
-                this.updateStreamingUI(content, isDone);
+                this.updateStreamingUI(content, isDone, originalChatNoteId);

                 // Update session data with additional metadata when streaming is complete
                 if (isDone) {
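The ordering here matters: `originalChatNoteId` is bound before `setupStreamingResponse` runs, so the content-update callback closes over the note ID from request time rather than reading `this.noteId` when each chunk arrives. A reduced illustration, with a hypothetical `stream` helper standing in for the real streaming setup:

// The callback sees the id captured at request time, not the current one.
function stream(onChunk: (chunk: string, done: boolean) => void) {
    setTimeout(() => onChunk('partial', false), 10);
    setTimeout(() => onChunk('', true), 20);
}

let currentNoteId = 'note-A';
const originalChatNoteId = currentNoteId; // captured once

stream((chunk, done) => {
    if (done) {
        // Still 'note-A' even though the user moved on below.
        console.log(`save final response to ${originalChatNoteId}`);
    }
});

currentNoteId = 'note-B'; // user switches notes mid-stream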
@@ -1067,13 +1179,13 @@ export default class LlmChatPanel extends BasicWidget {
     /**
      * Update the UI with streaming content
      */
-    private updateStreamingUI(assistantResponse: string, isDone: boolean = false) {
+    private updateStreamingUI(assistantResponse: string, isDone: boolean = false, originalChatNoteId?: string | null) {
         // Track if we have a streaming message in progress
         const hasStreamingMessage = !!this.noteContextChatMessages.querySelector('.assistant-message.streaming');

         // Create a new message element or use the existing streaming one
         let assistantMessageEl: HTMLElement;

         if (hasStreamingMessage) {
             // Use the existing streaming message
             assistantMessageEl = this.noteContextChatMessages.querySelector('.assistant-message.streaming')!;
@@ -1103,7 +1215,7 @@ export default class LlmChatPanel extends BasicWidget {
         if (isDone) {
             // Remove the streaming class to mark this message as complete
             assistantMessageEl.classList.remove('streaming');

             // Apply syntax highlighting
             formatCodeBlocks($(assistantMessageEl as HTMLElement));

@@ -1118,8 +1230,8 @@ export default class LlmChatPanel extends BasicWidget {
                 timestamp: new Date()
             });

-            // Save the updated message list
-            this.saveCurrentData();
+            // Save the updated message list to the original chat note
+            this.saveCurrentDataToSpecificNote(originalChatNoteId || this.noteId);
         }

         // Scroll to bottom
@@ -182,17 +182,30 @@ export default class AiChatTypeWidget extends TypeWidget {

     // Save chat data to the note
     async saveData(data: any) {
-        if (!this.note) {
+        // If we have a noteId in the data, that's the AI Chat note we should save to
+        // This happens when the chat panel is saving its conversation
+        const targetNoteId = data.noteId;
+
+        // If no noteId in data, use the current note (for new chats)
+        const noteIdToUse = targetNoteId || this.note?.noteId;
+
+        if (!noteIdToUse) {
+            console.warn("Cannot save AI Chat data: no note ID available");
             return;
         }

         try {
-            console.log(`AiChatTypeWidget: Saving data for note ${this.note.noteId}`);
+            console.log(`AiChatTypeWidget: Saving data for note ${noteIdToUse} (current note: ${this.note?.noteId}, data.noteId: ${data.noteId})`);
+
+            // Safety check: if we have both IDs and they don't match, warn about it
+            if (targetNoteId && this.note?.noteId && targetNoteId !== this.note.noteId) {
+                console.warn(`Note ID mismatch: saving to ${targetNoteId} but current note is ${this.note.noteId}`);
+            }

             // Format the data properly - this is the canonical format of the data
             const formattedData = {
                 messages: data.messages || [],
-                noteId: this.note.noteId, // Always use the note's own ID
+                noteId: noteIdToUse, // Always preserve the correct note ID
                 toolSteps: data.toolSteps || [],
                 sources: data.sources || [],
                 metadata: {
@@ -201,8 +214,8 @@ export default class AiChatTypeWidget extends TypeWidget {
                 }
             };

-            // Save the data to the note
-            await server.put(`notes/${this.note.noteId}/data`, {
+            // Save the data to the correct note
+            await server.put(`notes/${noteIdToUse}/data`, {
                 content: JSON.stringify(formattedData, null, 2)
             });
         } catch (e) {
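`saveData` now resolves its target in a fixed order: the `noteId` carried in the payload wins (that is the chat panel saving its conversation), and the widget's own note is only a fallback for brand-new chats. The same precedence as a standalone helper (hypothetical name, not part of the diff):

// data.noteId wins; the currently open note is the new-chat fallback.
function resolveTargetNoteId(dataNoteId: string | undefined, currentNoteId: string | undefined): string | null {
    if (dataNoteId && currentNoteId && dataNoteId !== currentNoteId) {
        console.warn(`Note ID mismatch: saving to ${dataNoteId} but current note is ${currentNoteId}`);
    }
    return dataNoteId ?? currentNoteId ?? null;
}

console.log(resolveTargetNoteId('chatNote', 'otherNote')); // warns, returns 'chatNote'
console.log(resolveTargetNoteId(undefined, 'newChat'));    // 'newChat'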
@@ -48,7 +48,7 @@ export default class AiSettingsWidget extends OptionsWidget {
         if (optionName === 'aiEnabled') {
             try {
                 const isEnabled = value === 'true';

                 if (isEnabled) {
                     toastService.showMessage(t("ai_llm.ai_enabled") || "AI features enabled");
                 } else {
@@ -40,8 +40,8 @@ interface NoteContext {
 }

 export class AIServiceManager implements IAIServiceManager {
-    private services: Partial<Record<ServiceProviders, AIService>> = {};
+    private currentService: AIService | null = null;
+    private currentProvider: ServiceProviders | null = null;
     private initialized = false;

     constructor() {
@@ -50,9 +50,8 @@ export class AIServiceManager implements IAIServiceManager {
             log.error(`Error initializing LLM tools during AIServiceManager construction: ${error.message || String(error)}`);
         });

-        // Set up event listener for provider changes
-        this.setupProviderChangeListener();
+        // Removed complex provider change listener - we'll read options fresh each time

         this.initialized = true;
     }
@@ -140,15 +139,15 @@ export class AIServiceManager implements IAIServiceManager {
      */
     async getOrCreateAnyService(): Promise<AIService> {
         this.ensureInitialized();

         // Get the selected provider using the new configuration system
         const selectedProvider = await this.getSelectedProviderAsync();


         if (!selectedProvider) {
             throw new Error('No AI provider is selected. Please select a provider (OpenAI, Anthropic, or Ollama) in your AI settings.');
         }

         try {
             const service = await this.getOrCreateChatProvider(selectedProvider);
             if (service) {
@@ -166,7 +165,7 @@ export class AIServiceManager implements IAIServiceManager {
      */
     isAnyServiceAvailable(): boolean {
         this.ensureInitialized();

         // Check if we have the selected provider available
         return this.getAvailableProviders().length > 0;
     }
@@ -174,43 +173,37 @@ export class AIServiceManager implements IAIServiceManager {
     /**
      * Get list of available providers
      */
     getAvailableProviders(): ServiceProviders[] {
         this.ensureInitialized();

         const allProviders: ServiceProviders[] = ['openai', 'anthropic', 'ollama'];
         const availableProviders: ServiceProviders[] = [];

         for (const providerName of allProviders) {
-            // Use a sync approach - check if we can create the provider
-            const service = this.services[providerName];
-            if (service && service.isAvailable()) {
-                availableProviders.push(providerName);
-            } else {
-                // For providers not yet created, check configuration to see if they would be available
-                try {
-                    switch (providerName) {
-                        case 'openai':
-                            if (options.getOption('openaiApiKey')) {
-                                availableProviders.push(providerName);
-                            }
-                            break;
-                        case 'anthropic':
-                            if (options.getOption('anthropicApiKey')) {
-                                availableProviders.push(providerName);
-                            }
-                            break;
-                        case 'ollama':
-                            if (options.getOption('ollamaBaseUrl')) {
-                                availableProviders.push(providerName);
-                            }
-                            break;
-                    }
-                } catch (error) {
-                    // Ignore configuration errors, provider just won't be available
-                }
-            }
+            // Check configuration to see if provider would be available
+            try {
+                switch (providerName) {
+                    case 'openai':
+                        if (options.getOption('openaiApiKey') || options.getOption('openaiBaseUrl')) {
+                            availableProviders.push(providerName);
+                        }
+                        break;
+                    case 'anthropic':
+                        if (options.getOption('anthropicApiKey')) {
+                            availableProviders.push(providerName);
+                        }
+                        break;
+                    case 'ollama':
+                        if (options.getOption('ollamaBaseUrl')) {
+                            availableProviders.push(providerName);
+                        }
+                        break;
+                }
+            } catch (error) {
+                // Ignore configuration errors, provider just won't be available
+            }
         }

         return availableProviders;
     }
@@ -234,11 +227,11 @@ export class AIServiceManager implements IAIServiceManager {

         // Get the selected provider
         const selectedProvider = await this.getSelectedProviderAsync();

         if (!selectedProvider) {
             throw new Error('No AI provider is selected. Please select a provider in your AI settings.');
         }

         // Check if the selected provider is available
         const availableProviders = this.getAvailableProviders();
         if (!availableProviders.includes(selectedProvider)) {
@@ -379,47 +372,68 @@ export class AIServiceManager implements IAIServiceManager {
     }

     /**
-     * Get or create a chat provider on-demand with inline validation
+     * Clear the current provider (forces recreation on next access)
+     */
+    public clearCurrentProvider(): void {
+        this.currentService = null;
+        this.currentProvider = null;
+        log.info('Cleared current provider - will be recreated on next access');
+    }
+
+    /**
+     * Get or create the current provider instance - only one instance total
      */
     private async getOrCreateChatProvider(providerName: ServiceProviders): Promise<AIService | null> {
-        // Return existing provider if already created
-        if (this.services[providerName]) {
-            return this.services[providerName];
+        // If provider type changed, clear the old one
+        if (this.currentProvider && this.currentProvider !== providerName) {
+            log.info(`Provider changed from ${this.currentProvider} to ${providerName}, clearing old service`);
+            this.currentService = null;
+            this.currentProvider = null;
         }

-        // Create and validate provider on-demand
+        // Return existing service if it matches and is available
+        if (this.currentService && this.currentProvider === providerName && this.currentService.isAvailable()) {
+            return this.currentService;
+        }
+
+        // Clear invalid service
+        if (this.currentService) {
+            this.currentService = null;
+            this.currentProvider = null;
+        }
+
+        // Create new service for the requested provider
         try {
             let service: AIService | null = null;

             switch (providerName) {
                 case 'openai': {
                     const apiKey = options.getOption('openaiApiKey');
                     const baseUrl = options.getOption('openaiBaseUrl');
                     if (!apiKey && !baseUrl) return null;

                     service = new OpenAIService();
-                    // Validate by checking if it's available
                     if (!service.isAvailable()) {
                         throw new Error('OpenAI service not available');
                     }
                     break;
                 }

                 case 'anthropic': {
                     const apiKey = options.getOption('anthropicApiKey');
                     if (!apiKey) return null;

                     service = new AnthropicService();
                     if (!service.isAvailable()) {
                         throw new Error('Anthropic service not available');
                     }
                     break;
                 }

                 case 'ollama': {
                     const baseUrl = options.getOption('ollamaBaseUrl');
                     if (!baseUrl) return null;

                     service = new OllamaService();
                     if (!service.isAvailable()) {
                         throw new Error('Ollama service not available');
@@ -427,9 +441,12 @@ export class AIServiceManager implements IAIServiceManager {
                     break;
                 }
             }

             if (service) {
-                this.services[providerName] = service;
+                // Cache the new service
+                this.currentService = service;
+                this.currentProvider = providerName;
+                log.info(`Created and cached new ${providerName} service`);
                 return service;
             }
         } catch (error: any) {
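With this change the manager holds at most one live service (`currentService` plus `currentProvider`) instead of one per provider, so a settings change has exactly one instance to invalidate. The cache discipline, reduced to its shape (the `Service` interface and factory here are illustrative, not the manager's API):

interface Service { isAvailable(): boolean; }

class SingleProviderCache<P extends string> {
    private currentService: Service | null = null;
    private currentProvider: P | null = null;

    constructor(private create: (provider: P) => Service | null) {}

    get(provider: P): Service | null {
        // Reuse only if the provider matches and the service is still healthy.
        if (this.currentService && this.currentProvider === provider && this.currentService.isAvailable()) {
            return this.currentService;
        }
        // Otherwise drop the stale instance and build fresh from current options.
        this.currentService = this.create(provider);
        this.currentProvider = this.currentService ? provider : null;
        return this.currentService;
    }

    clear(): void {
        this.currentService = null;
        this.currentProvider = null;
    }
}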
@@ -630,28 +647,47 @@ export class AIServiceManager implements IAIServiceManager {
      * Check if a specific provider is available
      */
     isProviderAvailable(provider: string): boolean {
-        return this.services[provider as ServiceProviders]?.isAvailable() ?? false;
+        // Check if this is the current provider and if it's available
+        if (this.currentProvider === provider && this.currentService) {
+            return this.currentService.isAvailable();
+        }
+
+        // For other providers, check configuration
+        try {
+            switch (provider) {
+                case 'openai':
+                    return !!(options.getOption('openaiApiKey') || options.getOption('openaiBaseUrl'));
+                case 'anthropic':
+                    return !!options.getOption('anthropicApiKey');
+                case 'ollama':
+                    return !!options.getOption('ollamaBaseUrl');
+                default:
+                    return false;
+            }
+        } catch {
+            return false;
+        }
     }

     /**
      * Get metadata about a provider
      */
     getProviderMetadata(provider: string): ProviderMetadata | null {
-        const service = this.services[provider as ServiceProviders];
-        if (!service) {
-            return null;
+        // Only return metadata if this is the current active provider
+        if (this.currentProvider === provider && this.currentService) {
+            return {
+                name: provider,
+                capabilities: {
+                    chat: true,
+                    streaming: true,
+                    functionCalling: provider === 'openai' // Only OpenAI has function calling
+                },
+                models: ['default'], // Placeholder, could be populated from the service
+                defaultModel: 'default'
+            };
         }

-        return {
-            name: provider,
-            capabilities: {
-                chat: true,
-                streaming: true,
-                functionCalling: provider === 'openai' // Only OpenAI has function calling
-            },
-            models: ['default'], // Placeholder, could be populated from the service
-            defaultModel: 'default'
-        };
+        return null;
     }

@@ -665,67 +701,8 @@ export class AIServiceManager implements IAIServiceManager {
         return String(error);
     }

-    /**
-     * Set up event listener for provider changes
-     */
-    private setupProviderChangeListener(): void {
-        // List of AI-related options that should trigger service recreation
-        const aiRelatedOptions = [
-            'aiEnabled',
-            'aiSelectedProvider',
-            'openaiApiKey',
-            'openaiBaseUrl',
-            'openaiDefaultModel',
-            'anthropicApiKey',
-            'anthropicBaseUrl',
-            'anthropicDefaultModel',
-            'ollamaBaseUrl',
-            'ollamaDefaultModel'
-        ];
-
-        eventService.subscribe(['entityChanged'], async ({ entityName, entity }) => {
-            if (entityName === 'options' && entity && aiRelatedOptions.includes(entity.name)) {
-                log.info(`AI-related option '${entity.name}' changed, recreating LLM services`);
-
-                // Special handling for aiEnabled toggle
-                if (entity.name === 'aiEnabled') {
-                    const isEnabled = entity.value === 'true';
-
-                    if (isEnabled) {
-                        log.info('AI features enabled, initializing AI service');
-                        // Initialize the AI service
-                        await this.initialize();
-                    } else {
-                        log.info('AI features disabled, clearing providers');
-                        // Clear chat providers
-                        this.services = {};
-                    }
-                } else {
-                    // For other AI-related options, recreate services on-demand
-                    await this.recreateServices();
-                }
-            }
-        });
-    }
-
-    /**
-     * Recreate LLM services when provider settings change
-     */
-    private async recreateServices(): Promise<void> {
-        try {
-            log.info('Recreating LLM services due to configuration change');
-
-            // Clear configuration cache first
-            clearConfigurationCache();
-
-            // Clear existing chat providers (they will be recreated on-demand)
-            this.services = {};
-
-            log.info('LLM services recreated successfully');
-        } catch (error) {
-            log.error(`Error recreating LLM services: ${this.handleError(error)}`);
-        }
-    }
-
+    // Removed complex event listener and cache invalidation logic
+    // Services will be created fresh when needed by reading current options
+
 }

@@ -1,4 +1,3 @@
-import configurationManager from './configuration_manager.js';
 import optionService from '../../options.js';
 import log from '../../log.js';
 import type {
@@ -13,7 +12,7 @@ import type {
  */

 /**
- * Get the selected AI provider
+ * Get the selected AI provider - always fresh from options
  */
 export async function getSelectedProvider(): Promise<ProviderType | null> {
     const providerOption = optionService.getOption('aiSelectedProvider');
@@ -25,38 +24,100 @@ export async function getSelectedProvider(): Promise<ProviderType | null> {
  * Parse a model identifier (handles "provider:model" format)
  */
 export function parseModelIdentifier(modelString: string): ModelIdentifier {
-    return configurationManager.parseModelIdentifier(modelString);
+    if (!modelString) {
+        return {
+            modelId: '',
+            fullIdentifier: ''
+        };
+    }
+
+    const parts = modelString.split(':');
+
+    if (parts.length === 1) {
+        // No provider prefix, just model name
+        return {
+            modelId: modelString,
+            fullIdentifier: modelString
+        };
+    }
+
+    // Check if first part is a known provider
+    const potentialProvider = parts[0].toLowerCase();
+    const knownProviders: ProviderType[] = ['openai', 'anthropic', 'ollama'];
+
+    if (knownProviders.includes(potentialProvider as ProviderType)) {
+        // Provider prefix format
+        const provider = potentialProvider as ProviderType;
+        const modelId = parts.slice(1).join(':'); // Rejoin in case model has colons
+
+        return {
+            provider,
+            modelId,
+            fullIdentifier: modelString
+        };
+    }
+
+    // Not a provider prefix, treat whole string as model name
+    return {
+        modelId: modelString,
+        fullIdentifier: modelString
+    };
 }

 /**
  * Create a model configuration from a model string
  */
 export function createModelConfig(modelString: string, defaultProvider?: ProviderType): ModelConfig {
-    return configurationManager.createModelConfig(modelString, defaultProvider);
+    const identifier = parseModelIdentifier(modelString);
+    const provider = identifier.provider || defaultProvider || 'openai'; // fallback to openai if no provider specified
+
+    return {
+        provider,
+        modelId: identifier.modelId,
+        displayName: identifier.fullIdentifier
+    };
 }

 /**
- * Get the default model for a specific provider
+ * Get the default model for a specific provider - always fresh from options
  */
 export async function getDefaultModelForProvider(provider: ProviderType): Promise<string | undefined> {
-    const config = await configurationManager.getAIConfig();
-    return config.defaultModels[provider]; // This can now be undefined
+    const optionKey = `${provider}DefaultModel` as const;
+    return optionService.getOption(optionKey) || undefined;
 }

 /**
- * Get provider settings for a specific provider
+ * Get provider settings for a specific provider - always fresh from options
  */
 export async function getProviderSettings(provider: ProviderType) {
-    const config = await configurationManager.getAIConfig();
-    return config.providerSettings[provider];
+    switch (provider) {
+        case 'openai':
+            return {
+                apiKey: optionService.getOption('openaiApiKey'),
+                baseUrl: optionService.getOption('openaiBaseUrl'),
+                defaultModel: optionService.getOption('openaiDefaultModel')
+            };
+        case 'anthropic':
+            return {
+                apiKey: optionService.getOption('anthropicApiKey'),
+                baseUrl: optionService.getOption('anthropicBaseUrl'),
+                defaultModel: optionService.getOption('anthropicDefaultModel')
+            };
+        case 'ollama':
+            return {
+                baseUrl: optionService.getOption('ollamaBaseUrl'),
+                defaultModel: optionService.getOption('ollamaDefaultModel')
+            };
+        default:
+            return {};
+    }
 }

 /**
- * Check if AI is enabled
+ * Check if AI is enabled - always fresh from options
  */
 export async function isAIEnabled(): Promise<boolean> {
-    const config = await configurationManager.getAIConfig();
-    return config.enabled;
+    return optionService.getOptionBool('aiEnabled');
 }

 /**
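The inlined `parseModelIdentifier` splits on the first colon only when the prefix names a known provider, and rejoins the remainder so model names containing colons (common for Ollama tags) survive intact. Expected behavior per the implementation above:

console.log(parseModelIdentifier('gpt-4o'));
// { modelId: 'gpt-4o', fullIdentifier: 'gpt-4o' } (no provider prefix)

console.log(parseModelIdentifier('ollama:llama3:70b'));
// { provider: 'ollama', modelId: 'llama3:70b', fullIdentifier: 'ollama:llama3:70b' }

console.log(parseModelIdentifier('custom:model'));
// { modelId: 'custom:model', fullIdentifier: 'custom:model' } ('custom' is not a known provider)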
@@ -82,7 +143,7 @@ export async function isProviderConfigured(provider: ProviderType): Promise<boolean> {
  */
 export async function getAvailableSelectedProvider(): Promise<ProviderType | null> {
     const selectedProvider = await getSelectedProvider();

     if (!selectedProvider) {
         return null; // No provider selected
     }
@@ -95,17 +156,51 @@ export async function getAvailableSelectedProvider(): Promise<ProviderType | null> {
 }

 /**
- * Validate the current AI configuration
+ * Validate the current AI configuration - simplified validation
  */
 export async function validateConfiguration() {
-    return configurationManager.validateConfig();
+    const result = {
+        isValid: true,
+        errors: [] as string[],
+        warnings: [] as string[]
+    };
+
+    const aiEnabled = await isAIEnabled();
+    if (!aiEnabled) {
+        result.warnings.push('AI features are disabled');
+        return result;
+    }
+
+    const selectedProvider = await getSelectedProvider();
+    if (!selectedProvider) {
+        result.errors.push('No AI provider selected');
+        result.isValid = false;
+        return result;
+    }
+
+    // Validate provider-specific settings
+    const settings = await getProviderSettings(selectedProvider);
+
+    if (selectedProvider === 'openai' && !(settings as any)?.apiKey) {
+        result.warnings.push('OpenAI API key is not configured');
+    }
+
+    if (selectedProvider === 'anthropic' && !(settings as any)?.apiKey) {
+        result.warnings.push('Anthropic API key is not configured');
+    }
+
+    if (selectedProvider === 'ollama' && !(settings as any)?.baseUrl) {
+        result.warnings.push('Ollama base URL is not configured');
+    }
+
+    return result;
 }

 /**
- * Clear cached configuration (use when settings change)
+ * Clear cached configuration (no-op since we removed caching)
  */
 export function clearConfigurationCache(): void {
-    configurationManager.clearCache();
+    // No caching anymore, so nothing to clear
 }

 /**
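`validateConfiguration` now builds its result inline instead of delegating to the configuration manager: a missing provider selection is a hard error, while missing credentials only warn. One way a caller might consume the result shape defined above:

const validation = await validateConfiguration();
if (!validation.isValid) {
    console.error(`AI configuration invalid: ${validation.errors.join('; ')}`);
} else if (validation.warnings.length > 0) {
    console.warn(`AI configuration warnings: ${validation.warnings.join('; ')}`);
}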
@@ -136,7 +231,7 @@ export async function getValidModelConfig(provider: ProviderType): Promise<{ mod
  */
 export async function getSelectedModelConfig(): Promise<{ model: string; provider: ProviderType } | null> {
     const selectedProvider = await getSelectedProvider();

     if (!selectedProvider) {
         return null; // No provider selected
     }
@@ -21,11 +21,6 @@ import type {
  */
 export class ConfigurationManager {
     private static instance: ConfigurationManager | null = null;
-    private cachedConfig: AIConfig | null = null;
-    private lastConfigUpdate: number = 0;
-
-    // Cache for 5 minutes to avoid excessive option reads
-    private static readonly CACHE_DURATION = 5 * 60 * 1000;

     private constructor() {}

@@ -37,14 +32,9 @@ export class ConfigurationManager {
     }

     /**
-     * Get the complete AI configuration
+     * Get the complete AI configuration - always fresh, no caching
      */
     public async getAIConfig(): Promise<AIConfig> {
-        const now = Date.now();
-        if (this.cachedConfig && (now - this.lastConfigUpdate) < ConfigurationManager.CACHE_DURATION) {
-            return this.cachedConfig;
-        }
-
         try {
             const config: AIConfig = {
                 enabled: await this.getAIEnabled(),
@@ -53,8 +43,6 @@ export class ConfigurationManager {
                 providerSettings: await this.getProviderSettings()
             };

-            this.cachedConfig = config;
-            this.lastConfigUpdate = now;
             return config;
         } catch (error) {
             log.error(`Error loading AI configuration: ${error}`);
@@ -263,14 +251,6 @@ export class ConfigurationManager {
         return result;
     }

-    /**
-     * Clear cached configuration (force reload on next access)
-     */
-    public clearCache(): void {
-        this.cachedConfig = null;
-        this.lastConfigUpdate = 0;
-    }
-
     // Private helper methods

     private async getAIEnabled(): Promise<boolean> {
@@ -111,19 +111,13 @@ export class ModelSelectionStage extends BasePipelineStage<ModelSelectionInput,
             const { getValidModelConfig } = await import('../../config/configuration_helpers.js');
             const modelConfig = await getValidModelConfig(selectedProvider);

-            if (modelConfig) {
-                // We have a valid configured model
-                updatedOptions.model = modelConfig.model;
-            } else {
-                // No model configured, try to fetch and set a default from the service
-                const fetchedModel = await this.fetchAndSetDefaultModel(selectedProvider);
-                if (!fetchedModel) {
-                    throw new Error(`No default model configured for provider ${selectedProvider}. Please set a default model in your AI settings or ensure the provider service is available.`);
-                }
-                // Use the fetched model
-                updatedOptions.model = fetchedModel;
+            if (!modelConfig) {
+                throw new Error(`No default model configured for provider ${selectedProvider}. Please set a default model in your AI settings.`);
             }

+            // Use the configured model
+            updatedOptions.model = modelConfig.model;
+
             log.info(`Selected provider: ${selectedProvider}, model: ${updatedOptions.model}`);

             // Determine query complexity
@@ -183,20 +177,8 @@ export class ModelSelectionStage extends BasePipelineStage<ModelSelectionInput,
             return;
         }

-        // If no provider could be determined, try to use precedence
+        // Use the explicitly provided provider - no automatic fallbacks
         let selectedProvider = provider;
-        if (!selectedProvider) {
-            // List of providers in precedence order
-            const providerPrecedence = ['anthropic', 'openai', 'ollama'];
-
-            // Find the first available provider
-            for (const p of providerPrecedence) {
-                if (aiServiceManager.isProviderAvailable(p)) {
-                    selectedProvider = p as ServiceProviders;
-                    break;
-                }
-            }
-        }

         // Set the provider metadata in the options
         if (selectedProvider) {
@@ -218,47 +200,7 @@ export class ModelSelectionStage extends BasePipelineStage<ModelSelectionInput,
             }
         }

-    /**
-     * Determine model based on selected provider using the new configuration system
-     * This method is now simplified and delegates to the main model selection logic
-     */
-    private async determineDefaultModel(input: ModelSelectionInput): Promise<string> {
-        try {
-            // Use the same logic as the main process method
-            const { getValidModelConfig, getSelectedProvider } = await import('../../config/configuration_helpers.js');
-            const selectedProvider = await getSelectedProvider();
-
-            if (!selectedProvider) {
-                throw new Error('No AI provider is selected. Please select a provider in your AI settings.');
-            }
-
-            // Check if the provider is available through the service manager
-            if (!aiServiceManager.isProviderAvailable(selectedProvider)) {
-                throw new Error(`Selected provider ${selectedProvider} is not available`);
-            }
-
-            // Try to get a valid model config
-            const modelConfig = await getValidModelConfig(selectedProvider);
-
-            if (!modelConfig) {
-                throw new Error(`No default model configured for provider ${selectedProvider}. Please configure a default model in your AI settings.`);
-            }
-
-            // Set provider metadata
-            if (!input.options.providerMetadata) {
-                input.options.providerMetadata = {
-                    provider: selectedProvider as 'openai' | 'anthropic' | 'ollama' | 'local',
-                    modelId: modelConfig.model
-                };
-            }
-
-            log.info(`Selected default model ${modelConfig.model} from provider ${selectedProvider}`);
-            return modelConfig.model;
-        } catch (error) {
-            log.error(`Error determining default model: ${error}`);
-            throw error; // Don't provide fallback defaults, let the error propagate
-        }
-    }
-
     /**
      * Get estimated context window for Ollama models
@@ -283,48 +225,5 @@ export class ModelSelectionStage extends BasePipelineStage<ModelSelectionInput,
         }
     }

-    /**
-     * Use AI service manager to get a configured model for the provider
-     * This eliminates duplication and uses the existing service layer
-     */
-    private async fetchAndSetDefaultModel(provider: ProviderType): Promise<string | null> {
-        try {
-            log.info(`Getting default model for provider ${provider} using AI service manager`);
-
-            // Use the existing AI service manager instead of duplicating API calls
-            const service = await aiServiceManager.getInstance().getService(provider);
-
-            if (!service || !service.isAvailable()) {
-                log.info(`Provider ${provider} service is not available`);
-                return null;
-            }
-
-            // Check if the service has a method to get available models
-            if (typeof (service as any).getAvailableModels === 'function') {
-                try {
-                    const models = await (service as any).getAvailableModels();
-                    if (models && models.length > 0) {
-                        // Use the first available model - no hardcoded preferences
-                        const selectedModel = models[0];
-
-                        // Import server-side options to update the default model
-                        const optionService = (await import('../../../options.js')).default;
-                        const optionKey = `${provider}DefaultModel` as const;
-
-                        await optionService.setOption(optionKey, selectedModel);
-                        log.info(`Set default ${provider} model to: ${selectedModel}`);
-                        return selectedModel;
-                    }
-                } catch (modelError) {
-                    log.error(`Error fetching models from ${provider} service: ${modelError}`);
-                }
-            }
-
-            log.info(`Provider ${provider} does not support dynamic model fetching`);
-            return null;
-        } catch (error) {
-            log.error(`Error getting default model for provider ${provider}: ${error}`);
-            return null;
-        }
-    }
 }
@@ -26,7 +26,11 @@ export function getOpenAIOptions(
     }

     const baseUrl = options.getOption('openaiBaseUrl') || PROVIDER_CONSTANTS.OPENAI.BASE_URL;
-    const modelName = opts.model || options.getOption('openaiDefaultModel') || PROVIDER_CONSTANTS.OPENAI.DEFAULT_MODEL;
+    const modelName = opts.model || options.getOption('openaiDefaultModel');
+
+    if (!modelName) {
+        throw new Error('No OpenAI model configured. Please set a default model in your AI settings.');
+    }

     // Create provider metadata
     const providerMetadata: ModelMetadata = {
@ -87,7 +91,11 @@ export function getAnthropicOptions(
|
|||||||
}
|
}
|
||||||
|
|
||||||
const baseUrl = options.getOption('anthropicBaseUrl') || PROVIDER_CONSTANTS.ANTHROPIC.BASE_URL;
|
const baseUrl = options.getOption('anthropicBaseUrl') || PROVIDER_CONSTANTS.ANTHROPIC.BASE_URL;
|
||||||
const modelName = opts.model || options.getOption('anthropicDefaultModel') || PROVIDER_CONSTANTS.ANTHROPIC.DEFAULT_MODEL;
|
const modelName = opts.model || options.getOption('anthropicDefaultModel');
|
||||||
|
|
||||||
|
if (!modelName) {
|
||||||
|
throw new Error('No Anthropic model configured. Please set a default model in your AI settings.');
|
||||||
|
}
|
||||||
|
|
||||||
// Create provider metadata
|
// Create provider metadata
|
||||||
const providerMetadata: ModelMetadata = {
|
const providerMetadata: ModelMetadata = {
|
||||||
@@ -150,8 +158,12 @@ export async function getOllamaOptions(
         throw new Error('Ollama API URL is not configured');
     }

-    // Get the model name - no prefix handling needed now
-    let modelName = opts.model || options.getOption('ollamaDefaultModel') || 'llama3';
+    // Get the model name - no defaults, must be configured by user
+    let modelName = opts.model || options.getOption('ollamaDefaultModel');
+
+    if (!modelName) {
+        throw new Error('No Ollama model configured. Please set a default model in your AI settings.');
+    }

     // Create provider metadata
     const providerMetadata: ModelMetadata = {
@@ -249,4 +261,4 @@ async function getOllamaModelContextWindow(modelName: string): Promise<number> {
         log.info(`Error getting context window for model ${modelName}: ${error}`);
         return MODEL_CAPABILITIES['default'].contextWindowTokens; // Default fallback
     }
 }
@@ -82,6 +82,26 @@ function setOption<T extends OptionNames>(name: T, value: string | OptionDefinit
     } else {
         createOption(name, value, false);
     }
+
+    // Clear current AI provider when AI-related options change
+    const aiOptions = [
+        'aiSelectedProvider', 'openaiApiKey', 'openaiBaseUrl', 'openaiDefaultModel',
+        'anthropicApiKey', 'anthropicBaseUrl', 'anthropicDefaultModel',
+        'ollamaBaseUrl', 'ollamaDefaultModel'
+    ];
+
+    if (aiOptions.includes(name)) {
+        // Import dynamically to avoid circular dependencies
+        setImmediate(async () => {
+            try {
+                const aiServiceManager = (await import('./llm/ai_service_manager.js')).default;
+                aiServiceManager.getInstance().clearCurrentProvider();
+                console.log(`Cleared AI provider after ${name} option changed`);
+            } catch (error) {
+                console.log(`Could not clear AI provider: ${error}`);
+            }
+        });
+    }
 }

 /**
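The hook above defers the clear with `setImmediate` and loads the manager through a dynamic `import()`, so the options module never statically depends on the AI service manager (which itself reads options) and the synchronous save path is not blocked. The same pattern in isolation, with a hypothetical caller name:

// Notify another module without a static import cycle or blocking the caller.
function onAiOptionChanged(name: string) {
    setImmediate(async () => {
        try {
            const manager = (await import('./llm/ai_service_manager.js')).default;
            manager.getInstance().clearCurrentProvider();
        } catch (error) {
            console.log(`Could not clear AI provider after ${name} changed: ${error}`);
        }
    });
}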