Mirror of https://github.com/TriliumNext/Notes.git (synced 2025-09-18 17:31:53 +08:00)
feat(llm): fix double headers being sent in responses, and requests to Ollama failing to send
Commit 6bc9b3c184 (parent 20ec294774)
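Background for the "double headers" half of this fix: Node/Express raises ERR_HTTP_HEADERS_SENT when a handler writes to a response that has already been finalized, which is exactly what happens when a route sends its own reply and a shared result handler then tries to send another. A minimal standalone sketch (illustrative route and payload, not Trilium code):

import express from "express";
import type { Request, Response } from "express";

const app = express();

app.post("/llm/stream", (_req: Request, res: Response) => {
    // First send: acknowledge that streaming was initiated.
    res.json({ success: true, message: "Streaming initiated successfully" });

    // Any later res.json()/res.writeHead() on the same response would raise
    // ERR_HTTP_HEADERS_SENT; a shared result handler must therefore check
    // res.headersSent (or an explicit "already handled" flag) before writing.
    if (!res.headersSent) {
        res.json({ error: "never reached here" });
    }
});

app.listen(3000);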
@@ -376,7 +376,7 @@ export default class AiSettingsWidget extends OptionsWidget {
             embeddingWarnings.push(t("ai_llm.empty_key_warning.voyage"));
         }
 
-        if (selectedEmbeddingProvider === 'ollama' && !this.$widget.find('.ollama-base-url').val()) {
+        if (selectedEmbeddingProvider === 'ollama' && !this.$widget.find('.ollama-embedding-base-url').val()) {
             embeddingWarnings.push(t("ai_llm.empty_key_warning.ollama"));
         }
     }
@@ -240,40 +240,65 @@ export class ProviderService {
         }
 
         try {
-            const ollamaBaseUrl = this.$widget.find('.ollama-base-url').val() as string;
+            // Determine which URL to use based on the current context
+            // If we're in the embedding provider context, use the embedding base URL
+            // Otherwise, use the general base URL
+            const selectedAiProvider = this.$widget.find('.ai-selected-provider').val() as string;
+            const selectedEmbeddingProvider = this.$widget.find('.embedding-selected-provider').val() as string;
+
+            let ollamaBaseUrl: string;
+
+            // If embedding provider is Ollama and it's visible, use embedding URL
+            const $embeddingOllamaSettings = this.$widget.find('.ollama-embedding-provider-settings');
+            if (selectedEmbeddingProvider === 'ollama' && $embeddingOllamaSettings.is(':visible')) {
+                ollamaBaseUrl = this.$widget.find('.ollama-embedding-base-url').val() as string;
+            } else {
+                ollamaBaseUrl = this.$widget.find('.ollama-base-url').val() as string;
+            }
+
             const response = await server.get<OllamaModelResponse>(`llm/providers/ollama/models?baseUrl=${encodeURIComponent(ollamaBaseUrl)}`);
 
             if (response && response.success && response.models && response.models.length > 0) {
+                // Update both embedding model dropdowns
                 const $embedModelSelect = this.$widget.find('.ollama-embedding-model');
+                const $chatEmbedModelSelect = this.$widget.find('.ollama-chat-embedding-model');
 
                 const currentValue = $embedModelSelect.val();
+                const currentChatEmbedValue = $chatEmbedModelSelect.val();
 
-                // Clear existing options
-                $embedModelSelect.empty();
-
-                // Add embedding-specific models first
+                // Prepare embedding models
                 const embeddingModels = response.models.filter(model =>
                     model.name.includes('embed') || model.name.includes('bert'));
 
-                embeddingModels.forEach(model => {
-                    $embedModelSelect.append(`<option value="${model.name}">${model.name}</option>`);
-                });
-
-                if (embeddingModels.length > 0) {
-                    // Add separator if we have embedding models
-                    $embedModelSelect.append(`<option disabled>─────────────</option>`);
-                }
-
-                // Then add general models which can be used for embeddings too
-
                 const generalModels = response.models.filter(model =>
                     !model.name.includes('embed') && !model.name.includes('bert'));
 
+                // Update .ollama-embedding-model dropdown (embedding provider settings)
+                $embedModelSelect.empty();
+                embeddingModels.forEach(model => {
+                    $embedModelSelect.append(`<option value="${model.name}">${model.name}</option>`);
+                });
+                if (embeddingModels.length > 0) {
+                    $embedModelSelect.append(`<option disabled>─────────────</option>`);
+                }
                 generalModels.forEach(model => {
                     $embedModelSelect.append(`<option value="${model.name}">${model.name}</option>`);
                 });
 
                 // Try to restore the previously selected value
                 this.ensureSelectedValue($embedModelSelect, currentValue, 'ollamaEmbeddingModel');
 
+                // Update .ollama-chat-embedding-model dropdown (general Ollama provider settings)
+                $chatEmbedModelSelect.empty();
+                embeddingModels.forEach(model => {
+                    $chatEmbedModelSelect.append(`<option value="${model.name}">${model.name}</option>`);
+                });
+                if (embeddingModels.length > 0) {
+                    $chatEmbedModelSelect.append(`<option disabled>─────────────</option>`);
+                }
+                generalModels.forEach(model => {
+                    $chatEmbedModelSelect.append(`<option value="${model.name}">${model.name}</option>`);
+                });
+                this.ensureSelectedValue($chatEmbedModelSelect, currentChatEmbedValue, 'ollamaEmbeddingModel');
 
                 // Also update the LLM model dropdown
                 const $modelSelect = this.$widget.find('.ollama-default-model');
                 const currentModelValue = $modelSelect.val();
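The dropdown population above boils down to splitting the model list on name heuristics, with embedding-capable models listed first. A compact sketch of just that logic (OllamaModel and splitModels are illustrative names, not part of the codebase):

interface OllamaModel { name: string; }

function splitModels(models: OllamaModel[]): { embedding: OllamaModel[]; general: OllamaModel[] } {
    const isEmbedding = (m: OllamaModel) =>
        m.name.includes("embed") || m.name.includes("bert");
    return {
        embedding: models.filter(isEmbedding),          // listed first, above the separator
        general: models.filter((m) => !isEmbedding(m)), // general models can embed too
    };
}

// Example: nomic-embed-text lands in "embedding", llama3 in "general".
console.log(splitModels([{ name: "nomic-embed-text" }, { name: "llama3" }]));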
@@ -825,7 +825,10 @@ async function streamMessage(req: Request, res: Response) {
         success: true,
         message: 'Streaming initiated successfully'
     });
     log.info(`Sent immediate success response for streaming setup`);
 
+    // Mark response as handled to prevent apiResultHandler from processing it again
+    (res as any).triliumResponseHandled = true;
+
     // Create a new response object for streaming through WebSocket only
     // We won't use HTTP streaming since we've already sent the HTTP response
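The new triliumResponseHandled flag only helps if the downstream result handler checks it before writing. A hedged sketch of that contract (apiResultHandler's real signature is not shown in this diff; the function below is an assumption about its shape):

import type { Request, Response } from "express";

function apiResultHandlerSketch(_req: Request, res: Response, result: unknown) {
    // Skip if the route already finished the response itself.
    if ((res as any).triliumResponseHandled || res.headersSent) {
        return; // avoid sending headers twice
    }
    res.json(result);
}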
@@ -889,78 +892,33 @@ async function streamMessage(req: Request, res: Response) {
         thinking: showThinking ? 'Initializing streaming LLM response...' : undefined
     });
 
+    // Instead of trying to reimplement the streaming logic ourselves,
+    // delegate to restChatService but set up the correct protocol:
+    // 1. We've already sent a success response to the initial POST
+    // 2. Now we'll have restChatService process the actual streaming through WebSocket
-    // Process the LLM request using the existing service but with streaming setup
-    // Since we've already sent the initial HTTP response, we'll use the WebSocket for streaming
     try {
-        // Import the WebSocket service for sending messages
-        const wsService = (await import('../../services/ws.js')).default;
-
-        // Create a simple pass-through response object that won't write to the HTTP response
-        // but will allow restChatService to send WebSocket messages
-        const dummyResponse = {
-            writableEnded: false,
-            // Implement methods that would normally be used by restChatService
-            write: (_chunk: string) => {
-                // Silent no-op - we're only using WebSocket
-                return true;
-            },
+        // Call restChatService with streaming mode enabled
+        // The important part is setting method to GET to indicate streaming mode
+        await restChatService.handleSendMessage({
+            ...req,
+            method: 'GET', // Indicate streaming mode
+            query: {
+                ...req.query,
+                stream: 'true' // Add the required stream parameter
+            },
-            end: (_chunk?: string) => {
-                // Log when streaming is complete via WebSocket
-                log.info(`[${chatNoteId}] Completed HTTP response handling during WebSocket streaming`);
-                return dummyResponse;
-            },
+            body: {
+                content: enhancedContent,
+                useAdvancedContext: useAdvancedContext === true,
+                showThinking: showThinking === true
+            },
-            setHeader: (name: string, _value: string) => {
-                // Only log for content-type to reduce noise
-                if (name.toLowerCase() === 'content-type') {
-                    log.info(`[${chatNoteId}] Setting up streaming for WebSocket only`);
-                }
-                return dummyResponse;
-            }
-        };
+            params: { chatNoteId }
+        } as unknown as Request, res);
+    } catch (streamError) {
+        log.error(`Error during WebSocket streaming: ${streamError}`);
 
-        // Process the streaming now through WebSocket only
-        try {
-            log.info(`[${chatNoteId}] Processing LLM streaming through WebSocket after successful initiation at ${new Date().toISOString()}`);
-
-            // Call restChatService with our enhanced request and dummy response
-            // The important part is setting method to GET to indicate streaming mode
-            await restChatService.handleSendMessage({
-                ...req,
-                method: 'GET', // Indicate streaming mode
-                query: {
-                    ...req.query,
-                    stream: 'true' // Add the required stream parameter
-                },
-                body: {
-                    content: enhancedContent,
-                    useAdvancedContext: useAdvancedContext === true,
-                    showThinking: showThinking === true
-                },
-                params: { chatNoteId }
-            } as unknown as Request, dummyResponse as unknown as Response);
-
-            log.info(`[${chatNoteId}] WebSocket streaming completed at ${new Date().toISOString()}`);
-        } catch (streamError) {
-            log.error(`[${chatNoteId}] Error during WebSocket streaming: ${streamError}`);
-
-            // Send error message through WebSocket
-            wsService.sendMessageToAllClients({
-                type: 'llm-stream',
-                chatNoteId: chatNoteId,
-                error: `Error during streaming: ${streamError}`,
-                done: true
-            });
-        }
-    } catch (error) {
-        log.error(`Error during streaming: ${error}`);
-
-        // Send error to client via WebSocket
+        // Send error message through WebSocket
         wsService.sendMessageToAllClients({
             type: 'llm-stream',
             chatNoteId: chatNoteId,
-            error: `Error processing message: ${error}`,
+            error: `Error during streaming: ${streamError}`,
             done: true
         });
     }
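The rewrite above drops the hand-rolled dummyResponse and hands restChatService the real res, signaling streaming mode through a synthetic GET with query.stream = 'true'. A sketch of how a handler might detect that mode (handleSendMessage's body is not part of this diff, so the helper below is an assumption, not the actual implementation):

import type { Request } from "express";

function isStreamingRequest(req: Request): boolean {
    // streamMessage rewrites the method to GET and forces query.stream = 'true'
    // before delegating, so a single handler can serve both modes.
    return req.method === "GET" && req.query.stream === "true";
}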
@@ -45,13 +45,9 @@ interface NoteContext {
 export class AIServiceManager implements IAIServiceManager {
     private services: Partial<Record<ServiceProviders, AIService>> = {};
 
-    private providerOrder: ServiceProviders[] = []; // Will be populated from configuration
     private initialized = false;
 
     constructor() {
-        // Initialize provider order immediately
-        this.updateProviderOrder();
-
         // Initialize tools immediately
         this.initializeTools().catch(error => {
             log.error(`Error initializing LLM tools during AIServiceManager construction: ${error.message || String(error)}`);
@@ -59,6 +55,8 @@ export class AIServiceManager implements IAIServiceManager {
 
         // Set up event listener for provider changes
         this.setupProviderChangeListener();
+
+        this.initialized = true;
     }
 
     /**
@@ -83,44 +81,18 @@ export class AIServiceManager implements IAIServiceManager {
     }
 
     /**
-     * Update the provider order using the new configuration system (single provider)
+     * Get the currently selected provider using the new configuration system
      */
-    async updateProviderOrderAsync(): Promise<void> {
+    async getSelectedProviderAsync(): Promise<ServiceProviders | null> {
         try {
             const selectedProvider = await getSelectedProvider();
-            if (selectedProvider) {
-                this.providerOrder = [selectedProvider as ServiceProviders];
-                log.info(`Updated provider order: ${selectedProvider}`);
-            } else {
-                this.providerOrder = [];
-                log.info('No provider selected');
-            }
-            this.initialized = true;
+            return selectedProvider as ServiceProviders || null;
         } catch (error) {
             log.error(`Failed to get selected provider: ${error}`);
-            // Keep empty order, will be handled gracefully by other methods
-            this.providerOrder = [];
-            this.initialized = true;
+            return null;
         }
     }
 
-    /**
-     * Update the provider precedence order (legacy sync version)
-     * Returns true if successful, false if options not available yet
-     */
-    updateProviderOrder(): boolean {
-        if (this.initialized) {
-            return true;
-        }
-
-        // Use async version but don't wait
-        this.updateProviderOrderAsync().catch(error => {
-            log.error(`Error in async provider order update: ${error}`);
-        });
-
-        return true;
-    }
-
     /**
      * Validate AI configuration using the new configuration system
      */
@@ -162,16 +134,44 @@ export class AIServiceManager implements IAIServiceManager {
      * Ensure manager is initialized before using
      */
     private ensureInitialized() {
-        if (!this.initialized) {
-            this.updateProviderOrder();
-        }
+        // No longer needed with simplified approach
     }
 
+    /**
+     * Get or create any available AI service following the simplified pattern
+     * Returns a service or throws a meaningful error
+     */
+    async getOrCreateAnyService(): Promise<AIService> {
+        this.ensureInitialized();
+
+        // Get the selected provider using the new configuration system
+        const selectedProvider = await this.getSelectedProviderAsync();
+
+        if (!selectedProvider) {
+            throw new Error('No AI provider is selected. Please select a provider (OpenAI, Anthropic, or Ollama) in your AI settings.');
+        }
+
+        try {
+            const service = await this.getOrCreateChatProvider(selectedProvider);
+            if (service) {
+                return service;
+            }
+            throw new Error(`Failed to create ${selectedProvider} service`);
+        } catch (error) {
+            log.error(`Provider ${selectedProvider} not available: ${error}`);
+            throw new Error(`Selected AI provider (${selectedProvider}) is not available. Please check your configuration: ${error}`);
+        }
+    }
+
     /**
-     * Check if any AI service is available
+     * Check if any AI service is available (legacy method for backward compatibility)
      */
     isAnyServiceAvailable(): boolean {
-        return Object.values(this.services).some(service => service.isAvailable());
+        this.ensureInitialized();
+
+        // Check if we have the selected provider available
+        return this.getAvailableProviders().length > 0;
     }
 
     /**
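Across the hunks above, the manager moves from a provider precedence list (providerOrder, sortedProviders) to a single selected provider that fails fast. A condensed, self-contained sketch of that flow (types simplified; the real logic lives on AIServiceManager and its configuration helpers):

type Provider = "openai" | "anthropic" | "ollama";

interface ChatService { isAvailable(): boolean; }

async function resolveSelectedService(
    getSelected: () => Promise<Provider | null>,
    create: (p: Provider) => Promise<ChatService | null>,
): Promise<ChatService> {
    const selected = await getSelected();
    if (!selected) {
        throw new Error("No AI provider is selected. Please select a provider in your AI settings.");
    }
    const service = await create(selected);
    if (!service || !service.isAvailable()) {
        throw new Error(`Selected AI provider (${selected}) is not available.`);
    }
    return service; // no silent fallback to other providers
}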
@@ -235,25 +235,27 @@ export class AIServiceManager implements IAIServiceManager {
             throw new Error('No messages provided for chat completion');
         }
 
-        // Try providers in order of preference
-        const availableProviders = this.getAvailableProviders();
-
-        if (availableProviders.length === 0) {
-            throw new Error('No AI providers are available. Please check your AI settings.');
+        // Get the selected provider
+        const selectedProvider = await this.getSelectedProviderAsync();
+
+        if (!selectedProvider) {
+            throw new Error('No AI provider is selected. Please select a provider in your AI settings.');
         }
 
+        // Check if the selected provider is available
+        const availableProviders = this.getAvailableProviders();
+        if (!availableProviders.includes(selectedProvider)) {
+            throw new Error(`Selected AI provider (${selectedProvider}) is not available. Please check your configuration.`);
+        }
+
-        // Sort available providers by precedence
-        const sortedProviders = this.providerOrder
-            .filter(provider => availableProviders.includes(provider));
-
         // If a specific provider is requested and available, use it
         if (options.model && options.model.includes(':')) {
             // Use the new configuration system to parse model identifier
             const modelIdentifier = parseModelIdentifier(options.model);
 
-            if (modelIdentifier.provider && availableProviders.includes(modelIdentifier.provider as ServiceProviders)) {
+            if (modelIdentifier.provider && modelIdentifier.provider === selectedProvider) {
                 try {
-                    const service = this.services[modelIdentifier.provider as ServiceProviders];
+                    const service = await this.getOrCreateChatProvider(modelIdentifier.provider as ServiceProviders);
                     if (service) {
                         const modifiedOptions = { ...options, model: modelIdentifier.modelId };
                         log.info(`[AIServiceManager] Using provider ${modelIdentifier.provider} from model prefix with modifiedOptions.stream: ${modifiedOptions.stream}`);
@@ -261,42 +263,26 @@ export class AIServiceManager implements IAIServiceManager {
                     }
                 } catch (error) {
                     log.error(`Error with specified provider ${modelIdentifier.provider}: ${error}`);
-                    // If the specified provider fails, continue with the fallback providers
+                    throw new Error(`Failed to use specified provider ${modelIdentifier.provider}: ${error}`);
                 }
+            } else if (modelIdentifier.provider && modelIdentifier.provider !== selectedProvider) {
+                throw new Error(`Model specifies provider '${modelIdentifier.provider}' but selected provider is '${selectedProvider}'. Please select the correct provider or use a model without provider prefix.`);
             }
+            // If not a provider prefix, treat the entire string as a model name and continue with normal provider selection
         }
 
-        // If user has a specific provider selected, try only that one and fail fast
-        if (this.providerOrder.length === 1 && sortedProviders.length === 1) {
-            const selectedProvider = sortedProviders[0];
+        // Use the selected provider
         try {
             const service = await this.getOrCreateChatProvider(selectedProvider);
             if (!service) {
                 throw new Error(`Failed to create selected chat provider: ${selectedProvider}. Please check your configuration.`);
             }
             log.info(`[AIServiceManager] Using selected provider ${selectedProvider} with options.stream: ${options.stream}`);
             return await service.generateChatCompletion(messages, options);
         } catch (error) {
             log.error(`Error with selected provider ${selectedProvider}: ${error}`);
             throw new Error(`Selected AI provider (${selectedProvider}) failed: ${error}`);
         }
-
-        // If no specific provider selected, try each provider in order until one succeeds
-        let lastError: Error | null = null;
-
-        for (const provider of sortedProviders) {
-            try {
-                const service = await this.getOrCreateChatProvider(provider);
-                if (service) {
-                    log.info(`[AIServiceManager] Trying provider ${provider} with options.stream: ${options.stream}`);
-                    return await service.generateChatCompletion(messages, options);
-                }
-            } catch (error) {
-                log.error(`Error with provider ${provider}: ${error}`);
-                lastError = error as Error;
-                // Continue to the next provider
-            }
-        }
-
-        // If we get here, all providers failed
-        throw new Error(`All AI providers failed: ${lastError?.message || 'Unknown error'}`);
     }
 
     setupEventListeners() {
@@ -408,8 +394,8 @@ export class AIServiceManager implements IAIServiceManager {
 
         switch (providerName) {
             case 'openai': {
-                const apiKey = await options.getOption('openaiApiKey');
-                const baseUrl = await options.getOption('openaiBaseUrl');
+                const apiKey = options.getOption('openaiApiKey');
+                const baseUrl = options.getOption('openaiBaseUrl');
                 if (!apiKey && !baseUrl) return null;
 
                 service = new OpenAIService();
@@ -421,7 +407,7 @@ export class AIServiceManager implements IAIServiceManager {
             }
 
             case 'anthropic': {
-                const apiKey = await options.getOption('anthropicApiKey');
+                const apiKey = options.getOption('anthropicApiKey');
                 if (!apiKey) return null;
 
                 service = new AnthropicService();
@@ -432,7 +418,7 @@ export class AIServiceManager implements IAIServiceManager {
             }
 
             case 'ollama': {
-                const baseUrl = await options.getOption('ollamaBaseUrl');
+                const baseUrl = options.getOption('ollamaBaseUrl');
                 if (!baseUrl) return null;
 
                 service = new OllamaService();
@@ -445,7 +431,6 @@ export class AIServiceManager implements IAIServiceManager {
 
             if (service) {
                 this.services[providerName] = service;
                 log.info(`Created and validated ${providerName} chat provider`);
                 return service;
             }
         } catch (error: any) {
@@ -470,9 +455,6 @@ export class AIServiceManager implements IAIServiceManager {
             return;
         }
 
-        // Update provider order from configuration
-        await this.updateProviderOrderAsync();
-
         // Initialize index service
         await this.getIndexService().initialize();
@@ -590,18 +572,22 @@ export class AIServiceManager implements IAIServiceManager {
             if (service && service.isAvailable()) {
                 return service;
             }
             throw new Error(`Specified provider ${provider} is not available`);
         }
 
-        // Otherwise, try providers in the configured order
-        for (const providerName of this.providerOrder) {
-            const service = await this.getOrCreateChatProvider(providerName);
-            if (service && service.isAvailable()) {
-                return service;
-            }
+        // Otherwise, use the selected provider
+        const selectedProvider = await this.getSelectedProviderAsync();
+        if (!selectedProvider) {
+            throw new Error('No AI provider is selected. Please select a provider in your AI settings.');
         }
 
+        const service = await this.getOrCreateChatProvider(selectedProvider);
+        if (service && service.isAvailable()) {
+            return service;
+        }
+
         // If no provider is available, throw a clear error
-        throw new Error('No AI chat providers are available. Please check your AI settings.');
+        throw new Error(`Selected AI provider (${selectedProvider}) is not available. Please check your AI settings.`);
     }
 
     /**
@@ -611,14 +597,14 @@ export class AIServiceManager implements IAIServiceManager {
         try {
             const selectedProvider = await getSelectedProvider();
             if (selectedProvider === null) {
-                // No provider selected, fallback to first available
-                log.info('No provider selected, using first available provider');
-                return this.providerOrder[0];
+                // No provider selected, fallback to default
+                log.info('No provider selected, using default provider');
+                return 'openai';
             }
             return selectedProvider;
         } catch (error) {
             log.error(`Error getting preferred provider: ${error}`);
-            return this.providerOrder[0];
+            return 'openai';
         }
     }
@@ -628,16 +614,18 @@ export class AIServiceManager implements IAIServiceManager {
     getSelectedProvider(): string {
         this.ensureInitialized();
 
-        // Return the first available provider in the order
-        for (const providerName of this.providerOrder) {
-            const service = this.services[providerName];
-            if (service && service.isAvailable()) {
-                return providerName;
+        // Try to get the selected provider synchronously
+        try {
+            const selectedProvider = options.getOption('aiSelectedProvider');
+            if (selectedProvider) {
+                return selectedProvider;
             }
+        } catch (error) {
+            log.error(`Error getting selected provider: ${error}`);
         }
 
-        // Return the first provider as fallback
-        return this.providerOrder[0];
+        // Return a default if nothing is selected (for backward compatibility)
+        return 'openai';
     }
 
     /**
@@ -746,9 +734,6 @@ export class AIServiceManager implements IAIServiceManager {
             const providerManager = await import('./providers/providers.js');
             providerManager.clearAllEmbeddingProviders();
 
-            // Update provider order with new configuration
-            await this.updateProviderOrderAsync();
-
             log.info('LLM services recreated successfully');
         } catch (error) {
             log.error(`Error recreating LLM services: ${this.handleError(error)}`);
@@ -776,6 +761,9 @@ export default {
     isAnyServiceAvailable(): boolean {
         return getInstance().isAnyServiceAvailable();
     },
+    async getOrCreateAnyService(): Promise<AIService> {
+        return getInstance().getOrCreateAnyService();
+    },
     getAvailableProviders() {
         return getInstance().getAvailableProviders();
     },
@@ -5,7 +5,7 @@
 import log from "../../log.js";
 import type { Request, Response } from "express";
 import type { Message, ChatCompletionOptions } from "../ai_interface.js";
-import { AIServiceManager } from "../ai_service_manager.js";
+import aiServiceManager from "../ai_service_manager.js";
 import { ChatPipeline } from "../pipeline/chat_pipeline.js";
 import type { ChatPipelineInput } from "../pipeline/interfaces.js";
 import options from "../../options.js";
@@ -33,25 +33,6 @@ class RestChatService {
         }
     }
 
-    /**
-     * Check if AI services are available
-     */
-    safelyUseAIManager(): boolean {
-        if (!this.isDatabaseInitialized()) {
-            log.info("AI check failed: Database is not initialized");
-            return false;
-        }
-
-        try {
-            const aiManager = new AIServiceManager();
-            const isAvailable = aiManager.isAnyServiceAvailable();
-            log.info(`AI service availability check result: ${isAvailable}`);
-            return isAvailable;
-        } catch (error) {
-            log.error(`Error accessing AI service manager: ${error}`);
-            return false;
-        }
-    }
-
     /**
      * Handle a message sent to an LLM and get a response
|
||||
/**
|
||||
* Handle a message sent to an LLM and get a response
|
||||
@ -93,10 +74,14 @@ class RestChatService {
|
||||
return { error: "AI features are disabled. Please enable them in the settings." };
|
||||
}
|
||||
|
||||
if (!this.safelyUseAIManager()) {
|
||||
return { error: "AI services are currently unavailable. Please check your configuration." };
|
||||
// Check database initialization first
|
||||
if (!this.isDatabaseInitialized()) {
|
||||
throw new Error("Database is not initialized");
|
||||
}
|
||||
|
||||
// Get or create AI service - will throw meaningful error if not possible
|
||||
await aiServiceManager.getOrCreateAnyService();
|
||||
|
||||
// Load or create chat directly from storage
|
||||
let chat = await chatStorageService.getChat(chatNoteId);
|
||||
|
||||
|
@@ -70,7 +70,7 @@ export class ConfigurationManager {
      */
     public async getSelectedProvider(): Promise<ProviderType | null> {
         try {
-            const selectedProvider = await options.getOption('aiSelectedProvider');
+            const selectedProvider = options.getOption('aiSelectedProvider');
             return selectedProvider as ProviderType || null;
         } catch (error) {
             log.error(`Error getting selected provider: ${error}`);
@@ -83,7 +83,7 @@ export class ConfigurationManager {
      */
     public async getSelectedEmbeddingProvider(): Promise<EmbeddingProviderType | null> {
         try {
-            const selectedProvider = await options.getOption('embeddingSelectedProvider');
+            const selectedProvider = options.getOption('embeddingSelectedProvider');
             return selectedProvider as EmbeddingProviderType || null;
         } catch (error) {
             log.error(`Error getting selected embedding provider: ${error}`);
@@ -155,11 +155,9 @@ export class ConfigurationManager {
      */
     public async getDefaultModels(): Promise<Record<ProviderType, string | undefined>> {
         try {
-            const [openaiModel, anthropicModel, ollamaModel] = await Promise.all([
-                options.getOption('openaiDefaultModel'),
-                options.getOption('anthropicDefaultModel'),
-                options.getOption('ollamaDefaultModel')
-            ]);
+            const openaiModel = options.getOption('openaiDefaultModel');
+            const anthropicModel = options.getOption('anthropicDefaultModel');
+            const ollamaModel = options.getOption('ollamaDefaultModel');
 
             return {
                 openai: openaiModel || undefined,
@@ -182,20 +180,14 @@ export class ConfigurationManager {
      */
     public async getProviderSettings(): Promise<ProviderSettings> {
         try {
-            const [
-                openaiApiKey, openaiBaseUrl, openaiDefaultModel,
-                anthropicApiKey, anthropicBaseUrl, anthropicDefaultModel,
-                ollamaBaseUrl, ollamaDefaultModel
-            ] = await Promise.all([
-                options.getOption('openaiApiKey'),
-                options.getOption('openaiBaseUrl'),
-                options.getOption('openaiDefaultModel'),
-                options.getOption('anthropicApiKey'),
-                options.getOption('anthropicBaseUrl'),
-                options.getOption('anthropicDefaultModel'),
-                options.getOption('ollamaBaseUrl'),
-                options.getOption('ollamaDefaultModel')
-            ]);
+            const openaiApiKey = options.getOption('openaiApiKey');
+            const openaiBaseUrl = options.getOption('openaiBaseUrl');
+            const openaiDefaultModel = options.getOption('openaiDefaultModel');
+            const anthropicApiKey = options.getOption('anthropicApiKey');
+            const anthropicBaseUrl = options.getOption('anthropicBaseUrl');
+            const anthropicDefaultModel = options.getOption('anthropicDefaultModel');
+            const ollamaBaseUrl = options.getOption('ollamaBaseUrl');
+            const ollamaDefaultModel = options.getOption('ollamaDefaultModel');
 
             const settings: ProviderSettings = {};
 
@@ -302,7 +294,7 @@ export class ConfigurationManager {
 
     private async getAIEnabled(): Promise<boolean> {
         try {
-            return await options.getOptionBool('aiEnabled');
+            return options.getOptionBool('aiEnabled');
         } catch {
             return false;
         }
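The recurring change in this file is dropping await (and Promise.all) around options.getOption, which appears to be synchronous in this codebase. A self-contained sketch with a mock store (hypothetical option value) of why the awaits were redundant:

const store = new Map<string, string>([["openaiDefaultModel", "gpt-4o"]]);

const options = {
    getOption(name: string): string | undefined {
        return store.get(name); // synchronous lookup - no Promise involved
    },
};

// Before: const [model] = await Promise.all([options.getOption("openaiDefaultModel")]);
// Promise.all accepts non-promise values, so this compiled and ran, but it
// implied async work that never happened. The direct call is equivalent:
const model = options.getOption("openaiDefaultModel");
console.log(model); // "gpt-4o"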