Mirror of https://github.com/TriliumNext/Notes.git (synced 2025-07-27 18:12:29 +08:00)

Commit 7a04c8a7fd: Merge branch 'develop' of https://github.com/TriliumNext/Notes into develop
@@ -1314,7 +1314,25 @@
    "create_new_ai_chat": "Create new AI Chat"
},
    "create_new_ai_chat": "Create new AI Chat",
    "configuration_warnings": "There are some issues with your AI configuration. Please check your settings."
    "configuration_warnings": "There are some issues with your AI configuration. Please check your settings.",
    "embeddings_started": "Embedding generation started",
    "embeddings_stopped": "Embedding generation stopped",
    "embeddings_toggle_error": "Error toggling embeddings",
    "local_embedding_description": "Uses local embedding models for offline text embedding generation",
    "local_embedding_settings": "Local Embedding Settings",
    "ollama_embedding_settings": "Ollama Embedding Settings",
    "ollama_embedding_url_description": "URL for the Ollama API for embedding generation (default: http://localhost:11434)",
    "openai_embedding_api_key_description": "Your OpenAI API key for embedding generation (can be different from chat API key)",
    "openai_embedding_settings": "OpenAI Embedding Settings",
    "openai_embedding_url_description": "Base URL for OpenAI embedding API (default: https://api.openai.com/v1)",
    "selected_embedding_provider": "Selected Embedding Provider",
    "selected_embedding_provider_description": "Choose the provider for generating note embeddings",
    "selected_provider": "Selected Provider",
    "selected_provider_description": "Choose the AI provider for chat and completion features",
    "select_embedding_provider": "Select embedding provider...",
    "select_model": "Select model...",
    "select_provider": "Select provider...",
    "voyage_embedding_url_description": "Base URL for the Voyage AI embedding API (default: https://api.voyageai.com/v1)"
},
"zoom_factor": {
    "title": "Zoom Factor (desktop build only)",
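The new keys above are consumed through the t() helper used throughout the widget code later in this diff; a minimal sketch of the assumed resolution, treating the keys as living under an "ai_llm" namespace (the actual translation framework in the codebase may differ):

    // Sketch only: assumes i18next-style dot-separated key lookup.
    t("ai_llm.embeddings_started");      // → "Embedding generation started"
    t("ai_llm.configuration_warnings");  // → "There are some issues with your AI configuration. ..."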
@@ -44,7 +44,7 @@ export async function validateEmbeddingProviders(validationWarning: HTMLElement)
        // Check OpenAI configuration
        const apiKey = options.get('openaiApiKey');
        if (!apiKey) {
            configIssues.push(`OpenAI API key is missing`);
            configIssues.push(`OpenAI API key is missing (optional for OpenAI-compatible endpoints)`);
        }
    } else if (provider === 'anthropic') {
        // Check Anthropic configuration
@@ -51,6 +51,35 @@ export default class AiSettingsWidget extends OptionsWidget {

    await this.updateOption(optionName, value);

    // Special handling for aiEnabled option
    if (optionName === 'aiEnabled') {
        try {
            const isEnabled = value === 'true';

            if (isEnabled) {
                // Start embedding generation
                await server.post('llm/embeddings/start');
                toastService.showMessage(t("ai_llm.embeddings_started") || "Embedding generation started");

                // Start polling for stats updates
                this.refreshEmbeddingStats();
            } else {
                // Stop embedding generation
                await server.post('llm/embeddings/stop');
                toastService.showMessage(t("ai_llm.embeddings_stopped") || "Embedding generation stopped");

                // Clear any active polling intervals
                if (this.indexRebuildRefreshInterval) {
                    clearInterval(this.indexRebuildRefreshInterval);
                    this.indexRebuildRefreshInterval = null;
                }
            }
        } catch (error) {
            console.error('Error toggling embeddings:', error);
            toastService.showError(t("ai_llm.embeddings_toggle_error") || "Error toggling embeddings");
        }
    }

    if (validateAfter) {
        await this.displayValidationWarnings();
    }
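The handler above relies on refreshEmbeddingStats() to begin polling and on clearing indexRebuildRefreshInterval to stop it. That method is not part of this diff; a minimal sketch of the assumed polling pattern (the stats endpoint path here is hypothetical):

    // Sketch only — the real refreshEmbeddingStats() is outside this diff.
    refreshEmbeddingStats() {
        if (this.indexRebuildRefreshInterval) {
            clearInterval(this.indexRebuildRefreshInterval);
        }
        this.indexRebuildRefreshInterval = setInterval(async () => {
            const stats = await server.get('llm/embeddings/stats'); // hypothetical path
            // ...render processed/total counts into the widget...
        }, 5000);
    }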
@@ -65,7 +94,7 @@ export default class AiSettingsWidget extends OptionsWidget {

    // Core AI options
    this.setupChangeHandler('.ai-enabled', 'aiEnabled', true, true);
    this.setupChangeHandler('.ai-provider-precedence', 'aiProviderPrecedence', true);
    this.setupChangeHandler('.ai-selected-provider', 'aiSelectedProvider', true);
    this.setupChangeHandler('.ai-temperature', 'aiTemperature');
    this.setupChangeHandler('.ai-system-prompt', 'aiSystemPrompt');
@@ -83,11 +112,17 @@ export default class AiSettingsWidget extends OptionsWidget {
    // Voyage options
    this.setupChangeHandler('.voyage-api-key', 'voyageApiKey');
    this.setupChangeHandler('.voyage-embedding-model', 'voyageEmbeddingModel');
    this.setupChangeHandler('.voyage-embedding-base-url', 'voyageEmbeddingBaseUrl');

    // Ollama options
    this.setupChangeHandler('.ollama-base-url', 'ollamaBaseUrl');
    this.setupChangeHandler('.ollama-default-model', 'ollamaDefaultModel');
    this.setupChangeHandler('.ollama-embedding-model', 'ollamaEmbeddingModel');
    this.setupChangeHandler('.ollama-embedding-base-url', 'ollamaEmbeddingBaseUrl');

    // Embedding-specific provider options
    this.setupChangeHandler('.openai-embedding-api-key', 'openaiEmbeddingApiKey', true);
    this.setupChangeHandler('.openai-embedding-base-url', 'openaiEmbeddingBaseUrl', true);

    const $refreshModels = this.$widget.find('.refresh-models');
    $refreshModels.on('click', async () => {
@@ -132,11 +167,120 @@ export default class AiSettingsWidget extends OptionsWidget {
    this.setupChangeHandler('.enable-automatic-indexing', 'enableAutomaticIndexing', false, true);
    this.setupChangeHandler('.embedding-similarity-threshold', 'embeddingSimilarityThreshold');
    this.setupChangeHandler('.max-notes-per-llm-query', 'maxNotesPerLlmQuery');
    this.setupChangeHandler('.embedding-provider-precedence', 'embeddingProviderPrecedence', true);
    this.setupChangeHandler('.embedding-selected-provider', 'embeddingSelectedProvider', true);
    this.setupChangeHandler('.embedding-dimension-strategy', 'embeddingDimensionStrategy');
    this.setupChangeHandler('.embedding-batch-size', 'embeddingBatchSize');
    this.setupChangeHandler('.embedding-update-interval', 'embeddingUpdateInterval');

    // Add provider selection change handlers for dynamic settings visibility
    this.$widget.find('.ai-selected-provider').on('change', async () => {
        const selectedProvider = this.$widget.find('.ai-selected-provider').val() as string;
        this.$widget.find('.provider-settings').hide();
        if (selectedProvider) {
            this.$widget.find(`.${selectedProvider}-provider-settings`).show();
            // Automatically fetch models for the newly selected provider
            await this.fetchModelsForProvider(selectedProvider, 'chat');
        }
    });

    this.$widget.find('.embedding-selected-provider').on('change', async () => {
        const selectedProvider = this.$widget.find('.embedding-selected-provider').val() as string;
        this.$widget.find('.embedding-provider-settings').hide();
        if (selectedProvider) {
            this.$widget.find(`.${selectedProvider}-embedding-provider-settings`).show();
            // Automatically fetch embedding models for the newly selected provider
            await this.fetchModelsForProvider(selectedProvider, 'embedding');
        }
    });

    // Add base URL change handlers to trigger model fetching
    this.$widget.find('.openai-base-url').on('change', async () => {
        const selectedProvider = this.$widget.find('.ai-selected-provider').val() as string;
        const selectedEmbeddingProvider = this.$widget.find('.embedding-selected-provider').val() as string;
        if (selectedProvider === 'openai') {
            await this.fetchModelsForProvider('openai', 'chat');
        }
        if (selectedEmbeddingProvider === 'openai') {
            await this.fetchModelsForProvider('openai', 'embedding');
        }
    });

    this.$widget.find('.anthropic-base-url').on('change', async () => {
        const selectedProvider = this.$widget.find('.ai-selected-provider').val() as string;
        if (selectedProvider === 'anthropic') {
            await this.fetchModelsForProvider('anthropic', 'chat');
        }
    });

    this.$widget.find('.ollama-base-url').on('change', async () => {
        const selectedProvider = this.$widget.find('.ai-selected-provider').val() as string;
        const selectedEmbeddingProvider = this.$widget.find('.embedding-selected-provider').val() as string;
        if (selectedProvider === 'ollama') {
            await this.fetchModelsForProvider('ollama', 'chat');
        }
        if (selectedEmbeddingProvider === 'ollama') {
            await this.fetchModelsForProvider('ollama', 'embedding');
        }
    });

    // Add API key change handlers to trigger model fetching
    this.$widget.find('.openai-api-key').on('change', async () => {
        const selectedProvider = this.$widget.find('.ai-selected-provider').val() as string;
        const selectedEmbeddingProvider = this.$widget.find('.embedding-selected-provider').val() as string;
        if (selectedProvider === 'openai') {
            await this.fetchModelsForProvider('openai', 'chat');
        }
        if (selectedEmbeddingProvider === 'openai') {
            await this.fetchModelsForProvider('openai', 'embedding');
        }
    });

    this.$widget.find('.anthropic-api-key').on('change', async () => {
        const selectedProvider = this.$widget.find('.ai-selected-provider').val() as string;
        if (selectedProvider === 'anthropic') {
            await this.fetchModelsForProvider('anthropic', 'chat');
        }
    });

    this.$widget.find('.voyage-api-key').on('change', async () => {
        const selectedEmbeddingProvider = this.$widget.find('.embedding-selected-provider').val() as string;
        if (selectedEmbeddingProvider === 'voyage') {
            // Voyage doesn't have dynamic model fetching yet, but we can add it here when implemented
            console.log('Voyage API key changed - model fetching not yet implemented');
        }
    });

    // Add embedding base URL change handlers to trigger model fetching
    this.$widget.find('.openai-embedding-base-url').on('change', async () => {
        const selectedEmbeddingProvider = this.$widget.find('.embedding-selected-provider').val() as string;
        if (selectedEmbeddingProvider === 'openai') {
            await this.fetchModelsForProvider('openai', 'embedding');
        }
    });

    this.$widget.find('.voyage-embedding-base-url').on('change', async () => {
        const selectedEmbeddingProvider = this.$widget.find('.embedding-selected-provider').val() as string;
        if (selectedEmbeddingProvider === 'voyage') {
            // Voyage doesn't have dynamic model fetching yet, but we can add it here when implemented
            console.log('Voyage embedding base URL changed - model fetching not yet implemented');
        }
    });

    this.$widget.find('.ollama-embedding-base-url').on('change', async () => {
        const selectedEmbeddingProvider = this.$widget.find('.embedding-selected-provider').val() as string;
        if (selectedEmbeddingProvider === 'ollama') {
            await this.fetchModelsForProvider('ollama', 'embedding');
        }
    });

    // Add embedding API key change handlers to trigger model fetching
    this.$widget.find('.openai-embedding-api-key').on('change', async () => {
        const selectedEmbeddingProvider = this.$widget.find('.embedding-selected-provider').val() as string;
        if (selectedEmbeddingProvider === 'openai') {
            await this.fetchModelsForProvider('openai', 'embedding');
        }
    });

    // No sortable behavior needed anymore

    // Embedding stats refresh button
@@ -194,42 +338,25 @@ export default class AiSettingsWidget extends OptionsWidget {
            return;
        }

        // Get provider precedence
        const providerPrecedence = (this.$widget.find('.ai-provider-precedence').val() as string || '').split(',');
        // Get selected provider
        const selectedProvider = this.$widget.find('.ai-selected-provider').val() as string;

        // Check for OpenAI configuration if it's in the precedence list
        const openaiWarnings: string[] = [];
        if (providerPrecedence.includes('openai')) {
        // Check for selected provider configuration
        const providerWarnings: string[] = [];
        if (selectedProvider === 'openai') {
            const openaiApiKey = this.$widget.find('.openai-api-key').val();
            if (!openaiApiKey) {
                openaiWarnings.push(t("ai_llm.empty_key_warning.openai"));
                providerWarnings.push(t("ai_llm.empty_key_warning.openai"));
            }
        }

        // Check for Anthropic configuration if it's in the precedence list
        const anthropicWarnings: string[] = [];
        if (providerPrecedence.includes('anthropic')) {
        } else if (selectedProvider === 'anthropic') {
            const anthropicApiKey = this.$widget.find('.anthropic-api-key').val();
            if (!anthropicApiKey) {
                anthropicWarnings.push(t("ai_llm.empty_key_warning.anthropic"));
                providerWarnings.push(t("ai_llm.empty_key_warning.anthropic"));
            }
        }

        // Check for Voyage configuration if it's in the precedence list
        const voyageWarnings: string[] = [];
        if (providerPrecedence.includes('voyage')) {
            const voyageApiKey = this.$widget.find('.voyage-api-key').val();
            if (!voyageApiKey) {
                voyageWarnings.push(t("ai_llm.empty_key_warning.voyage"));
            }
        }

        // Check for Ollama configuration if it's in the precedence list
        const ollamaWarnings: string[] = [];
        if (providerPrecedence.includes('ollama')) {
        } else if (selectedProvider === 'ollama') {
            const ollamaBaseUrl = this.$widget.find('.ollama-base-url').val();
            if (!ollamaBaseUrl) {
                ollamaWarnings.push(t("ai_llm.ollama_no_url"));
                providerWarnings.push(t("ai_llm.ollama_no_url"));
            }
        }
@@ -238,27 +365,24 @@ export default class AiSettingsWidget extends OptionsWidget {
        const embeddingsEnabled = this.$widget.find('.enable-automatic-indexing').prop('checked');

        if (embeddingsEnabled) {
            const embeddingProviderPrecedence = (this.$widget.find('.embedding-provider-precedence').val() as string || '').split(',');
            const selectedEmbeddingProvider = this.$widget.find('.embedding-selected-provider').val() as string;

            if (embeddingProviderPrecedence.includes('openai') && !this.$widget.find('.openai-api-key').val()) {
            if (selectedEmbeddingProvider === 'openai' && !this.$widget.find('.openai-api-key').val()) {
                embeddingWarnings.push(t("ai_llm.empty_key_warning.openai"));
            }

            if (embeddingProviderPrecedence.includes('voyage') && !this.$widget.find('.voyage-api-key').val()) {
            if (selectedEmbeddingProvider === 'voyage' && !this.$widget.find('.voyage-api-key').val()) {
                embeddingWarnings.push(t("ai_llm.empty_key_warning.voyage"));
            }

            if (embeddingProviderPrecedence.includes('ollama') && !this.$widget.find('.ollama-base-url').val()) {
            if (selectedEmbeddingProvider === 'ollama' && !this.$widget.find('.ollama-embedding-base-url').val()) {
                embeddingWarnings.push(t("ai_llm.empty_key_warning.ollama"));
            }
        }

        // Combine all warnings
        const allWarnings = [
            ...openaiWarnings,
            ...anthropicWarnings,
            ...voyageWarnings,
            ...ollamaWarnings,
            ...providerWarnings,
            ...embeddingWarnings
        ];
@@ -449,40 +573,110 @@ export default class AiSettingsWidget extends OptionsWidget {
        }
    }

    /**
     * Set model dropdown value, adding the option if it doesn't exist
     */
    setModelDropdownValue(selector: string, value: string | undefined) {
        if (!this.$widget || !value) return;

        const $dropdown = this.$widget.find(selector);

        // Check if the value already exists as an option
        if ($dropdown.find(`option[value="${value}"]`).length === 0) {
            // Add the custom value as an option
            $dropdown.append(`<option value="${value}">${value} (current)</option>`);
        }

        // Set the value
        $dropdown.val(value);
    }
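A usage sketch of the helper above, with a hypothetical saved model name that is absent from the static option list:

    // "gpt-4o-mini" is illustrative; any previously saved custom model behaves the same.
    this.setModelDropdownValue('.openai-default-model', 'gpt-4o-mini');
    // The dropdown now contains <option value="gpt-4o-mini">gpt-4o-mini (current)</option>
    // and selects it, rather than silently falling back to the first predefined option.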

    /**
     * Fetch models for a specific provider and model type
     */
    async fetchModelsForProvider(provider: string, modelType: 'chat' | 'embedding') {
        if (!this.providerService) return;

        try {
            switch (provider) {
                case 'openai':
                    this.openaiModelsRefreshed = await this.providerService.refreshOpenAIModels(false, this.openaiModelsRefreshed);
                    break;
                case 'anthropic':
                    this.anthropicModelsRefreshed = await this.providerService.refreshAnthropicModels(false, this.anthropicModelsRefreshed);
                    break;
                case 'ollama':
                    this.ollamaModelsRefreshed = await this.providerService.refreshOllamaModels(false, this.ollamaModelsRefreshed);
                    break;
                default:
                    console.log(`Model fetching not implemented for provider: ${provider}`);
            }
        } catch (error) {
            console.error(`Error fetching models for ${provider}:`, error);
        }
    }

    /**
     * Update provider settings visibility based on selected providers
     */
    updateProviderSettingsVisibility() {
        if (!this.$widget) return;

        // Update AI provider settings visibility
        const selectedAiProvider = this.$widget.find('.ai-selected-provider').val() as string;
        this.$widget.find('.provider-settings').hide();
        if (selectedAiProvider) {
            this.$widget.find(`.${selectedAiProvider}-provider-settings`).show();
        }

        // Update embedding provider settings visibility
        const selectedEmbeddingProvider = this.$widget.find('.embedding-selected-provider').val() as string;
        this.$widget.find('.embedding-provider-settings').hide();
        if (selectedEmbeddingProvider) {
            this.$widget.find(`.${selectedEmbeddingProvider}-embedding-provider-settings`).show();
        }
    }

    /**
     * Called when the options have been loaded from the server
     */
    optionsLoaded(options: OptionMap) {
    async optionsLoaded(options: OptionMap) {
        if (!this.$widget) return;

        // AI Options
        this.$widget.find('.ai-enabled').prop('checked', options.aiEnabled !== 'false');
        this.$widget.find('.ai-temperature').val(options.aiTemperature || '0.7');
        this.$widget.find('.ai-system-prompt').val(options.aiSystemPrompt || '');
        this.$widget.find('.ai-provider-precedence').val(options.aiProviderPrecedence || 'openai,anthropic,ollama');
        this.$widget.find('.ai-selected-provider').val(options.aiSelectedProvider || 'openai');

        // OpenAI Section
        this.$widget.find('.openai-api-key').val(options.openaiApiKey || '');
        this.$widget.find('.openai-base-url').val(options.openaiBaseUrl || 'https://api.openai_llm.com/v1');
        this.$widget.find('.openai-default-model').val(options.openaiDefaultModel || 'gpt-4o');
        this.$widget.find('.openai-embedding-model').val(options.openaiEmbeddingModel || 'text-embedding-3-small');
        this.$widget.find('.openai-base-url').val(options.openaiBaseUrl || 'https://api.openai.com/v1');
        this.setModelDropdownValue('.openai-default-model', options.openaiDefaultModel);
        this.setModelDropdownValue('.openai-embedding-model', options.openaiEmbeddingModel);

        // Anthropic Section
        this.$widget.find('.anthropic-api-key').val(options.anthropicApiKey || '');
        this.$widget.find('.anthropic-base-url').val(options.anthropicBaseUrl || 'https://api.anthropic.com');
        this.$widget.find('.anthropic-default-model').val(options.anthropicDefaultModel || 'claude-3-opus-20240229');
        this.setModelDropdownValue('.anthropic-default-model', options.anthropicDefaultModel);

        // Voyage Section
        this.$widget.find('.voyage-api-key').val(options.voyageApiKey || '');
        this.$widget.find('.voyage-embedding-model').val(options.voyageEmbeddingModel || 'voyage-2');
        this.$widget.find('.voyage-embedding-base-url').val(options.voyageEmbeddingBaseUrl || 'https://api.voyageai.com/v1');
        this.setModelDropdownValue('.voyage-embedding-model', options.voyageEmbeddingModel);

        // Ollama Section
        this.$widget.find('.ollama-base-url').val(options.ollamaBaseUrl || 'http://localhost:11434');
        this.$widget.find('.ollama-default-model').val(options.ollamaDefaultModel || 'llama3');
        this.$widget.find('.ollama-embedding-model').val(options.ollamaEmbeddingModel || 'nomic-embed-text');
        this.$widget.find('.ollama-embedding-base-url').val(options.ollamaEmbeddingBaseUrl || 'http://localhost:11434');
        this.setModelDropdownValue('.ollama-default-model', options.ollamaDefaultModel);
        this.setModelDropdownValue('.ollama-embedding-model', options.ollamaEmbeddingModel);

        // Embedding-specific provider options
        this.$widget.find('.openai-embedding-api-key').val(options.openaiEmbeddingApiKey || '');
        this.$widget.find('.openai-embedding-base-url').val(options.openaiEmbeddingBaseUrl || 'https://api.openai.com/v1');

        // Embedding Options
        this.$widget.find('.embedding-provider-precedence').val(options.embeddingProviderPrecedence || 'openai,voyage,ollama,local');
        this.$widget.find('.embedding-selected-provider').val(options.embeddingSelectedProvider || 'openai');
        this.$widget.find('.embedding-auto-update-enabled').prop('checked', options.embeddingAutoUpdateEnabled !== 'false');
        this.$widget.find('.enable-automatic-indexing').prop('checked', options.enableAutomaticIndexing !== 'false');
        this.$widget.find('.embedding-similarity-threshold').val(options.embeddingSimilarityThreshold || '0.75');
@@ -491,6 +685,21 @@ export default class AiSettingsWidget extends OptionsWidget {
        this.$widget.find('.embedding-batch-size').val(options.embeddingBatchSize || '10');
        this.$widget.find('.embedding-update-interval').val(options.embeddingUpdateInterval || '5000');

        // Show/hide provider settings based on selected providers
        this.updateProviderSettingsVisibility();

        // Automatically fetch models for currently selected providers
        const selectedAiProvider = this.$widget.find('.ai-selected-provider').val() as string;
        const selectedEmbeddingProvider = this.$widget.find('.embedding-selected-provider').val() as string;

        if (selectedAiProvider) {
            await this.fetchModelsForProvider(selectedAiProvider, 'chat');
        }

        if (selectedEmbeddingProvider) {
            await this.fetchModelsForProvider(selectedEmbeddingProvider, 'embedding');
        }

        // Display validation warnings
        this.displayValidationWarnings();
    }
@@ -240,40 +240,65 @@ export class ProviderService {
        }

        try {
            const ollamaBaseUrl = this.$widget.find('.ollama-base-url').val() as string;
            // Determine which URL to use based on the current context
            // If we're in the embedding provider context, use the embedding base URL
            // Otherwise, use the general base URL
            const selectedAiProvider = this.$widget.find('.ai-selected-provider').val() as string;
            const selectedEmbeddingProvider = this.$widget.find('.embedding-selected-provider').val() as string;

            let ollamaBaseUrl: string;

            // If embedding provider is Ollama and it's visible, use embedding URL
            const $embeddingOllamaSettings = this.$widget.find('.ollama-embedding-provider-settings');
            if (selectedEmbeddingProvider === 'ollama' && $embeddingOllamaSettings.is(':visible')) {
                ollamaBaseUrl = this.$widget.find('.ollama-embedding-base-url').val() as string;
            } else {
                ollamaBaseUrl = this.$widget.find('.ollama-base-url').val() as string;
            }

            const response = await server.get<OllamaModelResponse>(`llm/providers/ollama/models?baseUrl=${encodeURIComponent(ollamaBaseUrl)}`);

            if (response && response.success && response.models && response.models.length > 0) {
                // Update both embedding model dropdowns
                const $embedModelSelect = this.$widget.find('.ollama-embedding-model');
                const $chatEmbedModelSelect = this.$widget.find('.ollama-chat-embedding-model');

                const currentValue = $embedModelSelect.val();
                const currentChatEmbedValue = $chatEmbedModelSelect.val();

                // Clear existing options
                $embedModelSelect.empty();

                // Add embedding-specific models first
                // Prepare embedding models
                const embeddingModels = response.models.filter(model =>
                    model.name.includes('embed') || model.name.includes('bert'));

                embeddingModels.forEach(model => {
                    $embedModelSelect.append(`<option value="${model.name}">${model.name}</option>`);
                });

                if (embeddingModels.length > 0) {
                    // Add separator if we have embedding models
                    $embedModelSelect.append(`<option disabled>─────────────</option>`);
                }

                // Then add general models which can be used for embeddings too
                const generalModels = response.models.filter(model =>
                    !model.name.includes('embed') && !model.name.includes('bert'));

                // Update .ollama-embedding-model dropdown (embedding provider settings)
                $embedModelSelect.empty();
                embeddingModels.forEach(model => {
                    $embedModelSelect.append(`<option value="${model.name}">${model.name}</option>`);
                });
                if (embeddingModels.length > 0) {
                    $embedModelSelect.append(`<option disabled>─────────────</option>`);
                }
                generalModels.forEach(model => {
                    $embedModelSelect.append(`<option value="${model.name}">${model.name}</option>`);
                });

                // Try to restore the previously selected value
                this.ensureSelectedValue($embedModelSelect, currentValue, 'ollamaEmbeddingModel');

                // Update .ollama-chat-embedding-model dropdown (general Ollama provider settings)
                $chatEmbedModelSelect.empty();
                embeddingModels.forEach(model => {
                    $chatEmbedModelSelect.append(`<option value="${model.name}">${model.name}</option>`);
                });
                if (embeddingModels.length > 0) {
                    $chatEmbedModelSelect.append(`<option disabled>─────────────</option>`);
                }
                generalModels.forEach(model => {
                    $chatEmbedModelSelect.append(`<option value="${model.name}">${model.name}</option>`);
                });
                this.ensureSelectedValue($chatEmbedModelSelect, currentChatEmbedValue, 'ollamaEmbeddingModel');

                // Also update the LLM model dropdown
                const $modelSelect = this.$widget.find('.ollama-default-model');
                const currentModelValue = $modelSelect.val();
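The response handling above implies a shape for OllamaModelResponse. Its actual declaration is outside this diff; a sketch inferred from the accesses shown (response.success, response.models, model.name):

    // Inferred shape only — the real interface may carry additional fields.
    interface OllamaModelResponse {
        success: boolean;
        models: { name: string }[];
    }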
@@ -61,36 +61,19 @@ export const TPL = `
<h4>${t("ai_llm.provider_configuration")}</h4>

<div class="form-group">
    <label>${t("ai_llm.provider_precedence")}</label>
    <input type="text" class="ai-provider-precedence form-control" placeholder="openai,anthropic,ollama">
    <div class="form-text">${t("ai_llm.provider_precedence_description")}</div>
    <label>${t("ai_llm.selected_provider")}</label>
    <select class="ai-selected-provider form-control">
        <option value="">${t("ai_llm.select_provider")}</option>
        <option value="openai">OpenAI</option>
        <option value="anthropic">Anthropic</option>
        <option value="ollama">Ollama</option>
    </select>
    <div class="form-text">${t("ai_llm.selected_provider_description")}</div>
</div>

<div class="form-group">
    <label>${t("ai_llm.temperature")}</label>
    <input class="ai-temperature form-control" type="number" min="0" max="2" step="0.1">
    <div class="form-text">${t("ai_llm.temperature_description")}</div>
</div>

<div class="form-group">
    <label>${t("ai_llm.system_prompt")}</label>
    <textarea class="ai-system-prompt form-control" rows="3"></textarea>
    <div class="form-text">${t("ai_llm.system_prompt_description")}</div>
</div>
</div>

<nav class="options-section-tabs">
    <div class="nav nav-tabs" id="nav-tab" role="tablist">
        <button class="nav-link active" id="nav-openai-tab" data-bs-toggle="tab" data-bs-target="#nav-openai" type="button" role="tab" aria-controls="nav-openai" aria-selected="true">${t("ai_llm.openai_tab")}</button>
        <button class="nav-link" id="nav-anthropic-tab" data-bs-toggle="tab" data-bs-target="#nav-anthropic" type="button" role="tab" aria-controls="nav-anthropic" aria-selected="false">${t("ai_llm.anthropic_tab")}</button>
        <button class="nav-link" id="nav-voyage-tab" data-bs-toggle="tab" data-bs-target="#nav-voyage" type="button" role="tab" aria-controls="nav-voyage" aria-selected="false">${t("ai_llm.voyage_tab")}</button>
        <button class="nav-link" id="nav-ollama-tab" data-bs-toggle="tab" data-bs-target="#nav-ollama" type="button" role="tab" aria-controls="nav-ollama" aria-selected="false">${t("ai_llm.ollama_tab")}</button>
    </div>
</nav>
<div class="options-section">
    <div class="tab-content" id="nav-tabContent">
        <div class="tab-pane fade show active" id="nav-openai" role="tabpanel" aria-labelledby="nav-openai-tab">
            <div class="card">
<!-- OpenAI Provider Settings -->
<div class="provider-settings openai-provider-settings" style="display: none;">
    <div class="card mt-3">
        <div class="card-header">
            <h5>${t("ai_llm.openai_settings")}</h5>
        </div>
@@ -110,27 +93,18 @@ export const TPL = `
<div class="form-group">
    <label>${t("ai_llm.model")}</label>
    <select class="openai-default-model form-control">
        <option value="gpt-4o">GPT-4o (recommended)</option>
        <option value="gpt-4">GPT-4</option>
        <option value="gpt-3.5-turbo">GPT-3.5 Turbo</option>
        <option value="">${t("ai_llm.select_model")}</option>
    </select>
    <div class="form-text">${t("ai_llm.openai_model_description")}</div>
    <button class="btn btn-sm btn-outline-secondary refresh-openai-models">${t("ai_llm.refresh_models")}</button>
</div>
</div>
</div>
</div>

<div class="form-group">
    <label>${t("ai_llm.embedding_model")}</label>
    <select class="openai-embedding-model form-control">
        <option value="text-embedding-3-small">text-embedding-3-small (recommended)</option>
        <option value="text-embedding-3-large">text-embedding-3-large</option>
    </select>
    <div class="form-text">${t("ai_llm.openai_embedding_model_description")}</div>
</div>
</div>
</div>
</div>
<div class="tab-pane fade" id="nav-anthropic" role="tabpanel" aria-labelledby="nav-anthropic-tab">
    <div class="card">
<!-- Anthropic Provider Settings -->
<div class="provider-settings anthropic-provider-settings" style="display: none;">
    <div class="card mt-3">
        <div class="card-header">
            <h5>${t("ai_llm.anthropic_settings")}</h5>
        </div>
@@ -150,9 +124,7 @@ export const TPL = `
<div class="form-group">
    <label>${t("ai_llm.model")}</label>
    <select class="anthropic-default-model form-control">
        <option value="claude-3-opus-20240229">Claude 3 Opus (recommended)</option>
        <option value="claude-3-sonnet-20240229">Claude 3 Sonnet</option>
        <option value="claude-3-haiku-20240307">Claude 3 Haiku</option>
        <option value="">${t("ai_llm.select_model")}</option>
    </select>
    <div class="form-text">${t("ai_llm.anthropic_model_description")}</div>
    <button class="btn btn-sm btn-outline-secondary refresh-anthropic-models">${t("ai_llm.refresh_models")}</button>
@@ -160,32 +132,10 @@ export const TPL = `
</div>
</div>
</div>
<div class="tab-pane fade" id="nav-voyage" role="tabpanel" aria-labelledby="nav-voyage-tab">
    <div class="card">
        <div class="card-header">
            <h5>${t("ai_llm.voyage_settings")}</h5>
        </div>
        <div class="card-body">
            <div class="form-group">
                <label>${t("ai_llm.api_key")}</label>
                <input type="password" class="voyage-api-key form-control" autocomplete="off" />
                <div class="form-text">${t("ai_llm.voyage_api_key_description")}</div>
            </div>

            <div class="form-group">
                <label>${t("ai_llm.embedding_model")}</label>
                <select class="voyage-embedding-model form-control">
                    <option value="voyage-2">Voyage-2 (recommended)</option>
                    <option value="voyage-2-code">Voyage-2-Code</option>
                    <option value="voyage-large-2">Voyage-Large-2</option>
                </select>
                <div class="form-text">${t("ai_llm.voyage_embedding_model_description")}</div>
            </div>
        </div>
    </div>
</div>
<div class="tab-pane fade" id="nav-ollama" role="tabpanel" aria-labelledby="nav-ollama-tab">
    <div class="card">
<!-- Ollama Provider Settings -->
<div class="provider-settings ollama-provider-settings" style="display: none;">
    <div class="card mt-3">
        <div class="card-header">
            <h5>${t("ai_llm.ollama_settings")}</h5>
        </div>
@@ -199,35 +149,138 @@ export const TPL = `
<div class="form-group">
    <label>${t("ai_llm.model")}</label>
    <select class="ollama-default-model form-control">
        <option value="llama3">llama3 (recommended)</option>
        <option value="mistral">mistral</option>
        <option value="phi3">phi3</option>
        <option value="">${t("ai_llm.select_model")}</option>
    </select>
    <div class="form-text">${t("ai_llm.ollama_model_description")}</div>
    <button class="btn btn-sm btn-outline-secondary refresh-models"><span class="bx bx-refresh"></span></button>
</div>
</div>
</div>
</div>

<div class="form-group">
    <label>${t("ai_llm.temperature")}</label>
    <input class="ai-temperature form-control" type="number" min="0" max="2" step="0.1">
    <div class="form-text">${t("ai_llm.temperature_description")}</div>
</div>

<div class="form-group">
    <label>${t("ai_llm.system_prompt")}</label>
    <textarea class="ai-system-prompt form-control" rows="3"></textarea>
    <div class="form-text">${t("ai_llm.system_prompt_description")}</div>
</div>
</div>

<div class="options-section">
    <h4>${t("ai_llm.embeddings_configuration")}</h4>

    <div class="form-group">
        <label class="embedding-provider-label">${t("ai_llm.selected_embedding_provider")}</label>
        <select class="embedding-selected-provider form-control">
            <option value="">${t("ai_llm.select_embedding_provider")}</option>
            <option value="openai">OpenAI</option>
            <option value="voyage">Voyage AI</option>
            <option value="ollama">Ollama</option>
            <option value="local">Local</option>
        </select>
        <div class="form-text">${t("ai_llm.selected_embedding_provider_description")}</div>
    </div>

    <!-- OpenAI Embedding Provider Settings -->
    <div class="embedding-provider-settings openai-embedding-provider-settings" style="display: none;">
        <div class="card mt-3">
            <div class="card-header">
                <h5>${t("ai_llm.openai_embedding_settings")}</h5>
            </div>
            <div class="card-body">
                <div class="form-group">
                    <label>${t("ai_llm.api_key")}</label>
                    <input type="password" class="openai-embedding-api-key form-control" autocomplete="off" />
                    <div class="form-text">${t("ai_llm.openai_embedding_api_key_description")}</div>
                </div>

                <div class="form-group">
                    <label>${t("ai_llm.url")}</label>
                    <input type="text" class="openai-embedding-base-url form-control" />
                    <div class="form-text">${t("ai_llm.openai_embedding_url_description")}</div>
                </div>

                <div class="form-group">
                    <label>${t("ai_llm.embedding_model")}</label>
                    <select class="openai-embedding-model form-control">
                        <option value="">${t("ai_llm.select_model")}</option>
                    </select>
                    <div class="form-text">${t("ai_llm.openai_embedding_model_description")}</div>
                </div>
            </div>
        </div>
    </div>

    <!-- Voyage Embedding Provider Settings -->
    <div class="embedding-provider-settings voyage-embedding-provider-settings" style="display: none;">
        <div class="card mt-3">
            <div class="card-header">
                <h5>${t("ai_llm.voyage_settings")}</h5>
            </div>
            <div class="card-body">
                <div class="form-group">
                    <label>${t("ai_llm.api_key")}</label>
                    <input type="password" class="voyage-api-key form-control" autocomplete="off" />
                    <div class="form-text">${t("ai_llm.voyage_api_key_description")}</div>
                </div>

                <div class="form-group">
                    <label>${t("ai_llm.url")}</label>
                    <input type="text" class="voyage-embedding-base-url form-control" />
                    <div class="form-text">${t("ai_llm.voyage_embedding_url_description")}</div>
                </div>

                <div class="form-group">
                    <label>${t("ai_llm.embedding_model")}</label>
                    <select class="voyage-embedding-model form-control">
                        <option value="">${t("ai_llm.select_model")}</option>
                    </select>
                    <div class="form-text">${t("ai_llm.voyage_embedding_model_description")}</div>
                </div>
            </div>
        </div>
    </div>

    <!-- Ollama Embedding Provider Settings -->
    <div class="embedding-provider-settings ollama-embedding-provider-settings" style="display: none;">
        <div class="card mt-3">
            <div class="card-header">
                <h5>${t("ai_llm.ollama_embedding_settings")}</h5>
            </div>
            <div class="card-body">
                <div class="form-group">
                    <label>${t("ai_llm.url")}</label>
                    <input type="text" class="ollama-embedding-base-url form-control" />
                    <div class="form-text">${t("ai_llm.ollama_embedding_url_description")}</div>
                </div>

                <div class="form-group">
                    <label>${t("ai_llm.embedding_model")}</label>
                    <select class="ollama-embedding-model form-control">
                        <option value="nomic-embed-text">nomic-embed-text (recommended)</option>
                        <option value="all-MiniLM-L6-v2">all-MiniLM-L6-v2</option>
                        <option value="">${t("ai_llm.select_model")}</option>
                    </select>
                    <div class="form-text">${t("ai_llm.ollama_embedding_model_description")}</div>
                </div>
            </div>
        </div>
    </div>

    <!-- Local Embedding Provider Settings -->
    <div class="embedding-provider-settings local-embedding-provider-settings" style="display: none;">
        <div class="card mt-3">
            <div class="card-header">
                <h5>${t("ai_llm.local_embedding_settings")}</h5>
            </div>
            <div class="card-body">
                <div class="form-text">${t("ai_llm.local_embedding_description")}</div>
            </div>
        </div>
    </div>

<div class="options-section">
    <h4>${t("ai_llm.embeddings_configuration")}</h4>

    <div class="form-group">
        <label class="embedding-provider-label">${t("ai_llm.embedding_provider_precedence")}</label>
        <input type="text" class="embedding-provider-precedence form-control" placeholder="openai,voyage,ollama,local">
        <div class="form-text">${t("ai_llm.embedding_provider_precedence_description")}</div>
    </div>

    <div class="form-group">
@@ -408,7 +408,7 @@ async function reprocessAllNotes(req: Request, res: Response) {
    try {
        // Wrap the operation in cls.init to ensure proper context
        cls.init(async () => {
            await vectorStore.reprocessAllNotes();
            await indexService.reprocessAllNotes();
            log.info("Embedding reprocessing completed successfully");
        });
    } catch (error: any) {
@@ -782,6 +782,49 @@ async function getIndexRebuildStatus(req: Request, res: Response) {
    };
}

/**
 * Start embedding generation when AI is enabled
 */
async function startEmbeddings(req: Request, res: Response) {
    try {
        log.info("Starting embedding generation system");

        // Initialize the index service if not already initialized
        await indexService.initialize();

        // Start automatic indexing
        await indexService.startEmbeddingGeneration();

        return {
            success: true,
            message: "Embedding generation started"
        };
    } catch (error: any) {
        log.error(`Error starting embeddings: ${error.message || 'Unknown error'}`);
        throw new Error(`Failed to start embeddings: ${error.message || 'Unknown error'}`);
    }
}

/**
 * Stop embedding generation when AI is disabled
 */
async function stopEmbeddings(req: Request, res: Response) {
    try {
        log.info("Stopping embedding generation system");

        // Stop automatic indexing
        await indexService.stopEmbeddingGeneration();

        return {
            success: true,
            message: "Embedding generation stopped"
        };
    } catch (error: any) {
        log.error(`Error stopping embeddings: ${error.message || 'Unknown error'}`);
        throw new Error(`Failed to stop embeddings: ${error.message || 'Unknown error'}`);
    }
}

export default {
    findSimilarNotes,
    searchByText,
@@ -794,5 +837,7 @@ export default {
    retryFailedNote,
    retryAllFailedNotes,
    rebuildIndex,
    getIndexRebuildStatus
    getIndexRebuildStatus,
    startEmbeddings,
    stopEmbeddings
};
@@ -825,7 +825,10 @@ async function streamMessage(req: Request, res: Response) {
        success: true,
        message: 'Streaming initiated successfully'
    });
    log.info(`Sent immediate success response for streaming setup`);

    // Mark response as handled to prevent apiResultHandler from processing it again
    (res as any).triliumResponseHandled = true;

    // Create a new response object for streaming through WebSocket only
    // We won't use HTTP streaming since we've already sent the HTTP response
@@ -889,42 +892,10 @@ async function streamMessage(req: Request, res: Response) {
        thinking: showThinking ? 'Initializing streaming LLM response...' : undefined
    });

    // Instead of trying to reimplement the streaming logic ourselves,
    // delegate to restChatService but set up the correct protocol:
    // 1. We've already sent a success response to the initial POST
    // 2. Now we'll have restChatService process the actual streaming through WebSocket
    // Process the LLM request using the existing service but with streaming setup
    // Since we've already sent the initial HTTP response, we'll use the WebSocket for streaming
    try {
        // Import the WebSocket service for sending messages
        const wsService = (await import('../../services/ws.js')).default;

        // Create a simple pass-through response object that won't write to the HTTP response
        // but will allow restChatService to send WebSocket messages
        const dummyResponse = {
            writableEnded: false,
            // Implement methods that would normally be used by restChatService
            write: (_chunk: string) => {
                // Silent no-op - we're only using WebSocket
                return true;
            },
            end: (_chunk?: string) => {
                // Log when streaming is complete via WebSocket
                log.info(`[${chatNoteId}] Completed HTTP response handling during WebSocket streaming`);
                return dummyResponse;
            },
            setHeader: (name: string, _value: string) => {
                // Only log for content-type to reduce noise
                if (name.toLowerCase() === 'content-type') {
                    log.info(`[${chatNoteId}] Setting up streaming for WebSocket only`);
                }
                return dummyResponse;
            }
        };

        // Process the streaming now through WebSocket only
        try {
            log.info(`[${chatNoteId}] Processing LLM streaming through WebSocket after successful initiation at ${new Date().toISOString()}`);

            // Call restChatService with our enhanced request and dummy response
            // Call restChatService with streaming mode enabled
            // The important part is setting method to GET to indicate streaming mode
            await restChatService.handleSendMessage({
                ...req,
@@ -939,11 +910,9 @@ async function streamMessage(req: Request, res: Response) {
                showThinking: showThinking === true
            },
            params: { chatNoteId }
        } as unknown as Request, dummyResponse as unknown as Response);

        log.info(`[${chatNoteId}] WebSocket streaming completed at ${new Date().toISOString()}`);
        } as unknown as Request, res);
    } catch (streamError) {
        log.error(`[${chatNoteId}] Error during WebSocket streaming: ${streamError}`);
        log.error(`Error during WebSocket streaming: ${streamError}`);

        // Send error message through WebSocket
        wsService.sendMessageToAllClients({
@@ -953,17 +922,6 @@ async function streamMessage(req: Request, res: Response) {
            done: true
        });
    }
    } catch (error) {
        log.error(`Error during streaming: ${error}`);

        // Send error to client via WebSocket
        wsService.sendMessageToAllClients({
            type: 'llm-stream',
            chatNoteId: chatNoteId,
            error: `Error processing message: ${error}`,
            done: true
        });
    }
} catch (error: any) {
    log.error(`Error starting message stream: ${error.message}`);
    log.error(`Error starting message stream, can't communicate via WebSocket: ${error.message}`);
@@ -66,12 +66,13 @@ async function listModels(req: Request, res: Response) {
    const apiKey = await options.getOption('openaiApiKey');

    if (!apiKey) {
        throw new Error('OpenAI API key is not configured');
        // Log warning but don't throw - some OpenAI-compatible endpoints don't require API keys
        log.info('OpenAI API key is not configured when listing models. This may cause issues with official OpenAI endpoints.');
    }

    // Initialize OpenAI client with the API key and base URL
    // Initialize OpenAI client with the API key (or empty string) and base URL
    const openai = new OpenAI({
        apiKey,
        apiKey: apiKey || '', // Default to empty string if no API key
        baseURL: openaiBaseUrl
    });
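The scenario this change supports is an OpenAI-compatible gateway that ignores the API key; a sketch with illustrative values:

    // Values are illustrative; any OpenAI-compatible endpoint applies.
    const openai = new OpenAI({
        apiKey: '',                          // tolerated by many self-hosted gateways
        baseURL: 'http://localhost:8000/v1'  // hypothetical local endpoint
    });
    const models = await openai.models.list(); // succeeds without a key on such endpoints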
@@ -96,22 +96,26 @@ const ALLOWED_OPTIONS = new Set<OptionNames>([
    "aiEnabled",
    "aiTemperature",
    "aiSystemPrompt",
    "aiProviderPrecedence",
    "aiSelectedProvider",
    "openaiApiKey",
    "openaiBaseUrl",
    "openaiDefaultModel",
    "openaiEmbeddingModel",
    "openaiEmbeddingApiKey",
    "openaiEmbeddingBaseUrl",
    "anthropicApiKey",
    "anthropicBaseUrl",
    "anthropicDefaultModel",
    "voyageApiKey",
    "voyageEmbeddingModel",
    "voyageEmbeddingBaseUrl",
    "ollamaBaseUrl",
    "ollamaDefaultModel",
    "ollamaEmbeddingModel",
    "ollamaEmbeddingBaseUrl",
    "embeddingAutoUpdateEnabled",
    "embeddingDimensionStrategy",
    "embeddingProviderPrecedence",
    "embeddingSelectedProvider",
    "embeddingSimilarityThreshold",
    "embeddingBatchSize",
    "embeddingUpdateInterval",
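New option names like "aiSelectedProvider" and "openaiEmbeddingApiKey" must be whitelisted here before the settings widget can persist them; a sketch of the assumed gate in the options update handler (the handler itself is outside this diff):

    // Sketch only — the actual guard in the options route may be worded differently.
    if (!ALLOWED_OPTIONS.has(name as OptionNames)) {
        throw new Error(`Option '${name}' is not allowed to be saved from the client`);
    }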
@@ -400,6 +400,8 @@ function register(app: express.Application) {
    asyncApiRoute(PST, "/api/llm/embeddings/retry-all-failed", embeddingsRoute.retryAllFailedNotes);
    asyncApiRoute(PST, "/api/llm/embeddings/rebuild-index", embeddingsRoute.rebuildIndex);
    asyncApiRoute(GET, "/api/llm/embeddings/index-rebuild-status", embeddingsRoute.getIndexRebuildStatus);
    asyncApiRoute(PST, "/api/llm/embeddings/start", embeddingsRoute.startEmbeddings);
    asyncApiRoute(PST, "/api/llm/embeddings/stop", embeddingsRoute.stopEmbeddings);

    // LLM provider endpoints - moved under /api/llm/providers hierarchy
    asyncApiRoute(GET, "/api/llm/providers/ollama/models", ollamaRoute.listModels);
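These two routes are what the settings widget's server.post('llm/embeddings/start') and server.post('llm/embeddings/stop') calls in the widget diff above resolve to; a sketch of exercising them directly, with an illustrative base URL and auth omitted:

    // Sketch only: port and auth handling are illustrative.
    const base = 'http://localhost:8080/api/llm/embeddings';

    await fetch(`${base}/start`, { method: 'POST' }); // → { success: true, message: "Embedding generation started" }
    await fetch(`${base}/stop`, { method: 'POST' });  // → { success: true, message: "Embedding generation stopped" }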
@@ -1,4 +1,5 @@
import options from '../options.js';
import eventService from '../events.js';
import type { AIService, ChatCompletionOptions, ChatResponse, Message } from './ai_interface.js';
import { AnthropicService } from './providers/anthropic_service.js';
import { ContextExtractor } from './context/index.js';
@@ -20,9 +21,8 @@ import type { NoteSearchResult } from './interfaces/context_interfaces.js';

// Import new configuration system
import {
    getProviderPrecedence,
    getPreferredProvider,
    getEmbeddingProviderPrecedence,
    getSelectedProvider,
    getSelectedEmbeddingProvider,
    parseModelIdentifier,
    isAIEnabled,
    getDefaultModelForProvider,
@@ -43,23 +43,20 @@ interface NoteContext {
}

export class AIServiceManager implements IAIServiceManager {
    private services: Record<ServiceProviders, AIService> = {
        openai: new OpenAIService(),
        anthropic: new AnthropicService(),
        ollama: new OllamaService()
    };
    private services: Partial<Record<ServiceProviders, AIService>> = {};

    private providerOrder: ServiceProviders[] = []; // Will be populated from configuration
    private initialized = false;

    constructor() {
        // Initialize provider order immediately
        this.updateProviderOrder();

        // Initialize tools immediately
        this.initializeTools().catch(error => {
            log.error(`Error initializing LLM tools during AIServiceManager construction: ${error.message || String(error)}`);
        });

        // Set up event listener for provider changes
        this.setupProviderChangeListener();

        this.initialized = true;
    }

    /**
@@ -84,39 +81,18 @@ export class AIServiceManager implements IAIServiceManager {
    }

    /**
     * Update the provider precedence order using the new configuration system
     * Get the currently selected provider using the new configuration system
     */
    async updateProviderOrderAsync(): Promise<void> {
    async getSelectedProviderAsync(): Promise<ServiceProviders | null> {
        try {
            const providers = await getProviderPrecedence();
            this.providerOrder = providers as ServiceProviders[];
            this.initialized = true;
            log.info(`Updated provider order: ${providers.join(', ')}`);
            const selectedProvider = await getSelectedProvider();
            return selectedProvider as ServiceProviders || null;
        } catch (error) {
            log.error(`Failed to get provider precedence: ${error}`);
            // Keep empty order, will be handled gracefully by other methods
            this.providerOrder = [];
            this.initialized = true;
            log.error(`Failed to get selected provider: ${error}`);
            return null;
        }
    }

    /**
     * Update the provider precedence order (legacy sync version)
     * Returns true if successful, false if options not available yet
     */
    updateProviderOrder(): boolean {
        if (this.initialized) {
            return true;
        }

        // Use async version but don't wait
        this.updateProviderOrderAsync().catch(error => {
            log.error(`Error in async provider order update: ${error}`);
        });

        return true;
    }

    /**
     * Validate AI configuration using the new configuration system
     */
@@ -158,16 +134,44 @@ export class AIServiceManager implements IAIServiceManager {
     * Ensure manager is initialized before using
     */
    private ensureInitialized() {
        if (!this.initialized) {
            this.updateProviderOrder();
        // No longer needed with simplified approach
    }

    /**
     * Get or create any available AI service following the simplified pattern
     * Returns a service or throws a meaningful error
     */
    async getOrCreateAnyService(): Promise<AIService> {
        this.ensureInitialized();

        // Get the selected provider using the new configuration system
        const selectedProvider = await this.getSelectedProviderAsync();

        if (!selectedProvider) {
            throw new Error('No AI provider is selected. Please select a provider (OpenAI, Anthropic, or Ollama) in your AI settings.');
        }

        try {
            const service = await this.getOrCreateChatProvider(selectedProvider);
            if (service) {
                return service;
            }
            throw new Error(`Failed to create ${selectedProvider} service`);
        } catch (error) {
            log.error(`Provider ${selectedProvider} not available: ${error}`);
            throw new Error(`Selected AI provider (${selectedProvider}) is not available. Please check your configuration: ${error}`);
        }
    }

    /**
     * Check if any AI service is available
     * Check if any AI service is available (legacy method for backward compatibility)
     */
    isAnyServiceAvailable(): boolean {
        return Object.values(this.services).some(service => service.isAvailable());
        this.ensureInitialized();

        // Check if we have the selected provider available
        return this.getAvailableProviders().length > 0;
    }

    /**
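getOrCreateChatProvider() is called above but not shown in this diff. Given the switch from an eagerly populated services record to Partial<Record<...>>, a plausible sketch of the lazy-creation pattern it implies:

    // Sketch only — the real method is outside this diff and may validate configuration first.
    private async getOrCreateChatProvider(provider: ServiceProviders): Promise<AIService | undefined> {
        if (!this.services[provider]) {
            switch (provider) {
                case 'openai':    this.services[provider] = new OpenAIService();    break;
                case 'anthropic': this.services[provider] = new AnthropicService(); break;
                case 'ollama':    this.services[provider] = new OllamaService();    break;
            }
        }
        return this.services[provider];
    }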
@@ -175,9 +179,42 @@ export class AIServiceManager implements IAIServiceManager {
     */
    getAvailableProviders(): ServiceProviders[] {
        this.ensureInitialized();
        return Object.entries(this.services)
            .filter(([_, service]) => service.isAvailable())
            .map(([key, _]) => key as ServiceProviders);

        const allProviders: ServiceProviders[] = ['openai', 'anthropic', 'ollama'];
        const availableProviders: ServiceProviders[] = [];

        for (const providerName of allProviders) {
            // Use a sync approach - check if we can create the provider
            const service = this.services[providerName];
            if (service && service.isAvailable()) {
                availableProviders.push(providerName);
            } else {
                // For providers not yet created, check configuration to see if they would be available
                try {
                    switch (providerName) {
                        case 'openai':
                            if (options.getOption('openaiApiKey')) {
                                availableProviders.push(providerName);
                            }
                            break;
                        case 'anthropic':
                            if (options.getOption('anthropicApiKey')) {
                                availableProviders.push(providerName);
                            }
                            break;
                        case 'ollama':
                            if (options.getOption('ollamaBaseUrl')) {
                                availableProviders.push(providerName);
                            }
                            break;
                    }
                } catch (error) {
                    // Ignore configuration errors, provider just won't be available
                }
            }
        }

        return availableProviders;
    }

    /**
@ -198,53 +235,56 @@ export class AIServiceManager implements IAIServiceManager {
|
||||
throw new Error('No messages provided for chat completion');
|
||||
}
|
||||
|
||||
// Try providers in order of preference
|
||||
const availableProviders = this.getAvailableProviders();
|
||||
// Get the selected provider
|
||||
const selectedProvider = await this.getSelectedProviderAsync();
|
||||
|
||||
if (availableProviders.length === 0) {
|
||||
throw new Error('No AI providers are available. Please check your AI settings.');
|
||||
if (!selectedProvider) {
|
||||
throw new Error('No AI provider is selected. Please select a provider in your AI settings.');
|
||||
}
|
||||
|
||||
// Sort available providers by precedence
|
||||
const sortedProviders = this.providerOrder
|
||||
.filter(provider => availableProviders.includes(provider));
|
||||
// Check if the selected provider is available
|
||||
const availableProviders = this.getAvailableProviders();
|
||||
if (!availableProviders.includes(selectedProvider)) {
|
||||
throw new Error(`Selected AI provider (${selectedProvider}) is not available. Please check your configuration.`);
|
||||
}
|
||||
|
||||
// If a specific provider is requested and available, use it
|
||||
if (options.model && options.model.includes(':')) {
|
||||
// Use the new configuration system to parse model identifier
|
||||
const modelIdentifier = parseModelIdentifier(options.model);
|
||||
|
||||
if (modelIdentifier.provider && availableProviders.includes(modelIdentifier.provider as ServiceProviders)) {
|
||||
if (modelIdentifier.provider && modelIdentifier.provider === selectedProvider) {
|
||||
try {
|
||||
const service = await this.getOrCreateChatProvider(modelIdentifier.provider as ServiceProviders);
|
||||
if (service) {
|
||||
const modifiedOptions = { ...options, model: modelIdentifier.modelId };
|
||||
log.info(`[AIServiceManager] Using provider ${modelIdentifier.provider} from model prefix with modifiedOptions.stream: ${modifiedOptions.stream}`);
|
||||
return await this.services[modelIdentifier.provider as ServiceProviders].generateChatCompletion(messages, modifiedOptions);
|
||||
return await service.generateChatCompletion(messages, modifiedOptions);
|
||||
}
|
||||
} catch (error) {
|
||||
log.error(`Error with specified provider ${modelIdentifier.provider}: ${error}`);
|
||||
// If the specified provider fails, continue with the fallback providers
|
||||
throw new Error(`Failed to use specified provider ${modelIdentifier.provider}: ${error}`);
|
||||
}
|
||||
} else if (modelIdentifier.provider && modelIdentifier.provider !== selectedProvider) {
|
||||
throw new Error(`Model specifies provider '${modelIdentifier.provider}' but selected provider is '${selectedProvider}'. Please select the correct provider or use a model without provider prefix.`);
|
||||
}
|
||||
// If not a provider prefix, treat the entire string as a model name and continue with normal provider selection
|
||||
}
|
||||
|
||||
// Try each provider in order until one succeeds
|
||||
let lastError: Error | null = null;
|
||||
|
||||
for (const provider of sortedProviders) {
|
||||
// Use the selected provider
|
||||
try {
|
||||
log.info(`[AIServiceManager] Trying provider ${provider} with options.stream: ${options.stream}`);
|
||||
return await this.services[provider].generateChatCompletion(messages, options);
|
||||
const service = await this.getOrCreateChatProvider(selectedProvider);
|
||||
if (!service) {
|
||||
throw new Error(`Failed to create selected chat provider: ${selectedProvider}. Please check your configuration.`);
|
||||
}
|
||||
log.info(`[AIServiceManager] Using selected provider ${selectedProvider} with options.stream: ${options.stream}`);
|
||||
return await service.generateChatCompletion(messages, options);
|
||||
} catch (error) {
|
||||
log.error(`Error with provider ${provider}: ${error}`);
|
||||
lastError = error as Error;
|
||||
// Continue to the next provider
|
||||
log.error(`Error with selected provider ${selectedProvider}: ${error}`);
|
||||
throw new Error(`Selected AI provider (${selectedProvider}) failed: ${error}`);
|
||||
}
|
||||
}
|
||||
|
||||
// If we get here, all providers failed
|
||||
throw new Error(`All AI providers failed: ${lastError?.message || 'Unknown error'}`);
|
||||
}
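
A hedged usage sketch of the single-selected-provider flow above; the 'provider:model' prefix handling and stream option come from this diff, while the message shape and manager acquisition are assumptions:

// Illustration only. With the new logic, a 'provider:model' prefix must
// match the selected provider, otherwise generateChatCompletion throws.
import { AIServiceManager } from './ai_service_manager.js'; // path illustrative

async function askSelectedProvider(manager: AIServiceManager): Promise<void> {
    const response = await manager.generateChatCompletion(
        [{ role: 'user', content: 'Summarize this note.' }], // assumed message shape
        { model: 'ollama:llama3', stream: false } // 'ollama' must be the selected provider
    );
    console.log(response);
}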

setupEventListeners() {
// Setup event listeners for AI services
}
@ -340,30 +380,64 @@ export class AIServiceManager implements IAIServiceManager {
}

/**
* Set up embeddings provider using the new configuration system
* Get or create a chat provider on-demand with inline validation
*/
async setupEmbeddingsProvider(): Promise<void> {
private async getOrCreateChatProvider(providerName: ServiceProviders): Promise<AIService | null> {
// Return existing provider if already created
if (this.services[providerName]) {
return this.services[providerName];
}

// Create and validate provider on-demand
try {
const aiEnabled = await isAIEnabled();
if (!aiEnabled) {
log.info('AI features are disabled');
return;
let service: AIService | null = null;

switch (providerName) {
case 'openai': {
const apiKey = options.getOption('openaiApiKey');
const baseUrl = options.getOption('openaiBaseUrl');
if (!apiKey && !baseUrl) return null;

service = new OpenAIService();
// Validate by checking if it's available
if (!service.isAvailable()) {
throw new Error('OpenAI service not available');
}
break;
}

// Use the new configuration system - no string parsing!
const enabledProviders = await getEnabledEmbeddingProviders();
case 'anthropic': {
const apiKey = options.getOption('anthropicApiKey');
if (!apiKey) return null;

if (enabledProviders.length === 0) {
log.info('No embedding providers are enabled');
return;
service = new AnthropicService();
if (!service.isAvailable()) {
throw new Error('Anthropic service not available');
}
break;
}

// Initialize embedding providers
log.info('Embedding providers initialized successfully');
case 'ollama': {
const baseUrl = options.getOption('ollamaBaseUrl');
if (!baseUrl) return null;

service = new OllamaService();
if (!service.isAvailable()) {
throw new Error('Ollama service not available');
}
break;
}
}

if (service) {
this.services[providerName] = service;
return service;
}
} catch (error: any) {
log.error(`Error setting up embedding providers: ${error.message}`);
throw error;
log.error(`Failed to create ${providerName} chat provider: ${error.message || 'Unknown error'}`);
}

return null;
}
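
The method above follows a create-once-then-cache shape; a generic sketch of that pattern (illustrative, not repository code):

// Build a service the first time it is requested, reuse it afterwards,
// and return null when prerequisites (API key, base URL) are missing.
class OnDemandCache<T> {
    private readonly cache = new Map<string, T>();

    getOrCreate(key: string, factory: () => T | null): T | null {
        const cached = this.cache.get(key);
        if (cached) return cached;
        const created = factory();
        if (created !== null) this.cache.set(key, created);
        return created;
    }

    // Dropping the cache forces fresh creation with the latest
    // configuration on the next request (compare recreateServices below).
    clear(): void {
        this.cache.clear();
    }
}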

/**
@ -381,12 +455,6 @@ export class AIServiceManager implements IAIServiceManager {
return;
}

// Update provider order from configuration
await this.updateProviderOrderAsync();

// Set up embeddings provider if AI is enabled
await this.setupEmbeddingsProvider();

// Initialize index service
await this.getIndexService().initialize();

@ -453,8 +521,8 @@ export class AIServiceManager implements IAIServiceManager {
if (!contextNotes || contextNotes.length === 0) {
try {
// Get the default LLM service for context enhancement
const provider = this.getPreferredProvider();
const llmService = this.getService(provider);
const provider = this.getSelectedProvider();
const llmService = await this.getService(provider);

// Find relevant notes
contextNotes = await contextService.findRelevantNotes(
@ -495,25 +563,31 @@ export class AIServiceManager implements IAIServiceManager {
/**
* Get AI service for the given provider
*/
getService(provider?: string): AIService {
async getService(provider?: string): Promise<AIService> {
this.ensureInitialized();

// If provider is specified, try to use it
if (provider && this.services[provider as ServiceProviders]?.isAvailable()) {
return this.services[provider as ServiceProviders];
}

// Otherwise, use the first available provider in the configured order
for (const providerName of this.providerOrder) {
const service = this.services[providerName];
if (service.isAvailable()) {
// If provider is specified, try to get or create it
if (provider) {
const service = await this.getOrCreateChatProvider(provider as ServiceProviders);
if (service && service.isAvailable()) {
return service;
}
throw new Error(`Specified provider ${provider} is not available`);
}

// If no provider is available, use first one anyway (it will throw an error)
// This allows us to show a proper error message rather than "provider not found"
return this.services[this.providerOrder[0]];
// Otherwise, use the selected provider
const selectedProvider = await this.getSelectedProviderAsync();
if (!selectedProvider) {
throw new Error('No AI provider is selected. Please select a provider in your AI settings.');
}

const service = await this.getOrCreateChatProvider(selectedProvider);
if (service && service.isAvailable()) {
return service;
}

// If no provider is available, throw a clear error
throw new Error(`Selected AI provider (${selectedProvider}) is not available. Please check your AI settings.`);
}

/**
@ -521,34 +595,37 @@ export class AIServiceManager implements IAIServiceManager {
*/
async getPreferredProviderAsync(): Promise<string> {
try {
const preferredProvider = await getPreferredProvider();
if (preferredProvider === null) {
// No providers configured, fallback to first available
log.info('No providers configured in precedence, using first available provider');
return this.providerOrder[0];
const selectedProvider = await getSelectedProvider();
if (selectedProvider === null) {
// No provider selected, fallback to default
log.info('No provider selected, using default provider');
return 'openai';
}
return preferredProvider;
return selectedProvider;
} catch (error) {
log.error(`Error getting preferred provider: ${error}`);
return this.providerOrder[0];
return 'openai';
}
}

/**
* Get the preferred provider based on configuration (sync version for compatibility)
* Get the selected provider based on configuration (sync version for compatibility)
*/
getPreferredProvider(): string {
getSelectedProvider(): string {
this.ensureInitialized();

// Return the first available provider in the order
for (const providerName of this.providerOrder) {
if (this.services[providerName].isAvailable()) {
return providerName;
// Try to get the selected provider synchronously
try {
const selectedProvider = options.getOption('aiSelectedProvider');
if (selectedProvider) {
return selectedProvider;
}
} catch (error) {
log.error(`Error getting selected provider: ${error}`);
}

// Return the first provider as fallback
return this.providerOrder[0];
// Return a default if nothing is selected (for backward compatibility)
return 'openai';
}

/**
@ -580,6 +657,7 @@ export class AIServiceManager implements IAIServiceManager {
};
}

/**
* Error handler that properly types the error object
*/
@ -589,6 +667,79 @@ export class AIServiceManager implements IAIServiceManager {
}
return String(error);
}

/**
* Set up event listener for provider changes
*/
private setupProviderChangeListener(): void {
// List of AI-related options that should trigger service recreation
const aiRelatedOptions = [
'aiEnabled',
'aiSelectedProvider',
'embeddingSelectedProvider',
'openaiApiKey',
'openaiBaseUrl',
'openaiDefaultModel',
'anthropicApiKey',
'anthropicBaseUrl',
'anthropicDefaultModel',
'ollamaBaseUrl',
'ollamaDefaultModel',
'voyageApiKey'
];

eventService.subscribe(['entityChanged'], async ({ entityName, entity }) => {
if (entityName === 'options' && entity && aiRelatedOptions.includes(entity.name)) {
log.info(`AI-related option '${entity.name}' changed, recreating LLM services`);

// Special handling for aiEnabled toggle
if (entity.name === 'aiEnabled') {
const isEnabled = entity.value === 'true';

if (isEnabled) {
log.info('AI features enabled, initializing AI service and embeddings');
// Initialize the AI service
await this.initialize();
// Initialize embeddings through index service
await indexService.startEmbeddingGeneration();
} else {
log.info('AI features disabled, stopping embeddings and clearing providers');
// Stop embeddings through index service
await indexService.stopEmbeddingGeneration();
// Clear chat providers
this.services = {};
}
} else {
// For other AI-related options, recreate services on-demand
await this.recreateServices();
}
}
});
}
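
A reduced sketch of the subscription above; the eventService call and entity shape are taken from this diff, while the import path and shortened option list are illustrative:

import eventService from '../events.js'; // path illustrative

// Illustration only: react to option rows, ignore all other entity changes.
const watchedOptions = new Set(['aiEnabled', 'aiSelectedProvider', 'openaiApiKey']);

eventService.subscribe(['entityChanged'], async ({ entityName, entity }) => {
    if (entityName !== 'options' || !entity || !watchedOptions.has(entity.name)) {
        return; // not an AI-related option change
    }
    // Invalidate caches here; services are rebuilt on-demand with new values.
});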

/**
* Recreate LLM services when provider settings change
*/
private async recreateServices(): Promise<void> {
try {
log.info('Recreating LLM services due to configuration change');

// Clear configuration cache first
clearConfigurationCache();

// Clear existing chat providers (they will be recreated on-demand)
this.services = {};

// Clear embedding providers (they will be recreated on-demand when needed)
const providerManager = await import('./providers/providers.js');
providerManager.clearAllEmbeddingProviders();

log.info('LLM services recreated successfully');
} catch (error) {
log.error(`Error recreating LLM services: ${this.handleError(error)}`);
}
}

}

// Don't create singleton immediately, use a lazy-loading pattern
@ -610,6 +761,9 @@ export default {
isAnyServiceAvailable(): boolean {
return getInstance().isAnyServiceAvailable();
},
async getOrCreateAnyService(): Promise<AIService> {
return getInstance().getOrCreateAnyService();
},
getAvailableProviders() {
return getInstance().getAvailableProviders();
},
@ -661,11 +815,11 @@ export default {
);
},
// New methods
getService(provider?: string): AIService {
async getService(provider?: string): Promise<AIService> {
return getInstance().getService(provider);
},
getPreferredProvider(): string {
return getInstance().getPreferredProvider();
getSelectedProvider(): string {
return getInstance().getSelectedProvider();
},
isProviderAvailable(provider: string): boolean {
return getInstance().isProviderAvailable(provider);
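
A usage sketch of the lazy-loaded facade (import path illustrative; the awaited call reflects the new async getService signature):

import aiServiceManager from './ai_service_manager.js'; // path illustrative

async function chatServiceIfConfigured() {
    if (!aiServiceManager.isAnyServiceAvailable()) {
        return null; // nothing configured yet
    }
    // getService now returns a Promise and throws with a clear message
    // when the selected provider cannot be created.
    return await aiServiceManager.getService();
}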

@ -5,7 +5,7 @@
import log from "../../log.js";
import type { Request, Response } from "express";
import type { Message, ChatCompletionOptions } from "../ai_interface.js";
import { AIServiceManager } from "../ai_service_manager.js";
import aiServiceManager from "../ai_service_manager.js";
import { ChatPipeline } from "../pipeline/chat_pipeline.js";
import type { ChatPipelineInput } from "../pipeline/interfaces.js";
import options from "../../options.js";
@ -14,7 +14,7 @@ import type { LLMStreamMessage } from "../interfaces/chat_ws_messages.js";
import chatStorageService from '../chat_storage_service.js';
import {
isAIEnabled,
getFirstValidModelConfig,
getSelectedModelConfig,
} from '../config/configuration_helpers.js';

/**
@ -33,25 +33,6 @@ class RestChatService {
}
}

/**
* Check if AI services are available
*/
safelyUseAIManager(): boolean {
if (!this.isDatabaseInitialized()) {
log.info("AI check failed: Database is not initialized");
return false;
}

try {
const aiManager = new AIServiceManager();
const isAvailable = aiManager.isAnyServiceAvailable();
log.info(`AI service availability check result: ${isAvailable}`);
return isAvailable;
} catch (error) {
log.error(`Error accessing AI service manager: ${error}`);
return false;
}
}

/**
* Handle a message sent to an LLM and get a response
@ -93,10 +74,14 @@ class RestChatService {
return { error: "AI features are disabled. Please enable them in the settings." };
}

if (!this.safelyUseAIManager()) {
return { error: "AI services are currently unavailable. Please check your configuration." };
// Check database initialization first
if (!this.isDatabaseInitialized()) {
throw new Error("Database is not initialized");
}

// Get or create AI service - will throw meaningful error if not possible
await aiServiceManager.getOrCreateAnyService();

// Load or create chat directly from storage
let chat = await chatStorageService.getChat(chatNoteId);

@ -253,14 +238,6 @@ class RestChatService {
// Send WebSocket message
wsService.sendMessageToAllClients(message);

// Send SSE response for compatibility
const responseData: any = { content: data, done };
if (rawChunk?.toolExecution) {
responseData.toolExecution = rawChunk.toolExecution;
}

res.write(`data: ${JSON.stringify(responseData)}\n\n`);

// When streaming is complete, save the accumulated content to the chat note
if (done) {
try {
@ -281,8 +258,8 @@ class RestChatService {
log.error(`Error saving streaming response: ${error}`);
}

// End the response
res.end();
// Note: For WebSocket-only streaming, we don't end the HTTP response here
// since it was already handled by the calling endpoint
}
}

@ -419,7 +396,7 @@ class RestChatService {
*/
async getPreferredModel(): Promise<string | undefined> {
try {
const validConfig = await getFirstValidModelConfig();
const validConfig = await getSelectedModelConfig();
if (!validConfig) {
log.error('No valid AI model configuration found');
return undefined;

@ -1,10 +1,9 @@
import configurationManager from './configuration_manager.js';
import optionService from '../../options.js';
import type {
ProviderType,
ModelIdentifier,
ModelConfig,
ProviderPrecedenceConfig,
EmbeddingProviderPrecedenceConfig
} from '../interfaces/configuration_interfaces.js';

/**
@ -13,41 +12,19 @@ import type {
*/

/**
* Get the ordered list of AI providers
* Get the selected AI provider
*/
export async function getProviderPrecedence(): Promise<ProviderType[]> {
const config = await configurationManager.getProviderPrecedence();
return config.providers;
export async function getSelectedProvider(): Promise<ProviderType | null> {
const providerOption = optionService.getOption('aiSelectedProvider');
return providerOption as ProviderType || null;
}

/**
* Get the default/preferred AI provider
* Get the selected embedding provider
*/
export async function getPreferredProvider(): Promise<ProviderType | null> {
const config = await configurationManager.getProviderPrecedence();
if (config.providers.length === 0) {
return null; // No providers configured
}
return config.defaultProvider || config.providers[0];
}

/**
* Get the ordered list of embedding providers
*/
export async function getEmbeddingProviderPrecedence(): Promise<string[]> {
const config = await configurationManager.getEmbeddingProviderPrecedence();
return config.providers;
}

/**
* Get the default embedding provider
*/
export async function getPreferredEmbeddingProvider(): Promise<string | null> {
const config = await configurationManager.getEmbeddingProviderPrecedence();
if (config.providers.length === 0) {
return null; // No providers configured
}
return config.defaultProvider || config.providers[0];
export async function getSelectedEmbeddingProvider(): Promise<string | null> {
const providerOption = optionService.getOption('embeddingSelectedProvider');
return providerOption || null;
}

/**
@ -107,22 +84,20 @@ export async function isProviderConfigured(provider: ProviderType): Promise<bool
}

/**
* Get the first available (configured) provider from the precedence list
* Get the currently selected provider if it's available and configured
*/
export async function getFirstAvailableProvider(): Promise<ProviderType | null> {
const providers = await getProviderPrecedence();
export async function getAvailableSelectedProvider(): Promise<ProviderType | null> {
const selectedProvider = await getSelectedProvider();

if (providers.length === 0) {
return null; // No providers configured
if (!selectedProvider) {
return null; // No provider selected
}

for (const provider of providers) {
if (await isProviderConfigured(provider)) {
return provider;
}
if (await isProviderConfigured(selectedProvider)) {
return selectedProvider;
}

return null; // No providers are properly configured
return null; // Selected provider is not properly configured
}

/**
@ -163,17 +138,15 @@ export async function getValidModelConfig(provider: ProviderType): Promise<{ mod
}

/**
* Get the first valid model configuration from the provider precedence list
* Get the model configuration for the currently selected provider
*/
export async function getFirstValidModelConfig(): Promise<{ model: string; provider: ProviderType } | null> {
const providers = await getProviderPrecedence();
export async function getSelectedModelConfig(): Promise<{ model: string; provider: ProviderType } | null> {
const selectedProvider = await getSelectedProvider();

for (const provider of providers) {
const config = await getValidModelConfig(provider);
if (config) {
return config;
}
if (!selectedProvider) {
return null; // No provider selected
}

return null; // No valid model configuration found
return await getValidModelConfig(selectedProvider);
}
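
Taken together, the rewritten helpers reduce provider resolution to a single lookup; a usage sketch against the functions defined above (import path illustrative):

import {
    getSelectedProvider,
    getAvailableSelectedProvider,
    getSelectedModelConfig
} from './configuration_helpers.js'; // path illustrative

async function describeSelection(): Promise<string> {
    const selected = await getSelectedProvider();        // e.g. 'ollama', or null
    if (!selected) return 'no provider selected';
    const usable = await getAvailableSelectedProvider(); // null if not configured
    if (!usable) return `${selected} selected but not configured`;
    const config = await getSelectedModelConfig();       // { model, provider } or null
    return config ? `${config.provider}/${config.model}` : `${selected} has no default model`;
}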

@ -50,8 +50,8 @@ export class ConfigurationManager {
try {
const config: AIConfig = {
enabled: await this.getAIEnabled(),
providerPrecedence: await this.getProviderPrecedence(),
embeddingProviderPrecedence: await this.getEmbeddingProviderPrecedence(),
selectedProvider: await this.getSelectedProvider(),
selectedEmbeddingProvider: await this.getSelectedEmbeddingProvider(),
defaultModels: await this.getDefaultModels(),
providerSettings: await this.getProviderSettings()
};
@ -66,46 +66,28 @@ export class ConfigurationManager {
}

/**
* Parse provider precedence from string option
* Get the selected AI provider
*/
public async getProviderPrecedence(): Promise<ProviderPrecedenceConfig> {
public async getSelectedProvider(): Promise<ProviderType | null> {
try {
const precedenceOption = await options.getOption('aiProviderPrecedence');
const providers = this.parseProviderList(precedenceOption);

return {
providers: providers as ProviderType[],
defaultProvider: providers.length > 0 ? providers[0] as ProviderType : undefined
};
const selectedProvider = options.getOption('aiSelectedProvider');
return selectedProvider as ProviderType || null;
} catch (error) {
log.error(`Error parsing provider precedence: ${error}`);
// Only return known providers if they exist, don't assume defaults
return {
providers: [],
defaultProvider: undefined
};
log.error(`Error getting selected provider: ${error}`);
return null;
}
}

/**
* Parse embedding provider precedence from string option
* Get the selected embedding provider
*/
public async getEmbeddingProviderPrecedence(): Promise<EmbeddingProviderPrecedenceConfig> {
public async getSelectedEmbeddingProvider(): Promise<EmbeddingProviderType | null> {
try {
const precedenceOption = await options.getOption('embeddingProviderPrecedence');
const providers = this.parseProviderList(precedenceOption);

return {
providers: providers as EmbeddingProviderType[],
defaultProvider: providers.length > 0 ? providers[0] as EmbeddingProviderType : undefined
};
const selectedProvider = options.getOption('embeddingSelectedProvider');
return selectedProvider as EmbeddingProviderType || null;
} catch (error) {
log.error(`Error parsing embedding provider precedence: ${error}`);
// Don't assume defaults, return empty configuration
return {
providers: [],
defaultProvider: undefined
};
log.error(`Error getting selected embedding provider: ${error}`);
return null;
}
}

@ -173,11 +155,9 @@ export class ConfigurationManager {
*/
public async getDefaultModels(): Promise<Record<ProviderType, string | undefined>> {
try {
const [openaiModel, anthropicModel, ollamaModel] = await Promise.all([
options.getOption('openaiDefaultModel'),
options.getOption('anthropicDefaultModel'),
options.getOption('ollamaDefaultModel')
]);
const openaiModel = options.getOption('openaiDefaultModel');
const anthropicModel = options.getOption('anthropicDefaultModel');
const ollamaModel = options.getOption('ollamaDefaultModel');

return {
openai: openaiModel || undefined,
@ -200,20 +180,14 @@ export class ConfigurationManager {
*/
public async getProviderSettings(): Promise<ProviderSettings> {
try {
const [
openaiApiKey, openaiBaseUrl, openaiDefaultModel,
anthropicApiKey, anthropicBaseUrl, anthropicDefaultModel,
ollamaBaseUrl, ollamaDefaultModel
] = await Promise.all([
options.getOption('openaiApiKey'),
options.getOption('openaiBaseUrl'),
options.getOption('openaiDefaultModel'),
options.getOption('anthropicApiKey'),
options.getOption('anthropicBaseUrl'),
options.getOption('anthropicDefaultModel'),
options.getOption('ollamaBaseUrl'),
options.getOption('ollamaDefaultModel')
]);
const openaiApiKey = options.getOption('openaiApiKey');
const openaiBaseUrl = options.getOption('openaiBaseUrl');
const openaiDefaultModel = options.getOption('openaiDefaultModel');
const anthropicApiKey = options.getOption('anthropicApiKey');
const anthropicBaseUrl = options.getOption('anthropicBaseUrl');
const anthropicDefaultModel = options.getOption('anthropicDefaultModel');
const ollamaBaseUrl = options.getOption('ollamaBaseUrl');
const ollamaDefaultModel = options.getOption('ollamaDefaultModel');

const settings: ProviderSettings = {};

@ -265,31 +239,29 @@ export class ConfigurationManager {
return result;
}

// Validate provider precedence
if (config.providerPrecedence.providers.length === 0) {
result.errors.push('No providers configured in precedence list');
// Validate selected provider
if (!config.selectedProvider) {
result.errors.push('No AI provider selected');
result.isValid = false;
}
} else {
// Validate selected provider settings
const providerConfig = config.providerSettings[config.selectedProvider];

// Validate provider settings
for (const provider of config.providerPrecedence.providers) {
const providerConfig = config.providerSettings[provider];

if (provider === 'openai') {
if (config.selectedProvider === 'openai') {
const openaiConfig = providerConfig as OpenAISettings | undefined;
if (!openaiConfig?.apiKey) {
result.warnings.push('OpenAI API key is not configured');
}
}

if (provider === 'anthropic') {
if (config.selectedProvider === 'anthropic') {
const anthropicConfig = providerConfig as AnthropicSettings | undefined;
if (!anthropicConfig?.apiKey) {
result.warnings.push('Anthropic API key is not configured');
}
}

if (provider === 'ollama') {
if (config.selectedProvider === 'ollama') {
const ollamaConfig = providerConfig as OllamaSettings | undefined;
if (!ollamaConfig?.baseUrl) {
result.warnings.push('Ollama base URL is not configured');
@ -297,6 +269,11 @@ export class ConfigurationManager {
}
}

// Validate selected embedding provider
if (!config.selectedEmbeddingProvider) {
result.warnings.push('No embedding provider selected');
}

} catch (error) {
result.errors.push(`Configuration validation error: ${error}`);
result.isValid = false;
@ -317,7 +294,7 @@ export class ConfigurationManager {

private async getAIEnabled(): Promise<boolean> {
try {
return await options.getOptionBool('aiEnabled');
return options.getOptionBool('aiEnabled');
} catch {
return false;
}
@ -356,14 +333,8 @@ export class ConfigurationManager {
private getDefaultConfig(): AIConfig {
return {
enabled: false,
providerPrecedence: {
providers: [],
defaultProvider: undefined
},
embeddingProviderPrecedence: {
providers: [],
defaultProvider: undefined
},
selectedProvider: null,
selectedEmbeddingProvider: null,
defaultModels: {
openai: undefined,
anthropic: undefined,

@ -33,7 +33,7 @@ async function getSemanticContext(
}

// Get an LLM service
const llmService = aiServiceManager.getInstance().getService();
const llmService = await aiServiceManager.getInstance().getService();

const result = await contextService.processQuery("", llmService, {
maxResults: options.maxSimilarNotes || 5,
@ -543,7 +543,7 @@ export class ContextExtractor {
try {
const { default: aiServiceManager } = await import('../ai_service_manager.js');
const contextService = aiServiceManager.getInstance().getContextService();
const llmService = aiServiceManager.getInstance().getService();
const llmService = await aiServiceManager.getInstance().getService();

if (!contextService) {
return "Context service not available.";

@ -1,51 +1,32 @@
import options from '../../../options.js';
import log from '../../../log.js';
import { getEmbeddingProvider, getEnabledEmbeddingProviders } from '../../providers/providers.js';
import { getSelectedEmbeddingProvider as getSelectedEmbeddingProviderName } from '../../config/configuration_helpers.js';

/**
* Manages embedding providers for context services
*/
export class ProviderManager {
/**
* Get the preferred embedding provider based on user settings
* Tries to use the most appropriate provider in this order:
* 1. User's configured default provider
* 2. OpenAI if API key is set
* 3. Anthropic if API key is set
* 4. Ollama if configured
* 5. Any available provider
* 6. Local provider as fallback
* Get the selected embedding provider based on user settings
* Uses the single provider selection approach
*
* @returns The preferred embedding provider or null if none available
* @returns The selected embedding provider or null if none available
*/
async getPreferredEmbeddingProvider(): Promise<any> {
async getSelectedEmbeddingProvider(): Promise<any> {
try {
// Try to get providers based on precedence list
const precedenceOption = await options.getOption('embeddingProviderPrecedence');
let precedenceList: string[] = [];
// Get the selected embedding provider
const selectedProvider = await getSelectedEmbeddingProviderName();

if (precedenceOption) {
if (precedenceOption.startsWith('[') && precedenceOption.endsWith(']')) {
precedenceList = JSON.parse(precedenceOption);
} else if (typeof precedenceOption === 'string') {
if (precedenceOption.includes(',')) {
precedenceList = precedenceOption.split(',').map(p => p.trim());
} else {
precedenceList = [precedenceOption];
}
}
}

// Try each provider in the precedence list
for (const providerId of precedenceList) {
const provider = await getEmbeddingProvider(providerId);
if (selectedProvider) {
const provider = await getEmbeddingProvider(selectedProvider);
if (provider) {
log.info(`Using embedding provider from precedence list: ${providerId}`);
log.info(`Using selected embedding provider: ${selectedProvider}`);
return provider;
}
log.info(`Selected embedding provider ${selectedProvider} is not available`);
}

// If no provider from precedence list is available, try any enabled provider
// If no provider is selected or available, try any enabled provider
const providers = await getEnabledEmbeddingProviders();
if (providers.length > 0) {
log.info(`Using available embedding provider: ${providers[0].name}`);
@ -70,7 +51,7 @@ export class ProviderManager {
async generateQueryEmbedding(query: string): Promise<Float32Array | null> {
try {
// Get the preferred embedding provider
const provider = await this.getPreferredEmbeddingProvider();
const provider = await this.getSelectedEmbeddingProvider();
if (!provider) {
log.error('No embedding provider available');
return null;

@ -58,7 +58,7 @@ export class ContextService {
this.initPromise = (async () => {
try {
// Initialize provider
const provider = await providerManager.getPreferredEmbeddingProvider();
const provider = await providerManager.getSelectedEmbeddingProvider();
if (!provider) {
throw new Error(`No embedding provider available. Could not initialize context service.`);
}
@ -224,7 +224,7 @@ export class ContextService {
log.info(`Final combined results: ${relevantNotes.length} relevant notes`);

// Step 4: Build context from the notes
const provider = await providerManager.getPreferredEmbeddingProvider();
const provider = await providerManager.getSelectedEmbeddingProvider();
const providerId = provider?.name || 'default';

const context = await contextFormatter.buildContextFromNotes(

@ -79,7 +79,7 @@ export class VectorSearchService {
}

// Get provider information
const provider = await providerManager.getPreferredEmbeddingProvider();
const provider = await providerManager.getSelectedEmbeddingProvider();
if (!provider) {
log.error('No embedding provider available');
return [];
@ -280,7 +280,7 @@ export class VectorSearchService {
}

// Get provider information
const provider = await providerManager.getPreferredEmbeddingProvider();
const provider = await providerManager.getSelectedEmbeddingProvider();
if (!provider) {
log.error('No embedding provider available');
return [];

@ -9,6 +9,9 @@ import becca from "../../../becca/becca.js";
// Add mutex to prevent concurrent processing
let isProcessingEmbeddings = false;

// Store interval reference for cleanup
let backgroundProcessingInterval: NodeJS.Timeout | null = null;

/**
* Setup event listeners for embedding-related events
*/
@ -53,9 +56,15 @@ export function setupEmbeddingEventListeners() {
* Setup background processing of the embedding queue
*/
export async function setupEmbeddingBackgroundProcessing() {
// Clear any existing interval
if (backgroundProcessingInterval) {
clearInterval(backgroundProcessingInterval);
backgroundProcessingInterval = null;
}

const interval = parseInt(await options.getOption('embeddingUpdateInterval') || '200', 10);

setInterval(async () => {
backgroundProcessingInterval = setInterval(async () => {
try {
// Skip if already processing
if (isProcessingEmbeddings) {
@ -78,6 +87,17 @@ export async function setupEmbeddingBackgroundProcessing() {
}, interval);
}

/**
* Stop background processing of the embedding queue
*/
export function stopEmbeddingBackgroundProcessing() {
if (backgroundProcessingInterval) {
clearInterval(backgroundProcessingInterval);
backgroundProcessingInterval = null;
log.info("Embedding background processing stopped");
}
}
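
The store-and-clear handling above is the usual idempotent interval pattern in Node; a minimal generic sketch:

// Keep the handle so the loop can be restarted or stopped safely.
let loopHandle: NodeJS.Timeout | null = null;

function startLoop(task: () => Promise<void>, intervalMs: number): void {
    stopLoop(); // clearing first makes repeated start calls safe
    loopHandle = setInterval(() => { void task(); }, intervalMs);
}

function stopLoop(): void {
    if (loopHandle) {
        clearInterval(loopHandle);
        loopHandle = null;
    }
}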

/**
* Initialize embeddings system
*/

@ -58,12 +58,12 @@ export const processNoteWithChunking = async (
export const {
setupEmbeddingEventListeners,
setupEmbeddingBackgroundProcessing,
stopEmbeddingBackgroundProcessing,
initEmbeddings
} = events;

export const {
getEmbeddingStats,
reprocessAllNotes,
cleanupEmbeddings
} = stats;

@ -100,11 +100,11 @@ export default {
// Event handling
setupEmbeddingEventListeners: events.setupEmbeddingEventListeners,
setupEmbeddingBackgroundProcessing: events.setupEmbeddingBackgroundProcessing,
stopEmbeddingBackgroundProcessing: events.stopEmbeddingBackgroundProcessing,
initEmbeddings: events.initEmbeddings,

// Stats and maintenance
getEmbeddingStats: stats.getEmbeddingStats,
reprocessAllNotes: stats.reprocessAllNotes,
cleanupEmbeddings: stats.cleanupEmbeddings,

// Index operations

@ -4,6 +4,7 @@ import { initEmbeddings } from "./index.js";
import providerManager from "../providers/providers.js";
import sqlInit from "../../sql_init.js";
import sql from "../../sql.js";
import { validateProviders, logValidationResults, hasWorkingEmbeddingProviders } from "../provider_validation.js";

/**
* Reset any stuck embedding queue items that were left in processing state
@ -43,13 +44,20 @@ export async function initializeEmbeddings() {
// Reset any stuck embedding queue items from previous server shutdown
await resetStuckEmbeddingQueue();

// Initialize default embedding providers
await providerManager.initializeDefaultProviders();

// Start the embedding system if AI is enabled
if (await options.getOptionBool('aiEnabled')) {
// Validate providers before starting the embedding system
log.info("Validating AI providers before starting embedding system...");
const validation = await validateProviders();
logValidationResults(validation);

if (await hasWorkingEmbeddingProviders()) {
// Embedding providers will be created on-demand when needed
await initEmbeddings();
log.info("Embedding system initialized successfully.");
} else {
log.info("Embedding system not started: No working embedding providers found. Please configure at least one AI provider (OpenAI, Ollama, or Voyage) to use embedding features.");
}
} else {
log.info("Embedding system disabled (AI features are turned off).");
}

@ -282,8 +282,6 @@ export async function processEmbeddingQueue() {
continue;
}

// Log that we're starting to process this note
log.info(`Starting embedding generation for note ${noteId}`);

// Get note context for embedding
const context = await getNoteEmbeddingContext(noteId);
@ -334,7 +332,6 @@ export async function processEmbeddingQueue() {
"DELETE FROM embedding_queue WHERE noteId = ?",
[noteId]
);
log.info(`Successfully completed embedding processing for note ${noteId}`);

// Count as successfully processed
processedCount++;

@ -1,29 +1,5 @@
import sql from "../../../services/sql.js";
import log from "../../../services/log.js";
import cls from "../../../services/cls.js";
import { queueNoteForEmbedding } from "./queue.js";

/**
* Reprocess all notes to update embeddings
*/
export async function reprocessAllNotes() {
log.info("Queueing all notes for embedding updates");

// Get all non-deleted note IDs
const noteIds = await sql.getColumn(
"SELECT noteId FROM notes WHERE isDeleted = 0"
);

log.info(`Adding ${noteIds.length} notes to embedding queue`);

// Process each note ID within a cls context
for (const noteId of noteIds) {
// Use cls.init to ensure proper context for each operation
await cls.init(async () => {
await queueNoteForEmbedding(noteId as string, 'UPDATE');
});
}
}

/**
* Get current embedding statistics

@ -11,7 +11,7 @@ import { SEARCH_CONSTANTS } from '../constants/search_constants.js';
import type { NoteEmbeddingContext } from "./embeddings_interface.js";
import becca from "../../../becca/becca.js";
import { isNoteExcludedFromAIById } from "../utils/ai_exclusion_utils.js";
import { getEmbeddingProviderPrecedence } from '../config/configuration_helpers.js';
import { getSelectedEmbeddingProvider } from '../config/configuration_helpers.js';

interface Similarity {
noteId: string;
@ -277,9 +277,10 @@ export async function findSimilarNotes(
log.info('No embeddings found for specified provider, trying fallback providers...');

// Use the new configuration system - no string parsing!
const preferredProviders = await getEmbeddingProviderPrecedence();
const selectedProvider = await getSelectedEmbeddingProvider();
const preferredProviders = selectedProvider ? [selectedProvider] : [];

log.info(`Using provider precedence: ${preferredProviders.join(', ')}`);
log.info(`Using selected provider: ${selectedProvider || 'none'}`);

// Try providers in precedence order
for (const provider of preferredProviders) {

@ -12,6 +12,7 @@
import log from "../log.js";
import options from "../options.js";
import becca from "../../becca/becca.js";
import beccaLoader from "../../becca/becca_loader.js";
import vectorStore from "./embeddings/index.js";
import providerManager from "./providers/providers.js";
import { ContextExtractor } from "./context/index.js";
@ -21,6 +22,7 @@ import sqlInit from "../sql_init.js";
import { CONTEXT_PROMPTS } from './constants/llm_prompt_constants.js';
import { SEARCH_CONSTANTS } from './constants/search_constants.js';
import { isNoteExcludedFromAI } from "./utils/ai_exclusion_utils.js";
import { hasWorkingEmbeddingProviders } from "./provider_validation.js";

export class IndexService {
private initialized = false;
@ -46,47 +48,16 @@ export class IndexService {
async initialize() {
if (this.initialized) return;

try {
// Check if database is initialized before proceeding
if (!sqlInit.isDbInitialized()) {
log.info("Index service: Database not initialized yet, skipping initialization");
return;
}

const aiEnabled = options.getOptionOrNull('aiEnabled') === "true";
if (!aiEnabled) {
log.info("Index service: AI features disabled, skipping initialization");
return;
}

// Check if embedding system is ready
const providers = await providerManager.getEnabledEmbeddingProviders();
if (!providers || providers.length === 0) {
throw new Error("No embedding providers available");
}

// Check if this instance should process embeddings
const embeddingLocation = await options.getOption('embeddingGenerationLocation') || 'client';
const isSyncServer = await this.isSyncServerForEmbeddings();
const shouldProcessEmbeddings = embeddingLocation === 'client' || isSyncServer;

// Setup automatic indexing if enabled and this instance should process embeddings
if (await options.getOptionBool('embeddingAutoUpdateEnabled') && shouldProcessEmbeddings) {
this.setupAutomaticIndexing();
log.info(`Index service: Automatic indexing enabled, processing embeddings ${isSyncServer ? 'as sync server' : 'as client'}`);
} else if (await options.getOptionBool('embeddingAutoUpdateEnabled')) {
log.info("Index service: Automatic indexing enabled, but this instance is not configured to process embeddings");
}

// Listen for note changes to update index
// Setup event listeners for note changes
this.setupEventListeners();

this.initialized = true;
log.info("Index service initialized successfully");
} catch (error: any) {
log.error(`Error initializing index service: ${error.message || "Unknown error"}`);
throw error;
// Setup automatic indexing if enabled
if (await options.getOptionBool('embeddingAutoUpdateEnabled')) {
this.setupAutomaticIndexing();
}

this.initialized = true;
log.info("Index service initialized");
}

/**
@ -139,23 +110,7 @@ export class IndexService {
this.automaticIndexingInterval = setInterval(async () => {
try {
if (!this.indexingInProgress) {
// Check if this instance should process embeddings
const embeddingLocation = await options.getOption('embeddingGenerationLocation') || 'client';
const isSyncServer = await this.isSyncServerForEmbeddings();
const shouldProcessEmbeddings = embeddingLocation === 'client' || isSyncServer;

if (!shouldProcessEmbeddings) {
// This instance is not configured to process embeddings
return;
}

const stats = await vectorStore.getEmbeddingStats();

// Only run automatic indexing if we're below 95% completion
if (stats.percentComplete < 95) {
log.info(`Starting automatic indexing (current completion: ${stats.percentComplete}%)`);
await this.runBatchIndexing(50); // Process 50 notes at a time
}
await this.runBatchIndexing(50); // Processing logic handles sync server checks
}
} catch (error: any) {
log.error(`Error in automatic indexing: ${error.message || "Unknown error"}`);
@ -265,7 +220,7 @@ export class IndexService {
this.indexRebuildTotal = totalNotes;

log.info("No embeddings found, starting full embedding generation first");
await vectorStore.reprocessAllNotes();
await this.reprocessAllNotes();
log.info("Full embedding generation initiated");
} else {
// For index rebuild, use the number of embeddings as the total
@ -292,7 +247,7 @@ export class IndexService {
// Only start indexing if we're below 90% completion or if embeddings exist but need optimization
if (stats.percentComplete < 90) {
log.info("Embedding coverage below 90%, starting full embedding generation");
await vectorStore.reprocessAllNotes();
await this.reprocessAllNotes();
log.info("Full embedding generation initiated");
} else {
log.info(`Embedding coverage at ${stats.percentComplete}%, starting index optimization`);
@ -378,11 +333,10 @@ export class IndexService {

if (!shouldProcessEmbeddings) {
// This instance is not configured to process embeddings
log.info("Skipping batch indexing as this instance is not configured to process embeddings");
return false;
}

// Process the embedding queue
// Process the embedding queue (batch size is controlled by embeddingBatchSize option)
await vectorStore.processEmbeddingQueue();

return true;
@ -491,51 +445,14 @@ export class IndexService {
}

try {
// Get all enabled embedding providers
const providers = await providerManager.getEnabledEmbeddingProviders();
if (!providers || providers.length === 0) {
throw new Error("No embedding providers available");
}

// Get the embedding provider precedence
const options = (await import('../options.js')).default;
let preferredProviders: string[] = [];

const embeddingPrecedence = await options.getOption('embeddingProviderPrecedence');
let provider;

if (embeddingPrecedence) {
// Parse the precedence string
if (embeddingPrecedence.startsWith('[') && embeddingPrecedence.endsWith(']')) {
preferredProviders = JSON.parse(embeddingPrecedence);
} else if (typeof embeddingPrecedence === 'string') {
if (embeddingPrecedence.includes(',')) {
preferredProviders = embeddingPrecedence.split(',').map(p => p.trim());
} else {
preferredProviders = [embeddingPrecedence];
}
}

// Find first enabled provider by precedence order
for (const providerName of preferredProviders) {
const matchedProvider = providers.find(p => p.name === providerName);
if (matchedProvider) {
provider = matchedProvider;
break;
}
}

// If no match found, use first available
if (!provider && providers.length > 0) {
provider = providers[0];
}
} else {
// Default to first available provider
provider = providers[0];
}
// Get the selected embedding provider on-demand
const selectedEmbeddingProvider = await options.getOption('embeddingSelectedProvider');
const provider = selectedEmbeddingProvider
? await providerManager.getOrCreateEmbeddingProvider(selectedEmbeddingProvider)
: (await providerManager.getEnabledEmbeddingProviders())[0];

if (!provider) {
throw new Error("No suitable embedding provider found");
throw new Error("No embedding provider available");
}

log.info(`Searching with embedding provider: ${provider.name}, model: ${provider.getConfig().model}`);
@ -693,6 +610,12 @@ export class IndexService {
}

try {
// Get embedding providers on-demand
const providers = await providerManager.getEnabledEmbeddingProviders();
if (providers.length === 0) {
return "I don't have access to your note embeddings. Please configure an embedding provider in your AI settings.";
}

// Find similar notes to the query
const similarNotes = await this.findSimilarNotes(
query,
@ -828,9 +751,13 @@ export class IndexService {
// Get complete note context for indexing
const context = await vectorStore.getNoteEmbeddingContext(noteId);

// Queue note for embedding with all available providers
const providers = await providerManager.getEnabledEmbeddingProviders();
for (const provider of providers) {
// Generate embedding with the selected provider
const selectedEmbeddingProvider = await options.getOption('embeddingSelectedProvider');
const provider = selectedEmbeddingProvider
? await providerManager.getOrCreateEmbeddingProvider(selectedEmbeddingProvider)
: (await providerManager.getEnabledEmbeddingProviders())[0];

if (provider) {
try {
const embedding = await provider.generateNoteEmbeddings(context);
if (embedding) {
@ -853,6 +780,189 @@ export class IndexService {
return false;
}
}

/**
* Start embedding generation (called when AI is enabled)
*/
async startEmbeddingGeneration() {
try {
log.info("Starting embedding generation system");

const aiEnabled = options.getOptionOrNull('aiEnabled') === "true";
if (!aiEnabled) {
log.error("Cannot start embedding generation - AI features are disabled");
throw new Error("AI features must be enabled first");
}

// Re-initialize if needed
if (!this.initialized) {
await this.initialize();
}

// Check if this instance should process embeddings
const embeddingLocation = await options.getOption('embeddingGenerationLocation') || 'client';
const isSyncServer = await this.isSyncServerForEmbeddings();
const shouldProcessEmbeddings = embeddingLocation === 'client' || isSyncServer;

if (!shouldProcessEmbeddings) {
log.info("This instance is not configured to process embeddings");
return;
}

// Get embedding providers (will be created on-demand when needed)
const providers = await providerManager.getEnabledEmbeddingProviders();
if (providers.length === 0) {
log.info("No embedding providers configured, but continuing initialization");
} else {
log.info(`Found ${providers.length} embedding providers: ${providers.map(p => p.name).join(', ')}`);
}

// Setup automatic indexing if enabled
if (await options.getOptionBool('embeddingAutoUpdateEnabled')) {
this.setupAutomaticIndexing();
log.info(`Automatic embedding indexing started ${isSyncServer ? 'as sync server' : 'as client'}`);
}

// Start background processing of the embedding queue
const { setupEmbeddingBackgroundProcessing } = await import('./embeddings/events.js');
await setupEmbeddingBackgroundProcessing();

// Re-initialize event listeners
this.setupEventListeners();

// Queue notes that don't have embeddings for current providers
await this.queueNotesForMissingEmbeddings();

// Start processing the queue immediately
await this.runBatchIndexing(20);

log.info("Embedding generation started successfully");
} catch (error: any) {
log.error(`Error starting embedding generation: ${error.message || "Unknown error"}`);
throw error;
}
}
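
This start method pairs with stopEmbeddingGeneration below and with the aiEnabled handling in the provider-change listener earlier in this commit; an illustrative call sequence (singleton import assumed):

import indexService from './index_service.js'; // path illustrative

async function onAiEnabledChanged(enabled: boolean): Promise<void> {
    if (enabled) {
        await indexService.startEmbeddingGeneration(); // queues and processes embeddings
    } else {
        await indexService.stopEmbeddingGeneration();  // clears intervals and providers
    }
}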
    /**
     * Queue notes that don't have embeddings for current provider settings
     */
    async queueNotesForMissingEmbeddings() {
        try {
            // Wait for becca to be fully loaded before accessing notes
            await beccaLoader.beccaLoaded;

            // Get all non-deleted notes
            const allNotes = Object.values(becca.notes).filter(note => !note.isDeleted);

            // Get enabled providers
            const providers = await providerManager.getEnabledEmbeddingProviders();
            if (providers.length === 0) {
                return;
            }

            let queuedCount = 0;
            let excludedCount = 0;

            // Process notes in batches to avoid overwhelming the system
            const batchSize = 100;
            for (let i = 0; i < allNotes.length; i += batchSize) {
                const batch = allNotes.slice(i, i + batchSize);

                for (const note of batch) {
                    try {
                        // Skip notes excluded from AI
                        if (isNoteExcludedFromAI(note)) {
                            excludedCount++;
                            continue;
                        }

                        // Check if note needs embeddings for any enabled provider
                        let needsEmbedding = false;

                        for (const provider of providers) {
                            const config = provider.getConfig();
                            const existingEmbedding = await vectorStore.getEmbeddingForNote(
                                note.noteId,
                                provider.name,
                                config.model
                            );

                            if (!existingEmbedding) {
                                needsEmbedding = true;
                                break;
                            }
                        }

                        if (needsEmbedding) {
                            await vectorStore.queueNoteForEmbedding(note.noteId, 'UPDATE');
                            queuedCount++;
                        }
                    } catch (error: any) {
                        log.error(`Error checking embeddings for note ${note.noteId}: ${error.message || 'Unknown error'}`);
                    }
                }
            }
        } catch (error: any) {
            log.error(`Error queuing notes for missing embeddings: ${error.message || 'Unknown error'}`);
        }
    }
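    // The batch loop above bounds memory and database pressure by touching at
    // most `batchSize` notes per pass. The same pattern as a generic helper, for
    // illustration only (`processInBatches` does not exist in this codebase):
    //
    //     async function processInBatches<T>(items: T[], batchSize: number,
    //                                        handle: (item: T) => Promise<void>) {
    //         for (let i = 0; i < items.length; i += batchSize) {
    //             for (const item of items.slice(i, i + batchSize)) {
    //                 await handle(item);
    //             }
    //         }
    //     }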
    /**
     * Reprocess all notes to update embeddings
     */
    async reprocessAllNotes() {
        if (!this.initialized) {
            await this.initialize();
        }

        try {
            // Get all non-deleted note IDs
            const noteIds = await sql.getColumn("SELECT noteId FROM notes WHERE isDeleted = 0");

            // Process each note ID
            for (const noteId of noteIds) {
                await vectorStore.queueNoteForEmbedding(noteId as string, 'UPDATE');
            }
        } catch (error: any) {
            log.error(`Error reprocessing all notes: ${error.message || 'Unknown error'}`);
            throw error;
        }
    }

    /**
     * Stop embedding generation (called when AI is disabled)
     */
    async stopEmbeddingGeneration() {
        try {
            log.info("Stopping embedding generation system");

            // Clear automatic indexing interval
            if (this.automaticIndexingInterval) {
                clearInterval(this.automaticIndexingInterval);
                this.automaticIndexingInterval = undefined;
                log.info("Automatic indexing stopped");
            }

            // Stop the background processing from embeddings/events.ts
            const { stopEmbeddingBackgroundProcessing } = await import('./embeddings/events.js');
            stopEmbeddingBackgroundProcessing();

            // Clear all embedding providers to clean up resources
            providerManager.clearAllEmbeddingProviders();

            // Mark as not indexing
            this.indexingInProgress = false;
            this.indexRebuildInProgress = false;

            log.info("Embedding generation stopped successfully");
        } catch (error: any) {
            log.error(`Error stopping embedding generation: ${error.message || "Unknown error"}`);
            throw error;
        }
    }
}

// Create singleton instance
@@ -28,9 +28,9 @@ export interface AIServiceManagerConfig {
 * Interface for managing AI service providers
 */
export interface IAIServiceManager {
-    getService(provider?: string): AIService;
+    getService(provider?: string): Promise<AIService>;
    getAvailableProviders(): string[];
-    getPreferredProvider(): string;
+    getSelectedProvider(): string;
    isProviderAvailable(provider: string): boolean;
    getProviderMetadata(provider: string): ProviderMetadata | null;
    getAIEnabled(): boolean;
@@ -46,8 +46,8 @@ export interface ModelCapabilities {
 */
export interface AIConfig {
    enabled: boolean;
-    providerPrecedence: ProviderPrecedenceConfig;
-    embeddingProviderPrecedence: EmbeddingProviderPrecedenceConfig;
+    selectedProvider: ProviderType | null;
+    selectedEmbeddingProvider: EmbeddingProviderType | null;
    defaultModels: Record<ProviderType, string | undefined>;
    providerSettings: ProviderSettings;
}
@@ -87,7 +87,7 @@ export type ProviderType = 'openai' | 'anthropic' | 'ollama';

/**
 * Valid embedding provider types
 */
-export type EmbeddingProviderType = 'openai' | 'ollama' | 'local';
+export type EmbeddingProviderType = 'openai' | 'voyage' | 'ollama' | 'local';
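// A runtime check against the widened union can be derived from it; this type
// guard is an illustrative sketch, not part of this commit:
//
//     const EMBEDDING_PROVIDER_TYPES = ['openai', 'voyage', 'ollama', 'local'] as const;
//     function isEmbeddingProviderType(value: string): value is EmbeddingProviderType {
//         return (EMBEDDING_PROVIDER_TYPES as readonly string[]).includes(value);
//     }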
/**
 * Model identifier with provider prefix (e.g., "openai:gpt-4" or "ollama:llama2")
@@ -298,6 +298,9 @@ export class ChatPipeline {
        this.updateStageMetrics('llmCompletion', llmStartTime);
        log.info(`Received LLM response from model: ${completion.response.model}, provider: ${completion.response.provider}`);

+       // Track whether content has been streamed to prevent duplication
+       let hasStreamedContent = false;
+
        // Handle streaming if enabled and available
        // Use shouldEnableStream variable which contains our streaming decision
        if (shouldEnableStream && completion.response.stream && streamCallback) {
@@ -311,6 +314,9 @@ export class ChatPipeline {

                // Forward to callback with original chunk data in case it contains additional information
                streamCallback(processedChunk.text, processedChunk.done, chunk);
+
+               // Mark that we have streamed content to prevent duplication
+               hasStreamedContent = true;
            });
        }
@@ -767,11 +773,15 @@ export class ChatPipeline {
                const responseText = currentResponse.text || "";
                log.info(`Resuming streaming with final response: ${responseText.length} chars`);

-               if (responseText.length > 0) {
-                   // Resume streaming with the final response text
+               if (responseText.length > 0 && !hasStreamedContent) {
+                   // Resume streaming with the final response text only if we haven't already streamed content
                    // This is where we send the definitive done:true signal with the complete content
                    streamCallback(responseText, true);
                    log.info(`Sent final response with done=true signal and text content`);
+               } else if (hasStreamedContent) {
+                   log.info(`Content already streamed, sending done=true signal only after tool execution`);
+                   // Just send the done signal without duplicating content
+                   streamCallback('', true);
                } else {
                    // For Anthropic, sometimes text is empty but response is in stream
                    if ((currentResponse.provider === 'Anthropic' || currentResponse.provider === 'OpenAI') && currentResponse.stream) {
@@ -803,13 +813,17 @@ export class ChatPipeline {
            log.info(`LLM response did not contain any tool calls, skipping tool execution`);

            // Handle streaming for responses without tool calls
-           if (shouldEnableStream && streamCallback) {
+           if (shouldEnableStream && streamCallback && !hasStreamedContent) {
                log.info(`Sending final streaming response without tool calls: ${currentResponse.text.length} chars`);

                // Send the final response with done=true to complete the streaming
                streamCallback(currentResponse.text, true);

                log.info(`Sent final non-tool response with done=true signal`);
+           } else if (shouldEnableStream && streamCallback && hasStreamedContent) {
+               log.info(`Content already streamed, sending done=true signal only`);
+               // Just send the done signal without duplicating content
+               streamCallback('', true);
            }
        }
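        // The hasStreamedContent guard reduces to this invariant (sketch only;
        // `processStream` and `finalText` stand in for the surrounding pipeline code):
        //
        //     let hasStreamedContent = false;
        //     await processStream(response.stream, (chunk) => {
        //         streamCallback(chunk.text, false, chunk);   // incremental chunks
        //         hasStreamedContent = true;
        //     });
        //     // exactly one done=true signal, never repeating already-streamed text:
        //     streamCallback(hasStreamedContent ? '' : finalText, true);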
@@ -43,7 +43,7 @@ export class ContextExtractionStage {

        // Get enhanced context from the context service
        const contextService = aiServiceManager.getContextService();
-       const llmService = aiServiceManager.getService();
+       const llmService = await aiServiceManager.getService();

        if (contextService) {
            // Use unified context service to get smart context
@@ -104,7 +104,7 @@ export class LLMCompletionStage extends BasePipelineStage<LLMCompletionInput, {

        // Use specific provider if available
        if (selectedProvider && aiServiceManager.isProviderAvailable(selectedProvider)) {
-           const service = aiServiceManager.getService(selectedProvider);
+           const service = await aiServiceManager.getService(selectedProvider);
            log.info(`[LLMCompletionStage] Using specific service for ${selectedProvider}`);

            // Generate completion and wrap with enhanced stream handling
@@ -11,8 +11,7 @@ import type { ServiceProviders } from '../../interfaces/ai_service_interfaces.js';

// Import new configuration system
import {
-    getProviderPrecedence,
-    getPreferredProvider,
+    getSelectedProvider,
    parseModelIdentifier,
    getDefaultModelForProvider,
    createModelConfig
@@ -99,22 +98,33 @@ export class ModelSelectionStage extends BasePipelineStage<ModelSelectionInput,
            }
        }

-       // Get default provider and model using the new configuration system
+       // Get selected provider and model using the new configuration system
        try {
-           // Use the new configuration helpers - no string parsing!
-           const preferredProvider = await getPreferredProvider();
+           // Use the configuration helpers to get a validated model config
+           const selectedProvider = await getSelectedProvider();

-           if (!preferredProvider) {
-               throw new Error('No AI providers are configured. Please check your AI settings.');
+           if (!selectedProvider) {
+               throw new Error('No AI provider is selected. Please select a provider in your AI settings.');
            }

-           const modelName = await getDefaultModelForProvider(preferredProvider);
+           // First try to get a valid model config (this checks both selection and configuration)
+           const { getValidModelConfig } = await import('../../config/configuration_helpers.js');
+           const modelConfig = await getValidModelConfig(selectedProvider);

-           if (!modelName) {
-               throw new Error(`No default model configured for provider ${preferredProvider}. Please set a default model in your AI settings.`);
+           if (modelConfig) {
+               // We have a valid configured model
+               updatedOptions.model = modelConfig.model;
+           } else {
+               // No model configured, try to fetch and set a default from the service
+               const fetchedModel = await this.fetchAndSetDefaultModel(selectedProvider);
+               if (!fetchedModel) {
+                   throw new Error(`No default model configured for provider ${selectedProvider}. Please set a default model in your AI settings or ensure the provider service is available.`);
+               }
+               // Use the fetched model
+               updatedOptions.model = fetchedModel;
            }

-           log.info(`Selected provider: ${preferredProvider}, model: ${modelName}`);
+           log.info(`Selected provider: ${selectedProvider}, model: ${updatedOptions.model}`);

            // Determine query complexity
            let queryComplexity = 'low';
@@ -142,15 +152,14 @@ export class ModelSelectionStage extends BasePipelineStage<ModelSelectionInput,
                queryComplexity = contentLength > SEARCH_CONSTANTS.CONTEXT.CONTENT_LENGTH.HIGH_THRESHOLD ? 'high' : 'medium';
            }

-           // Set the model and add provider metadata
-           updatedOptions.model = modelName;
-           this.addProviderMetadata(updatedOptions, preferredProvider as ServiceProviders, modelName);
+           // Add provider metadata (model is already set above)
+           this.addProviderMetadata(updatedOptions, selectedProvider as ServiceProviders, updatedOptions.model);

-           log.info(`Selected model: ${modelName} from provider: ${preferredProvider} for query complexity: ${queryComplexity}`);
+           log.info(`Selected model: ${updatedOptions.model} from provider: ${selectedProvider} for query complexity: ${queryComplexity}`);
            log.info(`[ModelSelectionStage] Final options: ${JSON.stringify({
                model: updatedOptions.model,
                stream: updatedOptions.stream,
-               provider: preferredProvider,
+               provider: selectedProvider,
                enableTools: updatedOptions.enableTools
            })}`);
@@ -210,39 +219,41 @@ export class ModelSelectionStage extends BasePipelineStage<ModelSelectionInput,
    }

    /**
-    * Determine model based on provider precedence using the new configuration system
+    * Determine model based on selected provider using the new configuration system
+    * This method is now simplified and delegates to the main model selection logic
     */
    private async determineDefaultModel(input: ModelSelectionInput): Promise<string> {
        try {
-           // Use the new configuration system
-           const providers = await getProviderPrecedence();
+           // Use the same logic as the main process method
+           const { getValidModelConfig, getSelectedProvider } = await import('../../config/configuration_helpers.js');
+           const selectedProvider = await getSelectedProvider();

-           // Use only providers that are available
-           const availableProviders = providers.filter(provider =>
-               aiServiceManager.isProviderAvailable(provider));
-
-           if (availableProviders.length === 0) {
-               throw new Error('No AI providers are available');
+           if (!selectedProvider) {
+               throw new Error('No AI provider is selected. Please select a provider in your AI settings.');
            }

-           // Get the first available provider and its default model
-           const defaultProvider = availableProviders[0];
-           const defaultModel = await getDefaultModelForProvider(defaultProvider);
+           // Check if the provider is available through the service manager
+           if (!aiServiceManager.isProviderAvailable(selectedProvider)) {
+               throw new Error(`Selected provider ${selectedProvider} is not available`);
+           }

-           if (!defaultModel) {
-               throw new Error(`No default model configured for provider ${defaultProvider}. Please configure a default model in your AI settings.`);
+           // Try to get a valid model config
+           const modelConfig = await getValidModelConfig(selectedProvider);
+
+           if (!modelConfig) {
+               throw new Error(`No default model configured for provider ${selectedProvider}. Please configure a default model in your AI settings.`);
            }

            // Set provider metadata
            if (!input.options.providerMetadata) {
                input.options.providerMetadata = {
-                   provider: defaultProvider as 'openai' | 'anthropic' | 'ollama' | 'local',
-                   modelId: defaultModel
+                   provider: selectedProvider as 'openai' | 'anthropic' | 'ollama' | 'local',
+                   modelId: modelConfig.model
                };
            }

-           log.info(`Selected default model ${defaultModel} from provider ${defaultProvider}`);
-           return defaultModel;
+           log.info(`Selected default model ${modelConfig.model} from provider ${selectedProvider}`);
+           return modelConfig.model;
        } catch (error) {
            log.error(`Error determining default model: ${error}`);
            throw error; // Don't provide fallback defaults, let the error propagate
@@ -271,4 +282,49 @@ export class ModelSelectionStage extends BasePipelineStage<ModelSelectionInput,
        return MODEL_CAPABILITIES['default'].contextWindowTokens;
    }

+   /**
+    * Use AI service manager to get a configured model for the provider
+    * This eliminates duplication and uses the existing service layer
+    */
+   private async fetchAndSetDefaultModel(provider: ProviderType): Promise<string | null> {
+       try {
+           log.info(`Getting default model for provider ${provider} using AI service manager`);
+
+           // Use the existing AI service manager instead of duplicating API calls
+           const service = await aiServiceManager.getInstance().getService(provider);
+
+           if (!service || !service.isAvailable()) {
+               log.info(`Provider ${provider} service is not available`);
+               return null;
+           }
+
+           // Check if the service has a method to get available models
+           if (typeof (service as any).getAvailableModels === 'function') {
+               try {
+                   const models = await (service as any).getAvailableModels();
+                   if (models && models.length > 0) {
+                       // Use the first available model - no hardcoded preferences
+                       const selectedModel = models[0];
+
+                       // Import server-side options to update the default model
+                       const optionService = (await import('../../../options.js')).default;
+                       const optionKey = `${provider}DefaultModel` as const;
+
+                       await optionService.setOption(optionKey, selectedModel);
+                       log.info(`Set default ${provider} model to: ${selectedModel}`);
+                       return selectedModel;
+                   }
+               } catch (modelError) {
+                   log.error(`Error fetching models from ${provider} service: ${modelError}`);
+               }
+           }
+
+           log.info(`Provider ${provider} does not support dynamic model fetching`);
+           return null;
+       } catch (error) {
+           log.error(`Error getting default model for provider ${provider}: ${error}`);
+           return null;
+       }
+   }
}
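// The duck-typed probe in fetchAndSetDefaultModel can also be expressed as a
// reusable type guard; `ModelListing` is a hypothetical interface used only
// for this sketch, not an interface defined in this codebase:
//
//     interface ModelListing { getAvailableModels(): Promise<string[]>; }
//     function canListModels(service: unknown): service is ModelListing {
//         return typeof (service as ModelListing)?.getAvailableModels === 'function';
//     }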
@@ -50,7 +50,7 @@ export class SemanticContextExtractionStage extends BasePipelineStage<SemanticCo
        }

        // Step 2: Format search results into a context string
-       const provider = await providerManager.getPreferredEmbeddingProvider();
+       const provider = await providerManager.getSelectedEmbeddingProvider();
        const providerId = provider?.name || 'default';

        const context = await contextFormatter.buildContextFromNotes(
apps/server/src/services/llm/provider_validation.ts (new file, 177 lines)
@@ -0,0 +1,177 @@
/**
 * Provider Validation Service
 *
 * Validates AI provider configurations before initializing the embedding system.
 * This prevents startup errors when AI is enabled but providers are misconfigured.
 */

import log from "../log.js";
import options from "../options.js";
import type { EmbeddingProvider } from "./embeddings/embeddings_interface.js";

export interface ProviderValidationResult {
    hasValidProviders: boolean;
    validEmbeddingProviders: EmbeddingProvider[];
    validChatProviders: string[];
    errors: string[];
    warnings: string[];
}

/**
 * Simplified provider validation - just checks configuration without creating providers
 */
export async function validateProviders(): Promise<ProviderValidationResult> {
    const result: ProviderValidationResult = {
        hasValidProviders: false,
        validEmbeddingProviders: [],
        validChatProviders: [],
        errors: [],
        warnings: []
    };

    try {
        // Check if AI is enabled
        const aiEnabled = await options.getOptionBool('aiEnabled');
        if (!aiEnabled) {
            result.warnings.push("AI features are disabled");
            return result;
        }

        // Check configuration only - don't create providers
        await checkEmbeddingProviderConfigs(result);
        await checkChatProviderConfigs(result);

        // Determine if we have any valid providers based on configuration
        result.hasValidProviders = result.validChatProviders.length > 0;

        if (!result.hasValidProviders) {
            result.errors.push("No valid AI providers are configured");
        }
    } catch (error: any) {
        result.errors.push(`Error during provider validation: ${error.message || 'Unknown error'}`);
    }

    return result;
}

/**
 * Check embedding provider configurations without creating providers
 */
async function checkEmbeddingProviderConfigs(result: ProviderValidationResult): Promise<void> {
    try {
        // Check OpenAI embedding configuration
        const openaiApiKey = await options.getOption('openaiApiKey');
        const openaiBaseUrl = await options.getOption('openaiBaseUrl');
        if (openaiApiKey || openaiBaseUrl) {
            if (!openaiApiKey) {
                result.warnings.push("OpenAI embedding: No API key (may work with compatible endpoints)");
            }
            log.info("OpenAI embedding provider configuration available");
        }

        // Check Ollama embedding configuration
        const ollamaEmbeddingBaseUrl = await options.getOption('ollamaEmbeddingBaseUrl');
        if (ollamaEmbeddingBaseUrl) {
            log.info("Ollama embedding provider configuration available");
        }

        // Check Voyage embedding configuration
        const voyageApiKey = await options.getOption('voyageApiKey' as any);
        if (voyageApiKey) {
            log.info("Voyage embedding provider configuration available");
        }

        // Local provider is always available
        log.info("Local embedding provider available as fallback");
    } catch (error: any) {
        result.errors.push(`Error checking embedding provider configs: ${error.message || 'Unknown error'}`);
    }
}

/**
 * Check chat provider configurations without creating providers
 */
async function checkChatProviderConfigs(result: ProviderValidationResult): Promise<void> {
    try {
        // Check OpenAI chat provider
        const openaiApiKey = await options.getOption('openaiApiKey');
        const openaiBaseUrl = await options.getOption('openaiBaseUrl');

        if (openaiApiKey || openaiBaseUrl) {
            if (!openaiApiKey) {
                result.warnings.push("OpenAI chat: No API key (may work with compatible endpoints)");
            }
            result.validChatProviders.push('openai');
        }

        // Check Anthropic chat provider
        const anthropicApiKey = await options.getOption('anthropicApiKey');
        if (anthropicApiKey) {
            result.validChatProviders.push('anthropic');
        }

        // Check Ollama chat provider
        const ollamaBaseUrl = await options.getOption('ollamaBaseUrl');
        if (ollamaBaseUrl) {
            result.validChatProviders.push('ollama');
        }

        if (result.validChatProviders.length === 0) {
            result.warnings.push("No chat providers configured. Please configure at least one provider.");
        }
    } catch (error: any) {
        result.errors.push(`Error checking chat provider configs: ${error.message || 'Unknown error'}`);
    }
}

/**
 * Check if any chat providers are configured
 */
export async function hasWorkingChatProviders(): Promise<boolean> {
    const validation = await validateProviders();
    return validation.validChatProviders.length > 0;
}

/**
 * Check if any embedding providers are configured (simplified)
 */
export async function hasWorkingEmbeddingProviders(): Promise<boolean> {
    if (!(await options.getOptionBool('aiEnabled'))) {
        return false;
    }

    // Check if any embedding provider is configured
    const openaiKey = await options.getOption('openaiApiKey');
    const openaiBaseUrl = await options.getOption('openaiBaseUrl');
    const ollamaUrl = await options.getOption('ollamaEmbeddingBaseUrl');
    const voyageKey = await options.getOption('voyageApiKey' as any);

    // Local provider is always available as fallback
    return !!(openaiKey || openaiBaseUrl || ollamaUrl || voyageKey) || true;
}

/**
 * Log validation results in a user-friendly way
 */
export function logValidationResults(validation: ProviderValidationResult): void {
    if (validation.hasValidProviders) {
        log.info(`AI provider validation passed: ${validation.validEmbeddingProviders.length} embedding providers, ${validation.validChatProviders.length} chat providers`);

        if (validation.validEmbeddingProviders.length > 0) {
            log.info(`Working embedding providers: ${validation.validEmbeddingProviders.map(p => p.name).join(', ')}`);
        }

        if (validation.validChatProviders.length > 0) {
            log.info(`Working chat providers: ${validation.validChatProviders.join(', ')}`);
        }
    } else {
        log.info("AI provider validation failed: No working providers found");
    }

    validation.warnings.forEach(warning => log.info(`Provider validation: ${warning}`));
    validation.errors.forEach(error => log.error(`Provider validation: ${error}`));
}
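A plausible call site for this new module, sketched for orientation (the bootstrap function below is an assumption, not code from this commit):

    import { validateProviders, logValidationResults } from './provider_validation.js';

    async function verifyAiProvidersAtStartup(): Promise<boolean> {
        const validation = await validateProviders();
        logValidationResults(validation);
        // Embedding startup can be skipped when nothing usable is configured
        return validation.hasValidProviders;
    }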
@@ -606,4 +606,12 @@ export class AnthropicService extends BaseAIService {

        return convertedTools;
    }

+   /**
+    * Clear cached Anthropic client to force recreation with new settings
+    */
+   clearCache(): void {
+       this.client = null;
+       log.info('Anthropic client cache cleared');
+   }
}
@@ -526,4 +526,13 @@ export class OllamaService extends BaseAIService {
        log.info(`Added tool execution feedback: ${toolExecutionStatus.length} statuses`);
        return updatedMessages;
    }

+   /**
+    * Clear cached Ollama client to force recreation with new settings
+    */
+   clearCache(): void {
+       // Ollama service doesn't maintain a persistent client like OpenAI/Anthropic,
+       // but we can clear any future cached state here if needed
+       log.info('Ollama client cache cleared (no persistent client to clear)');
+   }
}
@@ -14,7 +14,9 @@ export class OpenAIService extends BaseAIService {
    }

    override isAvailable(): boolean {
-       return super.isAvailable() && !!options.getOption('openaiApiKey');
+       // Make API key optional to support OpenAI-compatible endpoints that don't require authentication
+       // The provider is considered available as long as the parent checks pass
+       return super.isAvailable();
    }

    private getClient(apiKey: string, baseUrl?: string): OpenAI {
@@ -29,7 +31,7 @@ export class OpenAIService extends BaseAIService {

    async generateChatCompletion(messages: Message[], opts: ChatCompletionOptions = {}): Promise<ChatResponse> {
        if (!this.isAvailable()) {
-           throw new Error('OpenAI service is not available. Check API key and AI settings.');
+           throw new Error('OpenAI service is not available. Check AI settings.');
        }

        // Get provider-specific options from the central provider manager
@@ -257,4 +259,12 @@ export class OpenAIService extends BaseAIService {
            throw error;
        }
    }

+   /**
+    * Clear cached OpenAI client to force recreation with new settings
+    */
+   clearCache(): void {
+       this.openai = null;
+       log.info('OpenAI client cache cleared');
+   }
}
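With the API key made optional, an OpenAI-compatible endpoint that skips authentication (for example a local llama.cpp or LM Studio server) only needs a base URL. A configuration sketch using the option names from this diff; the URL is an example value, not a default from this commit:

    await optionService.setOption('openaiBaseUrl', 'http://localhost:8080/v1');
    await optionService.setOption('openaiApiKey', ''); // acceptable for keyless endpoints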
@@ -86,6 +86,29 @@ export function registerEmbeddingProvider(provider: EmbeddingProvider) {
    log.info(`Registered embedding provider: ${provider.name}`);
}

+/**
+ * Unregister an embedding provider
+ */
+export function unregisterEmbeddingProvider(name: string): boolean {
+    const existed = providers.has(name);
+    if (existed) {
+        providers.delete(name);
+        log.info(`Unregistered embedding provider: ${name}`);
+    }
+    return existed;
+}
+
+/**
+ * Clear all embedding providers
+ */
+export function clearAllEmbeddingProviders(): void {
+    const providerNames = Array.from(providers.keys());
+    providers.clear();
+    if (providerNames.length > 0) {
+        log.info(`Cleared all embedding providers: ${providerNames.join(', ')}`);
+    }
+}
+
/**
 * Get all registered embedding providers
 */
@@ -101,35 +124,126 @@ export function getEmbeddingProvider(name: string): EmbeddingProvider | undefined {
}

/**
- * Get all enabled embedding providers
+ * Get or create a specific embedding provider with inline validation
 */
-export async function getEnabledEmbeddingProviders(): Promise<EmbeddingProvider[]> {
+export async function getOrCreateEmbeddingProvider(providerName: string): Promise<EmbeddingProvider | null> {
+    // Return existing provider if already created and valid
+    const existing = providers.get(providerName);
+    if (existing) {
+        return existing;
+    }
+
+    // Create and validate provider on-demand
+    try {
+        let provider: EmbeddingProvider | null = null;
+
+        switch (providerName) {
+            case 'ollama': {
+                const baseUrl = await options.getOption('ollamaEmbeddingBaseUrl');
+                if (!baseUrl) return null;
+
+                const model = await options.getOption('ollamaEmbeddingModel');
+                provider = new OllamaEmbeddingProvider({
+                    model,
+                    dimension: 768,
+                    type: 'float32',
+                    baseUrl
+                });
+
+                // Validate by initializing (if provider supports it)
+                if ('initialize' in provider && typeof provider.initialize === 'function') {
+                    await provider.initialize();
+                }
+                break;
+            }
+
+            case 'openai': {
+                const apiKey = await options.getOption('openaiApiKey');
+                const baseUrl = await options.getOption('openaiBaseUrl');
+                if (!apiKey && !baseUrl) return null;
+
+                const model = await options.getOption('openaiEmbeddingModel');
+                provider = new OpenAIEmbeddingProvider({
+                    model,
+                    dimension: 1536,
+                    type: 'float32',
+                    apiKey: apiKey || '',
+                    baseUrl: baseUrl || 'https://api.openai.com/v1'
+                });
+
+                if (!apiKey) {
+                    log.info('OpenAI embedding provider created without API key for compatible endpoints');
+                }
+                break;
+            }
+
+            case 'voyage': {
+                const apiKey = await options.getOption('voyageApiKey' as any);
+                if (!apiKey) return null;
+
+                const model = await options.getOption('voyageEmbeddingModel') || 'voyage-2';
+                provider = new VoyageEmbeddingProvider({
+                    model,
+                    dimension: 1024,
+                    type: 'float32',
+                    apiKey,
+                    baseUrl: 'https://api.voyageai.com/v1'
+                });
+                break;
+            }
+
+            case 'local': {
+                provider = new SimpleLocalEmbeddingProvider({
+                    model: 'local',
+                    dimension: 384,
+                    type: 'float32'
+                });
+                break;
+            }
+
+            default:
+                return null;
+        }
+
+        if (provider) {
+            registerEmbeddingProvider(provider);
+            log.info(`Created and validated ${providerName} embedding provider`);
+            return provider;
+        }
+    } catch (error: any) {
+        log.error(`Failed to create ${providerName} embedding provider: ${error.message || 'Unknown error'}`);
+    }
+
+    return null;
+}
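// Usage sketch (illustrative only, not part of this diff): callers request a
// provider by name and handle the null result when its configuration is missing:
//
//     const embedProvider = await getOrCreateEmbeddingProvider('ollama');
//     if (!embedProvider) {
//         throw new Error("Set 'ollamaEmbeddingBaseUrl' to use Ollama embeddings");
//     }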
+/**
+ * Get all enabled embedding providers for the specified feature
+ */
+export async function getEnabledEmbeddingProviders(feature: 'embeddings' | 'chat' = 'embeddings'): Promise<EmbeddingProvider[]> {
    if (!(await options.getOptionBool('aiEnabled'))) {
        return [];
    }

-    // Get providers from database ordered by priority
-    const dbProviders = await sql.getRows(`
-        SELECT providerId, name, config
-        FROM embedding_providers
-        ORDER BY priority DESC`
-    );
-
    const result: EmbeddingProvider[] = [];

-    for (const row of dbProviders) {
-        const rowData = row as any;
-        const provider = providers.get(rowData.name);
+    // Get the selected provider for the feature
+    const selectedProvider = feature === 'embeddings'
+        ? await options.getOption('embeddingSelectedProvider')
+        : await options.getOption('aiSelectedProvider');

-        if (provider) {
+    // Try to get or create the specific selected provider
+    const provider = await getOrCreateEmbeddingProvider(selectedProvider);
+    if (!provider) {
+        throw new Error(`Failed to create selected embedding provider: ${selectedProvider}. Please check your configuration.`);
+    }
    result.push(provider);
-        } else {
-            // Only log error if we haven't logged it before for this provider
-            if (!loggedProviderErrors.has(rowData.name)) {
-                log.error(`Enabled embedding provider ${rowData.name} not found in registered providers`);
-                loggedProviderErrors.add(rowData.name);
-            }
-        }
-    }

+    // Always ensure local provider as fallback
+    const localProvider = await getOrCreateEmbeddingProvider('local');
+    if (localProvider && !result.some(p => p.name === 'local')) {
+        result.push(localProvider);
+    }

    return result;
@@ -232,144 +346,18 @@ export async function getEmbeddingProviderConfigs() {
    return await sql.getRows("SELECT * FROM embedding_providers ORDER BY priority DESC");
}

-/**
- * Initialize the default embedding providers
- */
-export async function initializeDefaultProviders() {
-    // Register built-in providers
-    try {
-        // Register OpenAI provider if API key is configured
-        const openaiApiKey = await options.getOption('openaiApiKey');
-        if (openaiApiKey) {
-            const openaiModel = await options.getOption('openaiEmbeddingModel') || 'text-embedding-3-small';
-            const openaiBaseUrl = await options.getOption('openaiBaseUrl') || 'https://api.openai.com/v1';
-
-            registerEmbeddingProvider(new OpenAIEmbeddingProvider({
-                model: openaiModel,
-                dimension: 1536, // OpenAI's typical dimension
-                type: 'float32',
-                apiKey: openaiApiKey,
-                baseUrl: openaiBaseUrl
-            }));
-
-            // Create OpenAI provider config if it doesn't exist
-            const existingOpenAI = await sql.getRow(
-                "SELECT * FROM embedding_providers WHERE name = ?",
-                ['openai']
-            );
-
-            if (!existingOpenAI) {
-                await createEmbeddingProviderConfig('openai', {
-                    model: openaiModel,
-                    dimension: 1536,
-                    type: 'float32'
-                }, 100);
-            }
-        }
-
-        // Register Voyage provider if API key is configured
-        const voyageApiKey = await options.getOption('voyageApiKey' as any);
-        if (voyageApiKey) {
-            const voyageModel = await options.getOption('voyageEmbeddingModel') || 'voyage-2';
-            const voyageBaseUrl = 'https://api.voyageai.com/v1';
-
-            registerEmbeddingProvider(new VoyageEmbeddingProvider({
-                model: voyageModel,
-                dimension: 1024, // Voyage's embedding dimension
-                type: 'float32',
-                apiKey: voyageApiKey,
-                baseUrl: voyageBaseUrl
-            }));
-
-            // Create Voyage provider config if it doesn't exist
-            const existingVoyage = await sql.getRow(
-                "SELECT * FROM embedding_providers WHERE name = ?",
-                ['voyage']
-            );
-
-            if (!existingVoyage) {
-                await createEmbeddingProviderConfig('voyage', {
-                    model: voyageModel,
-                    dimension: 1024,
-                    type: 'float32'
-                }, 75);
-            }
-        }
-
-        // Register Ollama provider if base URL is configured
-        const ollamaBaseUrl = await options.getOption('ollamaBaseUrl');
-        if (ollamaBaseUrl) {
-            // Use specific embedding models if available
-            const embeddingModel = await options.getOption('ollamaEmbeddingModel');
-
-            try {
-                // Create provider with initial dimension to be updated during initialization
-                const ollamaProvider = new OllamaEmbeddingProvider({
-                    model: embeddingModel,
-                    dimension: 768, // Initial value, will be updated during initialization
-                    type: 'float32',
-                    baseUrl: ollamaBaseUrl
-                });
-
-                // Register the provider
-                registerEmbeddingProvider(ollamaProvider);
-
-                // Initialize the provider to detect model capabilities
-                await ollamaProvider.initialize();
-
-                // Create Ollama provider config if it doesn't exist
-                const existingOllama = await sql.getRow(
-                    "SELECT * FROM embedding_providers WHERE name = ?",
-                    ['ollama']
-                );
-
-                if (!existingOllama) {
-                    await createEmbeddingProviderConfig('ollama', {
-                        model: embeddingModel,
-                        dimension: ollamaProvider.getDimension(),
-                        type: 'float32'
-                    }, 50);
-                }
-            } catch (error: any) {
-                log.error(`Error initializing Ollama embedding provider: ${error.message || 'Unknown error'}`);
-            }
-        }
-
-        // Always register local provider as fallback
-        registerEmbeddingProvider(new SimpleLocalEmbeddingProvider({
-            model: 'local',
-            dimension: 384,
-            type: 'float32'
-        }));
-
-        // Create local provider config if it doesn't exist
-        const existingLocal = await sql.getRow(
-            "SELECT * FROM embedding_providers WHERE name = ?",
-            ['local']
-        );
-
-        if (!existingLocal) {
-            await createEmbeddingProviderConfig('local', {
-                model: 'local',
-                dimension: 384,
-                type: 'float32'
-            }, 10);
-        }
-    } catch (error: any) {
-        log.error(`Error initializing default embedding providers: ${error.message || 'Unknown error'}`);
-    }
-}

export default {
    registerEmbeddingProvider,
+   unregisterEmbeddingProvider,
+   clearAllEmbeddingProviders,
    getEmbeddingProviders,
    getEmbeddingProvider,
    getEnabledEmbeddingProviders,
+   getOrCreateEmbeddingProvider,
    createEmbeddingProviderConfig,
    updateEmbeddingProviderConfig,
    deleteEmbeddingProviderConfig,
-   getEmbeddingProviderConfigs,
-   initializeDefaultProviders
+   getEmbeddingProviderConfigs
};
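// Migration sketch: with initializeDefaultProviders() removed, nothing eagerly
// registers providers at startup; they materialize on first use. A caller of
// the old function would now rely on something like (illustrative only):
//
//     const active = await providerManager.getEnabledEmbeddingProviders('embeddings');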

/**

@@ -382,7 +370,8 @@ export function getOpenAIOptions(
    try {
        const apiKey = options.getOption('openaiApiKey');
        if (!apiKey) {
-           throw new Error('OpenAI API key is not configured');
+           // Log warning but don't throw - some OpenAI-compatible endpoints don't require API keys
+           log.info('OpenAI API key is not configured. This may cause issues with official OpenAI endpoints.');
        }

        const baseUrl = options.getOption('openaiBaseUrl') || PROVIDER_CONSTANTS.OPENAI.BASE_URL;
@@ -407,7 +396,7 @@ export function getOpenAIOptions(

        return {
            // Connection settings
-           apiKey,
+           apiKey: apiKey || '', // Default to empty string if no API key
            baseUrl,

            // Provider metadata
@@ -102,12 +102,7 @@ export class NoteSummarizationTool implements ToolHandler {
        const cleanContent = this.cleanHtml(content);

        // Generate the summary using the AI service
-       const aiService = aiServiceManager.getService();
-
-       if (!aiService) {
-           log.error('No AI service available for summarization');
-           return `Error: No AI service is available for summarization`;
-       }
+       const aiService = await aiServiceManager.getService();

        log.info(`Using ${aiService.getName()} to generate summary`);
@@ -312,16 +312,7 @@ export class RelationshipTool implements ToolHandler {
        }

        // Get the AI service for relationship suggestion
-       const aiService = aiServiceManager.getService();
-
-       if (!aiService) {
-           log.error('No AI service available for relationship suggestions');
-           return {
-               success: false,
-               message: 'AI service not available for relationship suggestions',
-               relatedNotes: relatedResult.relatedNotes
-           };
-       }
+       const aiService = await aiServiceManager.getService();

        log.info(`Using ${aiService.getName()} to suggest relationships for ${relatedResult.relatedNotes.length} related notes`);
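        // Pattern note for the two tool changes above: getService() is now async
        // and presumably throws when no provider is usable, so the explicit null
        // checks are gone. A caller outside an existing try/catch would wrap the
        // call like this (sketch only):
        //
        //     try {
        //         const aiService = await aiServiceManager.getService();
        //         // ... use aiService ...
        //     } catch (e) {
        //         // report "no AI provider configured/available" to the user
        //     }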
@@ -122,9 +122,9 @@ export class SearchNotesTool implements ToolHandler {
            // If summarization is requested
            if (summarize) {
                // Try to get an LLM service for summarization
-               const llmService = aiServiceManager.getService();
-               if (llmService) {
-                   try {
+               const llmService = await aiServiceManager.getService();
+
+               try {
                    const messages = [
                        {
                            role: "system" as const,
@@ -155,7 +155,6 @@ export class SearchNotesTool implements ToolHandler {
                    // Fall through to smart truncation if summarization fails
                }
            }
-           }

            try {
                // Fall back to smart truncation if summarization fails or isn't requested
@@ -195,26 +195,32 @@ const defaultOptions: DefaultOption[] = [
    // AI Options
    { name: "aiEnabled", value: "false", isSynced: true },
    { name: "openaiApiKey", value: "", isSynced: false },
-   { name: "openaiDefaultModel", value: "gpt-4o", isSynced: true },
-   { name: "openaiEmbeddingModel", value: "text-embedding-3-small", isSynced: true },
+   { name: "openaiDefaultModel", value: "", isSynced: true },
+   { name: "openaiEmbeddingModel", value: "", isSynced: true },
    { name: "openaiBaseUrl", value: "https://api.openai.com/v1", isSynced: true },
    { name: "anthropicApiKey", value: "", isSynced: false },
-   { name: "anthropicDefaultModel", value: "claude-3-opus-20240229", isSynced: true },
-   { name: "voyageEmbeddingModel", value: "voyage-2", isSynced: true },
+   { name: "anthropicDefaultModel", value: "", isSynced: true },
+   { name: "voyageEmbeddingModel", value: "", isSynced: true },
    { name: "voyageApiKey", value: "", isSynced: false },
    { name: "anthropicBaseUrl", value: "https://api.anthropic.com/v1", isSynced: true },
    { name: "ollamaEnabled", value: "false", isSynced: true },
-   { name: "ollamaDefaultModel", value: "llama3", isSynced: true },
+   { name: "ollamaDefaultModel", value: "", isSynced: true },
    { name: "ollamaBaseUrl", value: "http://localhost:11434", isSynced: true },
-   { name: "ollamaEmbeddingModel", value: "nomic-embed-text", isSynced: true },
+   { name: "ollamaEmbeddingModel", value: "", isSynced: true },
    { name: "embeddingAutoUpdateEnabled", value: "true", isSynced: true },

+   // Embedding-specific provider options
+   { name: "openaiEmbeddingApiKey", value: "", isSynced: false },
+   { name: "openaiEmbeddingBaseUrl", value: "https://api.openai.com/v1", isSynced: true },
+   { name: "voyageEmbeddingBaseUrl", value: "https://api.voyageai.com/v1", isSynced: true },
+   { name: "ollamaEmbeddingBaseUrl", value: "http://localhost:11434", isSynced: true },
+
    // Adding missing AI options
    { name: "aiTemperature", value: "0.7", isSynced: true },
    { name: "aiSystemPrompt", value: "", isSynced: true },
    { name: "aiProviderPrecedence", value: "openai,anthropic,ollama", isSynced: true },
+   { name: "aiSelectedProvider", value: "openai", isSynced: true },
    { name: "embeddingDimensionStrategy", value: "auto", isSynced: true },
    { name: "embeddingProviderPrecedence", value: "openai,voyage,ollama,local", isSynced: true },
+   { name: "embeddingSelectedProvider", value: "openai", isSynced: true },
    { name: "embeddingSimilarityThreshold", value: "0.75", isSynced: true },
    { name: "enableAutomaticIndexing", value: "true", isSynced: true },
    { name: "maxNotesPerLlmQuery", value: "3", isSynced: true },
@@ -132,26 +132,29 @@ export interface OptionDefinitions extends KeyboardShortcutsOptions<KeyboardActi
    openaiApiKey: string;
    openaiDefaultModel: string;
    openaiEmbeddingModel: string;
+   openaiEmbeddingApiKey: string;
+   openaiEmbeddingBaseUrl: string;
    openaiBaseUrl: string;
    anthropicApiKey: string;
    anthropicDefaultModel: string;
    voyageEmbeddingModel: string;
    voyageApiKey: string;
+   voyageEmbeddingBaseUrl: string;
    anthropicBaseUrl: string;
    ollamaEnabled: boolean;
    ollamaBaseUrl: string;
    ollamaDefaultModel: string;
    ollamaEmbeddingModel: string;
+   ollamaEmbeddingBaseUrl: string;
    codeOpenAiModel: string;
-   aiProviderPrecedence: string;
+   aiSelectedProvider: string;

    // Embedding-related options
    embeddingAutoUpdateEnabled: boolean;
    embeddingUpdateInterval: number;
    embeddingBatchSize: number;
    embeddingDefaultDimension: number;
-   embeddingsDefaultProvider: string;
-   embeddingProviderPrecedence: string;
+   embeddingSelectedProvider: string;
    enableAutomaticIndexing: boolean;
    embeddingGenerationLocation: string;
    embeddingDimensionStrategy: string;
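// With these keys declared in OptionDefinitions, option access stays typed end
// to end; a usage sketch (illustrative, not code from this commit):
//
//     const url: string = options.getOption('ollamaEmbeddingBaseUrl');
//     const auto: boolean = options.getOptionBool('embeddingAutoUpdateEnabled');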