	feat(llm): create endpoints for starting/stopping embeddings
This commit is contained in:
parent a084805762
commit 49e123f399
@@ -51,6 +51,35 @@ export default class AiSettingsWidget extends OptionsWidget {

             await this.updateOption(optionName, value);

+            // Special handling for aiEnabled option
+            if (optionName === 'aiEnabled') {
+                try {
+                    const isEnabled = value === 'true';
+
+                    if (isEnabled) {
+                        // Start embedding generation
+                        await server.post('llm/embeddings/start');
+                        toastService.showMessage(t("ai_llm.embeddings_started") || "Embedding generation started");
+
+                        // Start polling for stats updates
+                        this.refreshEmbeddingStats();
+                    } else {
+                        // Stop embedding generation
+                        await server.post('llm/embeddings/stop');
+                        toastService.showMessage(t("ai_llm.embeddings_stopped") || "Embedding generation stopped");
+
+                        // Clear any active polling intervals
+                        if (this.indexRebuildRefreshInterval) {
+                            clearInterval(this.indexRebuildRefreshInterval);
+                            this.indexRebuildRefreshInterval = null;
+                        }
+                    }
+                } catch (error) {
+                    console.error('Error toggling embeddings:', error);
+                    toastService.showError(t("ai_llm.embeddings_toggle_error") || "Error toggling embeddings");
+                }
+            }
+
             if (validateAfter) {
                 await this.displayValidationWarnings();
             }
@@ -782,6 +782,49 @@ async function getIndexRebuildStatus(req: Request, res: Response) {
     };
 }

+/**
+ * Start embedding generation when AI is enabled
+ */
+async function startEmbeddings(req: Request, res: Response) {
+    try {
+        log.info("Starting embedding generation system");
+
+        // Initialize the index service if not already initialized
+        await indexService.initialize();
+
+        // Start automatic indexing
+        await indexService.startEmbeddingGeneration();
+
+        return {
+            success: true,
+            message: "Embedding generation started"
+        };
+    } catch (error: any) {
+        log.error(`Error starting embeddings: ${error.message || 'Unknown error'}`);
+        throw new Error(`Failed to start embeddings: ${error.message || 'Unknown error'}`);
+    }
+}
+
+/**
+ * Stop embedding generation when AI is disabled
+ */
+async function stopEmbeddings(req: Request, res: Response) {
+    try {
+        log.info("Stopping embedding generation system");
+
+        // Stop automatic indexing
+        await indexService.stopEmbeddingGeneration();
+
+        return {
+            success: true,
+            message: "Embedding generation stopped"
+        };
+    } catch (error: any) {
+        log.error(`Error stopping embeddings: ${error.message || 'Unknown error'}`);
+        throw new Error(`Failed to stop embeddings: ${error.message || 'Unknown error'}`);
+    }
+}
+
 export default {
     findSimilarNotes,
     searchByText,
@@ -794,5 +837,7 @@ export default {
     retryFailedNote,
     retryAllFailedNotes,
     rebuildIndex,
-    getIndexRebuildStatus
+    getIndexRebuildStatus,
+    startEmbeddings,
+    stopEmbeddings
 };
@@ -400,6 +400,8 @@ function register(app: express.Application) {
     asyncApiRoute(PST, "/api/llm/embeddings/retry-all-failed", embeddingsRoute.retryAllFailedNotes);
     asyncApiRoute(PST, "/api/llm/embeddings/rebuild-index", embeddingsRoute.rebuildIndex);
     asyncApiRoute(GET, "/api/llm/embeddings/index-rebuild-status", embeddingsRoute.getIndexRebuildStatus);
+    asyncApiRoute(PST, "/api/llm/embeddings/start", embeddingsRoute.startEmbeddings);
+    asyncApiRoute(PST, "/api/llm/embeddings/stop", embeddingsRoute.stopEmbeddings);

     // LLM provider endpoints - moved under /api/llm/providers hierarchy
     asyncApiRoute(GET, "/api/llm/providers/ollama/models", ollamaRoute.listModels);
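For reference, a minimal sketch of how the two new routes could be called from outside the options widget (inside the client, `server.post('llm/embeddings/start')` already does this, as the first hunk shows). The base URL and whatever session/CSRF handling a given deployment needs are assumptions, not part of this commit:

// Hypothetical standalone caller for the new endpoints; BASE_URL and auth handling are assumptions.
const BASE_URL = "http://localhost:8080"; // assumed local instance

async function setEmbeddingGeneration(enabled: boolean): Promise<void> {
    const action = enabled ? "start" : "stop";
    const response = await fetch(`${BASE_URL}/api/llm/embeddings/${action}`, {
        method: "POST"
        // Session cookie / CSRF token handling omitted; depends on the deployment.
    });

    if (!response.ok) {
        throw new Error(`Toggling embeddings failed with HTTP ${response.status}`);
    }

    // The handlers above return { success, message } on success.
    const body = await response.json() as { success: boolean; message: string };
    console.log(body.message);
}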
@@ -605,6 +605,7 @@ export class AIServiceManager implements IAIServiceManager {
     private setupProviderChangeListener(): void {
         // List of AI-related options that should trigger service recreation
         const aiRelatedOptions = [
+            'aiEnabled',
             'aiSelectedProvider',
             'embeddingSelectedProvider',
             'openaiApiKey',
@@ -618,10 +619,29 @@ export class AIServiceManager implements IAIServiceManager {
             'voyageApiKey'
         ];

-        eventService.subscribe(['entityChanged'], ({ entityName, entity }) => {
+        eventService.subscribe(['entityChanged'], async ({ entityName, entity }) => {
             if (entityName === 'options' && entity && aiRelatedOptions.includes(entity.name)) {
                 log.info(`AI-related option '${entity.name}' changed, recreating LLM services`);
-                this.recreateServices();
+
+                // Special handling for aiEnabled toggle
+                if (entity.name === 'aiEnabled') {
+                    const isEnabled = entity.value === 'true';
+
+                    if (isEnabled) {
+                        log.info('AI features enabled, initializing AI service and embeddings');
+                        // Initialize the AI service
+                        await this.initialize();
+                        // Initialize embeddings through index service
+                        await indexService.startEmbeddingGeneration();
+                    } else {
+                        log.info('AI features disabled, stopping embeddings');
+                        // Stop embeddings through index service
+                        await indexService.stopEmbeddingGeneration();
+                    }
+                } else {
+                    // For other AI-related options, just recreate services
+                    this.recreateServices();
+                }
             }
         });
     }
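Condensed, the listener's new dispatch looks like the sketch below. The stub interfaces stand in for the real AIServiceManager and IndexService; this is only an illustration of the branching, not additional code from the commit:

// Illustration of the new dispatch in the async 'entityChanged' handler above.
// The interfaces are stubs; the real classes live elsewhere in the codebase.
interface EmbeddingIndexService {
    startEmbeddingGeneration(): Promise<void>;
    stopEmbeddingGeneration(): Promise<void>;
}
interface AiManager {
    initialize(): Promise<void>;
    recreateServices(): void;
}

async function onAiOptionChanged(
    manager: AiManager,
    indexService: EmbeddingIndexService,
    name: string,
    value: string
): Promise<void> {
    if (name === 'aiEnabled') {
        if (value === 'true') {
            await manager.initialize();                     // bring the AI service up first
            await indexService.startEmbeddingGeneration();  // then start embeddings
        } else {
            await indexService.stopEmbeddingGeneration();   // tear embeddings down
        }
    } else {
        manager.recreateServices();                         // other AI options only rebuild services
    }
}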
@@ -9,6 +9,9 @@ import becca from "../../../becca/becca.js";
 // Add mutex to prevent concurrent processing
 let isProcessingEmbeddings = false;

+// Store interval reference for cleanup
+let backgroundProcessingInterval: NodeJS.Timeout | null = null;
+
 /**
  * Setup event listeners for embedding-related events
  */
@@ -53,9 +56,15 @@ export function setupEmbeddingEventListeners() {
  * Setup background processing of the embedding queue
  */
 export async function setupEmbeddingBackgroundProcessing() {
+    // Clear any existing interval
+    if (backgroundProcessingInterval) {
+        clearInterval(backgroundProcessingInterval);
+        backgroundProcessingInterval = null;
+    }
+
     const interval = parseInt(await options.getOption('embeddingUpdateInterval') || '200', 10);

-    setInterval(async () => {
+    backgroundProcessingInterval = setInterval(async () => {
         try {
             // Skip if already processing
             if (isProcessingEmbeddings) {
@@ -78,6 +87,17 @@ export async function setupEmbeddingBackgroundProcessing() {
     }, interval);
 }

+/**
+ * Stop background processing of the embedding queue
+ */
+export function stopEmbeddingBackgroundProcessing() {
+    if (backgroundProcessingInterval) {
+        clearInterval(backgroundProcessingInterval);
+        backgroundProcessingInterval = null;
+        log.info("Embedding background processing stopped");
+    }
+}
+
 /**
  * Initialize embeddings system
  */
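The events.ts changes follow a standard start/stop pattern for a module-level timer: keep the setInterval handle, clear any previous one before starting, and expose a stop function. A generic, self-contained sketch of that pattern (names are illustrative, not the actual Trilium module):

// Generic sketch of the interval lifecycle used above; names are illustrative.
let timer: NodeJS.Timeout | null = null;

export function startPolling(task: () => Promise<void>, intervalMs: number): void {
    // Restart cleanly if polling is already running.
    if (timer) {
        clearInterval(timer);
        timer = null;
    }
    timer = setInterval(() => {
        // Catch per-tick errors so one failed run does not kill the loop.
        task().catch(err => console.error("poll tick failed:", err));
    }, intervalMs);
}

export function stopPolling(): void {
    if (timer) {
        clearInterval(timer);
        timer = null;
    }
}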
@@ -58,6 +58,7 @@ export const processNoteWithChunking = async (
 export const {
     setupEmbeddingEventListeners,
     setupEmbeddingBackgroundProcessing,
+    stopEmbeddingBackgroundProcessing,
     initEmbeddings
 } = events;

@@ -100,6 +101,7 @@ export default {
     // Event handling
     setupEmbeddingEventListeners: events.setupEmbeddingEventListeners,
     setupEmbeddingBackgroundProcessing: events.setupEmbeddingBackgroundProcessing,
+    stopEmbeddingBackgroundProcessing: events.stopEmbeddingBackgroundProcessing,
     initEmbeddings: events.initEmbeddings,

     // Stats and maintenance
@@ -837,6 +837,81 @@ export class IndexService {
             return false;
         }
     }
+
+    /**
+     * Start embedding generation (called when AI is enabled)
+     */
+    async startEmbeddingGeneration() {
+        try {
+            log.info("Starting embedding generation system");
+
+            // Re-initialize if needed
+            if (!this.initialized) {
+                await this.initialize();
+            }
+
+            const aiEnabled = options.getOptionOrNull('aiEnabled') === "true";
+            if (!aiEnabled) {
+                log.error("Cannot start embedding generation - AI features are disabled");
+                throw new Error("AI features must be enabled first");
+            }
+
+            // Check if this instance should process embeddings
+            const embeddingLocation = await options.getOption('embeddingGenerationLocation') || 'client';
+            const isSyncServer = await this.isSyncServerForEmbeddings();
+            const shouldProcessEmbeddings = embeddingLocation === 'client' || isSyncServer;
+
+            if (!shouldProcessEmbeddings) {
+                log.info("This instance is not configured to process embeddings");
+                return;
+            }
+
+            // Setup automatic indexing if enabled
+            if (await options.getOptionBool('embeddingAutoUpdateEnabled')) {
+                this.setupAutomaticIndexing();
+                log.info(`Automatic embedding indexing started ${isSyncServer ? 'as sync server' : 'as client'}`);
+            }
+
+            // Re-initialize event listeners
+            this.setupEventListeners();
+
+            // Start processing the queue immediately
+            await this.runBatchIndexing(20);
+
+            log.info("Embedding generation started successfully");
+        } catch (error: any) {
+            log.error(`Error starting embedding generation: ${error.message || "Unknown error"}`);
+            throw error;
+        }
+    }
+
+    /**
+     * Stop embedding generation (called when AI is disabled)
+     */
+    async stopEmbeddingGeneration() {
+        try {
+            log.info("Stopping embedding generation system");
+
+            // Clear automatic indexing interval
+            if (this.automaticIndexingInterval) {
+                clearInterval(this.automaticIndexingInterval);
+                this.automaticIndexingInterval = undefined;
+                log.info("Automatic indexing stopped");
+            }
+
+            // Stop the background processing from embeddings/events.ts
+            vectorStore.stopEmbeddingBackgroundProcessing();
+
+            // Mark as not indexing
+            this.indexingInProgress = false;
+            this.indexRebuildInProgress = false;
+
+            log.info("Embedding generation stopped successfully");
+        } catch (error: any) {
+            log.error(`Error stopping embedding generation: ${error.message || "Unknown error"}`);
+            throw error;
+        }
+    }
 }

 // Create singleton instance
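Taken together, the commit wires two paths onto the same pair of IndexService methods. The summary below is an interpretation of the hunks above, not extra code from the commit:

// Summary of the toggle flow introduced by this commit (illustrative).
//
// Path 1: options widget
//   aiEnabled checkbox -> server.post('llm/embeddings/start' | 'llm/embeddings/stop')
//     -> startEmbeddings() / stopEmbeddings() route handlers
//     -> indexService.startEmbeddingGeneration() / stopEmbeddingGeneration()
//
// Path 2: option change event
//   'entityChanged' for options with entity.name === 'aiEnabled'
//     -> AIServiceManager listener
//     -> indexService.startEmbeddingGeneration() / stopEmbeddingGeneration()
//
// Stopping clears the IndexService's automatic indexing interval, calls
// vectorStore.stopEmbeddingBackgroundProcessing(), and resets the in-progress flags.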
perf3ct