mirror of https://github.com/TriliumNext/Notes.git (synced 2025-10-31 13:01:31 +08:00)

set up agentic thinking

commit f6afb1d963
parent 1a8ce967d9

361  src/services/llm/agent_tools/contextual_thinking_tool.ts  (Normal file)
| @@ -0,0 +1,361 @@ | ||||
| /** | ||||
|  * Contextual Thinking Tool | ||||
|  * | ||||
|  * Provides a way for the LLM agent to expose its reasoning process to the user, | ||||
|  * showing how it explores knowledge and reaches conclusions. This makes the | ||||
|  * agent's thinking more transparent and allows users to understand the context | ||||
|  * behind answers. | ||||
|  * | ||||
|  * Features: | ||||
|  * - Capture and structure the agent's thinking steps | ||||
|  * - Visualize reasoning chains for complex queries | ||||
|  * - Expose confidence levels for different assertions | ||||
|  * - Show how different sources of evidence are weighed | ||||
|  */ | ||||
| 
 | ||||
| import log from '../../log.js'; | ||||
| 
 | ||||
| /** | ||||
|  * Represents a single reasoning step taken by the agent | ||||
|  */ | ||||
| export interface ThinkingStep { | ||||
|   id: string; | ||||
|   content: string; | ||||
|   type: 'observation' | 'hypothesis' | 'question' | 'evidence' | 'conclusion'; | ||||
|   confidence?: number; | ||||
|   sources?: string[]; | ||||
|   parentId?: string; | ||||
|   children?: string[]; | ||||
|   metadata?: Record<string, any>; | ||||
| } | ||||
| 
 | ||||
| /** | ||||
|  * Contains the full reasoning process | ||||
|  */ | ||||
| export interface ThinkingProcess { | ||||
|   id: string; | ||||
|   query: string; | ||||
|   steps: ThinkingStep[]; | ||||
|   status: 'in_progress' | 'completed'; | ||||
|   startTime: number; | ||||
|   endTime?: number; | ||||
| } | ||||
| 
 | ||||
| export class ContextualThinkingTool { | ||||
|   private static thinkingCounter = 0; | ||||
|   private static stepCounter = 0; | ||||
|   private activeProcId?: string; | ||||
|   private processes: Record<string, ThinkingProcess> = {}; | ||||
| 
 | ||||
|   /** | ||||
|    * Start a new thinking process for a given query | ||||
|    * | ||||
|    * @param query The user query that initiated the thinking process | ||||
|    * @returns The ID of the new thinking process | ||||
|    */ | ||||
|   startThinking(query: string): string { | ||||
|     const id = this.generateProcessId(); | ||||
| 
 | ||||
|     this.processes[id] = { | ||||
|       id, | ||||
|       query, | ||||
|       steps: [], | ||||
|       status: 'in_progress', | ||||
|       startTime: Date.now() | ||||
|     }; | ||||
| 
 | ||||
|     this.activeProcId = id; | ||||
|     return id; | ||||
|   } | ||||
| 
 | ||||
|   /** | ||||
|    * Add a thinking step to the current active process | ||||
|    * | ||||
|    * @param content The content of the thinking step | ||||
|    * @param type The type of thinking step | ||||
|    * @param options Additional options for the step | ||||
|    * @returns The ID of the new step | ||||
|    */ | ||||
|   addThinkingStep( | ||||
|     content: string, | ||||
|     type: ThinkingStep['type'], | ||||
|     options: { | ||||
|       confidence?: number; | ||||
|       sources?: string[]; | ||||
|       parentId?: string; | ||||
|       metadata?: Record<string, any>; | ||||
|     } = {} | ||||
|   ): string | null { | ||||
|     if (!this.activeProcId || !this.processes[this.activeProcId]) { | ||||
|       log.error("No active thinking process to add step to"); | ||||
|       return null; | ||||
|     } | ||||
| 
 | ||||
|     const stepId = this.generateStepId(); | ||||
|     const step: ThinkingStep = { | ||||
|       id: stepId, | ||||
|       content, | ||||
|       type, | ||||
|       ...options | ||||
|     }; | ||||
| 
 | ||||
|     // Add to parent's children if a parent is specified
 | ||||
|     if (options.parentId) { | ||||
|       const parentIdx = this.processes[this.activeProcId].steps.findIndex( | ||||
|         s => s.id === options.parentId | ||||
|       ); | ||||
| 
 | ||||
|       if (parentIdx >= 0) { | ||||
|         const parent = this.processes[this.activeProcId].steps[parentIdx]; | ||||
|         if (!parent.children) { | ||||
|           parent.children = []; | ||||
|         } | ||||
|         parent.children.push(stepId); | ||||
|         this.processes[this.activeProcId].steps[parentIdx] = parent; | ||||
|       } | ||||
|     } | ||||
| 
 | ||||
|     this.processes[this.activeProcId].steps.push(step); | ||||
|     return stepId; | ||||
|   } | ||||
| 
 | ||||
|   /** | ||||
|    * Complete the current thinking process | ||||
|    * | ||||
|    * @param processId The ID of the process to complete (defaults to active process) | ||||
|    * @returns The completed thinking process | ||||
|    */ | ||||
|   completeThinking(processId?: string): ThinkingProcess | null { | ||||
|     const id = processId || this.activeProcId; | ||||
| 
 | ||||
|     if (!id || !this.processes[id]) { | ||||
|       log.error(`Thinking process ${id} not found`); | ||||
|       return null; | ||||
|     } | ||||
| 
 | ||||
|     this.processes[id].status = 'completed'; | ||||
|     this.processes[id].endTime = Date.now(); | ||||
| 
 | ||||
|     if (id === this.activeProcId) { | ||||
|       this.activeProcId = undefined; | ||||
|     } | ||||
| 
 | ||||
|     return this.processes[id]; | ||||
|   } | ||||
| 
 | ||||
|   /** | ||||
|    * Get a thinking process by ID | ||||
|    */ | ||||
|   getThinkingProcess(processId: string): ThinkingProcess | null { | ||||
|     return this.processes[processId] || null; | ||||
|   } | ||||
| 
 | ||||
|   /** | ||||
|    * Get the active thinking process | ||||
|    */ | ||||
|   getActiveThinkingProcess(): ThinkingProcess | null { | ||||
|     if (!this.activeProcId) return null; | ||||
|     return this.processes[this.activeProcId] || null; | ||||
|   } | ||||
| 
 | ||||
|   /** | ||||
|    * Generate a user-friendly HTML representation of the thinking process | ||||
|    * | ||||
|    * @param processId The ID of the process to visualize | ||||
|    * @returns HTML string representing the thinking process | ||||
|    */ | ||||
|   visualizeThinking(processId: string): string { | ||||
|     const process = this.getThinkingProcess(processId); | ||||
|     if (!process) { | ||||
|       return `<div class="thinking-error">Thinking process ${processId} not found</div>`; | ||||
|     } | ||||
| 
 | ||||
|     let html = ` | ||||
|       <div class="thinking-process"> | ||||
|         <div class="thinking-header"> | ||||
|           <h3>Thinking Process for: "${process.query}"</h3> | ||||
|           <div class="thinking-metadata"> | ||||
|             <span>Status: ${process.status}</span> | ||||
|             <span>Steps: ${process.steps.length}</span> | ||||
|             <span>Time: ${this.formatDuration(process.startTime, process.endTime || Date.now())}</span> | ||||
|           </div> | ||||
|         </div> | ||||
|         <div class="thinking-steps"> | ||||
|     `;
 | ||||
| 
 | ||||
|     // Find root steps (those without parents)
 | ||||
|     const rootSteps = process.steps.filter(step => !step.parentId); | ||||
| 
 | ||||
|     // Recursively render the thinking tree
 | ||||
|     for (const rootStep of rootSteps) { | ||||
|       html += this.renderStepTree(rootStep, process.steps); | ||||
|     } | ||||
| 
 | ||||
|     html += ` | ||||
|         </div> | ||||
|       </div> | ||||
|     `;
 | ||||
| 
 | ||||
|     return html; | ||||
|   } | ||||
| 
 | ||||
|   /** | ||||
|    * Generate a concise text representation of the thinking process | ||||
|    * that can be displayed inline in the chat for transparency | ||||
|    * | ||||
|    * @param processId The ID of the process to summarize | ||||
|    * @returns Text summary of the reasoning process | ||||
|    */ | ||||
|   getThinkingSummary(processId?: string): string { | ||||
|     const id = processId || this.activeProcId; | ||||
|     if (!id || !this.processes[id]) { | ||||
|       return "No thinking process available."; | ||||
|     } | ||||
| 
 | ||||
|     const process = this.processes[id]; | ||||
|     let summary = `Thinking about: "${process.query}"\n\n`; | ||||
| 
 | ||||
|     // Group steps by type
 | ||||
|     const stepsByType: Record<string, ThinkingStep[]> = {}; | ||||
|     for (const step of process.steps) { | ||||
|       if (!stepsByType[step.type]) { | ||||
|         stepsByType[step.type] = []; | ||||
|       } | ||||
|       stepsByType[step.type].push(step); | ||||
|     } | ||||
| 
 | ||||
|     // Show observations first
 | ||||
|     if (stepsByType['observation'] && stepsByType['observation'].length > 0) { | ||||
|       summary += "🔍 Observations:\n"; | ||||
|       for (const step of stepsByType['observation'].slice(0, 3)) { | ||||
|         summary += `- ${step.content}\n`; | ||||
|       } | ||||
|       if (stepsByType['observation'].length > 3) { | ||||
|         summary += `- ...and ${stepsByType['observation'].length - 3} more observations\n`; | ||||
|       } | ||||
|       summary += "\n"; | ||||
|     } | ||||
| 
 | ||||
|     // Show questions the agent asked itself
 | ||||
|     if (stepsByType['question'] && stepsByType['question'].length > 0) { | ||||
|       summary += "❓ Questions considered:\n"; | ||||
|       for (const step of stepsByType['question'].slice(0, 3)) { | ||||
|         summary += `- ${step.content}\n`; | ||||
|       } | ||||
|       if (stepsByType['question'].length > 3) { | ||||
|         summary += `- ...and ${stepsByType['question'].length - 3} more questions\n`; | ||||
|       } | ||||
|       summary += "\n"; | ||||
|     } | ||||
| 
 | ||||
|     // Show evidence
 | ||||
|     if (stepsByType['evidence'] && stepsByType['evidence'].length > 0) { | ||||
|       summary += "📋 Evidence found:\n"; | ||||
|       for (const step of stepsByType['evidence'].slice(0, 3)) { | ||||
|         summary += `- ${step.content}\n`; | ||||
|       } | ||||
|       if (stepsByType['evidence'].length > 3) { | ||||
|         summary += `- ...and ${stepsByType['evidence'].length - 3} more pieces of evidence\n`; | ||||
|       } | ||||
|       summary += "\n"; | ||||
|     } | ||||
| 
 | ||||
|     // Show conclusions
 | ||||
|     if (stepsByType['conclusion'] && stepsByType['conclusion'].length > 0) { | ||||
|       summary += "✅ Conclusions:\n"; | ||||
|       for (const step of stepsByType['conclusion']) { | ||||
|         const confidence = step.confidence ? ` (${Math.round(step.confidence * 100)}% confidence)` : ''; | ||||
|         summary += `- ${step.content}${confidence}\n`; | ||||
|       } | ||||
|     } | ||||
| 
 | ||||
|     return summary; | ||||
|   } | ||||
| 
 | ||||
|   /** | ||||
|    * Reset the active thinking process | ||||
|    */ | ||||
|   resetActiveThinking(): void { | ||||
|     this.activeProcId = undefined; | ||||
|   } | ||||
| 
 | ||||
|   /** | ||||
|    * Generate a unique ID for a thinking process | ||||
|    */ | ||||
|   private generateProcessId(): string { | ||||
|     return `thinking_${Date.now()}_${ContextualThinkingTool.thinkingCounter++}`; | ||||
|   } | ||||
| 
 | ||||
|   /** | ||||
|    * Generate a unique ID for a thinking step | ||||
|    */ | ||||
|   private generateStepId(): string { | ||||
|     return `step_${Date.now()}_${ContextualThinkingTool.stepCounter++}`; | ||||
|   } | ||||
| 
 | ||||
|   /** | ||||
|    * Format duration between two timestamps | ||||
|    */ | ||||
|   private formatDuration(start: number, end: number): string { | ||||
|     const durationMs = end - start; | ||||
|     if (durationMs < 1000) { | ||||
|       return `${durationMs}ms`; | ||||
|     } else if (durationMs < 60000) { | ||||
|       return `${Math.round(durationMs / 1000)}s`; | ||||
|     } else { | ||||
|       return `${Math.round(durationMs / 60000)}m ${Math.round((durationMs % 60000) / 1000)}s`; | ||||
|     } | ||||
|   } | ||||
| 
 | ||||
|   /** | ||||
|    * Recursively render a step and its children | ||||
|    */ | ||||
|   private renderStepTree(step: ThinkingStep, allSteps: ThinkingStep[]): string { | ||||
|     const typeIcons: Record<string, string> = { | ||||
|       'observation': '🔍', | ||||
|       'hypothesis': '🤔', | ||||
|       'question': '❓', | ||||
|       'evidence': '📋', | ||||
|       'conclusion': '✅' | ||||
|     }; | ||||
| 
 | ||||
|     const icon = typeIcons[step.type] || '•'; | ||||
|     const confidenceDisplay = step.confidence !== undefined | ||||
|       ? `<span class="confidence">${Math.round(step.confidence * 100)}%</span>` | ||||
|       : ''; | ||||
| 
 | ||||
|     let html = ` | ||||
|       <div class="thinking-step thinking-${step.type}"> | ||||
|         <div class="step-header"> | ||||
|           <span class="step-icon">${icon}</span> | ||||
|           <span class="step-type">${step.type}</span> | ||||
|           ${confidenceDisplay} | ||||
|         </div> | ||||
|         <div class="step-content">${step.content}</div> | ||||
|     `;
 | ||||
| 
 | ||||
|     // Add sources if available
 | ||||
|     if (step.sources && step.sources.length > 0) { | ||||
|       html += `<div class="step-sources">Sources: ${step.sources.join(', ')}</div>`; | ||||
|     } | ||||
| 
 | ||||
|     // Recursively render children
 | ||||
|     if (step.children && step.children.length > 0) { | ||||
|       html += `<div class="step-children">`; | ||||
| 
 | ||||
|       for (const childId of step.children) { | ||||
|         const childStep = allSteps.find(s => s.id === childId); | ||||
|         if (childStep) { | ||||
|           html += this.renderStepTree(childStep, allSteps); | ||||
|         } | ||||
|       } | ||||
| 
 | ||||
|       html += `</div>`; | ||||
|     } | ||||
| 
 | ||||
|     html += `</div>`; | ||||
|     return html; | ||||
|   } | ||||
| } | ||||
| 
 | ||||
| export default ContextualThinkingTool; | ||||
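A minimal usage sketch of the tool above, driving it directly from agent code; the query, step contents, and the note ID in sources are illustrative placeholders, not values from this commit:

import ContextualThinkingTool from './contextual_thinking_tool.js';

const thinking = new ContextualThinkingTool();

// Open a reasoning process for the incoming user query
const processId = thinking.startThinking('How are my project notes organized?');

// Record steps; a child step points at its parent via parentId
const obsId = thinking.addThinkingStep(
  "The vault has a 'Projects' subtree with 12 children.",
  'observation',
  { confidence: 0.9 }
);
if (obsId) {
  thinking.addThinkingStep(
    'Most project notes carry a status attribute.',
    'evidence',
    { parentId: obsId, sources: ['note_abc123'] } // hypothetical note ID
  );
}
thinking.addThinkingStep('Projects are grouped by status.', 'conclusion', { confidence: 0.8 });

// Close the process, then render it for the user
thinking.completeThinking(processId);
console.log(thinking.getThinkingSummary(processId)); // plain-text summary for the chat
const html = thinking.visualizeThinking(processId);  // HTML tree for the UI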
							
								
								
									
130  src/services/llm/agent_tools/index.ts  (Normal file)
| @@ -0,0 +1,130 @@ | ||||
| /** | ||||
|  * Agent Tools Index | ||||
|  * | ||||
|  * This file exports all available agent tools for use by the LLM. | ||||
|  * Tools are prioritized in order of importance/impact. | ||||
|  */ | ||||
| 
 | ||||
| import { VectorSearchTool } from './vector_search_tool.js'; | ||||
| import { NoteNavigatorTool } from './note_navigator_tool.js'; | ||||
| import { QueryDecompositionTool } from './query_decomposition_tool.js'; | ||||
| import { ContextualThinkingTool } from './contextual_thinking_tool.js'; | ||||
| 
 | ||||
| // Import services needed for initialization
 | ||||
| import SemanticContextService from '../semantic_context_service.js'; | ||||
| import aiServiceManager from '../ai_service_manager.js'; | ||||
| import log from '../../log.js'; | ||||
| 
 | ||||
| /** | ||||
|  * Manages all agent tools and provides a unified interface for the LLM agent | ||||
|  */ | ||||
| export class AgentToolsManager { | ||||
|   private vectorSearchTool: VectorSearchTool | null = null; | ||||
|   private noteNavigatorTool: NoteNavigatorTool | null = null; | ||||
|   private queryDecompositionTool: QueryDecompositionTool | null = null; | ||||
|   private contextualThinkingTool: ContextualThinkingTool | null = null; | ||||
|   private initialized = false; | ||||
| 
 | ||||
|   constructor() { | ||||
|     // Initialize tools only when requested to avoid circular dependencies
 | ||||
|   } | ||||
| 
 | ||||
|   async initialize(aiServiceManager: any): Promise<void> { | ||||
|     try { | ||||
|       if (this.initialized) { | ||||
|         return; | ||||
|       } | ||||
| 
 | ||||
|       log.info("Initializing LLM agent tools..."); | ||||
| 
 | ||||
|       // Create tools
 | ||||
|       this.vectorSearchTool = new VectorSearchTool(); | ||||
|       this.noteNavigatorTool = new NoteNavigatorTool(); | ||||
|       this.queryDecompositionTool = new QueryDecompositionTool(); | ||||
|       this.contextualThinkingTool = new ContextualThinkingTool(); | ||||
| 
 | ||||
|       // Get semantic context service and set it in the vector search tool
 | ||||
|       const semanticContext = aiServiceManager.getSemanticContextService(); | ||||
|       this.vectorSearchTool.setSemanticContext(semanticContext); | ||||
| 
 | ||||
|       this.initialized = true; | ||||
|       log.info("LLM agent tools initialized successfully"); | ||||
|     } catch (error: any) { | ||||
|       log.error(`Failed to initialize LLM agent tools: ${error.message}`); | ||||
|       throw new Error(`Agent tools initialization failed: ${error.message}`); | ||||
|     } | ||||
|   } | ||||
| 
 | ||||
|   isInitialized(): boolean { | ||||
|     return this.initialized; | ||||
|   } | ||||
| 
 | ||||
|   /** | ||||
|    * Get all available agent tools | ||||
|    * @returns Object containing all initialized tools | ||||
|    */ | ||||
|   getAllTools() { | ||||
|     if (!this.initialized) { | ||||
|       throw new Error("Agent tools not initialized. Call initialize() first."); | ||||
|     } | ||||
| 
 | ||||
|     return { | ||||
|       vectorSearch: this.vectorSearchTool, | ||||
|       noteNavigator: this.noteNavigatorTool, | ||||
|       queryDecomposition: this.queryDecompositionTool, | ||||
|       contextualThinking: this.contextualThinkingTool | ||||
|     }; | ||||
|   } | ||||
| 
 | ||||
|   /** | ||||
|    * Get the vector search tool | ||||
|    */ | ||||
|   getVectorSearchTool(): VectorSearchTool { | ||||
|     if (!this.initialized || !this.vectorSearchTool) { | ||||
|       throw new Error("Vector search tool not initialized"); | ||||
|     } | ||||
|     return this.vectorSearchTool; | ||||
|   } | ||||
| 
 | ||||
|   /** | ||||
|    * Get the note structure navigator tool | ||||
|    */ | ||||
|   getNoteNavigatorTool(): NoteNavigatorTool { | ||||
|     if (!this.initialized || !this.noteNavigatorTool) { | ||||
|       throw new Error("Note navigator tool not initialized"); | ||||
|     } | ||||
|     return this.noteNavigatorTool; | ||||
|   } | ||||
| 
 | ||||
|   /** | ||||
|    * Get the query decomposition tool | ||||
|    */ | ||||
|   getQueryDecompositionTool(): QueryDecompositionTool { | ||||
|     if (!this.initialized || !this.queryDecompositionTool) { | ||||
|       throw new Error("Query decomposition tool not initialized"); | ||||
|     } | ||||
|     return this.queryDecompositionTool; | ||||
|   } | ||||
| 
 | ||||
|   /** | ||||
|    * Get the contextual thinking tool | ||||
|    */ | ||||
|   getContextualThinkingTool(): ContextualThinkingTool { | ||||
|     if (!this.initialized || !this.contextualThinkingTool) { | ||||
|       throw new Error("Contextual thinking tool not initialized"); | ||||
|     } | ||||
|     return this.contextualThinkingTool; | ||||
|   } | ||||
| } | ||||
| 
 | ||||
| // Export a singleton instance
 | ||||
| const agentTools = new AgentToolsManager(); | ||||
| export default agentTools; | ||||
| 
 | ||||
| // Also export individual tool classes for direct use if needed
 | ||||
| export { | ||||
|   VectorSearchTool, | ||||
|   NoteNavigatorTool, | ||||
|   QueryDecompositionTool, | ||||
|   ContextualThinkingTool | ||||
| }; | ||||
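A sketch of how the singleton manager above might be wired up at startup. The call site and import paths are assumptions; only initialize() and the getters defined in this file are taken from the commit:

import agentTools from './agent_tools/index.js';
import aiServiceManager from './ai_service_manager.js';

async function setUpAgentTools(): Promise<void> {
  // initialize() creates the four tools and hands the semantic context
  // service from the AI service manager to the vector search tool
  await agentTools.initialize(aiServiceManager);

  // Grab everything at once...
  const { vectorSearch, noteNavigator, queryDecomposition, contextualThinking } = agentTools.getAllTools();

  // ...or use the individual accessors, which throw before initialization
  const navigator = agentTools.getNoteNavigatorTool();
  console.log(navigator.getNoteContextDescription('root')); // 'root' is Trilium's top-level note ID
}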
							
								
								
									
463  src/services/llm/agent_tools/note_navigator_tool.ts  (Normal file)
| @@ -0,0 +1,463 @@ | ||||
| /** | ||||
|  * Note Structure Navigator Tool | ||||
|  * | ||||
|  * This tool enables the LLM agent to navigate through the hierarchical | ||||
|  * structure of notes in the knowledge base. It provides methods for: | ||||
|  * - Finding paths between notes | ||||
|  * - Exploring parent-child relationships | ||||
|  * - Discovering note attributes and metadata | ||||
|  * - Understanding the context of a note within the broader structure | ||||
|  * | ||||
|  * This helps the LLM agent provide more accurate and contextually relevant responses. | ||||
|  */ | ||||
| 
 | ||||
| import becca from '../../../becca/becca.js'; | ||||
| import log from '../../log.js'; | ||||
| import type BNote from '../../../becca/entities/bnote.js'; | ||||
| import type BAttribute from '../../../becca/entities/battribute.js'; | ||||
| 
 | ||||
| export interface NoteInfo { | ||||
|   noteId: string; | ||||
|   title: string; | ||||
|   type: string; | ||||
|   mime?: string; | ||||
|   dateCreated?: string; | ||||
|   dateModified?: string; | ||||
|   isProtected: boolean; | ||||
|   isArchived: boolean; | ||||
|   attributeNames: string[]; | ||||
|   hasChildren: boolean; | ||||
| } | ||||
| 
 | ||||
| export interface NotePathInfo { | ||||
|   notePath: string[]; | ||||
|   notePathTitles: string[]; | ||||
| } | ||||
| 
 | ||||
| export interface NoteHierarchyLevel { | ||||
|   noteId: string; | ||||
|   title: string; | ||||
|   level: number; | ||||
|   children?: NoteHierarchyLevel[]; | ||||
| } | ||||
| 
 | ||||
| export class NoteNavigatorTool { | ||||
|   private maxPathLength: number = 20; | ||||
|   private maxBreadth: number = 100; | ||||
|   private maxDepth: number = 5; | ||||
| 
 | ||||
|   /** | ||||
|    * Get detailed information about a note | ||||
|    */ | ||||
|   getNoteInfo(noteId: string): NoteInfo | null { | ||||
|     try { | ||||
|       const note = becca.notes[noteId]; | ||||
|       if (!note) { | ||||
|         return null; | ||||
|       } | ||||
| 
 | ||||
|       // Get attribute names for this note
 | ||||
|       const attributeNames = note.ownedAttributes | ||||
|         .map(attr => attr.name) | ||||
|         .filter((value, index, self) => self.indexOf(value) === index); // unique values
 | ||||
| 
 | ||||
|       return { | ||||
|         noteId: note.noteId, | ||||
|         title: note.title, | ||||
|         type: note.type, | ||||
|         mime: note.mime, | ||||
|         dateCreated: note.dateCreated, | ||||
|         dateModified: note.dateModified, | ||||
|         isProtected: note.isProtected ?? false, | ||||
|         isArchived: note.isArchived || false, | ||||
|         attributeNames, | ||||
|         hasChildren: note.children.length > 0 | ||||
|       }; | ||||
|     } catch (error: any) { | ||||
|       log.error(`Error getting note info: ${error.message}`); | ||||
|       return null; | ||||
|     } | ||||
|   } | ||||
| 
 | ||||
|   /** | ||||
|    * Get all paths to a note from the root | ||||
|    */ | ||||
|   getNotePathsFromRoot(noteId: string): NotePathInfo[] { | ||||
|     try { | ||||
|       const note = becca.notes[noteId]; | ||||
|       if (!note) { | ||||
|         return []; | ||||
|       } | ||||
| 
 | ||||
|       // Get all possible paths to this note
 | ||||
|       const allPaths = note.getAllNotePaths(); | ||||
|       if (!allPaths || allPaths.length === 0) { | ||||
|         return []; | ||||
|       } | ||||
| 
 | ||||
|       // Convert path IDs to titles
 | ||||
|       return allPaths.map(path => { | ||||
|         const titles = path.map(id => { | ||||
|           const pathNote = becca.notes[id]; | ||||
|           return pathNote ? pathNote.title : id; | ||||
|         }); | ||||
| 
 | ||||
|         return { | ||||
|           notePath: path, | ||||
|           notePathTitles: titles | ||||
|         }; | ||||
|       }).sort((a, b) => a.notePath.length - b.notePath.length); // Sort by path length, shortest first
 | ||||
|     } catch (error: any) { | ||||
|       log.error(`Error getting note paths: ${error.message}`); | ||||
|       return []; | ||||
|     } | ||||
|   } | ||||
| 
 | ||||
|   /** | ||||
|    * Get the parent notes of a given note | ||||
|    */ | ||||
|   getParentNotes(noteId: string): NoteInfo[] { | ||||
|     try { | ||||
|       const note = becca.notes[noteId]; | ||||
|       if (!note || !note.parents) { | ||||
|         return []; | ||||
|       } | ||||
| 
 | ||||
|       return note.parents | ||||
|         .map(parent => this.getNoteInfo(parent.noteId)) | ||||
|         .filter((info): info is NoteInfo => info !== null); | ||||
|     } catch (error: any) { | ||||
|       log.error(`Error getting parent notes: ${error.message}`); | ||||
|       return []; | ||||
|     } | ||||
|   } | ||||
| 
 | ||||
|   /** | ||||
|    * Get the children notes of a given note | ||||
|    */ | ||||
|   getChildNotes(noteId: string, maxChildren: number = this.maxBreadth): NoteInfo[] { | ||||
|     try { | ||||
|       const note = becca.notes[noteId]; | ||||
|       if (!note || !note.children) { | ||||
|         return []; | ||||
|       } | ||||
| 
 | ||||
|       return note.children | ||||
|         .slice(0, maxChildren) | ||||
|         .map(child => this.getNoteInfo(child.noteId)) | ||||
|         .filter((info): info is NoteInfo => info !== null); | ||||
|     } catch (error: any) { | ||||
|       log.error(`Error getting child notes: ${error.message}`); | ||||
|       return []; | ||||
|     } | ||||
|   } | ||||
| 
 | ||||
|   /** | ||||
|    * Get a note's hierarchy (children up to specified depth) | ||||
|    * This is useful for the LLM to understand the structure within a note's subtree | ||||
|    */ | ||||
|   getNoteHierarchy(noteId: string, depth: number = 2): NoteHierarchyLevel | null { | ||||
|     if (depth < 0 || depth > this.maxDepth) { | ||||
|       depth = this.maxDepth; | ||||
|     } | ||||
| 
 | ||||
|     try { | ||||
|       const note = becca.notes[noteId]; | ||||
|       if (!note) { | ||||
|         return null; | ||||
|       } | ||||
| 
 | ||||
|       const result: NoteHierarchyLevel = { | ||||
|         noteId: note.noteId, | ||||
|         title: note.title, | ||||
|         level: 0 | ||||
|       }; | ||||
| 
 | ||||
|       // Recursively get children if depth allows
 | ||||
|       if (depth > 0 && note.children.length > 0) { | ||||
|         result.children = note.children | ||||
|           .slice(0, this.maxBreadth) | ||||
|           .map(child => this._getHierarchyLevel(child.noteId, 1, depth)) | ||||
|           .filter((node): node is NoteHierarchyLevel => node !== null); | ||||
|       } | ||||
| 
 | ||||
|       return result; | ||||
|     } catch (error: any) { | ||||
|       log.error(`Error getting note hierarchy: ${error.message}`); | ||||
|       return null; | ||||
|     } | ||||
|   } | ||||
| 
 | ||||
|   /** | ||||
|    * Recursive helper for getNoteHierarchy | ||||
|    */ | ||||
|   private _getHierarchyLevel(noteId: string, currentLevel: number, maxDepth: number): NoteHierarchyLevel | null { | ||||
|     try { | ||||
|       const note = becca.notes[noteId]; | ||||
|       if (!note) { | ||||
|         return null; | ||||
|       } | ||||
| 
 | ||||
|       const result: NoteHierarchyLevel = { | ||||
|         noteId: note.noteId, | ||||
|         title: note.title, | ||||
|         level: currentLevel | ||||
|       }; | ||||
| 
 | ||||
|       // Recursively get children if depth allows
 | ||||
|       if (currentLevel < maxDepth && note.children.length > 0) { | ||||
|         result.children = note.children | ||||
|           .slice(0, this.maxBreadth) | ||||
|           .map(child => this._getHierarchyLevel(child.noteId, currentLevel + 1, maxDepth)) | ||||
|           .filter((node): node is NoteHierarchyLevel => node !== null); | ||||
|       } | ||||
| 
 | ||||
|       return result; | ||||
|     } catch (error) { | ||||
|       return null; | ||||
|     } | ||||
|   } | ||||
| 
 | ||||
|   /** | ||||
|    * Get attributes of a note | ||||
|    */ | ||||
|   getNoteAttributes(noteId: string): BAttribute[] { | ||||
|     try { | ||||
|       const note = becca.notes[noteId]; | ||||
|       if (!note) { | ||||
|         return []; | ||||
|       } | ||||
| 
 | ||||
|       return note.ownedAttributes; | ||||
|     } catch (error: any) { | ||||
|       log.error(`Error getting note attributes: ${error.message}`); | ||||
|       return []; | ||||
|     } | ||||
|   } | ||||
| 
 | ||||
|   /** | ||||
|    * Find the shortest path between two notes | ||||
|    */ | ||||
|   findPathBetweenNotes(fromNoteId: string, toNoteId: string): NotePathInfo | null { | ||||
|     try { | ||||
|       if (fromNoteId === toNoteId) { | ||||
|         const note = becca.notes[fromNoteId]; | ||||
|         if (!note) return null; | ||||
| 
 | ||||
|         return { | ||||
|           notePath: [fromNoteId], | ||||
|           notePathTitles: [note.title] | ||||
|         }; | ||||
|       } | ||||
| 
 | ||||
|       // Simple breadth-first search to find shortest path
 | ||||
|       const visited = new Set<string>(); | ||||
|       const queue: Array<{noteId: string, path: string[], titles: string[]}> = []; | ||||
| 
 | ||||
|       // Initialize with the starting note
 | ||||
|       const startNote = becca.notes[fromNoteId]; | ||||
|       if (!startNote) return null; | ||||
| 
 | ||||
|       queue.push({ | ||||
|         noteId: fromNoteId, | ||||
|         path: [fromNoteId], | ||||
|         titles: [startNote.title] | ||||
|       }); | ||||
| 
 | ||||
|       visited.add(fromNoteId); | ||||
| 
 | ||||
|       while (queue.length > 0 && queue[0].path.length <= this.maxPathLength) { | ||||
|         const {noteId, path, titles} = queue.shift()!; | ||||
|         const note = becca.notes[noteId]; | ||||
| 
 | ||||
|         if (!note) continue; | ||||
| 
 | ||||
|         // Get IDs of all connected notes (parents and children)
 | ||||
|         const connections: string[] = [ | ||||
|           ...note.parents.map(p => p.noteId), | ||||
|           ...note.children.map(c => c.noteId) | ||||
|         ]; | ||||
| 
 | ||||
|         for (const connectedId of connections) { | ||||
|           if (visited.has(connectedId)) continue; | ||||
| 
 | ||||
|           const connectedNote = becca.notes[connectedId]; | ||||
|           if (!connectedNote) continue; | ||||
| 
 | ||||
|           const newPath = [...path, connectedId]; | ||||
|           const newTitles = [...titles, connectedNote.title]; | ||||
| 
 | ||||
|           // Check if we found the target
 | ||||
|           if (connectedId === toNoteId) { | ||||
|             return { | ||||
|               notePath: newPath, | ||||
|               notePathTitles: newTitles | ||||
|             }; | ||||
|           } | ||||
| 
 | ||||
|           // Continue BFS
 | ||||
|           queue.push({ | ||||
|             noteId: connectedId, | ||||
|             path: newPath, | ||||
|             titles: newTitles | ||||
|           }); | ||||
| 
 | ||||
|           visited.add(connectedId); | ||||
|         } | ||||
|       } | ||||
| 
 | ||||
|       // No path found
 | ||||
|       return null; | ||||
|     } catch (error: any) { | ||||
|       log.error(`Error finding path between notes: ${error.message}`); | ||||
|       return null; | ||||
|     } | ||||
|   } | ||||
| 
 | ||||
|   /** | ||||
|    * Search for notes by title | ||||
|    */ | ||||
|   searchNotesByTitle(searchTerm: string, limit: number = 10): NoteInfo[] { | ||||
|     try { | ||||
|       if (!searchTerm || searchTerm.trim().length === 0) { | ||||
|         return []; | ||||
|       } | ||||
| 
 | ||||
|       searchTerm = searchTerm.toLowerCase(); | ||||
|       const results: NoteInfo[] = []; | ||||
| 
 | ||||
|       // Simple in-memory search through all notes
 | ||||
|       for (const noteId in becca.notes) { | ||||
|         if (results.length >= limit) break; | ||||
| 
 | ||||
|         const note = becca.notes[noteId]; | ||||
|         if (!note || note.isDeleted) continue; | ||||
| 
 | ||||
|         if (note.title.toLowerCase().includes(searchTerm)) { | ||||
|           const info = this.getNoteInfo(noteId); | ||||
|           if (info) results.push(info); | ||||
|         } | ||||
|       } | ||||
| 
 | ||||
|       return results; | ||||
|     } catch (error: any) { | ||||
|       log.error(`Error searching notes by title: ${error.message}`); | ||||
|       return []; | ||||
|     } | ||||
|   } | ||||
| 
 | ||||
|   /** | ||||
|    * Get clones of a note (if any) | ||||
|    */ | ||||
|   getNoteClones(noteId: string): NoteInfo[] { | ||||
|     try { | ||||
|       const note = becca.notes[noteId]; | ||||
|       if (!note) { | ||||
|         return []; | ||||
|       } | ||||
| 
 | ||||
|       // A note has clones if it has multiple parents
 | ||||
|       if (note.parents.length <= 1) { | ||||
|         return []; | ||||
|       } | ||||
| 
 | ||||
|       // Return parent notes, which represent different contexts for this note
 | ||||
|       return this.getParentNotes(noteId); | ||||
|     } catch (error: any) { | ||||
|       log.error(`Error getting note clones: ${error.message}`); | ||||
|       return []; | ||||
|     } | ||||
|   } | ||||
| 
 | ||||
|   /** | ||||
|    * Generate a readable overview of a note's position in the hierarchy | ||||
|    * This is useful for the LLM to understand the context of a note | ||||
|    */ | ||||
|   getNoteContextDescription(noteId: string): string { | ||||
|     try { | ||||
|       const note = becca.notes[noteId]; | ||||
|       if (!note) { | ||||
|         return "Note not found."; | ||||
|       } | ||||
| 
 | ||||
|       const paths = this.getNotePathsFromRoot(noteId); | ||||
|       if (paths.length === 0) { | ||||
|         return `Note "${note.title}" exists but has no path from root.`; | ||||
|       } | ||||
| 
 | ||||
|       let result = ""; | ||||
| 
 | ||||
|       // Basic note info
 | ||||
|       result += `Note: "${note.title}" (${note.type})\n`; | ||||
| 
 | ||||
|       // Is it cloned?
 | ||||
|       if (paths.length > 1) { | ||||
|         result += `This note appears in ${paths.length} different locations:\n`; | ||||
| 
 | ||||
|         // Show max 3 paths to avoid overwhelming context
 | ||||
|         for (let i = 0; i < Math.min(3, paths.length); i++) { | ||||
|           const path = paths[i]; | ||||
|           result += `${i+1}. ${path.notePathTitles.join(' > ')}\n`; | ||||
|         } | ||||
| 
 | ||||
|         if (paths.length > 3) { | ||||
|           result += `... and ${paths.length - 3} more locations\n`; | ||||
|         } | ||||
|       } else { | ||||
|         // Just one path
 | ||||
|         const path = paths[0]; | ||||
|         result += `Path: ${path.notePathTitles.join(' > ')}\n`; | ||||
|       } | ||||
| 
 | ||||
|       // Children info
 | ||||
|       const children = this.getChildNotes(noteId, 5); | ||||
|       if (children.length > 0) { | ||||
|         result += `\nContains ${note.children.length} child notes`; | ||||
|         if (children.length < note.children.length) { | ||||
|           result += ` (showing first ${children.length})`; | ||||
|         } | ||||
|         result += `:\n`; | ||||
| 
 | ||||
|         for (const child of children) { | ||||
|           result += `- ${child.title} (${child.type})\n`; | ||||
|         } | ||||
| 
 | ||||
|         if (children.length < note.children.length) { | ||||
|           result += `... and ${note.children.length - children.length} more\n`; | ||||
|         } | ||||
|       } else { | ||||
|         result += "\nThis note has no child notes.\n"; | ||||
|       } | ||||
| 
 | ||||
|       // Attributes summary
 | ||||
|       const attributes = this.getNoteAttributes(noteId); | ||||
|       if (attributes.length > 0) { | ||||
|         result += `\nNote has ${attributes.length} attributes.\n`; | ||||
| 
 | ||||
|         // Group attributes by name
 | ||||
|         const attrMap: Record<string, string[]> = {}; | ||||
|         for (const attr of attributes) { | ||||
|           if (!attrMap[attr.name]) { | ||||
|             attrMap[attr.name] = []; | ||||
|           } | ||||
|           attrMap[attr.name].push(attr.value); | ||||
|         } | ||||
| 
 | ||||
|         for (const [name, values] of Object.entries(attrMap)) { | ||||
|           if (values.length === 1) { | ||||
|             result += `- ${name}: ${values[0]}\n`; | ||||
|           } else { | ||||
|             result += `- ${name}: ${values.length} values\n`; | ||||
|           } | ||||
|         } | ||||
|       } | ||||
| 
 | ||||
|       return result; | ||||
|     } catch (error: any) { | ||||
|       log.error(`Error getting note context: ${error.message}`); | ||||
|       return "Error generating note context description."; | ||||
|     } | ||||
|   } | ||||
| } | ||||
| 
 | ||||
| export default NoteNavigatorTool; | ||||
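A brief usage sketch of the navigator defined above; the note IDs and search term are placeholders:

import NoteNavigatorTool from './note_navigator_tool.js';

const navigator = new NoteNavigatorTool();

// Human-readable summary of a note's position (paths, children, attributes)
console.log(navigator.getNoteContextDescription('someNoteId'));

// Subtree structure, two levels deep
const hierarchy = navigator.getNoteHierarchy('someNoteId', 2);

// Shortest parent/child chain between two notes (BFS over becca)
const path = navigator.findPathBetweenNotes('someNoteId', 'otherNoteId');
if (path) {
  console.log(path.notePathTitles.join(' > '));
}

// Simple in-memory title search, capped at 5 results
const matches = navigator.searchNotesByTitle('meeting', 5);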
							
								
								
									
390  src/services/llm/agent_tools/query_decomposition_tool.ts  (Normal file)
| @@ -0,0 +1,390 @@ | ||||
| /** | ||||
|  * Query Decomposition Tool | ||||
|  * | ||||
|  * This tool helps the LLM agent break down complex user queries into | ||||
|  * sub-questions that can be answered individually and then synthesized | ||||
|  * into a comprehensive response. | ||||
|  * | ||||
|  * Features: | ||||
|  * - Analyze query complexity | ||||
|  * - Extract multiple intents from a single question | ||||
|  * - Create a multi-stage research plan | ||||
|  * - Track progress through complex information gathering | ||||
|  */ | ||||
| 
 | ||||
| import log from '../../log.js'; | ||||
| 
 | ||||
| export interface SubQuery { | ||||
|   id: string; | ||||
|   text: string; | ||||
|   reason: string; | ||||
|   isAnswered: boolean; | ||||
|   answer?: string; | ||||
| } | ||||
| 
 | ||||
| export interface DecomposedQuery { | ||||
|   originalQuery: string; | ||||
|   subQueries: SubQuery[]; | ||||
|   status: 'pending' | 'in_progress' | 'completed'; | ||||
|   complexity: number; | ||||
| } | ||||
| 
 | ||||
| export class QueryDecompositionTool { | ||||
|   private static queryCounter: number = 0; | ||||
| 
 | ||||
|   /** | ||||
|    * Break down a complex query into smaller, more manageable sub-queries | ||||
|    * | ||||
|    * @param query The original user query | ||||
|    * @param context Optional context about the current note being viewed | ||||
|    * @returns A decomposed query object with sub-queries | ||||
|    */ | ||||
|   decomposeQuery(query: string, context?: string): DecomposedQuery { | ||||
|     try { | ||||
|       // Assess query complexity to determine if decomposition is needed
 | ||||
|       const complexity = this.assessQueryComplexity(query); | ||||
| 
 | ||||
|       // For simple queries, just return the original as a single sub-query
 | ||||
|       if (complexity < 3) { | ||||
|         return { | ||||
|           originalQuery: query, | ||||
|           subQueries: [{ | ||||
|             id: this.generateSubQueryId(), | ||||
|             text: query, | ||||
|             reason: 'Direct question that can be answered without decomposition', | ||||
|             isAnswered: false | ||||
|           }], | ||||
|           status: 'pending', | ||||
|           complexity | ||||
|         }; | ||||
|       } | ||||
| 
 | ||||
|       // For complex queries, perform decomposition
 | ||||
|       const subQueries = this.createSubQueries(query, context); | ||||
| 
 | ||||
|       return { | ||||
|         originalQuery: query, | ||||
|         subQueries, | ||||
|         status: 'pending', | ||||
|         complexity | ||||
|       }; | ||||
|     } catch (error: any) { | ||||
|       log.error(`Error decomposing query: ${error.message}`); | ||||
| 
 | ||||
|       // Fallback to treating it as a simple query
 | ||||
|       return { | ||||
|         originalQuery: query, | ||||
|         subQueries: [{ | ||||
|           id: this.generateSubQueryId(), | ||||
|           text: query, | ||||
|           reason: 'Error in decomposition, treating as simple query', | ||||
|           isAnswered: false | ||||
|         }], | ||||
|         status: 'pending', | ||||
|         complexity: 1 | ||||
|       }; | ||||
|     } | ||||
|   } | ||||
| 
 | ||||
|   /** | ||||
|    * Update a sub-query with its answer | ||||
|    * | ||||
|    * @param decomposedQuery The decomposed query object | ||||
|    * @param subQueryId The ID of the sub-query to update | ||||
|    * @param answer The answer to the sub-query | ||||
|    * @returns The updated decomposed query | ||||
|    */ | ||||
|   updateSubQueryAnswer( | ||||
|     decomposedQuery: DecomposedQuery, | ||||
|     subQueryId: string, | ||||
|     answer: string | ||||
|   ): DecomposedQuery { | ||||
|     const updatedSubQueries = decomposedQuery.subQueries.map(sq => { | ||||
|       if (sq.id === subQueryId) { | ||||
|         return { | ||||
|           ...sq, | ||||
|           answer, | ||||
|           isAnswered: true | ||||
|         }; | ||||
|       } | ||||
|       return sq; | ||||
|     }); | ||||
| 
 | ||||
|     // Check if all sub-queries are answered
 | ||||
|     const allAnswered = updatedSubQueries.every(sq => sq.isAnswered); | ||||
| 
 | ||||
|     return { | ||||
|       ...decomposedQuery, | ||||
|       subQueries: updatedSubQueries, | ||||
|       status: allAnswered ? 'completed' : 'in_progress' | ||||
|     }; | ||||
|   } | ||||
| 
 | ||||
|   /** | ||||
|    * Synthesize all sub-query answers into a comprehensive response | ||||
|    * | ||||
|    * @param decomposedQuery The decomposed query with all sub-queries answered | ||||
|    * @returns A synthesized answer to the original query | ||||
|    */ | ||||
|   synthesizeAnswer(decomposedQuery: DecomposedQuery): string { | ||||
|     try { | ||||
|       // Ensure all sub-queries are answered
 | ||||
|       if (!decomposedQuery.subQueries.every(sq => sq.isAnswered)) { | ||||
|         return "Cannot synthesize answer - not all sub-queries have been answered."; | ||||
|       } | ||||
| 
 | ||||
|       // For simple queries with just one sub-query, return the answer directly
 | ||||
|       if (decomposedQuery.subQueries.length === 1) { | ||||
|         return decomposedQuery.subQueries[0].answer || ""; | ||||
|       } | ||||
| 
 | ||||
|       // For complex queries, build a structured response that references each sub-answer
 | ||||
|       let synthesized = `Answer to: "${decomposedQuery.originalQuery}"\n\n`; | ||||
| 
 | ||||
|       // Group by themes if there are many sub-queries
 | ||||
|       if (decomposedQuery.subQueries.length > 3) { | ||||
|         // Here we would ideally group related sub-queries, but for now we'll just present them in order
 | ||||
|         synthesized += "Based on the information gathered:\n\n"; | ||||
| 
 | ||||
|         for (const sq of decomposedQuery.subQueries) { | ||||
|           synthesized += `${sq.answer}\n\n`; | ||||
|         } | ||||
|       } else { | ||||
|         // For fewer sub-queries, present each one with its question
 | ||||
|         for (const sq of decomposedQuery.subQueries) { | ||||
|           synthesized += `${sq.answer}\n\n`; | ||||
|         } | ||||
|       } | ||||
| 
 | ||||
|       return synthesized.trim(); | ||||
|     } catch (error: any) { | ||||
|       log.error(`Error synthesizing answer: ${error.message}`); | ||||
|       return "Error synthesizing the final answer."; | ||||
|     } | ||||
|   } | ||||
| 
 | ||||
|   /** | ||||
|    * Generate a status report on the progress of answering a complex query | ||||
|    * | ||||
|    * @param decomposedQuery The decomposed query | ||||
|    * @returns A status report string | ||||
|    */ | ||||
|   getQueryStatus(decomposedQuery: DecomposedQuery): string { | ||||
|     const answeredCount = decomposedQuery.subQueries.filter(sq => sq.isAnswered).length; | ||||
|     const totalCount = decomposedQuery.subQueries.length; | ||||
| 
 | ||||
|     let status = `Progress: ${answeredCount}/${totalCount} sub-queries answered\n\n`; | ||||
| 
 | ||||
|     for (const sq of decomposedQuery.subQueries) { | ||||
|       status += `${sq.isAnswered ? '✓' : '○'} ${sq.text}\n`; | ||||
|       if (sq.isAnswered) { | ||||
|         status += `   Answer: ${this.truncateText(sq.answer || "", 100)}\n`; | ||||
|       } | ||||
|     } | ||||
| 
 | ||||
|     return status; | ||||
|   } | ||||
| 
 | ||||
|   /** | ||||
|    * Assess the complexity of a query on a scale of 1-10 | ||||
|    * This helps determine how many sub-queries are needed | ||||
|    * | ||||
|    * @param query The query to assess | ||||
|    * @returns A complexity score from 1-10 | ||||
|    */ | ||||
|   assessQueryComplexity(query: string): number { | ||||
|     // Count the number of question marks as a basic indicator
 | ||||
|     const questionMarkCount = (query.match(/\?/g) || []).length; | ||||
| 
 | ||||
|     // Count potential sub-questions based on question words
 | ||||
|     const questionWords = ['what', 'how', 'why', 'where', 'when', 'who', 'which']; | ||||
|     const questionWordMatches = questionWords.map(word => { | ||||
|       const regex = new RegExp(`\\b${word}\\b`, 'gi'); | ||||
|       return (query.match(regex) || []).length; | ||||
|     }); | ||||
| 
 | ||||
|     const questionWordCount = questionWordMatches.reduce((sum, count) => sum + count, 0); | ||||
| 
 | ||||
|     // Look for conjunctions which might join multiple questions
 | ||||
|     const conjunctionCount = (query.match(/\b(and|or|but|as well as)\b/gi) || []).length; | ||||
| 
 | ||||
|     // Look for complex requirements
 | ||||
|     const comparisonCount = (query.match(/\b(compare|versus|vs|difference|similarities?)\b/gi) || []).length; | ||||
|     const analysisCount = (query.match(/\b(analyze|examine|investigate|explore|explain|discuss)\b/gi) || []).length; | ||||
| 
 | ||||
|     // Calculate base complexity
 | ||||
|     let complexity = 1; | ||||
| 
 | ||||
|     // Add for multiple questions
 | ||||
|     complexity += Math.min(2, questionMarkCount); | ||||
| 
 | ||||
|     // Add for question words beyond the first one
 | ||||
|     complexity += Math.min(2, Math.max(0, questionWordCount - 1)); | ||||
| 
 | ||||
|     // Add for conjunctions that might join questions
 | ||||
|     complexity += Math.min(2, conjunctionCount); | ||||
| 
 | ||||
|     // Add for comparative/analytical requirements
 | ||||
|     complexity += Math.min(2, comparisonCount + analysisCount); | ||||
| 
 | ||||
|     // Add for overall length/complexity
 | ||||
|     if (query.length > 100) complexity += 1; | ||||
|     if (query.length > 200) complexity += 1; | ||||
| 
 | ||||
|     // Ensure we stay in the 1-10 range
 | ||||
|     return Math.max(1, Math.min(10, complexity)); | ||||
|   } | ||||
| 
 | ||||
|   /** | ||||
|    * Generate a unique ID for a sub-query | ||||
|    */ | ||||
|   private generateSubQueryId(): string { | ||||
|     return `sq_${Date.now()}_${QueryDecompositionTool.queryCounter++}`; | ||||
|   } | ||||
| 
 | ||||
|   /** | ||||
|    * Create sub-queries based on the original query and optional context | ||||
|    */ | ||||
|   private createSubQueries(query: string, context?: string): SubQuery[] { | ||||
|     const subQueries: SubQuery[] = []; | ||||
| 
 | ||||
|     // Simple heuristics for breaking down the query
 | ||||
|     // In a real implementation, this would be much more sophisticated,
 | ||||
|     // using natural language understanding to identify different intents
 | ||||
| 
 | ||||
|     // 1. Look for multiple question marks
 | ||||
|     const questionSplit = query.split(/\?/).filter(q => q.trim().length > 0); | ||||
| 
 | ||||
|     if (questionSplit.length > 1) { | ||||
|       // Multiple distinct questions detected
 | ||||
|       for (let i = 0; i < questionSplit.length; i++) { | ||||
|         const text = questionSplit[i].trim() + '?'; | ||||
|         subQueries.push({ | ||||
|           id: this.generateSubQueryId(), | ||||
|           text, | ||||
|           reason: `Separate question ${i+1} detected in the original query`, | ||||
|           isAnswered: false | ||||
|         }); | ||||
|       } | ||||
|       return subQueries; | ||||
|     } | ||||
| 
 | ||||
|     // 2. Look for "and", "or", etc. connecting potentially separate questions
 | ||||
|     const conjunctions = [ | ||||
|       { regex: /\b(compare|versus|vs\.?|difference between|similarities between)\b/i, label: 'comparison' }, | ||||
|       { regex: /\b(list|enumerate)\b/i, label: 'listing' }, | ||||
|       { regex: /\b(analyze|examine|investigate|explore)\b/i, label: 'analysis' }, | ||||
|       { regex: /\b(explain|why)\b/i, label: 'explanation' }, | ||||
|       { regex: /\b(how to|steps to|process of)\b/i, label: 'procedure' } | ||||
|     ]; | ||||
| 
 | ||||
|     // Check for comparison queries - these often need multiple sub-queries
 | ||||
|     for (const conj of conjunctions) { | ||||
|       if (conj.regex.test(query)) { | ||||
|         if (conj.label === 'comparison') { | ||||
|           // For comparisons, we need to research each item, then compare them
 | ||||
|           const comparisonMatch = query.match(/\b(compare|versus|vs\.?|difference between|similarities between)\s+(.+?)\s+(and|with|to)\s+(.+?)(\?|$)/i); | ||||
| 
 | ||||
|           if (comparisonMatch) { | ||||
|             const item1 = comparisonMatch[2].trim(); | ||||
|             const item2 = comparisonMatch[4].trim(); | ||||
| 
 | ||||
|             subQueries.push({ | ||||
|               id: this.generateSubQueryId(), | ||||
|               text: `What are the key characteristics of ${item1}?`, | ||||
|               reason: `Need to understand ${item1} for the comparison`, | ||||
|               isAnswered: false | ||||
|             }); | ||||
| 
 | ||||
|             subQueries.push({ | ||||
|               id: this.generateSubQueryId(), | ||||
|               text: `What are the key characteristics of ${item2}?`, | ||||
|               reason: `Need to understand ${item2} for the comparison`, | ||||
|               isAnswered: false | ||||
|             }); | ||||
| 
 | ||||
|             subQueries.push({ | ||||
|               id: this.generateSubQueryId(), | ||||
|               text: `What are the main differences and similarities between ${item1} and ${item2}?`, | ||||
|               reason: 'Direct comparison after understanding each item', | ||||
|               isAnswered: false | ||||
|             }); | ||||
| 
 | ||||
|             return subQueries; | ||||
|           } | ||||
|         } | ||||
|       } | ||||
|     } | ||||
| 
 | ||||
|     // 3. For complex questions without clear separation, create topic-based sub-queries
 | ||||
|     if (query.length > 100) { | ||||
|       // Extract potential key topics from the query
 | ||||
|       const words = query.toLowerCase().split(/\W+/).filter(w => | ||||
|         w.length > 3 && | ||||
|         !['what', 'when', 'where', 'which', 'with', 'would', 'could', 'should', 'have', 'this', 'that', 'there', 'their'].includes(w) | ||||
|       ); | ||||
| 
 | ||||
|       // Count word frequencies
 | ||||
|       const wordFrequency: Record<string, number> = {}; | ||||
|       for (const word of words) { | ||||
|         wordFrequency[word] = (wordFrequency[word] || 0) + 1; | ||||
|       } | ||||
| 
 | ||||
|       // Get top frequent words
 | ||||
|       const topWords = Object.entries(wordFrequency) | ||||
|         .sort((a, b) => b[1] - a[1]) | ||||
|         .slice(0, 3) | ||||
|         .map(entry => entry[0]); | ||||
| 
 | ||||
|       if (topWords.length > 0) { | ||||
|         // Create factual sub-query
 | ||||
|         subQueries.push({ | ||||
|           id: this.generateSubQueryId(), | ||||
|           text: `What are the key facts about ${topWords.join(' and ')} relevant to this question?`, | ||||
|           reason: 'Gathering basic information about main topics', | ||||
|           isAnswered: false | ||||
|         }); | ||||
| 
 | ||||
|         // Create relationship sub-query if multiple top words
 | ||||
|         if (topWords.length > 1) { | ||||
|           subQueries.push({ | ||||
|             id: this.generateSubQueryId(), | ||||
|             text: `How do ${topWords.join(' and ')} relate to each other?`, | ||||
|             reason: 'Understanding relationships between key topics', | ||||
|             isAnswered: false | ||||
|           }); | ||||
|         } | ||||
| 
 | ||||
|         // Add the original query as the final synthesizing question
 | ||||
|         subQueries.push({ | ||||
|           id: this.generateSubQueryId(), | ||||
|           text: query, | ||||
|           reason: 'Original question to be answered after gathering information', | ||||
|           isAnswered: false | ||||
|         }); | ||||
| 
 | ||||
|         return subQueries; | ||||
|       } | ||||
|     } | ||||
| 
 | ||||
|     // Fallback: If we can't meaningfully decompose, just use the original query
 | ||||
|     subQueries.push({ | ||||
|       id: this.generateSubQueryId(), | ||||
|       text: query, | ||||
|       reason: 'Question treated as a single unit', | ||||
|       isAnswered: false | ||||
|     }); | ||||
| 
 | ||||
|     return subQueries; | ||||
|   } | ||||
| 
 | ||||
|   /** | ||||
|    * Truncate text to a maximum length with ellipsis | ||||
|    */ | ||||
|   private truncateText(text: string, maxLength: number): string { | ||||
|     if (text.length <= maxLength) return text; | ||||
|     return text.substring(0, maxLength - 3) + '...'; | ||||
|   } | ||||
| } | ||||
| 
 | ||||
| export default QueryDecompositionTool; | ||||
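A short sketch of the decompose/answer/synthesize loop this class supports; the query string and the placeholder answers are illustrative:

import QueryDecompositionTool from './query_decomposition_tool.js';

const decomposer = new QueryDecompositionTool();

const decomposed = decomposer.decomposeQuery(
  'Compare daily notes and project notes, and explain how to link them?'
);

// The agent would answer each sub-query (e.g. via vector search);
// here the answers are just placeholders
let current = decomposed;
for (const sq of decomposed.subQueries) {
  current = decomposer.updateSubQueryAnswer(current, sq.id, `(answer for: ${sq.text})`);
}

console.log(decomposer.getQueryStatus(current));   // progress checklist
console.log(decomposer.synthesizeAnswer(current)); // combined final answer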
							
								
								
									
185  src/services/llm/agent_tools/vector_search_tool.ts  (Normal file)
| @@ -0,0 +1,185 @@ | ||||
| /** | ||||
|  * Vector Search Tool | ||||
|  * | ||||
|  * This tool enables the LLM agent to perform semantic vector-based searches | ||||
|  * over the content in the notes database. It handles: | ||||
|  * - Finding semantically related notes to a query | ||||
|  * - Extracting relevant sections from notes | ||||
|  * - Providing relevant context for LLM to generate accurate responses | ||||
|  * | ||||
|  * The tool uses embeddings to find notes with similar semantic meaning, | ||||
|  * allowing the LLM to find relevant information even when exact keywords | ||||
|  * are not present. | ||||
|  */ | ||||
| 
 | ||||
| import log from '../../log.js'; | ||||
| 
 | ||||
| // Define interface for semantic context service to avoid circular imports
 | ||||
| interface ISemanticContextService { | ||||
|   semanticSearch(query: string, options: any): Promise<any[]>; | ||||
|   semanticSearchChunks(query: string, options: any): Promise<any[]>; | ||||
| } | ||||
| 
 | ||||
| export interface VectorSearchResult { | ||||
|   noteId: string; | ||||
|   title: string; | ||||
|   contentPreview: string; | ||||
|   similarity: number; | ||||
|   parentId?: string; | ||||
|   dateCreated?: string; | ||||
|   dateModified?: string; | ||||
| } | ||||
| 
 | ||||
| export interface SearchResultItem { | ||||
|   noteId: string; | ||||
|   noteTitle: string; | ||||
|   contentPreview: string; | ||||
|   similarity: number; | ||||
|   parentId?: string; | ||||
|   dateCreated?: string; | ||||
|   dateModified?: string; | ||||
| } | ||||
| 
 | ||||
| export interface ChunkSearchResultItem { | ||||
|   noteId: string; | ||||
|   noteTitle: string; | ||||
|   chunk: string; | ||||
|   similarity: number; | ||||
|   parentId?: string; | ||||
| } | ||||
| 
 | ||||
| export class VectorSearchTool { | ||||
|   private semanticContext: ISemanticContextService | null = null; | ||||
|   private maxResults: number = 5; | ||||
| 
 | ||||
|   constructor() { | ||||
|     // The semantic context will be set later via setSemanticContext
 | ||||
|   } | ||||
| 
 | ||||
|   /** | ||||
|    * Set the semantic context service instance | ||||
|    */ | ||||
|   setSemanticContext(semanticContext: ISemanticContextService): void { | ||||
|     this.semanticContext = semanticContext; | ||||
|   } | ||||
| 
 | ||||
|   /** | ||||
|    * Search for notes semantically related to a query | ||||
|    */ | ||||
|   async searchNotes(query: string, options: { | ||||
|     parentNoteId?: string, | ||||
|     maxResults?: number, | ||||
|     similarityThreshold?: number | ||||
|   } = {}): Promise<VectorSearchResult[]> { | ||||
|     try { | ||||
|       if (!this.semanticContext) { | ||||
|         throw new Error("Semantic context service not set. Call setSemanticContext() first."); | ||||
|       } | ||||
| 
 | ||||
|       if (!query || query.trim().length === 0) { | ||||
|         return []; | ||||
|       } | ||||
| 
 | ||||
|       const maxResults = options.maxResults || this.maxResults; | ||||
|       const similarityThreshold = options.similarityThreshold || 0.65; // Default threshold
 | ||||
|       const parentNoteId = options.parentNoteId; // Optional filtering by parent
 | ||||
| 
 | ||||
|       // Search notes using the semantic context service
 | ||||
|       const results = await this.semanticContext.semanticSearch(query, { | ||||
|         maxResults, | ||||
|         similarityThreshold, | ||||
|         ancestorNoteId: parentNoteId | ||||
|       }); | ||||
| 
 | ||||
|       if (!results || results.length === 0) { | ||||
|         return []; | ||||
|       } | ||||
| 
 | ||||
|       // Transform results to the tool's format
 | ||||
|       return results.map((result: SearchResultItem) => ({ | ||||
|         noteId: result.noteId, | ||||
|         title: result.noteTitle, | ||||
|         contentPreview: result.contentPreview, | ||||
|         similarity: result.similarity, | ||||
|         parentId: result.parentId, | ||||
|         dateCreated: result.dateCreated, | ||||
|         dateModified: result.dateModified | ||||
|       })); | ||||
|     } catch (error: any) { | ||||
|       log.error(`Error in vector search: ${error.message}`); | ||||
|       return []; | ||||
|     } | ||||
|   } | ||||
| 
 | ||||
|   /** | ||||
|    * Search for content chunks within notes that are semantically related to a query | ||||
|    */ | ||||
|   async searchContentChunks(query: string, options: { | ||||
|     noteId?: string, | ||||
|     maxResults?: number, | ||||
|     similarityThreshold?: number | ||||
|   } = {}): Promise<VectorSearchResult[]> { | ||||
|     try { | ||||
|       if (!this.semanticContext) { | ||||
|         throw new Error("Semantic context service not set. Call setSemanticContext() first."); | ||||
|       } | ||||
| 
 | ||||
|       if (!query || query.trim().length === 0) { | ||||
|         return []; | ||||
|       } | ||||
| 
 | ||||
|       const maxResults = options.maxResults || this.maxResults; | ||||
|       const similarityThreshold = options.similarityThreshold || 0.70; // Higher threshold for chunks
 | ||||
|       const noteId = options.noteId; // Optional filtering by specific note
 | ||||
| 
 | ||||
|       // Search content chunks using the semantic context service
 | ||||
|       const results = await this.semanticContext.semanticSearchChunks(query, { | ||||
|         maxResults, | ||||
|         similarityThreshold, | ||||
|         noteId | ||||
|       }); | ||||
| 
 | ||||
|       if (!results || results.length === 0) { | ||||
|         return []; | ||||
|       } | ||||
| 
 | ||||
|       // Transform results to the tool's format
 | ||||
|       return results.map((result: ChunkSearchResultItem) => ({ | ||||
|         noteId: result.noteId, | ||||
|         title: result.noteTitle, | ||||
|         contentPreview: result.chunk, // Use the chunk content as preview
 | ||||
|         similarity: result.similarity, | ||||
|         parentId: result.parentId | ||||
|       })); | ||||
|     } catch (error: any) { | ||||
|       log.error(`Error in content chunk search: ${error.message}`); | ||||
|       return []; | ||||
|     } | ||||
|   } | ||||
| 
 | ||||
|   /** | ||||
|    * Elaborate on why certain results were returned for a query | ||||
|    */ | ||||
|   explainResults(query: string, results: VectorSearchResult[]): string { | ||||
|     if (!query || !results || results.length === 0) { | ||||
|       return "No results to explain."; | ||||
|     } | ||||
| 
 | ||||
|     let explanation = `For query "${query}", I found these semantically related notes:\n\n`; | ||||
| 
 | ||||
|     results.forEach((result, index) => { | ||||
|       explanation += `${index + 1}. "${result.title}" (similarity: ${(result.similarity * 100).toFixed(1)}%)\n`; | ||||
|       explanation += `   Preview: ${result.contentPreview.substring(0, 150)}${result.contentPreview.length > 150 ? '...' : ''}\n`; | ||||
| 
 | ||||
|       if (index < results.length - 1) { | ||||
|         explanation += "\n"; | ||||
|       } | ||||
|     }); | ||||
| 
 | ||||
|     explanation += "\nThese results were found based on semantic similarity rather than just keyword matching."; | ||||
| 
 | ||||
|     return explanation; | ||||
|   } | ||||
| } | ||||
| 
 | ||||
| export default VectorSearchTool; | ||||
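A minimal usage sketch for this tool. The relative import path and the assumption that semanticContextService satisfies ISemanticContextService are illustrative, not confirmed by the commit.

// Illustrative sketch: wiring the tool to the semantic context service and running a search.
import semanticContextService from '../semantic_context_service.js'; // path assumed
import VectorSearchTool from './vector_search_tool.js';

async function findRelatedNotes(query: string): Promise<string> {
  const tool = new VectorSearchTool();
  tool.setSemanticContext(semanticContextService); // must be called before searching

  // Searches the whole note tree with the default threshold (0.65), returning up to 3 results.
  const results = await tool.searchNotes(query, { maxResults: 3 });

  // Returns a human-readable summary of why these notes matched.
  return tool.explainResults(query, results);
}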
| @ -8,6 +8,7 @@ import { ContextExtractor } from './context/index.js'; | ||||
| import semanticContextService from './semantic_context_service.js'; | ||||
| import indexService from './index_service.js'; | ||||
| import { getEmbeddingProvider, getEnabledEmbeddingProviders } from './embeddings/providers.js'; | ||||
| import agentTools from './agent_tools/index.js'; | ||||
| 
 | ||||
| type ServiceProviders = 'openai' | 'anthropic' | 'ollama'; | ||||
| 
 | ||||
| @ -281,6 +282,128 @@ export class AIServiceManager { | ||||
|     getIndexService() { | ||||
|         return indexService; | ||||
|     } | ||||
| 
 | ||||
|     /** | ||||
|      * Initialize agent tools for enhanced LLM features | ||||
|      */ | ||||
|     async initializeAgentTools(): Promise<void> { | ||||
|         try { | ||||
|             await agentTools.initialize(this); | ||||
|             log.info("Agent tools initialized successfully"); | ||||
|         } catch (error: any) { | ||||
|             log.error(`Error initializing agent tools: ${error.message}`); | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     /** | ||||
|      * Get the agent tools manager | ||||
|      * This provides access to all agent tools | ||||
|      */ | ||||
|     getAgentTools() { | ||||
|         return agentTools; | ||||
|     } | ||||
| 
 | ||||
|     /** | ||||
|      * Get the vector search tool for semantic similarity search | ||||
|      */ | ||||
|     getVectorSearchTool() { | ||||
|         return agentTools.getVectorSearchTool(); | ||||
|     } | ||||
| 
 | ||||
|     /** | ||||
|      * Get the note navigator tool for hierarchical exploration | ||||
|      */ | ||||
|     getNoteNavigatorTool() { | ||||
|         return agentTools.getNoteNavigatorTool(); | ||||
|     } | ||||
| 
 | ||||
|     /** | ||||
|      * Get the query decomposition tool for complex queries | ||||
|      */ | ||||
|     getQueryDecompositionTool() { | ||||
|         return agentTools.getQueryDecompositionTool(); | ||||
|     } | ||||
| 
 | ||||
|     /** | ||||
|      * Get the contextual thinking tool for transparent reasoning | ||||
|      */ | ||||
|     getContextualThinkingTool() { | ||||
|         return agentTools.getContextualThinkingTool(); | ||||
|     } | ||||
| 
 | ||||
|     /** | ||||
|      * Get whether AI features are enabled from options | ||||
|      */ | ||||
|     getAIEnabled(): boolean { | ||||
|         return options.getOptionBool('aiEnabled'); | ||||
|     } | ||||
| 
 | ||||
|     /** | ||||
|      * Set up embeddings provider for AI features | ||||
|      */ | ||||
|     async setupEmbeddingsProvider(): Promise<void> { | ||||
|         try { | ||||
|             if (!this.getAIEnabled()) { | ||||
|                 log.info('AI features are disabled'); | ||||
|                 return; | ||||
|             } | ||||
| 
 | ||||
|             const preferredProvider = options.getOption('embeddingsDefaultProvider') || 'openai'; | ||||
| 
 | ||||
|             // Check if we have enabled providers
 | ||||
|             const enabledProviders = await getEnabledEmbeddingProviders(); | ||||
| 
 | ||||
|             if (enabledProviders.length === 0) { | ||||
|                 log.info('No embedding providers are enabled'); | ||||
|                 return; | ||||
|             } | ||||
| 
 | ||||
|             // Validate that preferred provider is enabled
 | ||||
|             const isPreferredEnabled = enabledProviders.some(p => p.name === preferredProvider); | ||||
| 
 | ||||
|             if (!isPreferredEnabled) { | ||||
|                 log.info(`Preferred provider "${preferredProvider}" is not enabled. Using first available.`); | ||||
|             } | ||||
| 
 | ||||
|             // The enabled providers returned above are ready to use; nothing further needs to be initialized here | ||||
|             log.info('Embedding providers initialized successfully'); | ||||
|         } catch (error: any) { | ||||
|             log.error(`Error setting up embedding providers: ${error.message}`); | ||||
|             throw error; | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     /** | ||||
|      * Initialize the AI Service | ||||
|      */ | ||||
|     async initialize(): Promise<void> { | ||||
|         try { | ||||
|             log.info("Initializing AI service..."); | ||||
| 
 | ||||
|             // Check if AI is enabled in options
 | ||||
|             const isAIEnabled = this.getAIEnabled(); | ||||
| 
 | ||||
|             if (!isAIEnabled) { | ||||
|                 log.info("AI features are disabled in options"); | ||||
|                 return; | ||||
|             } | ||||
| 
 | ||||
|             // Set up embeddings provider if AI is enabled
 | ||||
|             await this.setupEmbeddingsProvider(); | ||||
| 
 | ||||
|             // Initialize index service
 | ||||
|             await this.getIndexService().initialize(); | ||||
| 
 | ||||
|             // Initialize agent tools with this service manager instance
 | ||||
|             await agentTools.initialize(this); | ||||
| 
 | ||||
|             this.initialized = true; | ||||
|             log.info("AI service initialized successfully"); | ||||
|         } catch (error: any) { | ||||
|             log.error(`Error initializing AI service: ${error.message}`); | ||||
|             throw error; | ||||
|         } | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| // Don't create the singleton immediately; use a lazy-loading pattern instead | ||||
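The getInstance() implementation itself sits outside this hunk; a typical lazy-loading shape, shown only to illustrate the pattern and not as the actual code, would be:

// Illustrative only: one common way to implement the lazy singleton used by the wrapper below.
let instance: AIServiceManager | null = null;

function getInstance(): AIServiceManager {
  if (!instance) {
    instance = new AIServiceManager();
  }
  return instance;
}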
| @ -321,6 +444,26 @@ export default { | ||||
|     }, | ||||
|     getIndexService() { | ||||
|         return getInstance().getIndexService(); | ||||
|     }, | ||||
|     // Agent tools related methods
 | ||||
|     async initializeAgentTools(): Promise<void> { | ||||
|         const manager = getInstance(); | ||||
|         return manager.initializeAgentTools(); | ||||
|     }, | ||||
|     getAgentTools() { | ||||
|         return getInstance().getAgentTools(); | ||||
|     }, | ||||
|     getVectorSearchTool() { | ||||
|         return getInstance().getVectorSearchTool(); | ||||
|     }, | ||||
|     getNoteNavigatorTool() { | ||||
|         return getInstance().getNoteNavigatorTool(); | ||||
|     }, | ||||
|     getQueryDecompositionTool() { | ||||
|         return getInstance().getQueryDecompositionTool(); | ||||
|     }, | ||||
|     getContextualThinkingTool() { | ||||
|         return getInstance().getContextualThinkingTool(); | ||||
|     } | ||||
| }; | ||||
| 
 | ||||
|  | ||||
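A sketch of how another service might call the new agent-tool accessors through the lazily created manager; the caller code and its import path are illustrative, not part of the commit.

// Illustrative caller: initialize the agent tools once, then use the vector search accessor.
import aiServiceManager from './ai_service_manager.js'; // path assumed

async function warmUpAgentTools() {
  // First access creates the manager lazily, then wires up the agent tools.
  await aiServiceManager.initializeAgentTools();

  const vectorSearch = aiServiceManager.getVectorSearchTool();
  return vectorSearch.searchNotes('project roadmap', { maxResults: 3 });
}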
| @ -6,6 +6,7 @@ import log from "../log.js"; | ||||
| import type { Message } from "./ai_interface.js"; | ||||
| import { cosineSimilarity } from "./embeddings/index.js"; | ||||
| import sanitizeHtml from "sanitize-html"; | ||||
| import aiServiceManager from "./ai_service_manager.js"; | ||||
| 
 | ||||
| /** | ||||
|  * TriliumContextService provides intelligent context management for working with large knowledge bases | ||||
| @ -617,6 +618,140 @@ Example: ["exact topic mentioned", "related concept 1", "related concept 2"]`; | ||||
|             }; | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     /** | ||||
|      * Enhance LLM context with agent tools | ||||
|      * | ||||
|      * This adds context from agent tools such as: | ||||
|      * 1. Vector search results relevant to the query | ||||
|      * 2. Note hierarchy information | ||||
|      * 3. Query decomposition planning | ||||
|      * 4. Contextual thinking visualization | ||||
|      * | ||||
|      * @param noteId The current note being viewed | ||||
|      * @param query The user's query | ||||
|      * @param showThinking Whether to include the agent's thinking process | ||||
|      * @returns Enhanced context string | ||||
|      */ | ||||
|     async getAgentToolsContext(noteId: string, query: string, showThinking: boolean = false): Promise<string> { | ||||
|         try { | ||||
|             const agentTools = aiServiceManager.getAgentTools(); | ||||
|             let context = ""; | ||||
| 
 | ||||
|             // 1. Get vector search results related to the query
 | ||||
|             try { | ||||
|                 const vectorSearchTool = agentTools.getVectorSearchTool(); | ||||
|                 const searchResults = await vectorSearchTool.searchNotes(query, { | ||||
|                     parentNoteId: noteId, | ||||
|                     maxResults: 5 | ||||
|                 }); | ||||
| 
 | ||||
|                 if (searchResults.length > 0) { | ||||
|                     context += "## Related Information\n\n"; | ||||
|                     for (const result of searchResults) { | ||||
|                         context += `### ${result.title}\n`; | ||||
|                         context += `${result.contentPreview}\n\n`; | ||||
|                     } | ||||
|                     context += "\n"; | ||||
|                 } | ||||
|             } catch (error: any) { | ||||
|                 log.error(`Error getting vector search context: ${error.message}`); | ||||
|             } | ||||
| 
 | ||||
|             // 2. Get note structure context
 | ||||
|             try { | ||||
|                 const navigatorTool = agentTools.getNoteNavigatorTool(); | ||||
|                 const noteContext = navigatorTool.getNoteContextDescription(noteId); | ||||
| 
 | ||||
|                 if (noteContext) { | ||||
|                     context += "## Current Note Context\n\n"; | ||||
|                     context += noteContext + "\n\n"; | ||||
|                 } | ||||
|             } catch (error: any) { | ||||
|                 log.error(`Error getting note structure context: ${error.message}`); | ||||
|             } | ||||
| 
 | ||||
|             // 3. Use query decomposition if it's a complex query
 | ||||
|             try { | ||||
|                 const decompositionTool = agentTools.getQueryDecompositionTool(); | ||||
|                 const complexity = decompositionTool.assessQueryComplexity(query); | ||||
| 
 | ||||
|                 if (complexity > 5) { // Only for fairly complex queries
 | ||||
|                     const decomposed = decompositionTool.decomposeQuery(query); | ||||
| 
 | ||||
|                     if (decomposed.subQueries.length > 1) { | ||||
|                         context += "## Query Analysis\n\n"; | ||||
|                         context += `This is a complex query (complexity: ${complexity}/10). It can be broken down into:\n\n`; | ||||
| 
 | ||||
|                         for (const sq of decomposed.subQueries) { | ||||
|                             context += `- ${sq.text}\n  Reason: ${sq.reason}\n\n`; | ||||
|                         } | ||||
|                     } | ||||
|                 } | ||||
|             } catch (error: any) { | ||||
|                 log.error(`Error decomposing query: ${error.message}`); | ||||
|             } | ||||
| 
 | ||||
|             // 4. Show thinking process if enabled
 | ||||
|             if (showThinking) { | ||||
|                 try { | ||||
|                     const thinkingTool = agentTools.getContextualThinkingTool(); | ||||
|                     const thinkingId = thinkingTool.startThinking(query); | ||||
| 
 | ||||
|                     // Add a thinking step to demonstrate the feature
 | ||||
|                     // In a real implementation, the LLM would add these steps
 | ||||
|                     thinkingTool.addThinkingStep( | ||||
|                         "Analyzing the query to understand what information is needed", | ||||
|                         "observation", | ||||
|                         { confidence: 1.0 } | ||||
|                     ); | ||||
| 
 | ||||
|                     // Add sample thinking for the context
 | ||||
|                     const parentId = thinkingTool.addThinkingStep( | ||||
|                         "Looking for related notes in the knowledge base", | ||||
|                         "hypothesis", | ||||
|                         { confidence: 0.9 } | ||||
|                     ); | ||||
| 
 | ||||
|                     if (parentId) { | ||||
|                         // Use the VectorSearchTool to find relevant notes
 | ||||
|                         const vectorSearchTool = aiServiceManager.getVectorSearchTool(); | ||||
|                         const searchResults = await vectorSearchTool.searchNotes(query, { | ||||
|                             parentNoteId: noteId, // scope the search to the current note, not the thinking-step id | ||||
|                             maxResults: 5 | ||||
|                         }); | ||||
| 
 | ||||
|                         if (searchResults.length > 0) { | ||||
|                             context += "## Additional Related Information\n\n"; // avoid duplicating the heading added in step 1 | ||||
|                             for (const result of searchResults) { | ||||
|                                 context += `### ${result.title}\n`; | ||||
|                                 context += `${result.contentPreview}\n\n`; | ||||
|                             } | ||||
|                             context += "\n"; | ||||
|                         } | ||||
|                     } | ||||
| 
 | ||||
|                     thinkingTool.addThinkingStep( | ||||
|                         "The most relevant information appears to be in the current note and its semantic neighborhood", | ||||
|                         "conclusion", | ||||
|                         { confidence: 0.85 } | ||||
|                     ); | ||||
| 
 | ||||
|                     // Complete the thinking and add it to context
 | ||||
|                     thinkingTool.completeThinking(thinkingId); | ||||
|                     context += "## Thinking Process\n\n"; | ||||
|                     context += thinkingTool.getThinkingSummary(thinkingId) + "\n\n"; | ||||
|                 } catch (error: any) { | ||||
|                     log.error(`Error generating thinking process: ${error.message}`); | ||||
|                 } | ||||
|             } | ||||
| 
 | ||||
|             return context; | ||||
|         } catch (error: any) { | ||||
|             log.error(`Error getting agent tools context: ${error.message}`); | ||||
|             return ""; | ||||
|         } | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| export default new TriliumContextService(); | ||||
|  | ||||
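A sketch of how a chat pipeline might consume the new method; the caller code and its import path are illustrative, only getAgentToolsContext itself is part of the commit.

// Illustrative caller: prepend the agent-tools context to the prompt for an LLM request.
import triliumContextService from './trilium_context_service.js'; // path assumed

async function buildPrompt(noteId: string, userQuestion: string): Promise<string> {
  // Passing showThinking = true also embeds the agent's reasoning summary in the context.
  const agentContext = await triliumContextService.getAgentToolsContext(noteId, userQuestion, true);
  return `${agentContext}\nUser question: ${userQuestion}`;
}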