add agentic thinking to chat

perf3ct 2025-03-19 18:49:14 +00:00
parent 492c05bad4
commit 352204bf78
9 changed files with 488 additions and 176 deletions

View File

@@ -33,6 +33,7 @@ export default class LlmChatPanel extends BasicWidget {
     private loadingIndicator!: HTMLElement;
     private sourcesList!: HTMLElement;
     private useAdvancedContextCheckbox!: HTMLInputElement;
+    private showThinkingCheckbox!: HTMLInputElement;
     private validationWarning!: HTMLElement;
     private sessionId: string | null = null;
     private currentNoteId: string | null = null;
@@ -63,15 +64,19 @@ export default class LlmChatPanel extends BasicWidget {
         <form class="note-context-chat-form d-flex flex-column border-top p-2">
             <div class="d-flex mb-2 align-items-center context-option-container">
-                <div class="form-check form-switch">
+                <div class="form-check form-switch me-3">
                     <input class="form-check-input use-advanced-context-checkbox" type="checkbox" id="useEnhancedContext" checked>
-                    <label class="form-check-label" for="useEnhancedContext">
+                    <label class="form-check-label" for="useEnhancedContext" title="${t('ai.enhanced_context_description')}">
                         ${t('ai.use_enhanced_context')}
+                        <i class="bx bx-info-circle ms-1 small text-muted"></i>
                     </label>
                 </div>
-                <div class="ms-2 small text-muted">
-                    <i class="bx bx-info-circle"></i>
-                    <span>${t('ai.enhanced_context_description')}</span>
+                <div class="form-check form-switch">
+                    <input class="form-check-input show-thinking-checkbox" type="checkbox" id="showThinking">
+                    <label class="form-check-label" for="showThinking" title="${t('ai.show_thinking_description')}">
+                        ${t('ai.show_thinking')}
+                        <i class="bx bx-info-circle ms-1 small text-muted"></i>
+                    </label>
                 </div>
             </div>
             <div class="d-flex chat-input-container">
@@ -97,6 +102,7 @@ export default class LlmChatPanel extends BasicWidget {
         this.loadingIndicator = element.querySelector('.loading-indicator') as HTMLElement;
         this.sourcesList = element.querySelector('.sources-list') as HTMLElement;
         this.useAdvancedContextCheckbox = element.querySelector('.use-advanced-context-checkbox') as HTMLInputElement;
+        this.showThinkingCheckbox = element.querySelector('.show-thinking-checkbox') as HTMLInputElement;
         this.validationWarning = element.querySelector('.provider-validation-warning') as HTMLElement;

         // Set up event delegation for the settings link
@@ -167,12 +173,17 @@ export default class LlmChatPanel extends BasicWidget {
         try {
             const useAdvancedContext = this.useAdvancedContextCheckbox.checked;
+            const showThinking = this.showThinkingCheckbox.checked;
+
+            // Add logging to verify parameters
+            console.log(`Sending message with: useAdvancedContext=${useAdvancedContext}, showThinking=${showThinking}, noteId=${this.currentNoteId}`);

             // Create the message parameters
             const messageParams = {
                 content,
                 contextNoteId: this.currentNoteId,
-                useAdvancedContext
+                useAdvancedContext,
+                showThinking
             };

             // First, send the message via POST request
@@ -192,7 +203,7 @@ export default class LlmChatPanel extends BasicWidget {
             }

             // Then set up streaming via EventSource
-            const streamUrl = `./api/llm/sessions/${this.sessionId}/messages?format=stream&useAdvancedContext=${useAdvancedContext}`;
+            const streamUrl = `./api/llm/sessions/${this.sessionId}/messages?format=stream&useAdvancedContext=${useAdvancedContext}&showThinking=${showThinking}`;
             const source = new EventSource(streamUrl);

             let assistantResponse = '';
@@ -415,9 +426,24 @@ export default class LlmChatPanel extends BasicWidget {
     private formatMarkdown(content: string): string {
         if (!content) return '';

-        // First, extract code blocks to protect them from other replacements
+        // Check if content contains HTML sections for thinking visualization
+        if (content.includes('<div class="thinking-process">') ||
+            content.includes('<div class=\'thinking-process\'>')) {
+            console.log('Detected thinking process visualization in response');
+            // For content with HTML thinking visualizations, we need to protect them
+        }
+
+        // First, extract HTML thinking visualization to protect it from replacements
+        const thinkingBlocks: string[] = [];
+        let processedContent = content.replace(/<div class=['"](thinking-process|reasoning-process)['"][\s\S]*?<\/div>/g, (match) => {
+            const placeholder = `__THINKING_BLOCK_${thinkingBlocks.length}__`;
+            thinkingBlocks.push(match);
+            return placeholder;
+        });
+
+        // Then extract code blocks to protect them from other replacements
         const codeBlocks: string[] = [];
-        let processedContent = content.replace(/```(\w+)?\n([\s\S]+?)\n```/gs, (match, language, code) => {
+        processedContent = processedContent.replace(/```(\w+)?\n([\s\S]+?)\n```/gs, (match, language, code) => {
             const placeholder = `__CODE_BLOCK_${codeBlocks.length}__`;
             const languageClass = language ? ` language-${language}` : '';
             codeBlocks.push(`<pre class="code${languageClass}"><code>${code}</code></pre>`);
@@ -436,6 +462,11 @@ export default class LlmChatPanel extends BasicWidget {
             processedContent = processedContent.replace(`__CODE_BLOCK_${index}__`, block);
         });

+        // Restore thinking visualization blocks
+        thinkingBlocks.forEach((block, index) => {
+            processedContent = processedContent.replace(`__THINKING_BLOCK_${index}__`, block);
+        });
+
         return processedContent;
     }
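Note: the formatMarkdown change above relies on a protect-then-restore placeholder pattern. A minimal standalone sketch of the idea (the function name and simplified regexes here are illustrative, not part of the commit):

// Swap protected spans for unique tokens, run the markdown-style
// replacements, then swap the originals back in.
function protectAndTransform(content: string): string {
    const protectedBlocks: string[] = [];

    // 1. Replace each protected span with a unique placeholder token.
    let processed = content.replace(/<div class=['"]thinking-process['"][\s\S]*?<\/div>/g, (match) => {
        const placeholder = `__PROTECTED_${protectedBlocks.length}__`;
        protectedBlocks.push(match);
        return placeholder;
    });

    // 2. Markdown-style replacements; the placeholder tokens contain no
    //    markdown characters, so they pass through untouched.
    processed = processed.replace(/\*\*(.+?)\*\*/g, '<strong>$1</strong>');

    // 3. Restore the protected spans.
    protectedBlocks.forEach((block, index) => {
        processed = processed.replace(`__PROTECTED_${index}__`, block);
    });

    return processed;
}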

View File

@@ -1941,3 +1941,53 @@ footer.file-footer button {
 .ck-content .admonition.important::before { content: "\ea7c"; }
 .ck-content .admonition.caution::before { content: "\eac7"; }
 .ck-content .admonition.warning::before { content: "\eac5"; }
+
+.chat-options-container {
+    display: flex;
+    margin: 5px 0;
+    align-items: center;
+    padding: 0 10px;
+}
+
+.chat-option {
+    display: flex;
+    align-items: center;
+    font-size: 0.9em;
+    margin-right: 15px;
+    cursor: pointer;
+}
+
+.chat-option input[type="checkbox"] {
+    margin-right: 5px;
+}
+
+/* Style for thinking process in chat responses */
+.thinking-process {
+    background-color: rgba(0, 0, 0, 0.05);
+    border-left: 3px solid var(--main-text-color);
+    padding: 10px;
+    margin: 10px 0;
+    border-radius: 4px;
+}
+
+.thinking-step {
+    margin-bottom: 8px;
+    padding-left: 10px;
+}
+
+.thinking-step.observation {
+    border-left: 2px solid #69c7ff;
+}
+
+.thinking-step.hypothesis {
+    border-left: 2px solid #9839f7;
+}
+
+.thinking-step.evidence {
+    border-left: 2px solid #40c025;
+}
+
+.thinking-step.conclusion {
+    border-left: 2px solid #e2aa03;
+    font-weight: bold;
+}
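Note: for orientation, the markup these .thinking-process / .thinking-step rules target is emitted by ContextualThinkingTool.visualizeThinking() later in this commit; it looks roughly like this (sample step content invented):

const sampleThinkingHtml = `
<div class='thinking-process'>
    <h4>Thinking Process</h4>
    <div class='thinking-step observation'>
        <span class='bx bx-search'></span> Analyzing the user's query to understand the information needs
    </div>
    <div class='thinking-step conclusion'>
        <span class='bx bx-check-circle'></span> Ready to formulate response
        <span class='thinking-confidence'>(Confidence: 95%)</span>
    </div>
</div>`;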

View File

@@ -1815,11 +1815,34 @@
         "description": "Select one or more languages that should appear in the language selection in the Basic Properties section of a read-only or editable text note. This will allow features such as spell-checking or right-to-left support."
     },
     "ai": {
-        "sources": "Sources",
+        "ai_settings": "AI Settings",
+        "api_key": "API Key",
+        "api_key_tooltip": "API key for accessing the service",
+        "confirm_delete_embeddings": "Are you sure you want to delete all AI embeddings? This will remove all semantic search capabilities until notes are reindexed, which can take a significant amount of time.",
+        "empty_key_warning": "Warning: Empty API key. You need to configure your API key in settings.",
+        "enable_ai": "Enable AI Features",
+        "enhanced_context_description": "Uses semantic search to find relevant information across your notes",
         "enter_message": "Enter your message...",
-        "use_advanced_context": "Use Enhanced Note Context",
-        "advanced_context_helps": "Helps with large knowledge bases and limited context windows",
-        "use_enhanced_context": "Use Enhanced Note Context",
-        "enhanced_context_description": "Improves answers by including more relevant note content"
+        "error_contacting_provider": "Error contacting AI provider. Please check your settings and internet connection.",
+        "error_generating_response": "Error generating AI response",
+        "index_all_notes": "Index All Notes",
+        "index_status": "Index Status",
+        "indexed_notes": "Indexed Notes",
+        "indexing_stopped": "Indexing stopped",
+        "indexing_in_progress": "Indexing in progress...",
+        "last_indexed": "Last Indexed",
+        "n_notes_queued": "{{ count }} note queued for indexing",
+        "n_notes_queued_plural": "{{ count }} notes queued for indexing",
+        "note_chat": "Note Chat",
+        "notes_indexed": "{{ count }} note indexed",
+        "notes_indexed_plural": "{{ count }} notes indexed",
+        "processing": "Processing",
+        "reset_embeddings": "Reset Embeddings",
+        "show_thinking": "Show Thinking",
+        "show_thinking_description": "Reveals the reasoning process used to generate responses",
+        "sources": "Sources",
+        "start_indexing": "Start Indexing",
+        "use_advanced_context": "Use Advanced Context",
+        "use_enhanced_context": "Use Enhanced Note Context"
     }
 }
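Note: the n_notes_queued / n_notes_queued_plural and notes_indexed / notes_indexed_plural pairs follow the i18next _plural suffix convention, so lookups would presumably pass a count (the import path below is a guess, not shown in this commit):

import { t } from '../services/i18n.js'; // hypothetical path

t('ai.show_thinking');                 // "Show Thinking"
t('ai.n_notes_queued', { count: 1 });  // "1 note queued for indexing"
t('ai.n_notes_queued', { count: 42 }); // "42 notes queued for indexing"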

View File

@@ -545,18 +545,26 @@ Now, based on the above information, please answer: ${query}`;

 async function sendMessage(req: Request, res: Response) {
     try {
         // Extract parameters differently based on the request method
-        let content, useAdvancedContext, sessionId;
+        let content, useAdvancedContext, showThinking, sessionId;

         if (req.method === 'POST') {
             // For POST requests, get content from the request body
             const requestBody = req.body || {};
             content = requestBody.content;
             useAdvancedContext = requestBody.useAdvancedContext || false;
+            showThinking = requestBody.showThinking || false;
+
+            // Add logging for POST requests
+            log.info(`LLM POST message: sessionId=${req.params.sessionId}, useAdvancedContext=${useAdvancedContext}, showThinking=${showThinking}, contentLength=${content ? content.length : 0}`);
         } else if (req.method === 'GET') {
             // For GET (streaming) requests, get format from query params
             // The content should have been sent in a previous POST request
             useAdvancedContext = req.query.useAdvancedContext === 'true';
+            showThinking = req.query.showThinking === 'true';
             content = ''; // We don't need content for GET requests
+
+            // Add logging for GET requests
+            log.info(`LLM GET stream: sessionId=${req.params.sessionId}, useAdvancedContext=${useAdvancedContext}, showThinking=${showThinking}`);
         }

         // Get sessionId from URL params since it's part of the route
@@ -644,7 +652,16 @@ async function sendMessage(req: Request, res: Response) {
             if (useAdvancedContext) {
                 // Use the Trilium-specific approach
                 const contextNoteId = session.noteContext || null;
-                const results = await triliumContextService.processQuery(messageContent, service, contextNoteId);
+
+                // Log that we're calling triliumContextService with the parameters
+                log.info(`Using enhanced context with: noteId=${contextNoteId}, showThinking=${showThinking}`);
+
+                const results = await triliumContextService.processQuery(
+                    messageContent,
+                    service,
+                    contextNoteId,
+                    showThinking // Pass the showThinking parameter
+                );

                 // Get the generated context
                 const context = results.context;
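Note: taken together with the widget changes above, this handler supports a two-step client flow: a POST delivers the message body and flags, then a GET EventSource stream carries the same flags as query parameters. A condensed sketch (function name and IDs invented):

async function askWithThinking(sessionId: string, content: string) {
    // Step 1: POST the message with the flags in the body.
    await fetch(`./api/llm/sessions/${sessionId}/messages`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ content, useAdvancedContext: true, showThinking: true })
    });

    // Step 2: open the stream; GET requests carry the flags as query params.
    const source = new EventSource(
        `./api/llm/sessions/${sessionId}/messages?format=stream&useAdvancedContext=true&showThinking=true`
    );
    source.onmessage = (event) => console.log('chunk:', event.data);
}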

View File

@@ -13,7 +13,8 @@
  * - Show how different sources of evidence are weighed
  */

-import log from '../../log.js';
+import log from "../../log.js";
+import aiServiceManager from "../ai_service_manager.js";

 /**
  * Represents a single reasoning step taken by the agent
@@ -159,117 +160,79 @@ export class ContextualThinkingTool {
     }

     /**
-     * Generate a user-friendly HTML representation of the thinking process
+     * Visualize the thinking process as HTML for display in the UI
      *
-     * @param processId The ID of the process to visualize
-     * @returns HTML string representing the thinking process
+     * @param thinkingId The ID of the thinking process to visualize
+     * @returns HTML representation of the thinking process
      */
-    visualizeThinking(processId: string): string {
-        const process = this.getThinkingProcess(processId);
+    visualizeThinking(thinkingId: string): string {
+        log.info(`Visualizing thinking process: thinkingId=${thinkingId}`);
+        const process = this.getThinkingProcess(thinkingId);

         if (!process) {
-            return `<div class="thinking-error">Thinking process ${processId} not found</div>`;
+            log.info(`No thinking process found for id: ${thinkingId}`);
+            return "<div class='thinking-process'>No thinking process found</div>";
         }

-        let html = `
-            <div class="thinking-process">
-                <div class="thinking-header">
-                    <h3>Thinking Process for: "${process.query}"</h3>
-                    <div class="thinking-metadata">
-                        <span>Status: ${process.status}</span>
-                        <span>Steps: ${process.steps.length}</span>
-                        <span>Time: ${this.formatDuration(process.startTime, process.endTime || Date.now())}</span>
-                    </div>
-                </div>
-                <div class="thinking-steps">
-        `;
-
-        // Find root steps (those without parents)
-        const rootSteps = process.steps.filter(step => !step.parentId);
-
-        // Recursively render the thinking tree
-        for (const rootStep of rootSteps) {
-            html += this.renderStepTree(rootStep, process.steps);
-        }
-
-        html += `
-                </div>
-            </div>
-        `;
+        log.info(`Found thinking process with ${process.steps.length} steps for query: "${process.query.substring(0, 50)}..."`);
+
+        let html = "<div class='thinking-process'>";
+        html += `<h4>Thinking Process</h4>`;
+
+        for (const step of process.steps) {
+            html += `<div class='thinking-step ${step.type || ""}'>`;
+
+            // Add an icon based on step type
+            const icon = this.getStepIcon(step.type);
+            html += `<span class='bx ${icon}'></span> `;
+
+            // Add the step content
+            html += step.content;
+
+            // Show confidence if available
+            if (step.metadata?.confidence) {
+                const confidence = Math.round((step.metadata.confidence as number) * 100);
+                html += ` <span class='thinking-confidence'>(Confidence: ${confidence}%)</span>`;
+            }
+
+            html += `</div>`;
+        }
+
+        html += "</div>";

         return html;
     }

     /**
-     * Generate a concise text representation of the thinking process
-     * that can be displayed inline in the chat for transparency
-     *
-     * @param processId The ID of the process to summarize
-     * @returns Text summary of the reasoning process
+     * Get an appropriate icon for a thinking step type
      */
-    getThinkingSummary(processId?: string): string {
-        const id = processId || this.activeProcId;
-        if (!id || !this.processes[id]) {
-            return "No thinking process available.";
-        }
-
-        const process = this.processes[id];
-        let summary = `Thinking about: "${process.query}"\n\n`;
-
-        // Group steps by type
-        const stepsByType: Record<string, ThinkingStep[]> = {};
-        for (const step of process.steps) {
-            if (!stepsByType[step.type]) {
-                stepsByType[step.type] = [];
-            }
-            stepsByType[step.type].push(step);
-        }
-
-        // Show observations first
-        if (stepsByType['observation'] && stepsByType['observation'].length > 0) {
-            summary += "🔍 Observations:\n";
-            for (const step of stepsByType['observation'].slice(0, 3)) {
-                summary += `- ${step.content}\n`;
-            }
-            if (stepsByType['observation'].length > 3) {
-                summary += `- ...and ${stepsByType['observation'].length - 3} more observations\n`;
-            }
-            summary += "\n";
-        }
-
-        // Show questions the agent asked itself
-        if (stepsByType['question'] && stepsByType['question'].length > 0) {
-            summary += "❓ Questions considered:\n";
-            for (const step of stepsByType['question'].slice(0, 3)) {
-                summary += `- ${step.content}\n`;
-            }
-            if (stepsByType['question'].length > 3) {
-                summary += `- ...and ${stepsByType['question'].length - 3} more questions\n`;
-            }
-            summary += "\n";
-        }
-
-        // Show evidence
-        if (stepsByType['evidence'] && stepsByType['evidence'].length > 0) {
-            summary += "📋 Evidence found:\n";
-            for (const step of stepsByType['evidence'].slice(0, 3)) {
-                summary += `- ${step.content}\n`;
-            }
-            if (stepsByType['evidence'].length > 3) {
-                summary += `- ...and ${stepsByType['evidence'].length - 3} more pieces of evidence\n`;
-            }
-            summary += "\n";
-        }
-
-        // Show conclusions
-        if (stepsByType['conclusion'] && stepsByType['conclusion'].length > 0) {
-            summary += "✅ Conclusions:\n";
-            for (const step of stepsByType['conclusion']) {
-                const confidence = step.confidence ? ` (${Math.round(step.confidence * 100)}% confidence)` : '';
-                summary += `- ${step.content}${confidence}\n`;
-            }
-        }
-
-        return summary;
+    private getStepIcon(type: string): string {
+        switch (type) {
+            case 'observation':
+                return 'bx-search';
+            case 'hypothesis':
+                return 'bx-bulb';
+            case 'evidence':
+                return 'bx-list-check';
+            case 'conclusion':
+                return 'bx-check-circle';
+            default:
+                return 'bx-message-square-dots';
+        }
+    }
+
+    /**
+     * Get a plain text summary of the thinking process
+     *
+     * @param thinkingId The ID of the thinking process to summarize
+     * @returns Text summary of the thinking process
+     */
+    getThinkingSummary(thinkingId: string): string {
+        const process = this.getThinkingProcess(thinkingId);
+        if (!process) {
+            return "No thinking process available.";
+        }
+
+        return this.visualizeThinking(thinkingId);
     }

     /**
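Note: a minimal lifecycle sketch of the tool as the context service drives it later in this commit (direct construction here is illustrative; the commit obtains the instance via agentTools.getContextualThinkingTool()):

const thinkingTool = new ContextualThinkingTool();
const thinkingId = thinkingTool.startThinking("How are my project notes organized?");

thinkingTool.addThinkingStep(
    "Analyzing the user's query to understand the information needs",
    "observation",
    { confidence: 1.0 }
);
const parentId = thinkingTool.addThinkingStep(
    "Exploring knowledge base to find relevant information",
    "hypothesis",
    { confidence: 0.9 }
);
thinkingTool.addThinkingStep(
    "Found 3 potentially relevant notes through semantic search",
    "evidence",
    { confidence: 0.85, parentId: parentId || undefined }
);

thinkingTool.completeThinking(thinkingId);

// HTML fragment ready to embed in a chat response.
const html = thinkingTool.visualizeThinking(thinkingId);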

View File

@@ -18,6 +18,10 @@ export interface ChatCompletionOptions {
     model?: string;
     temperature?: number;
     maxTokens?: number;
+    topP?: number;
+    frequencyPenalty?: number;
+    presencePenalty?: number;
+    showThinking?: boolean;
     systemPrompt?: string;
     stream?: boolean; // Whether to stream the response
 }
@@ -51,3 +55,34 @@ export interface AIService {
      */
     getName(): string;
 }
+
+/**
+ * Interface for the semantic context service, which provides enhanced context retrieval
+ * for AI conversations based on semantic similarity.
+ */
+export interface SemanticContextService {
+    /**
+     * Initialize the semantic context service
+     */
+    initialize(): Promise<void>;
+
+    /**
+     * Retrieve semantic context based on relevance to user query
+     */
+    getSemanticContext(noteId: string, userQuery: string, maxResults?: number): Promise<string>;
+
+    /**
+     * Get progressive context based on depth
+     */
+    getProgressiveContext?(noteId: string, depth?: number): Promise<string>;
+
+    /**
+     * Get smart context selection that adapts to query complexity
+     */
+    getSmartContext?(noteId: string, userQuery: string): Promise<string>;
+
+    /**
+     * Enhance LLM context with agent tools
+     */
+    getAgentToolsContext(noteId: string, query: string, showThinking?: boolean): Promise<string>;
+}
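Note: a minimal class satisfying the new interface, to make the contract concrete (a hypothetical stub, not part of the commit; the optional getProgressiveContext and getSmartContext members may be omitted):

class StubSemanticContext implements SemanticContextService {
    async initialize(): Promise<void> {
        // e.g. warm up the embedding provider
    }

    async getSemanticContext(noteId: string, userQuery: string, maxResults = 5): Promise<string> {
        return `Context for ${noteId} relevant to "${userQuery}" (top ${maxResults} results)`;
    }

    async getAgentToolsContext(noteId: string, query: string, showThinking = false): Promise<string> {
        // The thinking visualization is appended only when requested.
        return showThinking ? "## Reasoning Process\n\n..." : "";
    }
}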

View File

@@ -1,5 +1,5 @@
 import options from '../options.js';
-import type { AIService, ChatCompletionOptions, ChatResponse, Message } from './ai_interface.js';
+import type { AIService, ChatCompletionOptions, ChatResponse, Message, SemanticContextService } from './ai_interface.js';
 import { OpenAIService } from './providers/openai_service.js';
 import { AnthropicService } from './providers/anthropic_service.js';
 import { OllamaService } from './providers/ollama_service.js';
@@ -271,8 +271,8 @@ export class AIServiceManager {
      * Get the semantic context service for advanced context handling
      * @returns The semantic context service instance
      */
-    getSemanticContextService() {
-        return semanticContextService;
+    getSemanticContextService(): SemanticContextService {
+        return semanticContextService as unknown as SemanticContextService;
     }

     /**
@@ -439,7 +439,7 @@ export default {
     getContextExtractor() {
         return getInstance().getContextExtractor();
     },
-    getSemanticContextService() {
+    getSemanticContextService(): SemanticContextService {
         return getInstance().getSemanticContextService();
     },
     getIndexService() {

View File

@@ -215,15 +215,7 @@ export class ChatService {
     }

     /**
-     * Send a context-aware message with automatically included semantic context from a note
-     * This method combines the query with relevant note context before sending to the AI
-     *
-     * @param sessionId - The ID of the chat session
-     * @param content - The user's message content
-     * @param noteId - The ID of the note to add context from
-     * @param options - Optional completion options
-     * @param streamCallback - Optional streaming callback
-     * @returns The updated chat session
+     * Send a message with enhanced semantic note context
      */
     async sendContextAwareMessage(
         sessionId: string,
@@ -234,14 +226,102 @@ export class ChatService {
     ): Promise<ChatSession> {
         const session = await this.getOrCreateSession(sessionId);

-        // Get semantically relevant context based on the user's message
-        const context = await contextExtractor.getSmartContext(noteId, content);
-
-        // Combine the user's message with the relevant context
-        const enhancedContent = `${content}\n\nHere's relevant information from my notes that may help:\n\n${context}`;
-
-        // Send the enhanced message
-        return this.sendMessage(sessionId, enhancedContent, options, streamCallback);
+        // Add user message
+        const userMessage: Message = {
+            role: 'user',
+            content
+        };
+
+        session.messages.push(userMessage);
+        session.isStreaming = true;
+
+        // Set up streaming if callback provided
+        if (streamCallback) {
+            this.streamingCallbacks.set(session.id, streamCallback);
+        }
+
+        try {
+            // Immediately save the user message
+            await chatStorageService.updateChat(session.id, session.messages);
+
+            // Get the Trilium Context Service for enhanced context
+            const contextService = aiServiceManager.getSemanticContextService();
+
+            // Get showThinking option if it exists
+            const showThinking = options?.showThinking === true;
+
+            // Get enhanced context for this note and query
+            const enhancedContext = await contextService.getAgentToolsContext(
+                noteId,
+                content,
+                showThinking
+            );
+
+            // Prepend a system message with context
+            const systemMessage: Message = {
+                role: 'system',
+                content: `You are an AI assistant helping with Trilium Notes. Use this context to answer the user's question:\n\n${enhancedContext}`
+            };
+
+            // Create messages array with system message
+            const messagesWithContext = [systemMessage, ...session.messages];
+
+            // Generate AI response
+            const response = await aiServiceManager.generateChatCompletion(
+                messagesWithContext,
+                options
+            );
+
+            // Add assistant message
+            const assistantMessage: Message = {
+                role: 'assistant',
+                content: response.text
+            };
+
+            session.messages.push(assistantMessage);
+            session.isStreaming = false;
+
+            // Save the complete conversation (without system message)
+            await chatStorageService.updateChat(session.id, session.messages);
+
+            // If first message, update the title
+            if (session.messages.length <= 2 && (!session.title || session.title === 'New Chat')) {
+                const title = this.generateTitleFromMessages(session.messages);
+                session.title = title;
+                await chatStorageService.updateChat(session.id, session.messages, title);
+            }
+
+            // Notify streaming is complete
+            if (streamCallback) {
+                streamCallback(response.text, true);
+                this.streamingCallbacks.delete(session.id);
+            }
+
+            return session;
+        } catch (error: any) {
+            session.isStreaming = false;
+            console.error('Error in context-aware chat:', error);
+
+            // Add error message
+            const errorMessage: Message = {
+                role: 'assistant',
+                content: `Error: Failed to generate response with note context. ${error.message || 'Please try again.'}`
+            };
+
+            session.messages.push(errorMessage);
+
+            // Save the conversation with error
+            await chatStorageService.updateChat(session.id, session.messages);
+
+            // Notify streaming is complete with error
+            if (streamCallback) {
+                streamCallback(errorMessage.content, true);
+                this.streamingCallbacks.delete(session.id);
+            }
+
+            return session;
+        }
     }

     /**
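Note: a usage sketch of the rewritten method, opting into the thinking visualization via ChatCompletionOptions.showThinking (added in ai_interface.ts above). How the ChatService instance is obtained is not shown in this commit; the IDs are invented:

async function demo(chatService: ChatService) {
    const session = await chatService.sendContextAwareMessage(
        'session-123',
        'How do my project notes relate to each other?',
        'note-abc',
        { showThinking: true },
        (text, done) => {
            if (done) console.log('final response:', text);
        }
    );
    console.log(`messages in session: ${session.messages.length}`);
}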

View File

@@ -77,6 +77,15 @@ Example: ["exact topic mentioned", "related concept 1", "related concept 2"]`;
                 throw new Error(`No embedding provider available. Could not initialize context service.`);
             }

+            // Initialize agent tools to ensure they're ready
+            try {
+                await aiServiceManager.getInstance().initializeAgentTools();
+                log.info("Agent tools initialized for use with TriliumContextService");
+            } catch (toolError) {
+                log.error(`Error initializing agent tools: ${toolError}`);
+                // Continue even if agent tools fail to initialize
+            }
+
             this.initialized = true;
             log.info(`Trilium context service initialized with provider: ${this.provider.name}`);
         } catch (error: unknown) {
@@ -549,17 +558,16 @@ Example: ["exact topic mentioned", "related concept 1", "related concept 2"]`;
     }

     /**
-     * Process a user query with the Trilium-specific approach:
-     * 1. Generate search queries from the original question
-     * 2. Find relevant notes using those queries
-     * 3. Build a context string from the relevant notes
-     *
-     * @param userQuestion - The user's original question
-     * @param llmService - The LLM service to use
-     * @param contextNoteId - Optional note ID to restrict search to
-     * @returns Object with context and notes
+     * Process a user query to find relevant context in Trilium notes
      */
-    async processQuery(userQuestion: string, llmService: any, contextNoteId: string | null = null) {
+    async processQuery(
+        userQuestion: string,
+        llmService: any,
+        contextNoteId: string | null = null,
+        showThinking: boolean = false
+    ) {
+        log.info(`Processing query with: question="${userQuestion.substring(0, 50)}...", noteId=${contextNoteId}, showThinking=${showThinking}`);
+
         if (!this.initialized) {
             try {
                 await this.initialize();
@@ -602,8 +610,28 @@ Example: ["exact topic mentioned", "related concept 1", "related concept 2"]`;
         // Step 3: Build context from the notes
         const context = await this.buildContextFromNotes(relevantNotes, userQuestion);

+        // Step 4: Add agent tools context with thinking process if requested
+        let enhancedContext = context;
+        try {
+            // Get agent tools context using either the specific note or the most relevant notes
+            const agentContext = await this.getAgentToolsContext(
+                contextNoteId || (relevantNotes[0]?.noteId || ""),
+                userQuestion,
+                showThinking,
+                relevantNotes // Pass all relevant notes for context
+            );
+
+            if (agentContext) {
+                enhancedContext = `${context}\n\n${agentContext}`;
+                log.info(`Added agent tools context (${agentContext.length} characters)`);
+            }
+        } catch (error) {
+            log.error(`Error getting agent tools context: ${error}`);
+            // Continue with just the basic context
+        }
+
         return {
-            context,
+            context: enhancedContext,
             notes: relevantNotes,
             queries: searchQueries
         };
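Note: this is the entry point the REST handler above calls; the return shape { context, notes, queries } is unchanged, but context may now carry the agent tools section. A caller-side sketch (the variables mirror the handler's):

const results = await triliumContextService.processQuery(
    messageContent,              // the user's question
    service,                     // the active LLM service
    session.noteContext || null, // optional context note
    showThinking                 // opt into the reasoning visualization
);
console.log(`context: ${results.context.length} chars, notes: ${results.notes.length}, queries: ${results.queries.length}`);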
@@ -628,18 +656,43 @@ Example: ["exact topic mentioned", "related concept 1", "related concept 2"]`;
      * 3. Query decomposition planning
      * 4. Contextual thinking visualization
      *
-     * @param noteId The current note being viewed
+     * @param noteId The current note being viewed (or most relevant note)
      * @param query The user's query
      * @param showThinking Whether to include the agent's thinking process
+     * @param relevantNotes Optional array of relevant notes from vector search
      * @returns Enhanced context string
      */
-    async getAgentToolsContext(noteId: string, query: string, showThinking: boolean = false): Promise<string> {
+    async getAgentToolsContext(
+        noteId: string,
+        query: string,
+        showThinking: boolean = false,
+        relevantNotes: Array<any> = []
+    ): Promise<string> {
+        log.info(`Getting agent tools context: noteId=${noteId}, query="${query.substring(0, 50)}...", showThinking=${showThinking}, relevantNotesCount=${relevantNotes.length}`);
+
         try {
             const agentTools = aiServiceManager.getAgentTools();
             let context = "";

             // 1. Get vector search results related to the query
             try {
+                // If we already have relevant notes from vector search, use those
+                if (relevantNotes && relevantNotes.length > 0) {
+                    log.info(`Using ${relevantNotes.length} provided relevant notes instead of running vector search again`);
+
+                    context += "## Related Information\n\n";
+                    for (const result of relevantNotes.slice(0, 5)) {
+                        context += `### ${result.title}\n`;
+
+                        // Use the content if available, otherwise get a preview
+                        const contentPreview = result.content
+                            ? this.sanitizeNoteContent(result.content).substring(0, 300) + "..."
+                            : result.contentPreview || "[No preview available]";
+
+                        context += `${contentPreview}\n\n`;
+                    }
+                    context += "\n";
+                } else {
+                    // Run vector search if we don't have relevant notes
                 const vectorSearchTool = agentTools.getVectorSearchTool();
                 const searchResults = await vectorSearchTool.searchNotes(query, {
                     parentNoteId: noteId,
@@ -654,6 +707,7 @@ Example: ["exact topic mentioned", "related concept 1", "related concept 2"]`;
                     }
                     context += "\n";
                 }
+                }
             } catch (error: any) {
                 log.error(`Error getting vector search context: ${error.message}`);
             }
@@ -694,55 +748,114 @@ Example: ["exact topic mentioned", "related concept 1", "related concept 2"]`;

             // 4. Show thinking process if enabled
             if (showThinking) {
+                log.info("Showing thinking process - creating visual reasoning steps");
+
                 try {
                     const thinkingTool = agentTools.getContextualThinkingTool();
                     const thinkingId = thinkingTool.startThinking(query);
+                    log.info(`Started thinking process with ID: ${thinkingId}`);

-                    // Add a thinking step to demonstrate the feature
-                    // In a real implementation, the LLM would add these steps
+                    // Add initial thinking steps
                     thinkingTool.addThinkingStep(
-                        "Analyzing the query to understand what information is needed",
+                        "Analyzing the user's query to understand the information needs",
                         "observation",
                         { confidence: 1.0 }
                     );

-                    // Add sample thinking for the context
+                    // Add query exploration steps
                     const parentId = thinkingTool.addThinkingStep(
-                        "Looking for related notes in the knowledge base",
+                        "Exploring knowledge base to find relevant information",
                         "hypothesis",
                         { confidence: 0.9 }
                     );

-                    if (parentId) {
-                        // Use the VectorSearchTool to find relevant notes
-                        const vectorSearchTool = aiServiceManager.getVectorSearchTool();
-                        const searchResults = await vectorSearchTool.searchNotes(query, {
-                            parentNoteId: parentId,
-                            maxResults: 5
-                        });
-
-                        if (searchResults.length > 0) {
-                            context += "## Related Information\n\n";
-                            for (const result of searchResults) {
-                                context += `### ${result.title}\n`;
-                                context += `${result.contentPreview}\n\n`;
-                            }
-                            context += "\n";
-                        }
-                    }
-
-                    thinkingTool.addThinkingStep(
-                        "The most relevant information appears to be in the current note and its semantic neighborhood",
-                        "conclusion",
-                        { confidence: 0.85 }
-                    );
-
-                    // Complete the thinking and add it to context
+                    // Add information about relevant notes if available
+                    if (relevantNotes && relevantNotes.length > 0) {
+                        const noteTitles = relevantNotes.slice(0, 5).map(n => n.title).join(", ");
+                        thinkingTool.addThinkingStep(
+                            `Found ${relevantNotes.length} potentially relevant notes through semantic search, including: ${noteTitles}`,
+                            "evidence",
+                            { confidence: 0.85, parentId: parentId || undefined }
+                        );
+                    }
+
+                    // Add step about note hierarchy if a specific note is being viewed
+                    if (noteId && noteId !== "") {
+                        try {
+                            const navigatorTool = agentTools.getNoteNavigatorTool();
+                            // Get parent notes since we don't have getNoteHierarchyInfo
+                            const parents = navigatorTool.getParentNotes(noteId);
+                            if (parents && parents.length > 0) {
+                                const parentInfo = parents.map(p => p.title).join(" > ");
+                                thinkingTool.addThinkingStep(
+                                    `Identified note hierarchy context: ${parentInfo}`,
+                                    "evidence",
+                                    { confidence: 0.9, parentId: parentId || undefined }
+                                );
+                            }
+                        } catch (error) {
+                            log.error(`Error getting note hierarchy: ${error}`);
+                        }
+                    }
+
+                    // Add query decomposition if it's a complex query
+                    try {
+                        const decompositionTool = agentTools.getQueryDecompositionTool();
+                        const complexity = decompositionTool.assessQueryComplexity(query);
+
+                        if (complexity > 4) {
+                            thinkingTool.addThinkingStep(
+                                `This is a ${complexity > 7 ? "very complex" : "moderately complex"} query (complexity: ${complexity}/10)`,
+                                "observation",
+                                { confidence: 0.8 }
+                            );
+
+                            const decomposed = decompositionTool.decomposeQuery(query);
+                            if (decomposed.subQueries.length > 1) {
+                                const decompId = thinkingTool.addThinkingStep(
+                                    "Breaking down query into sub-questions to address systematically",
+                                    "hypothesis",
+                                    { confidence: 0.85 }
+                                );
+
+                                for (const sq of decomposed.subQueries) {
+                                    thinkingTool.addThinkingStep(
+                                        `Subquery: ${sq.text} - ${sq.reason}`,
+                                        "evidence",
+                                        { confidence: 0.8, parentId: decompId || undefined }
+                                    );
+                                }
+                            }
+                        } else {
+                            thinkingTool.addThinkingStep(
+                                `This is a straightforward query (complexity: ${complexity}/10) that can be addressed directly`,
+                                "observation",
+                                { confidence: 0.9 }
+                            );
+                        }
+                    } catch (error) {
+                        log.error(`Error in query decomposition: ${error}`);
+                    }
+
+                    // Add final conclusions
+                    thinkingTool.addThinkingStep(
+                        "Ready to formulate response based on available information and query understanding",
+                        "conclusion",
+                        { confidence: 0.95 }
+                    );
+
+                    // Complete the thinking process and add the visualization to context
                     thinkingTool.completeThinking(thinkingId);

-                    context += "## Thinking Process\n\n";
-                    context += thinkingTool.getThinkingSummary(thinkingId) + "\n\n";
+                    const visualization = thinkingTool.visualizeThinking(thinkingId);
+                    if (visualization) {
+                        context += "## Reasoning Process\n\n";
+                        context += visualization + "\n\n";
+                        log.info(`Added thinking visualization to context (${visualization.length} characters)`);
+                    }
                 } catch (error: any) {
-                    log.error(`Error generating thinking process: ${error.message}`);
+                    log.error(`Error creating thinking visualization: ${error.message}`);
                 }
             }
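Note: the complexity-gated decomposition above, in isolation. assessQueryComplexity, decomposeQuery, and the threshold of 4 are the commit's own; the sample query variable is invented:

const decompositionTool = agentTools.getQueryDecompositionTool();
const complexity = decompositionTool.assessQueryComplexity(userQuery);

if (complexity > 4) {
    // Complex queries get broken into sub-questions before answering.
    const decomposed = decompositionTool.decomposeQuery(userQuery);
    for (const sq of decomposed.subQueries) {
        console.log(`sub-question: ${sq.text} (${sq.reason})`);
    }
} else {
    console.log(`straightforward query (complexity ${complexity}/10), answer directly`);
}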