add agentic thinking to chat
This commit is contained in:
parent 492c05bad4
commit 352204bf78
@@ -33,6 +33,7 @@ export default class LlmChatPanel extends BasicWidget {
    private loadingIndicator!: HTMLElement;
    private sourcesList!: HTMLElement;
    private useAdvancedContextCheckbox!: HTMLInputElement;
    private showThinkingCheckbox!: HTMLInputElement;
    private validationWarning!: HTMLElement;
    private sessionId: string | null = null;
    private currentNoteId: string | null = null;
@@ -63,15 +64,19 @@ export default class LlmChatPanel extends BasicWidget {

            <form class="note-context-chat-form d-flex flex-column border-top p-2">
                <div class="d-flex mb-2 align-items-center context-option-container">
                    <div class="form-check form-switch">
                    <div class="form-check form-switch me-3">
                        <input class="form-check-input use-advanced-context-checkbox" type="checkbox" id="useEnhancedContext" checked>
                        <label class="form-check-label" for="useEnhancedContext">
                        <label class="form-check-label" for="useEnhancedContext" title="${t('ai.enhanced_context_description')}">
                            ${t('ai.use_enhanced_context')}
                            <i class="bx bx-info-circle ms-1 small text-muted"></i>
                        </label>
                    </div>
                    <div class="ms-2 small text-muted">
                        <i class="bx bx-info-circle"></i>
                        <span>${t('ai.enhanced_context_description')}</span>
                    <div class="form-check form-switch">
                        <input class="form-check-input show-thinking-checkbox" type="checkbox" id="showThinking">
                        <label class="form-check-label" for="showThinking" title="${t('ai.show_thinking_description')}">
                            ${t('ai.show_thinking')}
                            <i class="bx bx-info-circle ms-1 small text-muted"></i>
                        </label>
                    </div>
                </div>
                <div class="d-flex chat-input-container">
@@ -97,6 +102,7 @@ export default class LlmChatPanel extends BasicWidget {
        this.loadingIndicator = element.querySelector('.loading-indicator') as HTMLElement;
        this.sourcesList = element.querySelector('.sources-list') as HTMLElement;
        this.useAdvancedContextCheckbox = element.querySelector('.use-advanced-context-checkbox') as HTMLInputElement;
        this.showThinkingCheckbox = element.querySelector('.show-thinking-checkbox') as HTMLInputElement;
        this.validationWarning = element.querySelector('.provider-validation-warning') as HTMLElement;

        // Set up event delegation for the settings link
@@ -167,12 +173,17 @@ export default class LlmChatPanel extends BasicWidget {

        try {
            const useAdvancedContext = this.useAdvancedContextCheckbox.checked;
            const showThinking = this.showThinkingCheckbox.checked;

            // Add logging to verify parameters
            console.log(`Sending message with: useAdvancedContext=${useAdvancedContext}, showThinking=${showThinking}, noteId=${this.currentNoteId}`);

            // Create the message parameters
            const messageParams = {
                content,
                contextNoteId: this.currentNoteId,
                useAdvancedContext
                useAdvancedContext,
                showThinking
            };

            // First, send the message via POST request
@@ -192,7 +203,7 @@ export default class LlmChatPanel extends BasicWidget {
            }

            // Then set up streaming via EventSource
            const streamUrl = `./api/llm/sessions/${this.sessionId}/messages?format=stream&useAdvancedContext=${useAdvancedContext}`;
            const streamUrl = `./api/llm/sessions/${this.sessionId}/messages?format=stream&useAdvancedContext=${useAdvancedContext}&showThinking=${showThinking}`;
            const source = new EventSource(streamUrl);

            let assistantResponse = '';
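The hunk above opens the SSE stream but the consuming side is outside this diff. A minimal sketch of how the panel might consume it, assuming each server-sent message carries a JSON chunk with `content` and `done` fields (the actual payload shape is not shown in this commit):

```typescript
// Hypothetical consumer for the stream opened above (payload shape is assumed).
function consumeStream(streamUrl: string, onUpdate: (text: string) => void): void {
    const source = new EventSource(streamUrl);
    let assistantResponse = '';

    source.onmessage = (event) => {
        const chunk = JSON.parse(event.data); // assumed: { content?: string; done?: boolean }
        if (chunk.done) {
            source.close();                   // stop listening once the response is complete
            return;
        }
        assistantResponse += chunk.content ?? '';
        onUpdate(assistantResponse);          // e.g. re-render via formatMarkdown()
    };

    source.onerror = () => source.close();    // avoid leaving the connection dangling
}
```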
@@ -415,9 +426,24 @@ export default class LlmChatPanel extends BasicWidget {
    private formatMarkdown(content: string): string {
        if (!content) return '';

        // First, extract code blocks to protect them from other replacements
        // Check if content contains HTML sections for thinking visualization
        if (content.includes('<div class="thinking-process">') ||
            content.includes('<div class=\'thinking-process\'>')) {
            console.log('Detected thinking process visualization in response');
            // For content with HTML thinking visualizations, we need to protect them
        }

        // First, extract HTML thinking visualization to protect it from replacements
        const thinkingBlocks: string[] = [];
        let processedContent = content.replace(/<div class=['"](thinking-process|reasoning-process)['"][\s\S]*?<\/div>/g, (match) => {
            const placeholder = `__THINKING_BLOCK_${thinkingBlocks.length}__`;
            thinkingBlocks.push(match);
            return placeholder;
        });

        // Then extract code blocks to protect them from other replacements
        const codeBlocks: string[] = [];
        let processedContent = content.replace(/```(\w+)?\n([\s\S]+?)\n```/gs, (match, language, code) => {
        processedContent = processedContent.replace(/```(\w+)?\n([\s\S]+?)\n```/gs, (match, language, code) => {
            const placeholder = `__CODE_BLOCK_${codeBlocks.length}__`;
            const languageClass = language ? ` language-${language}` : '';
            codeBlocks.push(`<pre class="code${languageClass}"><code>${code}</code></pre>`);
@@ -436,6 +462,11 @@ export default class LlmChatPanel extends BasicWidget {
            processedContent = processedContent.replace(`__CODE_BLOCK_${index}__`, block);
        });

        // Restore thinking visualization blocks
        thinkingBlocks.forEach((block, index) => {
            processedContent = processedContent.replace(`__THINKING_BLOCK_${index}__`, block);
        });

        return processedContent;
    }

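The protect-and-restore pattern used twice above can be shown in isolation. A small self-contained sketch (the function name is illustrative, not from the commit):

```typescript
// Illustrative sketch of the protect/restore pattern used by formatMarkdown():
// swap protected spans for placeholders, run other replacements, then swap back.
function protectAndTransform(input: string, protect: RegExp, transform: (s: string) => string): string {
    const saved: string[] = [];
    const masked = input.replace(protect, (match) => {
        const placeholder = `__PROTECTED_${saved.length}__`;
        saved.push(match);
        return placeholder;
    });
    let result = transform(masked);                        // e.g. markdown-style replacements
    saved.forEach((block, index) => {
        result = result.replace(`__PROTECTED_${index}__`, block);
    });
    return result;
}
```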
@@ -1941,3 +1941,53 @@ footer.file-footer button {
.ck-content .admonition.important::before { content: "\ea7c"; }
.ck-content .admonition.caution::before { content: "\eac7"; }
.ck-content .admonition.warning::before { content: "\eac5"; }

.chat-options-container {
    display: flex;
    margin: 5px 0;
    align-items: center;
    padding: 0 10px;
}

.chat-option {
    display: flex;
    align-items: center;
    font-size: 0.9em;
    margin-right: 15px;
    cursor: pointer;
}

.chat-option input[type="checkbox"] {
    margin-right: 5px;
}

/* Style for thinking process in chat responses */
.thinking-process {
    background-color: rgba(0, 0, 0, 0.05);
    border-left: 3px solid var(--main-text-color);
    padding: 10px;
    margin: 10px 0;
    border-radius: 4px;
}

.thinking-step {
    margin-bottom: 8px;
    padding-left: 10px;
}

.thinking-step.observation {
    border-left: 2px solid #69c7ff;
}

.thinking-step.hypothesis {
    border-left: 2px solid #9839f7;
}

.thinking-step.evidence {
    border-left: 2px solid #40c025;
}

.thinking-step.conclusion {
    border-left: 2px solid #e2aa03;
    font-weight: bold;
}
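For reference, these selectors target the markup assembled by visualizeThinking() further down in this commit. A sketch of the shape of that output as a TypeScript string; the step texts and confidence value are illustrative:

```typescript
// Illustrative output of visualizeThinking(); the rules above colour each step by type.
const sampleThinkingHtml: string = `
<div class='thinking-process'>
    <h4>Thinking Process</h4>
    <div class='thinking-step observation'><span class='bx bx-search'></span> Analyzing the user's query <span class='thinking-confidence'>(Confidence: 100%)</span></div>
    <div class='thinking-step hypothesis'><span class='bx bx-bulb'></span> Exploring knowledge base to find relevant information</div>
    <div class='thinking-step evidence'><span class='bx bx-list-check'></span> Found 3 potentially relevant notes</div>
    <div class='thinking-step conclusion'><span class='bx bx-check-circle'></span> Ready to formulate a response</div>
</div>`;
```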
@@ -1815,11 +1815,34 @@
        "description": "Select one or more languages that should appear in the language selection in the Basic Properties section of a read-only or editable text note. This will allow features such as spell-checking or right-to-left support."
    },
    "ai": {
        "sources": "Sources",
        "ai_settings": "AI Settings",
        "api_key": "API Key",
        "api_key_tooltip": "API key for accessing the service",
        "confirm_delete_embeddings": "Are you sure you want to delete all AI embeddings? This will remove all semantic search capabilities until notes are reindexed, which can take a significant amount of time.",
        "empty_key_warning": "Warning: Empty API key. You need to configure your API key in settings.",
        "enable_ai": "Enable AI Features",
        "enhanced_context_description": "Uses semantic search to find relevant information across your notes",
        "enter_message": "Enter your message...",
        "use_advanced_context": "Use Enhanced Note Context",
        "advanced_context_helps": "Helps with large knowledge bases and limited context windows",
        "use_enhanced_context": "Use Enhanced Note Context",
        "enhanced_context_description": "Improves answers by including more relevant note content"
        "error_contacting_provider": "Error contacting AI provider. Please check your settings and internet connection.",
        "error_generating_response": "Error generating AI response",
        "index_all_notes": "Index All Notes",
        "index_status": "Index Status",
        "indexed_notes": "Indexed Notes",
        "indexing_stopped": "Indexing stopped",
        "indexing_in_progress": "Indexing in progress...",
        "last_indexed": "Last Indexed",
        "n_notes_queued": "{{ count }} note queued for indexing",
        "n_notes_queued_plural": "{{ count }} notes queued for indexing",
        "note_chat": "Note Chat",
        "notes_indexed": "{{ count }} note indexed",
        "notes_indexed_plural": "{{ count }} notes indexed",
        "processing": "Processing",
        "reset_embeddings": "Reset Embeddings",
        "show_thinking": "Show Thinking",
        "show_thinking_description": "Reveals the reasoning process used to generate responses",
        "sources": "Sources",
        "start_indexing": "Start Indexing",
        "use_advanced_context": "Use Advanced Context",
        "use_enhanced_context": "Use Enhanced Note Context"
    }
}
@@ -545,18 +545,26 @@ Now, based on the above information, please answer: ${query}`;
async function sendMessage(req: Request, res: Response) {
    try {
        // Extract parameters differently based on the request method
        let content, useAdvancedContext, sessionId;
        let content, useAdvancedContext, showThinking, sessionId;

        if (req.method === 'POST') {
            // For POST requests, get content from the request body
            const requestBody = req.body || {};
            content = requestBody.content;
            useAdvancedContext = requestBody.useAdvancedContext || false;
            showThinking = requestBody.showThinking || false;

            // Add logging for POST requests
            log.info(`LLM POST message: sessionId=${req.params.sessionId}, useAdvancedContext=${useAdvancedContext}, showThinking=${showThinking}, contentLength=${content ? content.length : 0}`);
        } else if (req.method === 'GET') {
            // For GET (streaming) requests, get format from query params
            // The content should have been sent in a previous POST request
            useAdvancedContext = req.query.useAdvancedContext === 'true';
            showThinking = req.query.showThinking === 'true';
            content = ''; // We don't need content for GET requests

            // Add logging for GET requests
            log.info(`LLM GET stream: sessionId=${req.params.sessionId}, useAdvancedContext=${useAdvancedContext}, showThinking=${showThinking}`);
        }

        // Get sessionId from URL params since it's part of the route
@@ -644,7 +652,16 @@ async function sendMessage(req: Request, res: Response) {
        if (useAdvancedContext) {
            // Use the Trilium-specific approach
            const contextNoteId = session.noteContext || null;
            const results = await triliumContextService.processQuery(messageContent, service, contextNoteId);

            // Log that we're calling triliumContextService with the parameters
            log.info(`Using enhanced context with: noteId=${contextNoteId}, showThinking=${showThinking}`);

            const results = await triliumContextService.processQuery(
                messageContent,
                service,
                contextNoteId,
                showThinking // Pass the showThinking parameter
            );

            // Get the generated context
            const context = results.context;
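Putting the two halves together, the client first POSTs the message parameters and then opens the GET stream against the same route. A hedged sketch of that round trip; the route matches the diff, while error handling and the response shape are assumed:

```typescript
// Sketch of the client round trip implied by the handler above (response shape assumed).
async function sendChatMessage(sessionId: string, content: string, noteId: string | null): Promise<EventSource> {
    const base = `./api/llm/sessions/${sessionId}/messages`;

    // 1. POST the message with the two flags carried in the body.
    await fetch(base, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
            content,
            contextNoteId: noteId,
            useAdvancedContext: true,
            showThinking: true
        })
    });

    // 2. Open the stream; the GET branch of sendMessage() reads the flags from the query string.
    return new EventSource(`${base}?format=stream&useAdvancedContext=true&showThinking=true`);
}
```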
@@ -13,7 +13,8 @@
 * - Show how different sources of evidence are weighed
 */

import log from '../../log.js';
import log from "../../log.js";
import aiServiceManager from "../ai_service_manager.js";

/**
 * Represents a single reasoning step taken by the agent
@@ -159,117 +160,79 @@ export class ContextualThinkingTool {
    }

    /**
     * Generate a user-friendly HTML representation of the thinking process
     * Visualize the thinking process as HTML for display in the UI
     *
     * @param processId The ID of the process to visualize
     * @returns HTML string representing the thinking process
     * @param thinkingId The ID of the thinking process to visualize
     * @returns HTML representation of the thinking process
     */
    visualizeThinking(processId: string): string {
        const process = this.getThinkingProcess(processId);
    visualizeThinking(thinkingId: string): string {
        log.info(`Visualizing thinking process: thinkingId=${thinkingId}`);

        const process = this.getThinkingProcess(thinkingId);
        if (!process) {
            return `<div class="thinking-error">Thinking process ${processId} not found</div>`;
            log.info(`No thinking process found for id: ${thinkingId}`);
            return "<div class='thinking-process'>No thinking process found</div>";
        }

        let html = `
            <div class="thinking-process">
                <div class="thinking-header">
                    <h3>Thinking Process for: "${process.query}"</h3>
                    <div class="thinking-metadata">
                        <span>Status: ${process.status}</span>
                        <span>Steps: ${process.steps.length}</span>
                        <span>Time: ${this.formatDuration(process.startTime, process.endTime || Date.now())}</span>
                    </div>
                </div>
                <div class="thinking-steps">
        `;
        log.info(`Found thinking process with ${process.steps.length} steps for query: "${process.query.substring(0, 50)}..."`);

        // Find root steps (those without parents)
        const rootSteps = process.steps.filter(step => !step.parentId);
        let html = "<div class='thinking-process'>";
        html += `<h4>Thinking Process</h4>`;

        // Recursively render the thinking tree
        for (const rootStep of rootSteps) {
            html += this.renderStepTree(rootStep, process.steps);
        for (const step of process.steps) {
            html += `<div class='thinking-step ${step.type || ""}'>`;

            // Add an icon based on step type
            const icon = this.getStepIcon(step.type);
            html += `<span class='bx ${icon}'></span> `;

            // Add the step content
            html += step.content;

            // Show confidence if available
            if (step.metadata?.confidence) {
                const confidence = Math.round((step.metadata.confidence as number) * 100);
                html += ` <span class='thinking-confidence'>(Confidence: ${confidence}%)</span>`;
            }

            html += `</div>`;
        }

        html += `
            </div>
        </div>
        `;

        html += "</div>";
        return html;
    }

    /**
     * Generate a concise text representation of the thinking process
     * that can be displayed inline in the chat for transparency
     *
     * @param processId The ID of the process to summarize
     * @returns Text summary of the reasoning process
     * Get an appropriate icon for a thinking step type
     */
    getThinkingSummary(processId?: string): string {
        const id = processId || this.activeProcId;
        if (!id || !this.processes[id]) {
    private getStepIcon(type: string): string {
        switch (type) {
            case 'observation':
                return 'bx-search';
            case 'hypothesis':
                return 'bx-bulb';
            case 'evidence':
                return 'bx-list-check';
            case 'conclusion':
                return 'bx-check-circle';
            default:
                return 'bx-message-square-dots';
        }
    }

    /**
     * Get a plain text summary of the thinking process
     *
     * @param thinkingId The ID of the thinking process to summarize
     * @returns Text summary of the thinking process
     */
    getThinkingSummary(thinkingId: string): string {
        const process = this.getThinkingProcess(thinkingId);
        if (!process) {
            return "No thinking process available.";
        }

        const process = this.processes[id];
        let summary = `Thinking about: "${process.query}"\n\n`;

        // Group steps by type
        const stepsByType: Record<string, ThinkingStep[]> = {};
        for (const step of process.steps) {
            if (!stepsByType[step.type]) {
                stepsByType[step.type] = [];
            }
            stepsByType[step.type].push(step);
        }

        // Show observations first
        if (stepsByType['observation'] && stepsByType['observation'].length > 0) {
            summary += "🔍 Observations:\n";
            for (const step of stepsByType['observation'].slice(0, 3)) {
                summary += `- ${step.content}\n`;
            }
            if (stepsByType['observation'].length > 3) {
                summary += `- ...and ${stepsByType['observation'].length - 3} more observations\n`;
            }
            summary += "\n";
        }

        // Show questions the agent asked itself
        if (stepsByType['question'] && stepsByType['question'].length > 0) {
            summary += "❓ Questions considered:\n";
            for (const step of stepsByType['question'].slice(0, 3)) {
                summary += `- ${step.content}\n`;
            }
            if (stepsByType['question'].length > 3) {
                summary += `- ...and ${stepsByType['question'].length - 3} more questions\n`;
            }
            summary += "\n";
        }

        // Show evidence
        if (stepsByType['evidence'] && stepsByType['evidence'].length > 0) {
            summary += "📋 Evidence found:\n";
            for (const step of stepsByType['evidence'].slice(0, 3)) {
                summary += `- ${step.content}\n`;
            }
            if (stepsByType['evidence'].length > 3) {
                summary += `- ...and ${stepsByType['evidence'].length - 3} more pieces of evidence\n`;
            }
            summary += "\n";
        }

        // Show conclusions
        if (stepsByType['conclusion'] && stepsByType['conclusion'].length > 0) {
            summary += "✅ Conclusions:\n";
            for (const step of stepsByType['conclusion']) {
                const confidence = step.confidence ? ` (${Math.round(step.confidence * 100)}% confidence)` : '';
                summary += `- ${step.content}${confidence}\n`;
            }
        }

        return summary;
        return this.visualizeThinking(thinkingId);
    }
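Taken together, the rewritten rendering path can be exercised end to end. A small sketch using only methods that appear in this commit (startThinking, addThinkingStep, completeThinking, visualizeThinking, getThinkingSummary); constructing the tool directly is an assumption, and the step texts and confidences are illustrative:

```typescript
// Hypothetical driver for the tool; method names come from this commit,
// the concrete step content is made up for illustration.
const thinkingTool = new ContextualThinkingTool();          // assumed to be directly constructible
const thinkingId = thinkingTool.startThinking("How are my project notes organized?");

const parentId = thinkingTool.addThinkingStep(
    "Exploring knowledge base to find relevant information",
    "hypothesis",
    { confidence: 0.9 }
);
thinkingTool.addThinkingStep(
    "Found 4 potentially relevant notes through semantic search",
    "evidence",
    { confidence: 0.85, parentId: parentId || undefined }
);

thinkingTool.completeThinking(thinkingId);
const html = thinkingTool.visualizeThinking(thinkingId);    // markup targeted by the .thinking-process CSS
const text = thinkingTool.getThinkingSummary(thinkingId);   // now simply delegates to visualizeThinking
```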

/**
@@ -18,6 +18,10 @@ export interface ChatCompletionOptions {
    model?: string;
    temperature?: number;
    maxTokens?: number;
    topP?: number;
    frequencyPenalty?: number;
    presencePenalty?: number;
    showThinking?: boolean;
    systemPrompt?: string;
    stream?: boolean; // Whether to stream the response
}
@@ -51,3 +55,34 @@ export interface AIService {
     */
    getName(): string;
}

/**
 * Interface for the semantic context service, which provides enhanced context retrieval
 * for AI conversations based on semantic similarity.
 */
export interface SemanticContextService {
    /**
     * Initialize the semantic context service
     */
    initialize(): Promise<void>;

    /**
     * Retrieve semantic context based on relevance to user query
     */
    getSemanticContext(noteId: string, userQuery: string, maxResults?: number): Promise<string>;

    /**
     * Get progressive context based on depth
     */
    getProgressiveContext?(noteId: string, depth?: number): Promise<string>;

    /**
     * Get smart context selection that adapts to query complexity
     */
    getSmartContext?(noteId: string, userQuery: string): Promise<string>;

    /**
     * Enhance LLM context with agent tools
     */
    getAgentToolsContext(noteId: string, query: string, showThinking?: boolean): Promise<string>;
}
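A quick illustration of how the widened options object might be passed around; the option fields come from ChatCompletionOptions above, while the model name, message text, and import path are placeholders:

```typescript
import type { ChatCompletionOptions, Message } from './ai_interface.js'; // path as used by the service manager

// Hypothetical call site; only the field names mirror ChatCompletionOptions above.
const completionOptions: ChatCompletionOptions = {
    model: 'gpt-4o',            // placeholder model name
    temperature: 0.3,
    maxTokens: 1024,
    showThinking: true,         // flag introduced by this commit
    stream: true
};

const messages: Message[] = [
    { role: 'user', content: 'Summarize my notes about project planning.' }
];
// e.g. await aiServiceManager.generateChatCompletion(messages, completionOptions);
```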
@@ -1,5 +1,5 @@
import options from '../options.js';
import type { AIService, ChatCompletionOptions, ChatResponse, Message } from './ai_interface.js';
import type { AIService, ChatCompletionOptions, ChatResponse, Message, SemanticContextService } from './ai_interface.js';
import { OpenAIService } from './providers/openai_service.js';
import { AnthropicService } from './providers/anthropic_service.js';
import { OllamaService } from './providers/ollama_service.js';
@@ -271,8 +271,8 @@ export class AIServiceManager {
     * Get the semantic context service for advanced context handling
     * @returns The semantic context service instance
     */
    getSemanticContextService() {
        return semanticContextService;
    getSemanticContextService(): SemanticContextService {
        return semanticContextService as unknown as SemanticContextService;
    }

    /**
@@ -439,7 +439,7 @@ export default {
    getContextExtractor() {
        return getInstance().getContextExtractor();
    },
    getSemanticContextService() {
    getSemanticContextService(): SemanticContextService {
        return getInstance().getSemanticContextService();
    },
    getIndexService() {
@@ -215,15 +215,7 @@ export class ChatService {
    }

    /**
     * Send a context-aware message with automatically included semantic context from a note
     * This method combines the query with relevant note context before sending to the AI
     *
     * @param sessionId - The ID of the chat session
     * @param content - The user's message content
     * @param noteId - The ID of the note to add context from
     * @param options - Optional completion options
     * @param streamCallback - Optional streaming callback
     * @returns The updated chat session
     * Send a message with enhanced semantic note context
     */
    async sendContextAwareMessage(
        sessionId: string,
@@ -234,14 +226,102 @@ export class ChatService {
    ): Promise<ChatSession> {
        const session = await this.getOrCreateSession(sessionId);

        // Get semantically relevant context based on the user's message
        const context = await contextExtractor.getSmartContext(noteId, content);
        // Add user message
        const userMessage: Message = {
            role: 'user',
            content
        };

        // Combine the user's message with the relevant context
        const enhancedContent = `${content}\n\nHere's relevant information from my notes that may help:\n\n${context}`;
        session.messages.push(userMessage);
        session.isStreaming = true;

        // Send the enhanced message
        return this.sendMessage(sessionId, enhancedContent, options, streamCallback);
        // Set up streaming if callback provided
        if (streamCallback) {
            this.streamingCallbacks.set(session.id, streamCallback);
        }

        try {
            // Immediately save the user message
            await chatStorageService.updateChat(session.id, session.messages);

            // Get the Trilium Context Service for enhanced context
            const contextService = aiServiceManager.getSemanticContextService();

            // Get showThinking option if it exists
            const showThinking = options?.showThinking === true;

            // Get enhanced context for this note and query
            const enhancedContext = await contextService.getAgentToolsContext(
                noteId,
                content,
                showThinking
            );

            // Prepend a system message with context
            const systemMessage: Message = {
                role: 'system',
                content: `You are an AI assistant helping with Trilium Notes. Use this context to answer the user's question:\n\n${enhancedContext}`
            };

            // Create messages array with system message
            const messagesWithContext = [systemMessage, ...session.messages];

            // Generate AI response
            const response = await aiServiceManager.generateChatCompletion(
                messagesWithContext,
                options
            );

            // Add assistant message
            const assistantMessage: Message = {
                role: 'assistant',
                content: response.text
            };

            session.messages.push(assistantMessage);
            session.isStreaming = false;

            // Save the complete conversation (without system message)
            await chatStorageService.updateChat(session.id, session.messages);

            // If first message, update the title
            if (session.messages.length <= 2 && (!session.title || session.title === 'New Chat')) {
                const title = this.generateTitleFromMessages(session.messages);
                session.title = title;
                await chatStorageService.updateChat(session.id, session.messages, title);
            }

            // Notify streaming is complete
            if (streamCallback) {
                streamCallback(response.text, true);
                this.streamingCallbacks.delete(session.id);
            }

            return session;

        } catch (error: any) {
            session.isStreaming = false;
            console.error('Error in context-aware chat:', error);

            // Add error message
            const errorMessage: Message = {
                role: 'assistant',
                content: `Error: Failed to generate response with note context. ${error.message || 'Please try again.'}`
            };

            session.messages.push(errorMessage);

            // Save the conversation with error
            await chatStorageService.updateChat(session.id, session.messages);

            // Notify streaming is complete with error
            if (streamCallback) {
                streamCallback(errorMessage.content, true);
                this.streamingCallbacks.delete(session.id);
            }

            return session;
        }
    }
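A sketch of how a caller might exercise the rewritten method; the instance name, session id, and note id are placeholders, and the callback signature follows the streamCallback usage above:

```typescript
// Hypothetical caller; `chatService` is an instance of the ChatService class above.
const session = await chatService.sendContextAwareMessage(
    'session-123',
    'What do my notes say about quarterly planning?',
    'note-abc',
    { showThinking: true },                       // surfaces the reasoning HTML in the reply
    (text, done) => {
        if (done) console.log('final response:', text);
    }
);
```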

/**
@@ -77,6 +77,15 @@ Example: ["exact topic mentioned", "related concept 1", "related concept 2"]`;
            throw new Error(`No embedding provider available. Could not initialize context service.`);
        }

        // Initialize agent tools to ensure they're ready
        try {
            await aiServiceManager.getInstance().initializeAgentTools();
            log.info("Agent tools initialized for use with TriliumContextService");
        } catch (toolError) {
            log.error(`Error initializing agent tools: ${toolError}`);
            // Continue even if agent tools fail to initialize
        }

        this.initialized = true;
        log.info(`Trilium context service initialized with provider: ${this.provider.name}`);
    } catch (error: unknown) {
@@ -549,17 +558,16 @@ Example: ["exact topic mentioned", "related concept 1", "related concept 2"]`;
    }

    /**
     * Process a user query with the Trilium-specific approach:
     * 1. Generate search queries from the original question
     * 2. Find relevant notes using those queries
     * 3. Build a context string from the relevant notes
     *
     * @param userQuestion - The user's original question
     * @param llmService - The LLM service to use
     * @param contextNoteId - Optional note ID to restrict search to
     * @returns Object with context and notes
     * Process a user query to find relevant context in Trilium notes
     */
    async processQuery(userQuestion: string, llmService: any, contextNoteId: string | null = null) {
    async processQuery(
        userQuestion: string,
        llmService: any,
        contextNoteId: string | null = null,
        showThinking: boolean = false
    ) {
        log.info(`Processing query with: question="${userQuestion.substring(0, 50)}...", noteId=${contextNoteId}, showThinking=${showThinking}`);

        if (!this.initialized) {
            try {
                await this.initialize();
@@ -602,8 +610,28 @@ Example: ["exact topic mentioned", "related concept 1", "related concept 2"]`;
        // Step 3: Build context from the notes
        const context = await this.buildContextFromNotes(relevantNotes, userQuestion);

        // Step 4: Add agent tools context with thinking process if requested
        let enhancedContext = context;
        try {
            // Get agent tools context using either the specific note or the most relevant notes
            const agentContext = await this.getAgentToolsContext(
                contextNoteId || (relevantNotes[0]?.noteId || ""),
                userQuestion,
                showThinking,
                relevantNotes // Pass all relevant notes for context
            );

            if (agentContext) {
                enhancedContext = `${context}\n\n${agentContext}`;
                log.info(`Added agent tools context (${agentContext.length} characters)`);
            }
        } catch (error) {
            log.error(`Error getting agent tools context: ${error}`);
            // Continue with just the basic context
        }

        return {
            context,
            context: enhancedContext,
            notes: relevantNotes,
            queries: searchQueries
        };
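For orientation, a hedged sketch of calling the extended processQuery() as the route handler earlier in this commit does; the question text is illustrative and `service` stands in for the active LLM service:

```typescript
// Hypothetical call, mirroring the route handler's usage of processQuery().
const results = await triliumContextService.processQuery(
    'How do I configure synced attributes?',   // userQuestion (illustrative)
    service,                                   // the active LLM service
    null,                                      // no specific context note
    true                                       // showThinking: include the reasoning HTML in the context
);

// results.context now contains the note context plus, when requested,
// the agent tools context appended by step 4 above.
console.log(results.notes.length, results.queries);
```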
@@ -628,31 +656,57 @@ Example: ["exact topic mentioned", "related concept 1", "related concept 2"]`;
     * 3. Query decomposition planning
     * 4. Contextual thinking visualization
     *
     * @param noteId The current note being viewed
     * @param noteId The current note being viewed (or most relevant note)
     * @param query The user's query
     * @param showThinking Whether to include the agent's thinking process
     * @param relevantNotes Optional array of relevant notes from vector search
     * @returns Enhanced context string
     */
    async getAgentToolsContext(noteId: string, query: string, showThinking: boolean = false): Promise<string> {
    async getAgentToolsContext(
        noteId: string,
        query: string,
        showThinking: boolean = false,
        relevantNotes: Array<any> = []
    ): Promise<string> {
        log.info(`Getting agent tools context: noteId=${noteId}, query="${query.substring(0, 50)}...", showThinking=${showThinking}, relevantNotesCount=${relevantNotes.length}`);

        try {
            const agentTools = aiServiceManager.getAgentTools();
            let context = "";

            // 1. Get vector search results related to the query
            try {
                const vectorSearchTool = agentTools.getVectorSearchTool();
                const searchResults = await vectorSearchTool.searchNotes(query, {
                    parentNoteId: noteId,
                    maxResults: 5
                });

                if (searchResults.length > 0) {
                // If we already have relevant notes from vector search, use those
                if (relevantNotes && relevantNotes.length > 0) {
                    log.info(`Using ${relevantNotes.length} provided relevant notes instead of running vector search again`);
                    context += "## Related Information\n\n";
                    for (const result of searchResults) {

                    for (const result of relevantNotes.slice(0, 5)) {
                        context += `### ${result.title}\n`;
                        context += `${result.contentPreview}\n\n`;
                        // Use the content if available, otherwise get a preview
                        const contentPreview = result.content
                            ? this.sanitizeNoteContent(result.content).substring(0, 300) + "..."
                            : result.contentPreview || "[No preview available]";

                        context += `${contentPreview}\n\n`;
                    }
                    context += "\n";
                } else {
                    // Run vector search if we don't have relevant notes
                    const vectorSearchTool = agentTools.getVectorSearchTool();
                    const searchResults = await vectorSearchTool.searchNotes(query, {
                        parentNoteId: noteId,
                        maxResults: 5
                    });

                    if (searchResults.length > 0) {
                        context += "## Related Information\n\n";
                        for (const result of searchResults) {
                            context += `### ${result.title}\n`;
                            context += `${result.contentPreview}\n\n`;
                        }
                        context += "\n";
                    }
                }
            } catch (error: any) {
                log.error(`Error getting vector search context: ${error.message}`);
@@ -694,55 +748,114 @@ Example: ["exact topic mentioned", "related concept 1", "related concept 2"]`;

            // 4. Show thinking process if enabled
            if (showThinking) {
                log.info("Showing thinking process - creating visual reasoning steps");
                try {
                    const thinkingTool = agentTools.getContextualThinkingTool();
                    const thinkingId = thinkingTool.startThinking(query);
                    log.info(`Started thinking process with ID: ${thinkingId}`);

                    // Add a thinking step to demonstrate the feature
                    // In a real implementation, the LLM would add these steps
                    // Add initial thinking steps
                    thinkingTool.addThinkingStep(
                        "Analyzing the query to understand what information is needed",
                        "Analyzing the user's query to understand the information needs",
                        "observation",
                        { confidence: 1.0 }
                    );

                    // Add sample thinking for the context
                    // Add query exploration steps
                    const parentId = thinkingTool.addThinkingStep(
                        "Looking for related notes in the knowledge base",
                        "Exploring knowledge base to find relevant information",
                        "hypothesis",
                        { confidence: 0.9 }
                    );

                    if (parentId) {
                        // Use the VectorSearchTool to find relevant notes
                        const vectorSearchTool = aiServiceManager.getVectorSearchTool();
                        const searchResults = await vectorSearchTool.searchNotes(query, {
                            parentNoteId: parentId,
                            maxResults: 5
                        });
                    // Add information about relevant notes if available
                    if (relevantNotes && relevantNotes.length > 0) {
                        const noteTitles = relevantNotes.slice(0, 5).map(n => n.title).join(", ");
                        thinkingTool.addThinkingStep(
                            `Found ${relevantNotes.length} potentially relevant notes through semantic search, including: ${noteTitles}`,
                            "evidence",
                            { confidence: 0.85, parentId: parentId || undefined }
                        );
                    }

                        if (searchResults.length > 0) {
                            context += "## Related Information\n\n";
                            for (const result of searchResults) {
                                context += `### ${result.title}\n`;
                                context += `${result.contentPreview}\n\n`;
                    // Add step about note hierarchy if a specific note is being viewed
                    if (noteId && noteId !== "") {
                        try {
                            const navigatorTool = agentTools.getNoteNavigatorTool();

                            // Get parent notes since we don't have getNoteHierarchyInfo
                            const parents = navigatorTool.getParentNotes(noteId);

                            if (parents && parents.length > 0) {
                                const parentInfo = parents.map(p => p.title).join(" > ");
                                thinkingTool.addThinkingStep(
                                    `Identified note hierarchy context: ${parentInfo}`,
                                    "evidence",
                                    { confidence: 0.9, parentId: parentId || undefined }
                                );
                            }
                            context += "\n";
                        } catch (error) {
                            log.error(`Error getting note hierarchy: ${error}`);
                        }
                    }

                    // Add query decomposition if it's a complex query
                    try {
                        const decompositionTool = agentTools.getQueryDecompositionTool();
                        const complexity = decompositionTool.assessQueryComplexity(query);

                        if (complexity > 4) {
                            thinkingTool.addThinkingStep(
                                `This is a ${complexity > 7 ? "very complex" : "moderately complex"} query (complexity: ${complexity}/10)`,
                                "observation",
                                { confidence: 0.8 }
                            );

                            const decomposed = decompositionTool.decomposeQuery(query);
                            if (decomposed.subQueries.length > 1) {
                                const decompId = thinkingTool.addThinkingStep(
                                    "Breaking down query into sub-questions to address systematically",
                                    "hypothesis",
                                    { confidence: 0.85 }
                                );

                                for (const sq of decomposed.subQueries) {
                                    thinkingTool.addThinkingStep(
                                        `Subquery: ${sq.text} - ${sq.reason}`,
                                        "evidence",
                                        { confidence: 0.8, parentId: decompId || undefined }
                                    );
                                }
                            }
                        } else {
                            thinkingTool.addThinkingStep(
                                `This is a straightforward query (complexity: ${complexity}/10) that can be addressed directly`,
                                "observation",
                                { confidence: 0.9 }
                            );
                        }
                    } catch (error) {
                        log.error(`Error in query decomposition: ${error}`);
                    }

                    // Add final conclusions
                    thinkingTool.addThinkingStep(
                        "The most relevant information appears to be in the current note and its semantic neighborhood",
                        "Ready to formulate response based on available information and query understanding",
                        "conclusion",
                        { confidence: 0.85 }
                        { confidence: 0.95 }
                    );

                    // Complete the thinking and add it to context
                    // Complete the thinking process and add the visualization to context
                    thinkingTool.completeThinking(thinkingId);
                    context += "## Thinking Process\n\n";
                    context += thinkingTool.getThinkingSummary(thinkingId) + "\n\n";
                    const visualization = thinkingTool.visualizeThinking(thinkingId);

                    if (visualization) {
                        context += "## Reasoning Process\n\n";
                        context += visualization + "\n\n";
                        log.info(`Added thinking visualization to context (${visualization.length} characters)`);
                    }
                } catch (error: any) {
                    log.error(`Error generating thinking process: ${error.message}`);
                    log.error(`Error creating thinking visualization: ${error.message}`);
                }
            }