diff --git a/src/public/app/widgets/llm_chat/communication.ts b/src/public/app/widgets/llm_chat/communication.ts
index 0a206b100..183f41865 100644
--- a/src/public/app/widgets/llm_chat/communication.ts
+++ b/src/public/app/widgets/llm_chat/communication.ts
@@ -70,7 +70,7 @@ export async function setupStreamingResponse(
return;
}
- console.log(`[${responseId}] LLM Stream message received via CustomEvent: session=${sessionId}, content=${!!message.content}, contentLength=${message.content?.length || 0}, thinking=${!!message.thinking}, toolExecution=${!!message.toolExecution}, done=${!!message.done}`);
+ console.log(`[${responseId}] LLM Stream message received via CustomEvent: session=${sessionId}, content=${!!message.content}, contentLength=${message.content?.length || 0}, thinking=${!!message.thinking}, toolExecution=${!!message.toolExecution}, done=${!!message.done}, type=${message.type || 'llm-stream'}`);
// Mark first message received
if (!receivedAnyMessage) {
@@ -84,12 +84,49 @@ export async function setupStreamingResponse(
}
}
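+        // The server now emits a dedicated message type for each phase of tool
+        // execution (tool_execution_start -> tool_result / tool_execution_error ->
+        // tool_completion_processing) instead of inline [Tool: ...] text markers.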
+ // Handle specific message types
+ if (message.type === 'tool_execution_start') {
+ onThinkingUpdate('Executing tools...');
+ // Also trigger tool execution UI with a specific format
+ onToolExecution({
+ action: 'start',
+ tool: 'tools',
+ result: 'Executing tools...'
+ });
+ return; // Skip accumulating content from this message
+ }
+
+ if (message.type === 'tool_result' && message.toolExecution) {
+ onToolExecution(message.toolExecution);
+ return; // Skip accumulating content from this message
+ }
+
+ if (message.type === 'tool_execution_error' && message.toolExecution) {
+ onToolExecution({
+ ...message.toolExecution,
+ action: 'error',
+ error: message.toolExecution.error || 'Unknown error during tool execution'
+ });
+ return; // Skip accumulating content from this message
+ }
+
+ if (message.type === 'tool_completion_processing') {
+ onThinkingUpdate('Generating response with tool results...');
+ // Also trigger tool execution UI with a specific format
+ onToolExecution({
+ action: 'generating',
+ tool: 'tools',
+ result: 'Generating response with tool results...'
+ });
+ return; // Skip accumulating content from this message
+ }
+
// Handle content updates
if (message.content) {
receivedAnyContent = true;
-
+
console.log(`[${responseId}] Received content chunk of length ${message.content.length}, preview: "${message.content.substring(0, 50)}${message.content.length > 50 ? '...' : ''}"`);
-
+
// Add to our accumulated response
assistantResponse += message.content;
@@ -111,10 +148,13 @@ export async function setupStreamingResponse(
}, 30000);
}
- // Handle tool execution updates
+ // Handle tool execution updates (legacy format and standard format with llm-stream type)
if (message.toolExecution) {
- console.log(`[${responseId}] Received tool execution update: action=${message.toolExecution.action || 'unknown'}`);
- onToolExecution(message.toolExecution);
+ // Only process if we haven't already handled this message via specific message types
+ if (message.type === 'llm-stream' || !message.type) {
+ console.log(`[${responseId}] Received tool execution update: action=${message.toolExecution.action || 'unknown'}`);
+ onToolExecution(message.toolExecution);
+ }
}
// Handle thinking state updates
diff --git a/src/public/app/widgets/llm_chat/llm_chat_panel.ts b/src/public/app/widgets/llm_chat/llm_chat_panel.ts
index 7fe22d8ab..d79b48716 100644
--- a/src/public/app/widgets/llm_chat/llm_chat_panel.ts
+++ b/src/public/app/widgets/llm_chat/llm_chat_panel.ts
@@ -7,10 +7,10 @@ import appContext from "../../components/app_context.js";
import server from "../../services/server.js";
import libraryLoader from "../../services/library_loader.js";
-import { TPL, addMessageToChat, showSources, hideSources, showLoadingIndicator, hideLoadingIndicator, renderToolStepsHtml } from "./ui.js";
+import { TPL, addMessageToChat, showSources, hideSources, showLoadingIndicator, hideLoadingIndicator } from "./ui.js";
import { formatMarkdown } from "./utils.js";
import { createChatSession, checkSessionExists, setupStreamingResponse, getDirectResponse } from "./communication.js";
-import { extractToolExecutionSteps, extractFinalResponse, extractInChatToolSteps } from "./message_processor.js";
+import { extractInChatToolSteps } from "./message_processor.js";
import { validateEmbeddingProviders } from "./validation.js";
import type { MessageData, ToolExecutionStep, ChatData } from "./types.js";
import { applySyntaxHighlight } from "../../services/syntax_highlight.js";
@@ -245,7 +245,7 @@ export default class LlmChatPanel extends BasicWidget {
- ${renderToolStepsHtml(steps)}
+ ${this.renderToolStepsHtml(steps)}
`;
@@ -261,6 +261,52 @@ export default class LlmChatPanel extends BasicWidget {
}
}
+ /**
+ * Render HTML for tool execution steps
+ */
+ private renderToolStepsHtml(steps: ToolExecutionStep[]): string {
+ if (!steps || steps.length === 0) return '';
+
+ return steps.map(step => {
+ let icon = 'bx-info-circle';
+ let className = 'info';
+ let content = '';
+
+ if (step.type === 'executing') {
+ icon = 'bx-code-block';
+ className = 'executing';
+            content = `<div>${step.content || 'Executing tools...'}</div>`;
+ } else if (step.type === 'result') {
+ icon = 'bx-terminal';
+ className = 'result';
+            content = `
+                <div class="fw-bold">Tool: ${step.name || 'unknown'}</div>
+                <div class="tool-result">${step.content || ''}</div>
+            `;
+ } else if (step.type === 'error') {
+ icon = 'bx-error-circle';
+ className = 'error';
+            content = `
+                <div class="fw-bold">Tool: ${step.name || 'unknown'}</div>
+                <div class="tool-error">${step.content || 'Error occurred'}</div>
+            `;
+ } else if (step.type === 'generating') {
+ icon = 'bx-message-dots';
+ className = 'generating';
+            content = `<div>${step.content || 'Generating response...'}</div>`;
+ }
+
+        return `
+            <div class="tool-step ${className} p-2 mb-2 rounded">
+                <i class="bx ${icon} me-1"></i>
+                ${content}
+            </div>
+        `;
+ }).join('');
+ }
+
async refresh() {
if (!this.isVisible()) {
return;
@@ -493,10 +539,10 @@ export default class LlmChatPanel extends BasicWidget {
}
/**
- * Update the UI with streaming content as it arrives
+ * Update the UI with streaming content
*/
private updateStreamingUI(assistantResponse: string) {
- const logId = `ui-update-${Date.now()}`;
+ const logId = `LlmChatPanel-${Date.now()}`;
console.log(`[${logId}] Updating UI with response text: ${assistantResponse.length} chars`);
if (!this.noteContextChatMessages) {
@@ -504,70 +550,19 @@ export default class LlmChatPanel extends BasicWidget {
return;
}
- // Extract the tool execution steps and final response
- const toolSteps = extractToolExecutionSteps(assistantResponse);
- const finalResponseText = extractFinalResponse(assistantResponse);
+        // With the structured message approach, tool steps no longer need to be
+        // extracted from assistantResponse; tool execution arrives via dedicated messages.
// Find existing assistant message or create one if needed
let assistantElement = this.noteContextChatMessages.querySelector('.assistant-message:last-child .message-content');
- // First, check if we need to add the tool execution steps to the chat flow
- if (toolSteps.length > 0) {
- // Look for an existing tool execution element in the chat flow
- let toolExecutionElement = this.noteContextChatMessages.querySelector('.chat-tool-execution');
-
- if (!toolExecutionElement) {
- // Create a new tool execution element in the chat flow
- // Place it right before the assistant message if it exists, or at the end of chat
- toolExecutionElement = document.createElement('div');
- toolExecutionElement.className = 'chat-tool-execution mb-3';
-
- // If there's an assistant message, insert before it
- const assistantMessage = this.noteContextChatMessages.querySelector('.assistant-message:last-child');
- if (assistantMessage) {
- this.noteContextChatMessages.insertBefore(toolExecutionElement, assistantMessage);
- } else {
- // Otherwise append to the end
- this.noteContextChatMessages.appendChild(toolExecutionElement);
- }
- }
-
- // Update the tool execution content
-            toolExecutionElement.innerHTML = `
-                ${renderToolStepsHtml(toolSteps)}
-                <button class="tool-execution-chat-clear btn btn-sm" type="button">Clear</button>
-            `;
-
- // Add event listener for the clear button
- const clearButton = toolExecutionElement.querySelector('.tool-execution-chat-clear');
- if (clearButton) {
- clearButton.addEventListener('click', (e) => {
- e.preventDefault();
- e.stopPropagation();
- toolExecutionElement?.remove();
- });
- }
- }
-
- // Now update or create the assistant message with the final response
- if (finalResponseText) {
+ // Now update or create the assistant message with the response
+ if (assistantResponse) {
if (assistantElement) {
- console.log(`[${logId}] Found existing assistant message element, updating with final response`);
+ console.log(`[${logId}] Found existing assistant message element, updating with response`);
try {
- // Format the final response with markdown
- const formattedResponse = formatMarkdown(finalResponseText);
+ // Format the response with markdown
+ const formattedResponse = formatMarkdown(assistantResponse);
// Update the content
assistantElement.innerHTML = formattedResponse || '';
@@ -575,12 +570,12 @@ export default class LlmChatPanel extends BasicWidget {
// Apply syntax highlighting to any code blocks in the updated content
applySyntaxHighlight($(assistantElement as HTMLElement));
- console.log(`[${logId}] Successfully updated existing element with final response`);
+ console.log(`[${logId}] Successfully updated existing element with response`);
} catch (err) {
console.error(`[${logId}] Error updating existing element:`, err);
// Fallback to text content if HTML update fails
try {
- assistantElement.textContent = finalResponseText;
+ assistantElement.textContent = assistantResponse;
console.log(`[${logId}] Fallback to text content successful`);
} catch (fallbackErr) {
console.error(`[${logId}] Even fallback update failed:`, fallbackErr);
@@ -589,7 +584,7 @@ export default class LlmChatPanel extends BasicWidget {
} else {
console.log(`[${logId}] No existing assistant message element found, creating new one`);
// Create a new message in the chat
- this.addMessageToChat('assistant', finalResponseText);
+ this.addMessageToChat('assistant', assistantResponse);
console.log(`[${logId}] Successfully added new assistant message`);
}
}
@@ -683,7 +678,9 @@ export default class LlmChatPanel extends BasicWidget {
if (!stepsContainer) return;
// Process based on action type
- if (toolExecutionData.action === 'start') {
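+        // Normalize the action so both spellings arriving from different message
+        // paths ('start'/'executing', 'result'/'complete') share one code path.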
+ const action = toolExecutionData.action || '';
+
+ if (action === 'start' || action === 'executing') {
// Tool execution started
const step = document.createElement('div');
step.className = 'tool-step executing p-2 mb-2 rounded';
@@ -692,21 +689,47 @@ export default class LlmChatPanel extends BasicWidget {
             <span>Executing tool: ${toolExecutionData.tool || 'unknown'}</span>
+            ${toolExecutionData.args ? `
             <pre class="tool-args mt-1 mb-0">Args: ${JSON.stringify(toolExecutionData.args || {}, null, 2)}</pre>
-
+            ` : ''}
         `;
stepsContainer.appendChild(step);
}
- else if (toolExecutionData.action === 'result') {
+ else if (action === 'result' || action === 'complete') {
// Tool execution completed with results
const step = document.createElement('div');
step.className = 'tool-step result p-2 mb-2 rounded';
let resultDisplay = '';
- // Format the result based on type
- if (typeof toolExecutionData.result === 'object') {
+            // Special handling for the search_notes tool, whose result has a specific structure
+ if (toolExecutionData.tool === 'search_notes' &&
+ typeof toolExecutionData.result === 'object' &&
+ toolExecutionData.result.results) {
+
+ const results = toolExecutionData.result.results;
+
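+                // The result shape ({ results: [{ noteId, title, similarity }] }) is
+                // assumed from the search_notes tool output.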
+ if (results.length === 0) {
+                    resultDisplay = `<div class="text-muted">No notes found matching the search criteria.</div>`;
+                } else {
+                    resultDisplay = `
+                        <div class="search-results">
+                            <div class="fw-bold mb-1">Found ${results.length} notes:</div>
+                            <ul class="list-unstyled mb-0">
+                                ${results.map((note: any) => `
+                                    <li>
+                                        <a href="#" class="note-link" data-note-id="${note.noteId}">${note.title}</a>
+                                        ${note.similarity < 1 ? `<span class="text-muted">(similarity: ${(note.similarity * 100).toFixed(0)}%)</span>` : ''}
+                                    </li>
+                                `).join('')}
+                            </ul>
+                        </div>
+                    `;
+ }
+ }
+ // Format the result based on type for other tools
+ else if (typeof toolExecutionData.result === 'object') {
// For objects, format as pretty JSON
             resultDisplay = `<pre class="mb-0">${JSON.stringify(toolExecutionData.result, null, 2)}</pre>`;
} else {
@@ -724,8 +747,23 @@ export default class LlmChatPanel extends BasicWidget {
`;
stepsContainer.appendChild(step);
+
+ // Add event listeners for note links if this is a search_notes result
+ if (toolExecutionData.tool === 'search_notes') {
+ const noteLinks = step.querySelectorAll('.note-link');
+ noteLinks.forEach(link => {
+ link.addEventListener('click', (e) => {
+ e.preventDefault();
+ const noteId = (e.currentTarget as HTMLElement).getAttribute('data-note-id');
+ if (noteId) {
+ // Open the note in a new tab but don't switch to it
+ appContext.tabManager.openTabWithNoteWithHoisting(noteId, { activate: false });
+ }
+ });
+ });
+ }
}
- else if (toolExecutionData.action === 'error') {
+ else if (action === 'error') {
// Tool execution failed
const step = document.createElement('div');
step.className = 'tool-step error p-2 mb-2 rounded';
@@ -740,6 +778,18 @@ export default class LlmChatPanel extends BasicWidget {
`;
stepsContainer.appendChild(step);
}
+ else if (action === 'generating') {
+ // Generating final response with tool results
+ const step = document.createElement('div');
+ step.className = 'tool-step generating p-2 mb-2 rounded';
+            step.innerHTML = `
+                <div class="d-flex align-items-center">
+                    <i class="bx bx-message-dots me-1"></i>
+                    <span>Generating response with tool results...</span>
+                </div>
+            `;
+ stepsContainer.appendChild(step);
+ }
// Make sure the loading indicator is shown during tool execution
this.loadingIndicator.style.display = 'flex';
diff --git a/src/public/app/widgets/llm_chat/message_processor.ts b/src/public/app/widgets/llm_chat/message_processor.ts
index cc20df084..139a3d611 100644
--- a/src/public/app/widgets/llm_chat/message_processor.ts
+++ b/src/public/app/widgets/llm_chat/message_processor.ts
@@ -3,66 +3,6 @@
*/
import type { ToolExecutionStep } from "./types.js";
-/**
- * Extract tool execution steps from the response
- */
-export function extractToolExecutionSteps(content: string): ToolExecutionStep[] {
- if (!content) return [];
-
- const steps: ToolExecutionStep[] = [];
-
- // Check for executing tools marker
- if (content.includes('[Executing tools...]')) {
- steps.push({
- type: 'executing',
- content: 'Executing tools...'
- });
- }
-
- // Extract tool results with regex
- const toolResultRegex = /\[Tool: ([^\]]+)\]([\s\S]*?)(?=\[|$)/g;
- let match;
-
- while ((match = toolResultRegex.exec(content)) !== null) {
- const toolName = match[1];
- const toolContent = match[2].trim();
-
- steps.push({
- type: toolContent.includes('Error:') ? 'error' : 'result',
- name: toolName,
- content: toolContent
- });
- }
-
- // Check for generating response marker
- if (content.includes('[Generating response with tool results...]')) {
- steps.push({
- type: 'generating',
- content: 'Generating response with tool results...'
- });
- }
-
- return steps;
-}
-
-/**
- * Extract the final response without tool execution steps
- */
-export function extractFinalResponse(content: string): string {
- if (!content) return '';
-
- // Remove all tool execution markers and their content
- let finalResponse = content
- .replace(/\[Executing tools\.\.\.\]\n*/g, '')
- .replace(/\[Tool: [^\]]+\][\s\S]*?(?=\[|$)/g, '')
- .replace(/\[Generating response with tool results\.\.\.\]\n*/g, '');
-
- // Trim any extra whitespace
- finalResponse = finalResponse.trim();
-
- return finalResponse;
-}
-
/**
* Extract tool execution steps from the DOM that are within the chat flow
*/
diff --git a/src/services/llm/pipeline/chat_pipeline.ts b/src/services/llm/pipeline/chat_pipeline.ts
index de9d7b9c6..12be376a0 100644
--- a/src/services/llm/pipeline/chat_pipeline.ts
+++ b/src/services/llm/pipeline/chat_pipeline.ts
@@ -345,9 +345,10 @@ export class ChatPipeline {
// If streaming was enabled, send an update to the user
if (isStreaming && streamCallback) {
streamingPaused = true;
- // IMPORTANT: Don't send done:true here, as it causes the client to stop processing messages
- // Instead, send a marker message that indicates tools will be executed
- streamCallback('\n\n[Executing tools...]\n\n', false);
+ // Send a dedicated message with a specific type for tool execution
+ streamCallback('', false, {
+ type: 'tool_execution_start'
+ });
}
while (toolCallIterations < maxToolCallIterations) {
@@ -406,6 +407,7 @@ export class ChatPipeline {
// Send the structured tool result directly so the client has the raw data
streamCallback('', false, {
+ type: 'tool_result',
toolExecution: {
action: 'result',
tool: toolName,
@@ -414,15 +416,21 @@ export class ChatPipeline {
}
});
- // Still send the formatted text for backwards compatibility
- // This will be phased out once the client is updated
- const formattedToolResult = `[Tool: ${toolName || 'unknown'}]\n${msg.content}\n\n`;
- streamCallback(formattedToolResult, false);
+ // No longer need to send formatted text version
+ // The client should use the structured data instead
} catch (err) {
log.error(`Error sending structured tool result: ${err}`);
- // Fall back to the old format if there's an error
- const formattedToolResult = `[Tool: ${toolName || 'unknown'}]\n${msg.content}\n\n`;
- streamCallback(formattedToolResult, false);
+ // Use structured format here too instead of falling back to text format
+ streamCallback('', false, {
+ type: 'tool_result',
+ toolExecution: {
+ action: 'result',
+ tool: toolName || 'unknown',
+ toolCallId: msg.tool_call_id,
+ result: msg.content,
+ error: String(err)
+ }
+ });
}
}
});
@@ -437,7 +445,9 @@ export class ChatPipeline {
// If streaming, show progress to the user
if (isStreaming && streamCallback) {
- streamCallback('[Generating response with tool results...]\n\n', false);
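+                // Structured signal that the pipeline is making the follow-up LLM call
+                // with the tool results; the client renders this as a "generating" step.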
+ streamCallback('', false, {
+ type: 'tool_completion_processing'
+ });
}
// Extract tool execution status information for Ollama feedback
@@ -513,7 +523,13 @@ export class ChatPipeline {
// If streaming, show error to the user
if (isStreaming && streamCallback) {
- streamCallback(`[Tool execution error: ${error.message || 'unknown error'}]\n\n`, false);
+ streamCallback('', false, {
+ type: 'tool_execution_error',
+ toolExecution: {
+ action: 'error',
+ error: error.message || 'unknown error'
+ }
+ });
}
// For Ollama, create tool execution status with the error
diff --git a/src/services/llm/rest_chat_service.ts b/src/services/llm/rest_chat_service.ts
index 23bb8d129..1fdc2285c 100644
--- a/src/services/llm/rest_chat_service.ts
+++ b/src/services/llm/rest_chat_service.ts
@@ -6,14 +6,15 @@ import type { Message, ChatCompletionOptions, ChatResponse, StreamChunk } from "
* Interface for WebSocket LLM streaming messages
*/
interface LLMStreamMessage {
- type: 'llm-stream';
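+    // 'llm-stream' carries ordinary content/thinking chunks; the tool_* types mark
+    // the phases of tool execution so the client no longer parses [Tool: ...] markers.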
+ type: 'llm-stream' | 'tool_execution_start' | 'tool_result' | 'tool_execution_error' | 'tool_completion_processing';
sessionId: string;
content?: string;
thinking?: string;
toolExecution?: {
action?: string;
tool?: string;
- result?: string;
+ toolCallId?: string;
+        result?: string | Record<string, any>;
         error?: string;
         args?: Record<string, any>;
};
@@ -1165,7 +1166,7 @@ class RestChatService {
// Enhanced logging for each chunk
log.info(`Received stream chunk from ${service.getName()} with ${chunk.text.length} chars of text, done=${!!chunk.done}`);
-
+
// Send each individual chunk via WebSocket as it arrives
wsService.sendMessageToAllClients({
type: 'llm-stream',
@@ -1214,7 +1215,7 @@ class RestChatService {
content: messageContent, // Send the accumulated content
done: true
} as LLMStreamMessage);
-
+
log.info(`Sent explicit final completion message with accumulated content`);
} else {
log.info(`Final done flag was already sent with content chunk, no need for extra message`);
diff --git a/src/services/ws.ts b/src/services/ws.ts
index 9bc5d7c72..14f1c88fc 100644
--- a/src/services/ws.ts
+++ b/src/services/ws.ts
@@ -56,7 +56,7 @@ interface Message {
originEntityId?: string | null;
lastModifiedMs?: number;
filePath?: string;
-
+
// LLM streaming specific fields
sessionId?: string;
content?: string;
@@ -64,7 +64,8 @@ interface Message {
toolExecution?: {
action?: string;
tool?: string;
- result?: string;
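+        // toolCallId ties a result back to the originating tool call; result may now
+        // carry the tool's raw structured output instead of pre-formatted text.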
+ toolCallId?: string;
+        result?: string | Record<string, any>;
         error?: string;
         args?: Record<string, any>;
};
@@ -144,7 +145,7 @@ function sendMessageToAllClients(message: Message) {
clientCount++;
}
});
-
+
// Log WebSocket client count for debugging
if (message.type === "llm-stream") {
log.info(`[WS-SERVER] Sent LLM stream message to ${clientCount} clients`);