diff --git a/src/public/app/widgets/llm_chat/llm_chat_panel.ts b/src/public/app/widgets/llm_chat/llm_chat_panel.ts
index 9cfaed741..7fe22d8ab 100644
--- a/src/public/app/widgets/llm_chat/llm_chat_panel.ts
+++ b/src/public/app/widgets/llm_chat/llm_chat_panel.ts
@@ -635,16 +635,117 @@ export default class LlmChatPanel extends BasicWidget {
}
/**
- * Show tool execution information in the UI
+ * Handle tool execution updates
*/
private showToolExecutionInfo(toolExecutionData: any) {
console.log(`Showing tool execution info: ${JSON.stringify(toolExecutionData)}`);
- // We'll update the in-chat tool execution area in the updateStreamingUI method
- // This method is now just a hook for the WebSocket handlers
+ // Create or get the tool execution container
+ let toolExecutionElement = this.noteContextChatMessages.querySelector('.chat-tool-execution');
+ if (!toolExecutionElement) {
+ toolExecutionElement = document.createElement('div');
+ toolExecutionElement.className = 'chat-tool-execution mb-3';
+
+ // Create header with title and controls
+ const header = document.createElement('div');
+ header.className = 'tool-execution-header d-flex align-items-center p-2 rounded';
+                header.innerHTML = `
+                    <i class="bx bx-terminal me-1"></i>
+                    <span class="flex-grow-1">Tool Execution</span>
+                    <button class="btn btn-sm btn-link p-0 tool-execution-chat-clear" title="Clear tool execution history">
+                        <i class="bx bx-x"></i>
+                    </button>
+                `;
+ toolExecutionElement.appendChild(header);
+
+ // Add click handler for clear button
+ const clearButton = toolExecutionElement.querySelector('.tool-execution-chat-clear');
+ if (clearButton) {
+ clearButton.addEventListener('click', () => {
+ const stepsContainer = toolExecutionElement?.querySelector('.tool-execution-container');
+ if (stepsContainer) {
+ stepsContainer.innerHTML = '';
+ }
+ });
+ }
+
+ // Create container for tool steps
+ const stepsContainer = document.createElement('div');
+ stepsContainer.className = 'tool-execution-container p-2 rounded mb-2';
+ toolExecutionElement.appendChild(stepsContainer);
+
+ // Add to chat messages
+ this.noteContextChatMessages.appendChild(toolExecutionElement);
+ }
+
+ // Get the steps container
+ const stepsContainer = toolExecutionElement.querySelector('.tool-execution-container');
+ if (!stepsContainer) return;
+
+ // Process based on action type
+ if (toolExecutionData.action === 'start') {
+ // Tool execution started
+ const step = document.createElement('div');
+ step.className = 'tool-step executing p-2 mb-2 rounded';
+            step.innerHTML = `
+                <div class="d-flex align-items-center">
+                    <i class="bx bx-loader-alt bx-spin me-1"></i>
+                    <span class="fw-bold">Executing tool: ${toolExecutionData.tool || 'unknown'}</span>
+                </div>
+                <div class="tool-args mt-1">
+                    <code>Args: ${JSON.stringify(toolExecutionData.args || {}, null, 2)}</code>
+                </div>
+            `;
+ stepsContainer.appendChild(step);
+ }
+ else if (toolExecutionData.action === 'result') {
+ // Tool execution completed with results
+ const step = document.createElement('div');
+ step.className = 'tool-step result p-2 mb-2 rounded';
+
+ let resultDisplay = '';
+
+ // Format the result based on type
+ if (typeof toolExecutionData.result === 'object') {
+ // For objects, format as pretty JSON
+                resultDisplay = `<pre class="mb-0"><code>${JSON.stringify(toolExecutionData.result, null, 2)}</code></pre>`;
+ } else {
+ // For simple values, display as text
+                resultDisplay = `<div class="result-text">${String(toolExecutionData.result)}</div>`;
+ }
+
+            step.innerHTML = `
+                <div class="d-flex align-items-center">
+                    <i class="bx bx-check me-1"></i>
+                    <span class="fw-bold">Tool: ${toolExecutionData.tool || 'unknown'}</span>
+                </div>
+                <div class="tool-result mt-1">
+                    ${resultDisplay}
+                </div>
+            `;
+ stepsContainer.appendChild(step);
+ }
+ else if (toolExecutionData.action === 'error') {
+ // Tool execution failed
+ const step = document.createElement('div');
+ step.className = 'tool-step error p-2 mb-2 rounded';
+            step.innerHTML = `
+                <div class="d-flex align-items-center">
+                    <i class="bx bx-error-circle me-1"></i>
+                    <span class="fw-bold">Error in tool: ${toolExecutionData.tool || 'unknown'}</span>
+                </div>
+                <div class="error-message mt-1">
+                    ${toolExecutionData.error || 'Unknown error'}
+                </div>
+            `;
+ stepsContainer.appendChild(step);
+ }
// Make sure the loading indicator is shown during tool execution
this.loadingIndicator.style.display = 'flex';
+
+ // Scroll the chat container to show the tool execution
+ this.chatContainer.scrollTop = this.chatContainer.scrollHeight;
}
/**
diff --git a/src/public/stylesheets/llm_chat.css b/src/public/stylesheets/llm_chat.css
index ae8dad6a1..58b324069 100644
--- a/src/public/stylesheets/llm_chat.css
+++ b/src/public/stylesheets/llm_chat.css
@@ -87,6 +87,80 @@
margin-bottom: 0;
}
+/* Tool step specific styling */
+.tool-step.executing {
+ background-color: rgba(0, 123, 255, 0.05);
+ border-color: rgba(0, 123, 255, 0.2);
+}
+
+.tool-step.result {
+ background-color: rgba(40, 167, 69, 0.05);
+ border-color: rgba(40, 167, 69, 0.2);
+}
+
+.tool-step.error {
+ background-color: rgba(220, 53, 69, 0.05);
+ border-color: rgba(220, 53, 69, 0.2);
+}
+
+/* Tool result formatting */
+.tool-result pre {
+ margin: 0.5rem 0;
+ padding: 0.5rem;
+ background-color: rgba(0, 0, 0, 0.03);
+ border-radius: 0.25rem;
+ overflow: auto;
+ max-height: 300px;
+}
+
+.tool-result code {
+ font-family: "SFMono-Regular", Consolas, "Liberation Mono", Menlo, monospace;
+ font-size: 0.9em;
+}
+
+.tool-args code {
+ display: block;
+ padding: 0.5rem;
+ background-color: rgba(0, 0, 0, 0.03);
+ border-radius: 0.25rem;
+ margin-top: 0.25rem;
+ font-size: 0.85em;
+ color: var(--muted-text-color);
+ white-space: pre-wrap;
+ overflow: auto;
+ max-height: 100px;
+}
+
+/* Tool Execution in Chat Styling */
+.chat-tool-execution {
+ padding: 0 0 0 36px; /* Aligned with message content, accounting for avatar width */
+ width: 100%;
+ margin-bottom: 1rem;
+}
+
+.tool-execution-container {
+ background-color: var(--accented-background-color, rgba(245, 247, 250, 0.7));
+ border: 1px solid var(--subtle-border-color);
+ border-radius: 0.375rem;
+ box-shadow: 0 1px 3px rgba(0, 0, 0, 0.05);
+ overflow: hidden;
+ max-width: calc(100% - 20px);
+}
+
+.tool-execution-header {
+ background-color: var(--main-background-color);
+ border-bottom: 1px solid var(--subtle-border-color);
+ margin-bottom: 0.5rem;
+ color: var(--muted-text-color);
+ font-weight: 500;
+}
+
+.tool-execution-chat-steps {
+ padding: 0.5rem;
+ max-height: 300px;
+ overflow-y: auto;
+}
+
/* Make error text more visible */
.text-danger {
color: #dc3545 !important;
@@ -165,31 +239,4 @@
justify-content: center;
padding: 1rem;
color: var(--muted-text-color);
-}
-
-/* Tool Execution in Chat Styling */
-.chat-tool-execution {
- padding: 0 0 0 36px; /* Aligned with message content, accounting for avatar width */
- width: 100%;
-}
-
-.tool-execution-container {
- background-color: var(--accented-background-color, rgba(245, 247, 250, 0.7));
- border: 1px solid var(--subtle-border-color);
- border-radius: 0.375rem;
- box-shadow: 0 1px 3px rgba(0, 0, 0, 0.05);
- overflow: hidden;
- max-width: calc(100% - 20px);
-}
-
-.tool-execution-header {
- border-bottom: 1px solid var(--subtle-border-color);
- padding-bottom: 0.5rem;
- color: var(--muted-text-color);
-}
-
-.tool-execution-chat-steps {
- padding: 0.5rem;
- max-height: 300px;
- overflow-y: auto;
}
\ No newline at end of file
diff --git a/src/services/llm/pipeline/chat_pipeline.ts b/src/services/llm/pipeline/chat_pipeline.ts
index ea7e9145c..de9d7b9c6 100644
--- a/src/services/llm/pipeline/chat_pipeline.ts
+++ b/src/services/llm/pipeline/chat_pipeline.ts
@@ -295,7 +295,7 @@ export class ChatPipeline {
accumulatedText += processedChunk.text;
// Forward to callback with original chunk data in case it contains additional information
- await streamCallback!(processedChunk.text, processedChunk.done, chunk);
+ streamCallback(processedChunk.text, processedChunk.done, chunk);
});
}
@@ -347,7 +347,7 @@ export class ChatPipeline {
streamingPaused = true;
// IMPORTANT: Don't send done:true here, as it causes the client to stop processing messages
// Instead, send a marker message that indicates tools will be executed
- await streamCallback('\n\n[Executing tools...]\n\n', false);
+ streamCallback('\n\n[Executing tools...]\n\n', false);
}
while (toolCallIterations < maxToolCallIterations) {
@@ -388,8 +388,42 @@ export class ChatPipeline {
if (isStreaming && streamCallback) {
// For each tool result, format a readable message for the user
const toolName = this.getToolNameFromToolCallId(currentMessages, msg.tool_call_id || '');
- const formattedToolResult = `[Tool: ${toolName || 'unknown'}]\n${msg.content}\n\n`;
- streamCallback(formattedToolResult, false);
+
+ // Create a structured tool result message
+ // The client will receive this structured data and can display it properly
+ try {
+ // Parse the result content if it's JSON
+ let parsedContent = msg.content;
+ try {
+ // Check if the content is JSON
+ if (msg.content.trim().startsWith('{') || msg.content.trim().startsWith('[')) {
+ parsedContent = JSON.parse(msg.content);
+ }
+ } catch (e) {
+ // If parsing fails, keep the original content
+ log.info(`Could not parse tool result as JSON: ${e}`);
+ }
+
+ // Send the structured tool result directly so the client has the raw data
+ streamCallback('', false, {
+ toolExecution: {
+ action: 'result',
+ tool: toolName,
+ toolCallId: msg.tool_call_id,
+ result: parsedContent
+ }
+ });
+
+ // Still send the formatted text for backwards compatibility
+ // This will be phased out once the client is updated
+ const formattedToolResult = `[Tool: ${toolName || 'unknown'}]\n${msg.content}\n\n`;
+ streamCallback(formattedToolResult, false);
+ } catch (err) {
+ log.error(`Error sending structured tool result: ${err}`);
+ // Fall back to the old format if there's an error
+ const formattedToolResult = `[Tool: ${toolName || 'unknown'}]\n${msg.content}\n\n`;
+ streamCallback(formattedToolResult, false);
+ }
}
});
@@ -403,7 +437,7 @@ export class ChatPipeline {
// If streaming, show progress to the user
if (isStreaming && streamCallback) {
- await streamCallback('[Generating response with tool results...]\n\n', false);
+ streamCallback('[Generating response with tool results...]\n\n', false);
}
// Extract tool execution status information for Ollama feedback
@@ -479,7 +513,7 @@ export class ChatPipeline {
// If streaming, show error to the user
if (isStreaming && streamCallback) {
- await streamCallback(`[Tool execution error: ${error.message || 'unknown error'}]\n\n`, false);
+ streamCallback(`[Tool execution error: ${error.message || 'unknown error'}]\n\n`, false);
}
// For Ollama, create tool execution status with the error
@@ -529,7 +563,7 @@ export class ChatPipeline {
// If streaming, inform the user about iteration limit
if (isStreaming && streamCallback) {
- await streamCallback(`[Reached maximum of ${maxToolCallIterations} tool calls. Finalizing response...]\n\n`, false);
+ streamCallback(`[Reached maximum of ${maxToolCallIterations} tool calls. Finalizing response...]\n\n`, false);
}
// For Ollama, create a status about reaching max iterations
@@ -573,7 +607,7 @@ export class ChatPipeline {
// Resume streaming with the final response text
// This is where we send the definitive done:true signal with the complete content
- await streamCallback(currentResponse.text, true);
+ streamCallback(currentResponse.text, true);
// Log confirmation
log.info(`Sent final response with done=true signal`);
@@ -587,7 +621,7 @@ export class ChatPipeline {
log.info(`Sending final streaming response without tool calls: ${currentResponse.text.length} chars`);
// Send the final response with done=true to complete the streaming
- await streamCallback(currentResponse.text, true);
+ streamCallback(currentResponse.text, true);
log.info(`Sent final non-tool response with done=true signal`);
}