okay I can call tools?

perf3ct 2025-04-09 20:15:21 +00:00
parent b05b88dd76
commit 7f92dfc3f1
GPG Key ID: 569C4EEC436F5232
9 changed files with 317 additions and 44 deletions

View File

@@ -25,7 +25,17 @@ const TPL = `
<div class="spinner-border spinner-border-sm text-primary" role="status">
<span class="visually-hidden">Loading...</span>
</div>
<span class="ms-2">${t('ai_llm.agent.processing')}...</span>
<span class="ms-2">${t('ai_llm.agent.processing')}</span>
<div class="tool-execution-info mt-2" style="display: none;">
<!-- Tool execution status will be shown here -->
<div class="tool-execution-status small p-2 bg-light rounded" style="max-height: 150px; overflow-y: auto;">
<div class="d-flex align-items-center">
<i class="bx bx-code-block text-primary me-2"></i>
<span class="fw-bold">Tool Execution:</span>
</div>
<div class="tool-execution-steps ps-3 pt-1"></div>
</div>
</div>
</div>
</div>
@@ -87,6 +97,8 @@ export default class LlmChatPanel extends BasicWidget {
private noteContextChatSendButton!: HTMLButtonElement;
private chatContainer!: HTMLElement;
private loadingIndicator!: HTMLElement;
private toolExecutionInfo!: HTMLElement;
private toolExecutionSteps!: HTMLElement;
private sourcesList!: HTMLElement;
private useAdvancedContextCheckbox!: HTMLInputElement;
private showThinkingCheckbox!: HTMLInputElement;
@@ -142,6 +154,8 @@ export default class LlmChatPanel extends BasicWidget {
this.noteContextChatSendButton = element.querySelector('.note-context-chat-send-button') as HTMLButtonElement;
this.chatContainer = element.querySelector('.note-context-chat-container') as HTMLElement;
this.loadingIndicator = element.querySelector('.loading-indicator') as HTMLElement;
this.toolExecutionInfo = element.querySelector('.tool-execution-info') as HTMLElement;
this.toolExecutionSteps = element.querySelector('.tool-execution-steps') as HTMLElement;
this.sourcesList = element.querySelector('.sources-list') as HTMLElement;
this.useAdvancedContextCheckbox = element.querySelector('.use-advanced-context-checkbox') as HTMLInputElement;
this.showThinkingCheckbox = element.querySelector('.show-thinking-checkbox') as HTMLInputElement;
@@ -498,6 +512,12 @@ export default class LlmChatPanel extends BasicWidget {
// Update the UI with the accumulated response
this.updateStreamingUI(assistantResponse);
} else if (data.toolExecution) {
// Handle tool execution info
this.showToolExecutionInfo(data.toolExecution);
// When tool execution info is received, also show the loading indicator
// in case it's not already visible
this.loadingIndicator.style.display = 'flex';
} else if (data.error) {
// Handle error message
this.hideLoadingIndicator();
@@ -736,10 +756,156 @@ export default class LlmChatPanel extends BasicWidget {
private showLoadingIndicator() {
this.loadingIndicator.style.display = 'flex';
// Reset and hide the tool execution area when starting a new request;
// we'll make it visible again when we get our first tool execution event
this.toolExecutionInfo.style.display = 'none';
this.toolExecutionSteps.innerHTML = '';
}
private hideLoadingIndicator() {
this.loadingIndicator.style.display = 'none';
this.toolExecutionInfo.style.display = 'none';
}
/**
* Show tool execution information in the UI
*/
private showToolExecutionInfo(toolExecutionData: any) {
// Make sure tool execution info section is visible
this.toolExecutionInfo.style.display = 'block';
// Create a new step element to show the tool being executed
const stepElement = document.createElement('div');
stepElement.className = 'tool-step my-1';
// Basic styling for the step
let stepHtml = '';
if (toolExecutionData.action === 'start') {
// Tool execution starting
stepHtml = `
<div class="d-flex align-items-center">
<i class="bx bx-play-circle text-primary me-1"></i>
<span class="fw-bold">${this.escapeHtml(toolExecutionData.tool || 'Unknown tool')}</span>
</div>
<div class="tool-args small text-muted ps-3">
${this.formatToolArgs(toolExecutionData.args || {})}
</div>
`;
} else if (toolExecutionData.action === 'complete') {
// Tool execution completed
const resultPreview = this.formatToolResult(toolExecutionData.result);
stepHtml = `
<div class="d-flex align-items-center">
<i class="bx bx-check-circle text-success me-1"></i>
<span>${this.escapeHtml(toolExecutionData.tool || 'Unknown tool')} completed</span>
</div>
${resultPreview ? `<div class="tool-result small ps-3 text-muted">${resultPreview}</div>` : ''}
`;
} else if (toolExecutionData.action === 'error') {
// Tool execution error
stepHtml = `
<div class="d-flex align-items-center">
<i class="bx bx-error-circle text-danger me-1"></i>
<span class="text-danger">${this.escapeHtml(toolExecutionData.tool || 'Unknown tool')} error</span>
</div>
<div class="tool-error small text-danger ps-3">
${this.escapeHtml(toolExecutionData.error || 'Unknown error')}
</div>
`;
}
stepElement.innerHTML = stepHtml;
this.toolExecutionSteps.appendChild(stepElement);
// Scroll to bottom of tool execution steps
this.toolExecutionSteps.scrollTop = this.toolExecutionSteps.scrollHeight;
}
/**
* Format tool arguments for display
*/
private formatToolArgs(args: any): string {
if (!args || typeof args !== 'object') return '';
return Object.entries(args)
.map(([key, value]) => {
// Format the value based on its type
let displayValue;
if (typeof value === 'string') {
displayValue = value.length > 50 ? `"${value.substring(0, 47)}..."` : `"${value}"`;
} else if (value === null) {
displayValue = 'null';
} else if (Array.isArray(value)) {
displayValue = '[...]'; // Simplified array representation
} else if (typeof value === 'object') {
displayValue = '{...}'; // Simplified object representation
} else {
displayValue = String(value);
}
return `<span class="text-primary">${this.escapeHtml(key)}</span>: ${this.escapeHtml(displayValue)}`;
})
.join(', ');
}
/**
* Format tool results for display
*/
private formatToolResult(result: any): string {
if (result === undefined || result === null) return '';
// Try to format as JSON if it's an object
if (typeof result === 'object') {
try {
// Get a preview of structured data
const entries = Object.entries(result);
if (entries.length === 0) return 'Empty result';
// Just show first 2 key-value pairs if there are many
const preview = entries.slice(0, 2).map(([key, val]) => {
let valPreview;
if (typeof val === 'string') {
valPreview = val.length > 30 ? `"${val.substring(0, 27)}..."` : `"${val}"`;
} else if (Array.isArray(val)) {
valPreview = `[${val.length} items]`;
} else if (typeof val === 'object' && val !== null) {
valPreview = '{...}';
} else {
valPreview = String(val);
}
return `${key}: ${valPreview}`;
}).join(', ');
return entries.length > 2 ? `${preview}, ... (${entries.length} properties)` : preview;
} catch (e) {
return String(result).substring(0, 100) + (String(result).length > 100 ? '...' : '');
}
}
// For string results
if (typeof result === 'string') {
return result.length > 100 ? result.substring(0, 97) + '...' : result;
}
// Default formatting
return String(result).substring(0, 100) + (String(result).length > 100 ? '...' : '');
}
/**
* Simple HTML escaping for safer content display
*/
private escapeHtml(text: string): string {
if (typeof text !== 'string') {
text = String(text || '');
}
return text
.replace(/&/g, '&amp;')
.replace(/</g, '&lt;')
.replace(/>/g, '&gt;')
.replace(/"/g, '&quot;')
.replace(/'/g, '&#039;');
}
private initializeEventListeners() {

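For reference, the toolExecution payloads that showToolExecutionInfo branches on appear to follow this shape (a sketch inferred from the three action branches above; the commit itself types the parameter as any):

// Inferred payload shape for tool execution events; not a type defined in this commit
type ToolExecutionEvent =
    | { action: 'start'; tool: string; args: Record<string, unknown> }
    | { action: 'complete'; tool: string; result: unknown }
    | { action: 'error'; tool: string; error: string };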
View File

@@ -50,6 +50,7 @@ export interface ChatCompletionOptions {
useAdvancedContext?: boolean; // Whether to use advanced context enrichment
toolExecutionStatus?: any[]; // Status information about executed tools for feedback
providerMetadata?: ModelMetadata; // Metadata about the provider and model capabilities
streamCallback?: (text: string, isDone: boolean, originalChunk?: any) => Promise<void> | void; // Callback for streaming
}
export interface ChatResponse {

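A minimal usage sketch for the new streamCallback option (the handler below is illustrative, not code from this commit):

// Hypothetical caller wiring a streaming handler through ChatCompletionOptions
const options: ChatCompletionOptions = {
    stream: true,
    streamCallback: async (text, isDone, originalChunk) => {
        if (text) process.stdout.write(text);                     // partial assistant text
        if (originalChunk?.toolExecution) {
            console.log('\n[tool]', originalChunk.toolExecution); // tool status event
        }
        if (isDone) console.log('\n[done]');
    }
};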
View File

@@ -142,7 +142,7 @@ export class ChatService {
// Execute the pipeline
const response = await pipeline.execute({
messages: session.messages,
options: options || session.options,
options: options || session.options || {},
query: content,
streamCallback
});
@@ -231,7 +231,7 @@
// Execute the pipeline with note context
const response = await pipeline.execute({
messages: session.messages,
options: options || session.options,
options: options || session.options || {},
noteId,
query: content,
showThinking,

View File

@@ -236,26 +236,36 @@ export class ChatPipeline {
log.info(`[ChatPipeline] Request type info - Format: ${input.format || 'not specified'}, Options from pipelineInput: ${JSON.stringify({stream: input.options?.stream})}`);
log.info(`[ChatPipeline] Stream settings - config.enableStreaming: ${streamEnabledInConfig}, format parameter: ${input.format}, modelSelection.options.stream: ${modelSelection.options.stream}, streamCallback available: ${streamCallbackAvailable}`);
// IMPORTANT: Different behavior for GET vs POST requests:
// - For GET requests with streamCallback available: Always enable streaming
// - For POST requests: Use streaming options but don't actually stream (since we can't stream back to client)
// IMPORTANT: Respect the existing stream option but with special handling for callbacks:
// 1. If a stream callback is available, streaming MUST be enabled for it to work
// 2. Otherwise, preserve the original stream setting from input options
// First, determine what the stream value should be based on various factors:
let shouldEnableStream = modelSelection.options.stream;
if (streamCallbackAvailable) {
// If a stream callback is available (GET requests), we can stream the response
modelSelection.options.stream = true;
log.info(`[ChatPipeline] Stream callback available, setting stream=true for real-time streaming`);
// If we have a stream callback, we NEED to enable streaming
// This is critical for GET requests with EventSource
shouldEnableStream = true;
log.info(`[ChatPipeline] Stream callback available, enabling streaming`);
} else if (streamRequestedInOptions) {
// Stream was explicitly requested in options, honor that setting
log.info(`[ChatPipeline] Stream explicitly requested in options: ${streamRequestedInOptions}`);
shouldEnableStream = streamRequestedInOptions;
} else if (streamFormatRequested) {
// Format=stream parameter indicates streaming was requested
log.info(`[ChatPipeline] Stream format requested in parameters`);
shouldEnableStream = true;
} else {
// For POST requests, preserve the stream flag as-is from input options
// This ensures LLM request format is consistent across both GET and POST
if (streamRequestedInOptions) {
log.info(`[ChatPipeline] No stream callback but stream requested in options, preserving stream=true`);
} else {
log.info(`[ChatPipeline] No stream callback and no stream in options, setting stream=false`);
modelSelection.options.stream = false;
}
// No explicit streaming indicators, use config default
log.info(`[ChatPipeline] No explicit stream settings, using config default: ${streamEnabledInConfig}`);
shouldEnableStream = streamEnabledInConfig;
}
log.info(`[ChatPipeline] Final modelSelection.options.stream = ${modelSelection.options.stream}`);
log.info(`[ChatPipeline] Will actual streaming occur? ${streamCallbackAvailable && modelSelection.options.stream}`);
// Set the final stream option
modelSelection.options.stream = shouldEnableStream;
log.info(`[ChatPipeline] Final streaming decision: stream=${shouldEnableStream}, will stream to client=${streamCallbackAvailable && shouldEnableStream}`);
// STAGE 5 & 6: Handle LLM completion and tool execution loop
@@ -268,8 +278,9 @@
this.updateStageMetrics('llmCompletion', llmStartTime);
log.info(`Received LLM response from model: ${completion.response.model}, provider: ${completion.response.provider}`);
// Handle streaming if enabled and available
if (enableStreaming && completion.response.stream && streamCallback) {
// Handle streaming if enabled and available
// Use shouldEnableStream variable which contains our streaming decision
if (shouldEnableStream && completion.response.stream && streamCallback) {
// Setup stream handler that passes chunks through response processing
await completion.response.stream(async (chunk: StreamChunk) => {
// Process the chunk text
@@ -278,8 +289,8 @@
// Accumulate text for final response
accumulatedText += processedChunk.text;
// Forward to callback
await streamCallback!(processedChunk.text, processedChunk.done);
// Forward to callback with original chunk data in case it contains additional information
await streamCallback!(processedChunk.text, processedChunk.done, chunk);
});
}
@@ -323,7 +334,7 @@
});
// Keep track of whether we're in a streaming response
const isStreaming = enableStreaming && streamCallback;
const isStreaming = shouldEnableStream && streamCallback;
let streamingPaused = false;
// If streaming was enabled, send an update to the user

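Condensed, the decision above follows a simple precedence: stream callback, then explicit option, then format parameter, then config default. A hypothetical helper expressing the same rule:

// Sketch only; mirrors the branch order of the pipeline code above
function resolveStreamFlag(
    streamCallbackAvailable: boolean,
    streamRequestedInOptions: boolean,
    streamFormatRequested: boolean,
    streamEnabledInConfig: boolean
): boolean {
    if (streamCallbackAvailable) return true;  // a callback can only receive chunks if streaming is on
    if (streamRequestedInOptions) return true; // stream explicitly requested in options
    if (streamFormatRequested) return true;    // format=stream parameter
    return streamEnabledInConfig;              // otherwise use the config default
}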
View File

@@ -47,8 +47,11 @@ export interface StageMetrics {
/**
* Callback for handling stream chunks
* @param text The text chunk to append to the UI
* @param isDone Whether this is the final chunk
* @param originalChunk The original chunk with all metadata for custom handling
*/
export type StreamCallback = (text: string, isDone: boolean) => Promise<void> | void;
export type StreamCallback = (text: string, isDone: boolean, originalChunk?: any) => Promise<void> | void;
/**
* Common input for all chat-related pipeline stages
@@ -151,6 +154,7 @@ export interface ToolExecutionInput extends PipelineInput {
messages: Message[];
options: ChatCompletionOptions;
maxIterations?: number;
streamCallback?: StreamCallback;
}
/**

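With streamCallback now part of ToolExecutionInput, the pipeline can thread its callback into the tool-calling stage. A rough sketch of such a call site (hypothetical wiring; the exact call is not shown in this diff):

const toolResult = await toolCallingStage.execute({
    response: completion.response,   // LLM response that may contain tool calls
    messages,
    options: modelSelection.options,
    streamCallback                   // forwarded so tool events reach the client
});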
View File

@@ -31,11 +31,16 @@ export class LLMCompletionStage extends BasePipelineStage<LLMCompletionInput, {
// Create a deep copy of options to avoid modifying the original
const updatedOptions: ChatCompletionOptions = JSON.parse(JSON.stringify(options));
// IMPORTANT: Ensure stream property is explicitly set to a boolean value
// This is critical to ensure consistent behavior across all providers
updatedOptions.stream = options.stream === true;
log.info(`[LLMCompletionStage] Explicitly set stream option to boolean: ${updatedOptions.stream}`);
// IMPORTANT: Handle stream option carefully:
// 1. If it's undefined, leave it undefined (provider will use defaults)
// 2. If explicitly set to true/false, ensure it's a proper boolean
if (options.stream !== undefined) {
updatedOptions.stream = options.stream === true;
log.info(`[LLMCompletionStage] Stream explicitly provided in options, set to: ${updatedOptions.stream}`);
} else {
// If undefined, leave it undefined so provider can use its default behavior
log.info(`[LLMCompletionStage] Stream option not explicitly set, leaving as undefined`);
}
// If this is a direct (non-stream) call to Ollama but has the stream flag,
// ensure we set additional metadata to maintain proper state

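The resulting tri-state behavior, summarized (matching the branches above):

// options.stream === true       -> updatedOptions.stream === true   (explicit on)
// options.stream === false      -> updatedOptions.stream === false  (explicit off)
// options.stream === undefined  -> updatedOptions.stream left undefined (provider default applies)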
View File

@@ -1,7 +1,7 @@
import { BasePipelineStage } from '../pipeline_stage.js';
import type { ToolExecutionInput } from '../interfaces.js';
import log from '../../../log.js';
import type { ChatResponse, Message } from '../../ai_interface.js';
import log from '../../../log.js';
import type { StreamCallback, ToolExecutionInput } from '../interfaces.js';
import { BasePipelineStage } from '../pipeline_stage.js';
import toolRegistry from '../../tools/tool_registry.js';
/**
@@ -21,7 +21,8 @@ export class ToolCallingStage extends BasePipelineStage<ToolExecutionInput, { re
* Process the LLM response and execute any tool calls
*/
protected async process(input: ToolExecutionInput): Promise<{ response: ChatResponse, needsFollowUp: boolean, messages: Message[] }> {
const { response, messages, options } = input;
const { response, messages } = input;
const streamCallback = input.streamCallback as StreamCallback;
log.info(`========== TOOL CALLING STAGE ENTRY ==========`);
log.info(`Response provider: ${response.provider}, model: ${response.model || 'unknown'}`);
@@ -148,6 +149,21 @@ export class ToolCallingStage extends BasePipelineStage<ToolExecutionInput, { re
log.info(`Tool parameters: ${Object.keys(args).join(', ')}`);
log.info(`Parameters values: ${Object.entries(args).map(([k, v]) => `${k}=${typeof v === 'string' ? v : JSON.stringify(v)}`).join(', ')}`);
// Emit tool start event if streaming is enabled
if (streamCallback) {
const toolExecutionData = {
action: 'start',
tool: toolCall.function.name,
args: args
};
// Don't wait for this to complete, but log any errors
const callbackResult = streamCallback('', false, { toolExecution: toolExecutionData });
if (callbackResult instanceof Promise) {
callbackResult.catch((e: Error) => log.error(`Error sending tool execution start event: ${e.message}`));
}
}
const executionStart = Date.now();
let result;
try {
@@ -155,9 +171,40 @@ export class ToolCallingStage extends BasePipelineStage<ToolExecutionInput, { re
result = await tool.execute(args);
const executionTime = Date.now() - executionStart;
log.info(`================ TOOL EXECUTION COMPLETED in ${executionTime}ms ================`);
// Emit tool completion event if streaming is enabled
if (streamCallback) {
const toolExecutionData = {
action: 'complete',
tool: toolCall.function.name,
result: result
};
// Don't wait for this to complete, but log any errors
const callbackResult = streamCallback('', false, { toolExecution: toolExecutionData });
if (callbackResult instanceof Promise) {
callbackResult.catch((e: Error) => log.error(`Error sending tool execution complete event: ${e.message}`));
}
}
} catch (execError: any) {
const executionTime = Date.now() - executionStart;
log.error(`================ TOOL EXECUTION FAILED in ${executionTime}ms: ${execError.message} ================`);
// Emit tool error event if streaming is enabled
if (streamCallback) {
const toolExecutionData = {
action: 'error',
tool: toolCall.function.name,
error: execError.message || String(execError)
};
// Don't wait for this to complete, but log any errors
const callbackResult = streamCallback('', false, { toolExecution: toolExecutionData });
if (callbackResult instanceof Promise) {
callbackResult.catch((e: Error) => log.error(`Error sending tool execution error event: ${e.message}`));
}
}
throw execError;
}
@@ -177,6 +224,22 @@ export class ToolCallingStage extends BasePipelineStage<ToolExecutionInput, { re
} catch (error: any) {
log.error(`Error executing tool ${toolCall.function.name}: ${error.message || String(error)}`);
// Emit tool error event if not already handled in the try/catch above
// and if streaming is enabled
if (streamCallback && error.name !== "ExecutionError") {
const toolExecutionData = {
action: 'error',
tool: toolCall.function.name,
error: error.message || String(error)
};
// Don't wait for this to complete, but log any errors
const callbackResult = streamCallback('', false, { toolExecution: toolExecutionData });
if (callbackResult instanceof Promise) {
callbackResult.catch((e: Error) => log.error(`Error sending tool execution error event: ${e.message}`));
}
}
// Return error message as result
return {
toolCallId: toolCall.id,

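All three emission sites repeat the same fire-and-forget pattern; a hypothetical helper capturing it (not part of this commit) could read:

// Emit a tool event without blocking tool execution; log failures instead of throwing
function emitToolEvent(
    streamCallback: StreamCallback | undefined,
    toolExecution: { action: 'start' | 'complete' | 'error'; tool: string; [key: string]: unknown }
): void {
    if (!streamCallback) return;
    const result = streamCallback('', false, { toolExecution });
    if (result instanceof Promise) {
        result.catch((e: Error) => log.error(`Error sending tool execution ${toolExecution.action} event: ${e.message}`));
    }
}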
View File

@@ -118,17 +118,30 @@ export class OllamaService extends BaseAIService {
log.info(`Stream option in providerOptions: ${providerOptions.stream}`);
log.info(`Stream option type: ${typeof providerOptions.stream}`);
// Stream is a top-level option - ALWAYS set it explicitly to ensure consistency
// This is critical for ensuring streaming works properly
requestBody.stream = providerOptions.stream === true;
log.info(`Set requestBody.stream to boolean: ${requestBody.stream}`);
// Handle streaming in a way that respects the provided option but ensures consistency:
// - If explicitly true, set to true
// - If explicitly false, set to false
// - If undefined, default to false unless we have a streamCallback
if (providerOptions.stream !== undefined) {
// Explicit value provided - respect it
requestBody.stream = providerOptions.stream === true;
log.info(`Stream explicitly provided in options, set to: ${requestBody.stream}`);
} else if (opts.streamCallback) {
// No explicit value but we have a stream callback - enable streaming
requestBody.stream = true;
log.info(`Stream not explicitly set but streamCallback provided, enabling streaming`);
} else {
// Default to false
requestBody.stream = false;
log.info(`Stream not explicitly set and no streamCallback, defaulting to false`);
}
// Log additional information about the streaming context
log.info(`Streaming context: Will stream to client: ${typeof opts.streamCallback === 'function'}`);
// If we have a streaming callback but the stream flag isn't set for some reason, warn about it
if (typeof opts.streamCallback === 'function' && !requestBody.stream) {
log.warn(`WARNING: Stream callback provided but stream=false in request. This may cause streaming issues.`);
log.info(`WARNING: Stream callback provided but stream=false in request. This may cause streaming issues.`);
}
// Add options object if provided

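The three branches condense to a single expression (an equivalent sketch, not the commit's code):

// Explicit option wins; otherwise stream exactly when a callback is present
requestBody.stream = providerOptions.stream !== undefined
    ? providerOptions.stream === true
    : typeof opts.streamCallback === 'function';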
View File

@@ -458,12 +458,22 @@ class RestChatService {
temperature: session.metadata.temperature,
maxTokens: session.metadata.maxTokens,
model: session.metadata.model,
// Always set stream to true for all request types to ensure consistency
// This ensures the pipeline always knows streaming is supported, even for POST requests
stream: true
// Set stream based on request type, but ensure it's explicitly a boolean value
// GET requests or format=stream parameter indicates streaming should be used
stream: !!(req.method === 'GET' || req.query.format === 'stream')
},
streamCallback: req.method === 'GET' ? (data, done) => {
res.write(`data: ${JSON.stringify({ content: data, done })}\n\n`);
streamCallback: req.method === 'GET' ? (data, done, rawChunk) => {
// Prepare response data - include both the content and raw chunk data if available
const responseData: any = { content: data, done };
// If there's tool execution information, add it to the response
if (rawChunk && rawChunk.toolExecution) {
responseData.toolExecution = rawChunk.toolExecution;
}
// Send the data as a JSON event
res.write(`data: ${JSON.stringify(responseData)}\n\n`);
if (done) {
res.end();
}
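On the client, these events can be consumed with a standard EventSource; a minimal sketch (the endpoint URL is hypothetical):

const source = new EventSource('/api/llm/sessions/123/messages?format=stream'); // illustrative URL
source.onmessage = (event) => {
    const data = JSON.parse(event.data);
    if (data.toolExecution) console.log('tool event:', data.toolExecution); // tool status updates
    if (data.content) console.log('text chunk:', data.content);             // streamed assistant text
    if (data.done) source.close();                                          // the server ends the stream via res.end()
};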