From 9b5167231fc4e318fc6fd798acb0e1e86036fba7 Mon Sep 17 00:00:00 2001
From: perf3ct
Date: Thu, 17 Apr 2025 16:10:13 +0000
Subject: [PATCH] add more logs around tool calling and recursion

---
 src/services/llm/pipeline/chat_pipeline.ts    |  44 +++++++-
 .../pipeline/stages/llm_completion_stage.ts   | 104 +++++++++++++++---
 .../llm/pipeline/stages/tool_calling_stage.ts |  76 ++++++-------
 3 files changed, 168 insertions(+), 56 deletions(-)

diff --git a/src/services/llm/pipeline/chat_pipeline.ts b/src/services/llm/pipeline/chat_pipeline.ts
index fc630bca8..7b8822403 100644
--- a/src/services/llm/pipeline/chat_pipeline.ts
+++ b/src/services/llm/pipeline/chat_pipeline.ts
@@ -453,8 +453,11 @@ export class ChatPipeline {
                 );
 
                 log.info(`========== TOOL EXECUTION RESULTS ==========`);
+                log.info(`Received ${toolResultMessages.length} tool results`);
                 toolResultMessages.forEach((msg, idx) => {
-                    log.info(`Tool result ${idx + 1}: tool_call_id=${msg.tool_call_id}, content=${msg.content.substring(0, 50)}...`);
+                    log.info(`Tool result ${idx + 1}: tool_call_id=${msg.tool_call_id}, content=${msg.content.substring(0, 100)}${msg.content.length > 100 ? '...' : ''}`);
+                    log.info(`Tool result status: ${msg.content.startsWith('Error:') ? 'ERROR' : 'SUCCESS'}`);
+                    log.info(`Tool result for: ${this.getToolNameFromToolCallId(currentMessages, msg.tool_call_id || '')}`);
 
                     // If streaming, show tool executions to the user
                     if (isStreaming && streamCallback) {
@@ -559,6 +562,30 @@ export class ChatPipeline {
 
                 // Generate a new completion with the updated messages
                 const followUpStartTime = Date.now();
+
+                // Log messages being sent to LLM for tool follow-up
+                log.info(`========== SENDING TOOL RESULTS TO LLM FOR FOLLOW-UP ==========`);
+                log.info(`Total messages being sent: ${currentMessages.length}`);
+                // Log the most recent messages (last 3) for clarity
+                const recentMessages = currentMessages.slice(-3);
+                recentMessages.forEach((msg, idx) => {
+                    const position = currentMessages.length - recentMessages.length + idx;
+                    log.info(`Message ${position} (${msg.role}): ${msg.content?.substring(0, 100)}${msg.content?.length > 100 ? '...' : ''}`);
+                    if (msg.tool_calls) {
+                        log.info(`  Has ${msg.tool_calls.length} tool calls`);
+                    }
+                    if (msg.tool_call_id) {
+                        log.info(`  Tool call ID: ${msg.tool_call_id}`);
+                    }
+                });
+
+                log.info(`LLM follow-up request options: ${JSON.stringify({
+                    model: modelSelection.options.model,
+                    enableTools: true,
+                    stream: modelSelection.options.stream,
+                    provider: currentResponse.provider
+                })}`);
+
                 const followUpCompletion = await this.stages.llmCompletion.execute({
                     messages: currentMessages,
                     options: {
@@ -573,6 +600,15 @@ export class ChatPipeline {
                 });
                 this.updateStageMetrics('llmCompletion', followUpStartTime);
 
+                // Log the follow-up response from the LLM
+                log.info(`========== LLM FOLLOW-UP RESPONSE RECEIVED ==========`);
+                log.info(`Follow-up response model: ${followUpCompletion.response.model}, provider: ${followUpCompletion.response.provider}`);
+                log.info(`Follow-up response text: ${followUpCompletion.response.text?.substring(0, 150)}${followUpCompletion.response.text?.length > 150 ? '...' : ''}`);
+                log.info(`Follow-up contains tool calls: ${!!followUpCompletion.response.tool_calls && followUpCompletion.response.tool_calls.length > 0}`);
+                if (followUpCompletion.response.tool_calls && followUpCompletion.response.tool_calls.length > 0) {
+                    log.info(`Follow-up has ${followUpCompletion.response.tool_calls.length} new tool calls`);
+                }
+
                 // Update current response for the next iteration
                 currentResponse = followUpCompletion.response;
@@ -650,6 +686,12 @@ export class ChatPipeline {
                     }
                 });
 
+                // Log the error follow-up response from the LLM
+                log.info(`========== ERROR FOLLOW-UP RESPONSE RECEIVED ==========`);
+                log.info(`Error follow-up response model: ${errorFollowUpCompletion.response.model}, provider: ${errorFollowUpCompletion.response.provider}`);
+                log.info(`Error follow-up response text: ${errorFollowUpCompletion.response.text?.substring(0, 150)}${errorFollowUpCompletion.response.text?.length > 150 ? '...' : ''}`);
+                log.info(`Error follow-up contains tool calls: ${!!errorFollowUpCompletion.response.tool_calls && errorFollowUpCompletion.response.tool_calls.length > 0}`);
+
                 // Update current response and break the tool loop
                 currentResponse = errorFollowUpCompletion.response;
                 break;
diff --git a/src/services/llm/pipeline/stages/llm_completion_stage.ts b/src/services/llm/pipeline/stages/llm_completion_stage.ts
index 9b181ca91..7dd6984c8 100644
--- a/src/services/llm/pipeline/stages/llm_completion_stage.ts
+++ b/src/services/llm/pipeline/stages/llm_completion_stage.ts
@@ -15,19 +15,43 @@ export class LLMCompletionStage extends BasePipelineStage {
-        const { messages, options, provider } = input;
+        const { messages, options } = input;
 
-        // Log input options
-        log.info(`[LLMCompletionStage] Input options: ${JSON.stringify({
-            model: options.model,
-            provider,
+        // Add detailed logging about the input messages, particularly useful for tool follow-ups
+        log.info(`========== LLM COMPLETION STAGE - INPUT MESSAGES ==========`);
+        log.info(`Total input messages: ${messages.length}`);
+
+        // Log if tool messages are present (used for follow-ups)
+        const toolMessages = messages.filter(m => m.role === 'tool');
+        if (toolMessages.length > 0) {
+            log.info(`Contains ${toolMessages.length} tool result messages - likely a tool follow-up request`);
+        }
+
+        // Log the last few messages to understand conversation context
+        const lastMessages = messages.slice(-3);
+        lastMessages.forEach((msg, idx) => {
+            const msgPosition = messages.length - lastMessages.length + idx;
+            log.info(`Message ${msgPosition} (${msg.role}): ${msg.content?.substring(0, 150)}${msg.content?.length > 150 ? '...' : ''}`);
+            if (msg.tool_calls) {
+                log.info(`  Contains ${msg.tool_calls.length} tool calls`);
+            }
+            if (msg.tool_call_id) {
+                log.info(`  Tool call ID: ${msg.tool_call_id}`);
+            }
+        });
+
+        // Log completion options
+        log.info(`LLM completion options: ${JSON.stringify({
+            model: options.model || 'default',
+            temperature: options.temperature,
+            enableTools: options.enableTools,
             stream: options.stream,
-            enableTools: options.enableTools
+            hasToolExecutionStatus: !!options.toolExecutionStatus
         })}`);
 
         // Create a deep copy of options to avoid modifying the original
@@ -39,19 +63,19 @@ export class LLMCompletionStage extends BasePipelineStage {
                         // Create an enhanced chunk with the raw provider data
                         const enhancedChunk = {
-                            text, 
+                            text,
                             done,
                             // Include raw provider data if available
                             raw: rawProviderData
                         };
-                        
+
                         // Call the original callback if provided
                         if (originalStreamCallback) {
                             return originalStreamCallback(text, done, enhancedChunk);
@@ -70,8 +94,8 @@ export class LLMCompletionStage extends BasePipelineStage {
                 return originalStream(async (chunk) => {
@@ -107,14 +131,36 @@ export class LLMCompletionStage extends BasePipelineStage 0) {
+                if (response.tool_calls && response.tool_calls.length > 0) {
+                    log.info(`Response contains ${response.tool_calls.length} tool calls`);
+                    response.tool_calls.forEach((toolCall: any, idx: number) => {
+                        log.info(`Tool call ${idx + 1}: ${toolCall.function?.name || 'unnamed'}`);
+                        const args = typeof toolCall.function?.arguments === 'string'
+                            ? toolCall.function?.arguments
+                            : JSON.stringify(toolCall.function?.arguments);
+                        log.info(`Arguments: ${args?.substring(0, 100) || '{}'}`);
+                    });
+                } else {
+                    log.info(`Response contains no tool calls - plain text response`);
+                }
+
+                if (toolMessages.length > 0 && !response.tool_calls) {
+                    log.info(`This appears to be a final response after tool execution (no new tool calls)`);
+                } else if (toolMessages.length > 0 && response.tool_calls && response.tool_calls.length > 0) {
+                    log.info(`This appears to be a continued tool execution flow (tools followed by more tools)`);
+                }
+            }
+
             return { response };
         }
 
         // Use auto-selection if no specific provider
         log.info(`[LLMCompletionStage] Using auto-selected service`);
         const response = await aiServiceManager.generateChatCompletion(messages, updatedOptions);
-        
+
         // Add similar stream enhancement for auto-selected provider
         if (response.stream && typeof response.stream === 'function' && updatedOptions.stream) {
             const originalStream = response.stream;
@@ -132,7 +178,29 @@ export class LLMCompletionStage extends BasePipelineStage 0) {
+            if (response.tool_calls && response.tool_calls.length > 0) {
+                log.info(`Response contains ${response.tool_calls.length} tool calls`);
+                response.tool_calls.forEach((toolCall: any, idx: number) => {
+                    log.info(`Tool call ${idx + 1}: ${toolCall.function?.name || 'unnamed'}`);
+                    const args = typeof toolCall.function?.arguments === 'string'
+                        ? toolCall.function?.arguments
+                        : JSON.stringify(toolCall.function?.arguments);
+                    log.info(`Arguments: ${args?.substring(0, 100) || '{}'}`);
+                });
+            } else {
+                log.info(`Response contains no tool calls - plain text response`);
+            }
+
+            if (toolMessages.length > 0 && !response.tool_calls) {
+                log.info(`This appears to be a final response after tool execution (no new tool calls)`);
+            } else if (toolMessages.length > 0 && response.tool_calls && response.tool_calls.length > 0) {
+                log.info(`This appears to be a continued tool execution flow (tools followed by more tools)`);
+            }
+        }
+
         return { response };
     }
 }
diff --git a/src/services/llm/pipeline/stages/tool_calling_stage.ts b/src/services/llm/pipeline/stages/tool_calling_stage.ts
index 6d53fdee6..bab29fdf1 100644
--- a/src/services/llm/pipeline/stages/tool_calling_stage.ts
+++ b/src/services/llm/pipeline/stages/tool_calling_stage.ts
@@ -362,53 +362,55 @@ export class ToolCallingStage extends BasePipelineStage {
-            // Format the result content based on type
-            let content: string;
+        // Add each tool result to the messages array
+        const toolResultMessages: Message[] = [];
 
-            if (typeof result.result === 'string') {
-                content = result.result;
-                log.info(`Tool returned string result (${content.length} chars)`);
-            } else {
-                // For object results, format as JSON
-                try {
-                    content = JSON.stringify(result.result, null, 2);
-                    log.info(`Tool returned object result with keys: ${Object.keys(result.result).join(', ')}`);
-                } catch (error) {
-                    content = String(result.result);
-                    log.info(`Failed to stringify object result: ${error}`);
-                }
-            }
+        for (const result of toolResults) {
+            const { toolCallId, name, result: toolResult } = result;
 
-            log.info(`Adding tool result message - Tool: ${result.name}, ID: ${result.toolCallId || 'unknown'}, Length: ${content.length}`);
+            // Format result for message
+            const resultContent = typeof toolResult === 'string'
+                ? toolResult
+                : JSON.stringify(toolResult, null, 2);
 
-            // Create a properly formatted tool response message
-            updatedMessages.push({
+            // Add a new message for the tool result
+            const toolMessage: Message = {
                 role: 'tool',
-                content: content,
-                name: result.name,
-                tool_call_id: result.toolCallId
-            });
+                content: resultContent,
+                name: name,
+                tool_call_id: toolCallId
+            };
 
-            // Log a sample of the content for debugging
-            const contentPreview = content.substring(0, 100) + (content.length > 100 ? '...' : '');
-            log.info(`Tool result preview: ${contentPreview}`);
-        });
+            // Log detailed info about each tool result
+            log.info(`-------- Tool Result for ${name} (ID: ${toolCallId}) --------`);
+            log.info(`Result type: ${typeof toolResult}`);
+            log.info(`Result preview: ${resultContent.substring(0, 150)}${resultContent.length > 150 ? '...' : ''}`);
+            log.info(`Tool result status: ${resultContent.startsWith('Error:') ? 'ERROR' : 'SUCCESS'}`);
 
-        log.info(`Added ${toolResults.length} tool results to conversation`);
-
-        // If we have tool results, we need a follow-up call to the LLM
-        const needsFollowUp = toolResults.length > 0;
-
-        if (needsFollowUp) {
-            log.info(`Tool execution complete, LLM follow-up required with ${updatedMessages.length} messages`);
+            updatedMessages.push(toolMessage);
+            toolResultMessages.push(toolMessage);
         }
 
+        // Log the decision about follow-up
+        log.info(`========== FOLLOW-UP DECISION ==========`);
+        const hasToolResults = toolResultMessages.length > 0;
+        const hasErrors = toolResultMessages.some(msg => msg.content.startsWith('Error:'));
+        const needsFollowUp = hasToolResults;
+
+        log.info(`Follow-up needed: ${needsFollowUp}`);
+        log.info(`Reasoning: ${hasToolResults ? 'Has tool results to process' : 'No tool results'} ${hasErrors ? ', contains errors' : ''}`);
+        log.info(`Total messages to return to pipeline: ${updatedMessages.length}`);
+        log.info(`Last 3 messages in conversation:`);
+        const lastMessages = updatedMessages.slice(-3);
+        lastMessages.forEach((msg, idx) => {
+            const position = updatedMessages.length - lastMessages.length + idx;
+            log.info(`Message ${position} (${msg.role}): ${msg.content?.substring(0, 100)}${msg.content?.length > 100 ? '...' : ''}`);
+        });
+
         return {
             response,
-            needsFollowUp,
-            messages: updatedMessages
+            messages: updatedMessages,
+            needsFollowUp
         };
     }
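
Note: the logging added to chat_pipeline.ts calls this.getToolNameFromToolCallId(currentMessages, msg.tool_call_id || ''), a helper that is not included in this patch. As a reading aid only, here is a minimal sketch of what such a lookup could look like; it is an assumption rather than the repository's actual implementation, and it presumes that assistant messages carry OpenAI-style tool_calls entries with an id and a function.name (the local ToolCallRef and ChatMessage interfaces below are illustrative stand-ins for the project's own Message type):

    // Hypothetical helper (not part of this patch): resolve a tool_call_id back to the
    // tool name by scanning the assistant messages that issued the tool calls.
    interface ToolCallRef { id?: string; function?: { name?: string } }
    interface ChatMessage { role: string; tool_calls?: ToolCallRef[]; tool_call_id?: string }

    function getToolNameFromToolCallId(messages: ChatMessage[], toolCallId: string): string {
        if (!toolCallId) {
            return 'unknown';
        }
        for (const message of messages) {
            // Only assistant messages carry the tool_calls that tool results refer back to
            if (message.role === 'assistant' && message.tool_calls) {
                const match = message.tool_calls.find(tc => tc.id === toolCallId);
                if (match?.function?.name) {
                    return match.function.name;
                }
            }
        }
        return 'unknown';
    }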