Mirror of https://github.com/TriliumNext/Notes.git

Commit 9b5167231f (parent 6f3d2b6ee2): add more logs around tool calling and recursion
@@ -453,8 +453,11 @@ export class ChatPipeline {
             );
 
+            log.info(`========== TOOL EXECUTION RESULTS ==========`);
+            log.info(`Received ${toolResultMessages.length} tool results`);
             toolResultMessages.forEach((msg, idx) => {
-                log.info(`Tool result ${idx + 1}: tool_call_id=${msg.tool_call_id}, content=${msg.content.substring(0, 50)}...`);
+                log.info(`Tool result ${idx + 1}: tool_call_id=${msg.tool_call_id}, content=${msg.content.substring(0, 100)}${msg.content.length > 100 ? '...' : ''}`);
+                log.info(`Tool result status: ${msg.content.startsWith('Error:') ? 'ERROR' : 'SUCCESS'}`);
+                log.info(`Tool result for: ${this.getToolNameFromToolCallId(currentMessages, msg.tool_call_id || '')}`);
             });
 
             // If streaming, show tool executions to the user
             if (isStreaming && streamCallback) {
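Note: the truncate-and-ellipsis pattern used in these log lines recurs throughout the commit. A small helper could express it once; a minimal sketch (the `preview` function is hypothetical, not part of this change):

```typescript
// Hypothetical helper: truncate a string for logging, appending '...' only
// when something was actually cut off (mirrors the inline pattern above).
function preview(text: string | undefined, max = 100): string {
    if (!text) return '';
    return text.length > max ? `${text.substring(0, max)}...` : text;
}

// e.g. log.info(`Tool result ${idx + 1}: ${preview(msg.content)}`);
```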
@@ -559,6 +562,30 @@ export class ChatPipeline {
 
                 // Generate a new completion with the updated messages
                 const followUpStartTime = Date.now();
+
+                // Log messages being sent to LLM for tool follow-up
+                log.info(`========== SENDING TOOL RESULTS TO LLM FOR FOLLOW-UP ==========`);
+                log.info(`Total messages being sent: ${currentMessages.length}`);
+                // Log the most recent messages (last 3) for clarity
+                const recentMessages = currentMessages.slice(-3);
+                recentMessages.forEach((msg, idx) => {
+                    const position = currentMessages.length - recentMessages.length + idx;
+                    log.info(`Message ${position} (${msg.role}): ${msg.content?.substring(0, 100)}${msg.content?.length > 100 ? '...' : ''}`);
+                    if (msg.tool_calls) {
+                        log.info(`  Has ${msg.tool_calls.length} tool calls`);
+                    }
+                    if (msg.tool_call_id) {
+                        log.info(`  Tool call ID: ${msg.tool_call_id}`);
+                    }
+                });
+
+                log.info(`LLM follow-up request options: ${JSON.stringify({
+                    model: modelSelection.options.model,
+                    enableTools: true,
+                    stream: modelSelection.options.stream,
+                    provider: currentResponse.provider
+                })}`);
+
                 const followUpCompletion = await this.stages.llmCompletion.execute({
                     messages: currentMessages,
                     options: {
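Note: the tail-of-conversation logging computes each message's absolute index from a `slice(-3)`. The arithmetic generalizes to any tail length; a standalone sketch with an assumed message shape:

```typescript
interface LoggedMessage { role: string; content?: string }

// Log the last `n` messages with their absolute positions in the full array.
// slice(-n) returns the whole array when it has fewer than n elements, and
// the position arithmetic stays correct in that case too.
function logTail(messages: LoggedMessage[], n = 3): void {
    const tail = messages.slice(-n);
    tail.forEach((msg, idx) => {
        const position = messages.length - tail.length + idx;
        console.log(`Message ${position} (${msg.role}): ${msg.content ?? ''}`);
    });
}
```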
@@ -573,6 +600,15 @@ export class ChatPipeline {
                 });
                 this.updateStageMetrics('llmCompletion', followUpStartTime);
+
+                // Log the follow-up response from the LLM
+                log.info(`========== LLM FOLLOW-UP RESPONSE RECEIVED ==========`);
+                log.info(`Follow-up response model: ${followUpCompletion.response.model}, provider: ${followUpCompletion.response.provider}`);
+                log.info(`Follow-up response text: ${followUpCompletion.response.text?.substring(0, 150)}${followUpCompletion.response.text?.length > 150 ? '...' : ''}`);
+                log.info(`Follow-up contains tool calls: ${!!followUpCompletion.response.tool_calls && followUpCompletion.response.tool_calls.length > 0}`);
+                if (followUpCompletion.response.tool_calls && followUpCompletion.response.tool_calls.length > 0) {
+                    log.info(`Follow-up has ${followUpCompletion.response.tool_calls.length} new tool calls`);
+                }
 
                 // Update current response for the next iteration
                 currentResponse = followUpCompletion.response;
 
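Note: the `!!x && x.length > 0` check on `tool_calls` appears in several of these hunks. In TypeScript it can be captured as a type guard so the true branch no longer needs optional chaining; a sketch under an assumed `ToolCall` shape:

```typescript
interface ToolCall {
    function?: { name?: string; arguments?: string | object };
}

// Type guard: in the true branch, `toolCalls` narrows from
// `ToolCall[] | undefined` to `ToolCall[]`, and is non-empty at runtime.
function hasToolCalls(toolCalls?: ToolCall[]): toolCalls is ToolCall[] {
    return Array.isArray(toolCalls) && toolCalls.length > 0;
}
```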
@@ -650,6 +686,12 @@ export class ChatPipeline {
                     }
                 });
+
+                // Log the error follow-up response from the LLM
+                log.info(`========== ERROR FOLLOW-UP RESPONSE RECEIVED ==========`);
+                log.info(`Error follow-up response model: ${errorFollowUpCompletion.response.model}, provider: ${errorFollowUpCompletion.response.provider}`);
+                log.info(`Error follow-up response text: ${errorFollowUpCompletion.response.text?.substring(0, 150)}${errorFollowUpCompletion.response.text?.length > 150 ? '...' : ''}`);
+                log.info(`Error follow-up contains tool calls: ${!!errorFollowUpCompletion.response.tool_calls && errorFollowUpCompletion.response.tool_calls.length > 0}`);
 
                 // Update current response and break the tool loop
                 currentResponse = errorFollowUpCompletion.response;
                 break;
@@ -15,19 +15,43 @@ export class LLMCompletionStage extends BasePipelineStage<LLMCompletionInput, {
 
     /**
      * Generate LLM completion using the AI service
      *
+     * This enhanced version supports better streaming by forwarding raw provider data
+     * and ensuring consistent handling of stream options.
      */
     protected async process(input: LLMCompletionInput): Promise<{ response: ChatResponse }> {
-        const { messages, options, provider } = input;
+        const { messages, options } = input;
 
-        // Log input options
-        log.info(`[LLMCompletionStage] Input options: ${JSON.stringify({
-            model: options.model,
-            provider,
-            stream: options.stream,
-            enableTools: options.enableTools
-        })}`);
+        // Add detailed logging about the input messages, particularly useful for tool follow-ups
+        log.info(`========== LLM COMPLETION STAGE - INPUT MESSAGES ==========`);
+        log.info(`Total input messages: ${messages.length}`);
+
+        // Log if tool messages are present (used for follow-ups)
+        const toolMessages = messages.filter(m => m.role === 'tool');
+        if (toolMessages.length > 0) {
+            log.info(`Contains ${toolMessages.length} tool result messages - likely a tool follow-up request`);
+        }
+
+        // Log the last few messages to understand conversation context
+        const lastMessages = messages.slice(-3);
+        lastMessages.forEach((msg, idx) => {
+            const msgPosition = messages.length - lastMessages.length + idx;
+            log.info(`Message ${msgPosition} (${msg.role}): ${msg.content?.substring(0, 150)}${msg.content?.length > 150 ? '...' : ''}`);
+            if (msg.tool_calls) {
+                log.info(`  Contains ${msg.tool_calls.length} tool calls`);
+            }
+            if (msg.tool_call_id) {
+                log.info(`  Tool call ID: ${msg.tool_call_id}`);
+            }
+        });
+
+        // Log completion options
+        log.info(`LLM completion options: ${JSON.stringify({
+            model: options.model || 'default',
+            temperature: options.temperature,
+            enableTools: options.enableTools,
+            stream: options.stream,
+            hasToolExecutionStatus: !!options.toolExecutionStatus
+        })}`);
 
         // Create a deep copy of options to avoid modifying the original
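Note: the stage now infers that a request is a tool follow-up purely from message roles. That heuristic is easy to state in isolation; a minimal sketch with assumed types:

```typescript
type Role = 'system' | 'user' | 'assistant' | 'tool';
interface Msg { role: Role; tool_call_id?: string }

// A request is treated as a tool follow-up when the conversation already
// carries at least one 'tool' result message.
function isToolFollowUp(messages: Msg[]): boolean {
    return messages.some(m => m.role === 'tool');
}
```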
@@ -39,19 +63,19 @@ export class LLMCompletionStage extends BasePipelineStage<LLMCompletionInput, {
             log.info(`[LLMCompletionStage] Stream explicitly set to: ${updatedOptions.stream}`);
         }
 
         // Add capture of raw provider data for streaming
         if (updatedOptions.stream) {
             // Add a function to capture raw provider data in stream chunks
             const originalStreamCallback = updatedOptions.streamCallback;
             updatedOptions.streamCallback = async (text, done, rawProviderData) => {
                 // Create an enhanced chunk with the raw provider data
                 const enhancedChunk = {
                     text,
                     done,
                     // Include raw provider data if available
                     raw: rawProviderData
                 };
 
                 // Call the original callback if provided
                 if (originalStreamCallback) {
                     return originalStreamCallback(text, done, enhancedChunk);
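Note: this hunk and the next apply the same decorator idea twice: capture the original callback (or stream method), substitute a wrapper that enriches each chunk with the provider's raw payload, and delegate. A generic sketch under an assumed callback signature:

```typescript
type StreamCallback = (text: string, done: boolean, raw?: unknown) => Promise<void> | void;

// Wrap a stream callback so every chunk is forwarded together with the raw
// provider data; the original callback keeps driving the actual behavior.
function withRawData(original?: StreamCallback): StreamCallback {
    return async (text, done, rawProviderData) => {
        const enhancedChunk = { text, done, raw: rawProviderData };
        if (original) {
            await original(text, done, enhancedChunk);
        }
    };
}
```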
@@ -70,8 +94,8 @@ export class LLMCompletionStage extends BasePipelineStage<LLMCompletionInput, {
         }
 
         // Determine which provider to use
-        let selectedProvider = provider;
-        if (!selectedProvider && updatedOptions.providerMetadata?.provider) {
+        let selectedProvider = '';
+        if (updatedOptions.providerMetadata?.provider) {
             selectedProvider = updatedOptions.providerMetadata.provider;
             log.info(`Using provider ${selectedProvider} from metadata for model ${updatedOptions.model}`);
         }
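Note: this hunk is a behavior change, not just logging: the explicit `provider` input no longer participates (it was also dropped from the destructuring above), so callers that previously passed `input.provider` now fall through to provider metadata or auto-selection. The new resolution order, restated as a standalone sketch:

```typescript
// Provider resolution after this change: metadata wins, otherwise an empty
// string, which later falls through to the service manager's auto-selection.
function resolveProvider(providerMetadata?: { provider?: string }): string {
    return providerMetadata?.provider ?? '';
}
```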
@@ -82,14 +106,14 @@ export class LLMCompletionStage extends BasePipelineStage<LLMCompletionInput, {
         if (selectedProvider && aiServiceManager.isProviderAvailable(selectedProvider)) {
             const service = aiServiceManager.getService(selectedProvider);
             log.info(`[LLMCompletionStage] Using specific service for ${selectedProvider}`);
 
             // Generate completion and wrap with enhanced stream handling
             const response = await service.generateChatCompletion(messages, updatedOptions);
 
             // If streaming is enabled, enhance the stream method
             if (response.stream && typeof response.stream === 'function' && updatedOptions.stream) {
                 const originalStream = response.stream;
 
                 // Replace the stream method with an enhanced version that captures and forwards raw data
                 response.stream = async (callback) => {
                     return originalStream(async (chunk) => {
@@ -107,14 +131,36 @@ export class LLMCompletionStage extends BasePipelineStage<LLMCompletionInput, {
                     });
                 };
             }
 
+            // Add enhanced logging for debugging tool execution follow-ups
+            if (toolMessages.length > 0) {
+                if (response.tool_calls && response.tool_calls.length > 0) {
+                    log.info(`Response contains ${response.tool_calls.length} tool calls`);
+                    response.tool_calls.forEach((toolCall: any, idx: number) => {
+                        log.info(`Tool call ${idx + 1}: ${toolCall.function?.name || 'unnamed'}`);
+                        const args = typeof toolCall.function?.arguments === 'string'
+                            ? toolCall.function?.arguments
+                            : JSON.stringify(toolCall.function?.arguments);
+                        log.info(`Arguments: ${args?.substring(0, 100) || '{}'}`);
+                    });
+                } else {
+                    log.info(`Response contains no tool calls - plain text response`);
+                }
+
+                if (toolMessages.length > 0 && !response.tool_calls) {
+                    log.info(`This appears to be a final response after tool execution (no new tool calls)`);
+                } else if (toolMessages.length > 0 && response.tool_calls && response.tool_calls.length > 0) {
+                    log.info(`This appears to be a continued tool execution flow (tools followed by more tools)`);
+                }
+            }
+
             return { response };
         }
 
         // Use auto-selection if no specific provider
         log.info(`[LLMCompletionStage] Using auto-selected service`);
         const response = await aiServiceManager.generateChatCompletion(messages, updatedOptions);
 
@@ -132,7 +178,29 @@ export class LLMCompletionStage extends BasePipelineStage<LLMCompletionInput, {
                 });
             };
         }
 
+        // Add enhanced logging for debugging tool execution follow-ups
+        if (toolMessages.length > 0) {
+            if (response.tool_calls && response.tool_calls.length > 0) {
+                log.info(`Response contains ${response.tool_calls.length} tool calls`);
+                response.tool_calls.forEach((toolCall: any, idx: number) => {
+                    log.info(`Tool call ${idx + 1}: ${toolCall.function?.name || 'unnamed'}`);
+                    const args = typeof toolCall.function?.arguments === 'string'
+                        ? toolCall.function?.arguments
+                        : JSON.stringify(toolCall.function?.arguments);
+                    log.info(`Arguments: ${args?.substring(0, 100) || '{}'}`);
+                });
+            } else {
+                log.info(`Response contains no tool calls - plain text response`);
+            }
+
+            if (toolMessages.length > 0 && !response.tool_calls) {
+                log.info(`This appears to be a final response after tool execution (no new tool calls)`);
+            } else if (toolMessages.length > 0 && response.tool_calls && response.tool_calls.length > 0) {
+                log.info(`This appears to be a continued tool execution flow (tools followed by more tools)`);
+            }
+        }
+
         return { response };
     }
 }
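Note: the roughly 20-line follow-up logging block is now duplicated verbatim in the specific-provider and auto-selection paths, and its inner `toolMessages.length > 0` checks are redundant inside the outer `if (toolMessages.length > 0)`. Extracting a private helper would keep the two paths in sync; a hedged sketch (helper name and signature are hypothetical):

```typescript
// Hypothetical helper consolidating the duplicated follow-up logging.
// `toolCalls` is the response's tool_calls array, if any.
function logToolFollowUp(
    toolCalls: unknown[] | undefined,
    toolMessageCount: number,
    log: { info(msg: string): void }
): void {
    if (toolMessageCount === 0) return;
    if (toolCalls && toolCalls.length > 0) {
        log.info(`Response contains ${toolCalls.length} tool calls - continued tool execution flow`);
    } else {
        log.info(`Final response after tool execution (no new tool calls)`);
    }
}
```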
@@ -362,53 +362,55 @@ export class ToolCallingStage extends BasePipelineStage<ToolExecutionInput, { re
         log.info(`========== TOOL EXECUTION COMPLETE ==========`);
         log.info(`Completed execution of ${toolResults.length} tools in ${totalExecutionTime}ms`);
 
-        // Add tool results as messages
-        toolResults.forEach(result => {
-            // Format the result content based on type
-            let content: string;
+        // Add each tool result to the messages array
+        const toolResultMessages: Message[] = [];
 
-            if (typeof result.result === 'string') {
-                content = result.result;
-                log.info(`Tool returned string result (${content.length} chars)`);
-            } else {
-                // For object results, format as JSON
-                try {
-                    content = JSON.stringify(result.result, null, 2);
-                    log.info(`Tool returned object result with keys: ${Object.keys(result.result).join(', ')}`);
-                } catch (error) {
-                    content = String(result.result);
-                    log.info(`Failed to stringify object result: ${error}`);
-                }
-            }
+        for (const result of toolResults) {
+            const { toolCallId, name, result: toolResult } = result;
 
-            log.info(`Adding tool result message - Tool: ${result.name}, ID: ${result.toolCallId || 'unknown'}, Length: ${content.length}`);
+            // Format result for message
+            const resultContent = typeof toolResult === 'string'
+                ? toolResult
+                : JSON.stringify(toolResult, null, 2);
 
-            // Create a properly formatted tool response message
-            updatedMessages.push({
+            // Add a new message for the tool result
+            const toolMessage: Message = {
                 role: 'tool',
-                content: content,
-                name: result.name,
-                tool_call_id: result.toolCallId
-            });
+                content: resultContent,
+                name: name,
+                tool_call_id: toolCallId
+            };
 
-            // Log a sample of the content for debugging
-            const contentPreview = content.substring(0, 100) + (content.length > 100 ? '...' : '');
-            log.info(`Tool result preview: ${contentPreview}`);
-        });
+            // Log detailed info about each tool result
+            log.info(`-------- Tool Result for ${name} (ID: ${toolCallId}) --------`);
+            log.info(`Result type: ${typeof toolResult}`);
+            log.info(`Result preview: ${resultContent.substring(0, 150)}${resultContent.length > 150 ? '...' : ''}`);
+            log.info(`Tool result status: ${resultContent.startsWith('Error:') ? 'ERROR' : 'SUCCESS'}`);
 
-        log.info(`Added ${toolResults.length} tool results to conversation`);
-
-        // If we have tool results, we need a follow-up call to the LLM
-        const needsFollowUp = toolResults.length > 0;
-
-        if (needsFollowUp) {
-            log.info(`Tool execution complete, LLM follow-up required with ${updatedMessages.length} messages`);
+            updatedMessages.push(toolMessage);
+            toolResultMessages.push(toolMessage);
         }
 
+        // Log the decision about follow-up
+        log.info(`========== FOLLOW-UP DECISION ==========`);
+        const hasToolResults = toolResultMessages.length > 0;
+        const hasErrors = toolResultMessages.some(msg => msg.content.startsWith('Error:'));
+        const needsFollowUp = hasToolResults;
+
+        log.info(`Follow-up needed: ${needsFollowUp}`);
+        log.info(`Reasoning: ${hasToolResults ? 'Has tool results to process' : 'No tool results'} ${hasErrors ? ', contains errors' : ''}`);
+        log.info(`Total messages to return to pipeline: ${updatedMessages.length}`);
+        log.info(`Last 3 messages in conversation:`);
+        const lastMessages = updatedMessages.slice(-3);
+        lastMessages.forEach((msg, idx) => {
+            const position = updatedMessages.length - lastMessages.length + idx;
+            log.info(`Message ${position} (${msg.role}): ${msg.content?.substring(0, 100)}${msg.content?.length > 100 ? '...' : ''}`);
+        });
+
         return {
             response,
-            needsFollowUp,
-            messages: updatedMessages
+            messages: updatedMessages,
+            needsFollowUp
         };
     }
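Note: the rewritten loop builds one `role: 'tool'` message per executed call, echoing the `tool_call_id` the assistant generated; that id pairing is what lets chat-completions style APIs match results back to calls. The message construction in isolation, with an assumed `Message` shape:

```typescript
interface Message {
    role: 'system' | 'user' | 'assistant' | 'tool';
    content: string;
    name?: string;
    tool_call_id?: string;
}

// Build the tool result message for one executed call. The tool_call_id must
// echo the id from the assistant's tool call, or the provider cannot pair them.
function toToolMessage(toolCallId: string, name: string, result: unknown): Message {
    const content = typeof result === 'string' ? result : JSON.stringify(result, null, 2);
    return { role: 'tool', content, name, tool_call_id: toolCallId };
}
```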