recursive thinking?

perf3ct 2025-04-09 01:24:32 +00:00
parent 39f979ea8c
commit c95b9f798c
3 changed files with 127 additions and 3 deletions

View File

@@ -35,6 +35,7 @@ export interface ChatCompletionOptions {
enableTools?: boolean; // Whether to enable tool calling
tools?: any[]; // Tools to provide to the LLM
useAdvancedContext?: boolean; // Whether to use advanced context enrichment
toolExecutionStatus?: any[]; // Status information about executed tools for feedback
}
export interface ChatResponse {
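
Note: the new option is typed any[]. The Ollama service below (third file in this commit) defines a ToolExecutionStatus interface describing exactly what these entries carry, so a stricter signature is possible. A sketch, assuming that interface were hoisted into this shared module:

export interface ToolExecutionStatus {
    toolCallId: string;
    name: string;
    success: boolean;
    result: string;
    error?: string;
}

export interface ChatCompletionOptions {
    // ...existing options...
    toolExecutionStatus?: ToolExecutionStatus[]; // instead of any[]
}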

View File

@@ -367,6 +367,29 @@ export class ChatPipeline {
await streamCallback('[Generating response with tool results...]\n\n', false);
}
// Extract tool execution status information for Ollama feedback
let toolExecutionStatus;
if (currentResponse.provider === 'Ollama') {
// Collect tool execution status from the tool results
toolExecutionStatus = toolResultMessages.map(msg => {
// Determine if this was a successful tool call
const isError = msg.content.startsWith('Error:');
return {
toolCallId: msg.tool_call_id || '',
name: msg.name || 'unknown',
success: !isError,
result: msg.content,
error: isError ? msg.content.substring(7) : undefined
};
});
log.info(`Created tool execution status for Ollama: ${toolExecutionStatus.length} entries`);
toolExecutionStatus.forEach((status, idx) => {
log.info(`Tool status ${idx + 1}: ${status.name} - ${status.success ? 'success' : 'failed'}`);
});
}
// Generate a new completion with the updated messages
const followUpStartTime = Date.now();
const followUpCompletion = await this.stages.llmCompletion.execute({
@@ -376,7 +399,9 @@
// Ensure tool support is still enabled for follow-up requests
enableTools: true,
// Disable streaming during tool execution follow-ups
-stream: false
+stream: false,
+// Add tool execution status for Ollama provider
+...(currentResponse.provider === 'Ollama' ? { toolExecutionStatus } : {})
}
});
this.updateStageMetrics('llmCompletion', followUpStartTime);
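
The mapping above derives success or failure from a string convention alone: a tool result whose content starts with 'Error:' counts as failed, and substring(7) strips the prefix, assuming it is followed by a single space ('Error: '). The same transformation as a self-contained sketch, with a hypothetical minimal type standing in for the pipeline's tool result messages:

interface ToolResultMessage {
    tool_call_id?: string;
    name?: string;
    content: string;
}

interface ToolExecutionStatus {
    toolCallId: string;
    name: string;
    success: boolean;
    result: string;
    error?: string;
}

// An "Error:" prefix marks failure; substring(7) skips "Error: "
// (six characters plus the trailing space).
function toStatuses(msgs: ToolResultMessage[]): ToolExecutionStatus[] {
    return msgs.map(msg => {
        const isError = msg.content.startsWith('Error:');
        return {
            toolCallId: msg.tool_call_id ?? '',
            name: msg.name ?? 'unknown',
            success: !isError,
            result: msg.content,
            error: isError ? msg.content.substring(7) : undefined
        };
    });
}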
@@ -418,10 +443,31 @@
await streamCallback(`[Tool execution error: ${error.message || 'unknown error'}]\n\n`, false);
}
// For Ollama, create tool execution status with the error
let toolExecutionStatus;
if (currentResponse.provider === 'Ollama' && currentResponse.tool_calls) {
// We need to create error statuses for all tool calls that failed
toolExecutionStatus = currentResponse.tool_calls.map(toolCall => {
return {
toolCallId: toolCall.id || '',
name: toolCall.function?.name || 'unknown',
success: false,
result: `Error: ${error.message || 'unknown error'}`,
error: error.message || 'unknown error'
};
});
log.info(`Created error tool execution status for Ollama: ${toolExecutionStatus.length} entries`);
}
// Make a follow-up request to the LLM with the error information
const errorFollowUpCompletion = await this.stages.llmCompletion.execute({
messages: currentMessages,
-options: modelSelection.options
+options: {
+    ...modelSelection.options,
+    // For Ollama, include tool execution status
+    ...(currentResponse.provider === 'Ollama' ? { toolExecutionStatus } : {})
+}
});
// Update current response and break the tool loop
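
Unlike the success path above, no tool result messages exist when execution throws, so statuses are synthesized from the original tool_calls array, each entry marked failed with the same error. The construction in isolation, reusing the ToolExecutionStatus shape from the earlier sketch (tool-call shape assumed from this diff):

function errorStatuses(
    toolCalls: Array<{ id?: string; function?: { name?: string } }>,
    error: { message?: string }
): ToolExecutionStatus[] {
    const message = error.message || 'unknown error';
    return toolCalls.map(toolCall => ({
        toolCallId: toolCall.id ?? '',
        name: toolCall.function?.name ?? 'unknown',
        success: false,
        result: `Error: ${message}`,
        error: message
    }));
}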
@@ -445,12 +491,31 @@
await streamCallback(`[Reached maximum of ${maxToolCallIterations} tool calls. Finalizing response...]\n\n`, false);
}
// For Ollama, create a status about reaching max iterations
let toolExecutionStatus;
if (currentResponse.provider === 'Ollama' && currentResponse.tool_calls) {
// Create a special status message about max iterations
toolExecutionStatus = [
{
toolCallId: 'max-iterations',
name: 'system',
success: false,
result: `Maximum tool call iterations (${maxToolCallIterations}) reached.`,
error: `Reached the maximum number of allowed tool calls (${maxToolCallIterations}). Please provide a final response with the information gathered so far.`
}
];
log.info(`Created max iterations status for Ollama`);
}
// Make a final request to get a summary response
const finalFollowUpCompletion = await this.stages.llmCompletion.execute({
messages: currentMessages,
options: {
...modelSelection.options,
-enableTools: false // Disable tools for the final response
+enableTools: false, // Disable tools for the final response
+// For Ollama, include tool execution status
+...(currentResponse.provider === 'Ollama' ? { toolExecutionStatus } : {})
}
});
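
The ...(condition ? { key } : {}) idiom used at all three call sites merges a property only when the condition holds; for every other provider the spread contributes nothing and the options object is unchanged. A small illustration with stand-in values:

const provider = 'Ollama';
const modelOptions = { temperature: 0.7 };   // stand-in for modelSelection.options

const toolExecutionStatus = [{
    toolCallId: 'max-iterations',            // sentinel, not a real tool call
    name: 'system',
    success: false,
    result: 'Maximum tool call iterations (5) reached.'
}];

const options = {
    ...modelOptions,
    enableTools: false,                      // tools stay off for the final summary
    ...(provider === 'Ollama' ? { toolExecutionStatus } : {})
};
// options.toolExecutionStatus exists only when provider === 'Ollama'

The sentinel entry is worth noting: it is a pseudo tool call with id 'max-iterations' and name 'system', so the feedback builder in the Ollama service renders it like any other failed tool.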

View File

@@ -39,6 +39,15 @@ interface OllamaResponse {
eval_duration: number;
}
// Add an interface for tool execution feedback status
interface ToolExecutionStatus {
toolCallId: string;
name: string;
success: boolean;
result: string;
error?: string;
}
export class OllamaService extends BaseAIService {
private formatter: OllamaMessageFormatter;
@@ -72,6 +81,12 @@ export class OllamaService extends BaseAIService {
const systemPrompt = this.getSystemPrompt(opts.systemPrompt || options.getOption('aiSystemPrompt'));
try {
// Check if we should add tool execution feedback
if (opts.toolExecutionStatus && Array.isArray(opts.toolExecutionStatus) && opts.toolExecutionStatus.length > 0) {
log.info(`Adding tool execution feedback to messages`);
messages = this.addToolExecutionFeedback(messages, opts.toolExecutionStatus);
}
// Determine whether to use the formatter or send messages directly
let messagesToSend: Message[];
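
Because this guard runs before the formatter branch, the feedback lands in the message array ahead of any provider-specific formatting. A hypothetical call from the pipeline side, assuming the service's completion entry point is named generateChatCompletion (the name is not shown in this hunk):

// Hypothetical usage: statuses flow in through the options object and are
// folded into the conversation before the Ollama request is built.
const response = await ollamaService.generateChatCompletion(messages, {
    enableTools: true,
    toolExecutionStatus: [
        { toolCallId: 'call_1', name: 'search_notes', success: true, result: '3 notes found' }
    ]
});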
@@ -465,6 +480,49 @@ export class OllamaService extends BaseAIService {
return 8192; // Default to 8192 tokens if there's an error
}
}
/**
* Adds a system message with feedback about tool execution status
* @param messages The current message array
* @param toolExecutionStatus Array of tool execution status objects
* @returns Updated message array with feedback
*/
private addToolExecutionFeedback(messages: Message[], toolExecutionStatus: ToolExecutionStatus[]): Message[] {
if (!toolExecutionStatus || toolExecutionStatus.length === 0) {
return messages;
}
// Create a copy of the messages
const updatedMessages = [...messages];
// Create a feedback message that explains what happened with each tool call
let feedbackContent = `Tool execution feedback:\n\n`;
toolExecutionStatus.forEach((status, index) => {
// Add status for each tool
const statusText = status.success ? 'successfully executed' : 'failed to execute';
const toolName = status.name || 'unknown tool';
feedbackContent += `Tool call ${index + 1} (${toolName}): ${statusText}\n`;
// Add error information if available and tool failed
if (!status.success && status.error) {
feedbackContent += `Error: ${status.error}\n`;
feedbackContent += `Please fix this issue in your next response or try a different approach.\n`;
}
feedbackContent += `\n`;
});
// Add feedback message to the conversation
updatedMessages.push({
role: 'system',
content: feedbackContent
});
log.info(`Added tool execution feedback: ${toolExecutionStatus.length} statuses`);
return updatedMessages;
}
}
/**
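
For concreteness, two statuses (one success, one failure; tool names hypothetical) produce a system message whose content reads:

Tool execution feedback:

Tool call 1 (search_notes): successfully executed

Tool call 2 (run_query): failed to execute
Error: timeout after 5000ms
Please fix this issue in your next response or try a different approach.

Appending this as a system message keeps the tool transcript intact while giving the model an explicit, human-readable summary to act on in its next turn.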