Mirror of https://github.com/TriliumNext/Notes.git (synced 2025-07-28 10:32:27 +08:00)

Commit 2bc2aa857f: "wooo, tool execution container shows for openai"
Parent: c04e3b2c89
@@ -97,6 +97,21 @@ export async function setupStreamingResponse(
         }

+        if (message.type === 'tool_result' && message.toolExecution) {
+            console.log(`[${responseId}] Processing tool result: ${JSON.stringify(message.toolExecution)}`);
+
+            // If tool execution doesn't have an action, add 'result' as the default
+            if (!message.toolExecution.action) {
+                message.toolExecution.action = 'result';
+            }
+
+            // First send a 'start' action to ensure the container is created
+            onToolExecution({
+                action: 'start',
+                tool: 'tools',
+                result: 'Tool execution initialized'
+            });
+
+            // Then send the actual tool execution data
+            onToolExecution(message.toolExecution);
+            return; // Skip accumulating content from this message
+        }
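
The hunk above normalizes incoming `tool_result` messages before display: a missing `action` defaults to 'result', and a synthetic 'start' event is emitted first so the UI container exists before the real data lands. A minimal standalone sketch of that pattern (the `ToolExecution` interface and `handleToolResult` name are hypothetical, inferred from the fields the hunk touches, not declarations from the codebase):

    // Hypothetical shape, inferred from the fields used above.
    interface ToolExecution {
        action?: string;
        tool?: string;
        result?: string;
        toolCallId?: string;
        args?: unknown;
    }

    // Sketch of the normalize-then-start pattern from the hunk above.
    function handleToolResult(
        toolExecution: ToolExecution,
        onToolExecution: (event: ToolExecution) => void
    ): void {
        // Default the action so downstream rendering always has one to switch on
        toolExecution.action = toolExecution.action ?? 'result';

        // Emit 'start' first so the tool execution container gets created
        onToolExecution({ action: 'start', tool: 'tools', result: 'Tool execution initialized' });

        // Then forward the actual result
        onToolExecution(toolExecution);
    }
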
@@ -156,6 +171,41 @@ export async function setupStreamingResponse(
                 onToolExecution(message.toolExecution);
             }
         }

+        // Handle tool calls from the raw data or direct in message (OpenAI format)
+        const toolCalls = message.tool_calls || (message.raw && message.raw.tool_calls);
+        if (toolCalls && Array.isArray(toolCalls)) {
+            console.log(`[${responseId}] Received tool calls: ${toolCalls.length} tools`);
+
+            // First send a 'start' action to ensure the container is created
+            onToolExecution({
+                action: 'start',
+                tool: 'tools',
+                result: 'Tool execution initialized'
+            });
+
+            // Then process each tool call
+            for (const toolCall of toolCalls) {
+                let args = toolCall.function?.arguments || {};
+
+                // Try to parse arguments if they're a string
+                if (typeof args === 'string') {
+                    try {
+                        args = JSON.parse(args);
+                    } catch (e) {
+                        console.log(`[${responseId}] Could not parse tool arguments as JSON: ${e}`);
+                        args = { raw: args };
+                    }
+                }
+
+                onToolExecution({
+                    action: 'executing',
+                    tool: toolCall.function?.name || 'unknown',
+                    toolCallId: toolCall.id,
+                    args: args
+                });
+            }
+        }
+
         // Handle thinking state updates
         if (message.thinking) {
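
The argument-parsing dance in this hunk (parse a JSON string, fall back to wrapping the raw string) reappears twice more in the RestChatService changes further down. Factored out, it would look roughly like this (hypothetical `parseToolArgs` helper; the commit itself keeps the logic inline):

    // Sketch: OpenAI streams tool-call arguments as a JSON string; parse it,
    // and if parsing fails, keep the original string under a `raw` key.
    function parseToolArgs(args: unknown): Record<string, unknown> {
        if (typeof args !== 'string') {
            return (args as Record<string, unknown>) ?? {};
        }
        try {
            return JSON.parse(args);
        } catch (e) {
            return { raw: args };
        }
    }

With it, each loop body would reduce to `const args = parseToolArgs(toolCall.function?.arguments);`.
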
@@ -200,10 +250,15 @@ export async function setupStreamingResponse(
                 onContentUpdate(assistantResponse, true);
             }

-            // Clean up and resolve
-            cleanupEventListener(eventListener);
-            onComplete();
-            resolve();
+            // Set a short delay before cleanup to allow any immediately following
+            // tool execution messages to be processed
+            setTimeout(() => {
+                // Clean up and resolve
+                console.log(`[${responseId}] Cleaning up event listener after delay`);
+                cleanupEventListener(eventListener);
+                onComplete();
+                resolve();
+            }, 1000); // 1 second delay to allow tool execution messages to arrive
         }
     };
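
This hunk trades immediate teardown for a fixed grace period: the completion signal can reach the client a beat before trailing tool execution messages, and removing the event listener right away would drop them. A sketch of the race, with the delay pulled into a named constant (hypothetical names, not from the codebase):

    // Message order the delay guards against (sketch):
    //   1. { type: 'message', done: true }   <- triggers the cleanup path
    //   2. { type: 'tool_result', ... }      <- lost if the listener is already gone
    const CLEANUP_GRACE_MS = 1000; // same 1-second delay the hunk hardcodes

    function scheduleCleanup(cleanup: () => void): ReturnType<typeof setTimeout> {
        return setTimeout(cleanup, CLEANUP_GRACE_MS);
    }

A fixed delay is a heuristic; an explicit end-of-stream message sent after all tool results would remove the race entirely, at the cost of a protocol change.
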
@@ -996,6 +996,23 @@ export default class LlmChatPanel extends BasicWidget {
      */
     private showToolExecutionInfo(toolExecutionData: any) {
         console.log(`Showing tool execution info: ${JSON.stringify(toolExecutionData)}`);

+        // Enhanced debugging for tool execution
+        if (!toolExecutionData) {
+            console.error('Tool execution data is missing or undefined');
+            return;
+        }
+
+        // Check for required properties
+        const actionType = toolExecutionData.action || '';
+        const toolName = toolExecutionData.tool || 'unknown';
+        console.log(`Tool execution details: action=${actionType}, tool=${toolName}, hasResult=${!!toolExecutionData.result}`);
+
+        // Force action to 'result' if missing but result is present
+        if (!actionType && toolExecutionData.result) {
+            console.log('Setting missing action to "result" since result is present');
+            toolExecutionData.action = 'result';
+        }
+
         // Create or get the tool execution container
         let toolExecutionElement = this.noteContextChatMessages.querySelector('.chat-tool-execution');
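
`showToolExecutionInfo` takes `any`, but the fields it reads pin down a de facto shape. An inferred sketch (hypothetical interface name; the real code stays untyped):

    // Inferred from the fields read in showToolExecutionInfo; not declared in the codebase.
    interface ToolExecutionData {
        action?: string;                          // e.g. 'start' | 'executing' | 'result'
        tool?: string;                            // tool name, e.g. 'search_notes'
        result?: string | { results: unknown[] }; // note-search tools return { results: [...] }
        toolCallId?: string;
        args?: Record<string, unknown>;
    }
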
@@ -1065,8 +1082,8 @@ export default class LlmChatPanel extends BasicWidget {

         let resultDisplay = '';

-        // Special handling for search_notes tool which has a specific structure
-        if (toolExecutionData.tool === 'search_notes' &&
+        // Special handling for note search tools which have a specific structure
+        if ((toolExecutionData.tool === 'search_notes' || toolExecutionData.tool === 'keyword_search_notes') &&
             typeof toolExecutionData.result === 'object' &&
             toolExecutionData.result.results) {
@@ -1110,8 +1127,8 @@ export default class LlmChatPanel extends BasicWidget {
             `;
             stepsContainer.appendChild(step);

-            // Add event listeners for note links if this is a search_notes result
-            if (toolExecutionData.tool === 'search_notes') {
+            // Add event listeners for note links if this is a note search result
+            if (toolExecutionData.tool === 'search_notes' || toolExecutionData.tool === 'keyword_search_notes') {
                 const noteLinks = step.querySelectorAll('.note-link');
                 noteLinks.forEach(link => {
                     link.addEventListener('click', (e) => {
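
The two hunks above (@ -1065 and @ -1110) widen the same hardcoded equality check in two separate places. If more note-search tools appear later, a shared predicate would keep the checks from drifting apart (hypothetical; not part of this commit):

    // One place to list the tools whose results render as note links.
    const NOTE_SEARCH_TOOLS = new Set(['search_notes', 'keyword_search_notes']);

    const isNoteSearchTool = (tool?: string): boolean =>
        NOTE_SEARCH_TOOLS.has(tool ?? '');
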
@@ -1,6 +1,6 @@
 import options from '../../options.js';
 import { BaseAIService } from '../base_ai_service.js';
-import type { ChatCompletionOptions, ChatResponse, Message } from '../ai_interface.js';
+import type { ChatCompletionOptions, ChatResponse, Message, StreamChunk } from '../ai_interface.js';
 import { getOpenAIOptions } from './providers.js';
 import OpenAI from 'openai';
@@ -135,12 +135,22 @@ export class OpenAIService extends BaseAIService {
                     }

                     // Send the chunk to the caller with raw data and any accumulated tool calls
-                    await callback({
+                    const streamChunk: StreamChunk & { raw: any } = {
                         text: content,
                         done: isDone,
-                        raw: chunk,
-                        tool_calls: accumulatedToolCalls.length > 0 ? accumulatedToolCalls.filter(Boolean) : undefined
-                    });
+                        raw: chunk
+                    };
+
+                    // Add accumulated tool calls to raw data for compatibility with tool execution display
+                    if (accumulatedToolCalls.length > 0) {
+                        // Add tool calls to raw data for proper display
+                        streamChunk.raw = {
+                            ...streamChunk.raw,
+                            tool_calls: accumulatedToolCalls.filter(Boolean)
+                        };
+                    }
+
+                    await callback(streamChunk);

                     if (isDone) {
                         break;
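
The OpenAIService hunk replaces an inline `callback({...})` with a named `streamChunk` typed as `StreamChunk & { raw: any }`, and moves accumulated tool calls under `raw.tool_calls` instead of a top-level field. From its usage here and in RestChatService below, the relevant part of `StreamChunk` presumably looks something like this (inferred from usage only; the actual declaration is in ai_interface.js and may differ):

    // Inferred from usage; see ai_interface.js for the real declaration.
    interface StreamChunk {
        text: string;
        done: boolean;
        tool_calls?: unknown[]; // RestChatService also reads chunk.tool_calls directly
    }

    // The service widens the type to carry the raw provider chunk alongside:
    type RawStreamChunk = StreamChunk & { raw: any };
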
@@ -1241,6 +1241,80 @@ class RestChatService {
                         toolExecution: chunk.raw.toolExecution
                     } as LLMStreamMessage);
                 }

+                // Handle direct tool_calls in the response (for OpenAI)
+                if (chunk.tool_calls && chunk.tool_calls.length > 0) {
+                    log.info(`Detected direct tool_calls in stream chunk: ${chunk.tool_calls.length} tools`);
+
+                    // Send tool execution notification
+                    wsService.sendMessageToAllClients({
+                        type: 'tool_execution_start',
+                        sessionId
+                    } as LLMStreamMessage);
+
+                    // Process each tool call
+                    for (const toolCall of chunk.tool_calls) {
+                        // Process arguments
+                        let args = toolCall.function?.arguments;
+                        if (typeof args === 'string') {
+                            try {
+                                args = JSON.parse(args);
+                            } catch (e) {
+                                log.info(`Could not parse tool arguments as JSON: ${e}`);
+                                args = { raw: args };
+                            }
+                        }
+
+                        // Format into a standardized tool execution message
+                        wsService.sendMessageToAllClients({
+                            type: 'tool_result',
+                            sessionId,
+                            toolExecution: {
+                                action: 'executing',
+                                tool: toolCall.function?.name || 'unknown',
+                                toolCallId: toolCall.id,
+                                args: args
+                            }
+                        } as LLMStreamMessage);
+                    }
+                }
+
+                // Also handle tool_calls in raw data if present but not directly in chunk
+                if (!chunk.tool_calls && chunk.raw?.tool_calls && Array.isArray(chunk.raw.tool_calls)) {
+                    log.info(`Detected tool_calls in raw data: ${chunk.raw.tool_calls.length} tools`);
+
+                    // Send tool execution notification if we haven't already
+                    wsService.sendMessageToAllClients({
+                        type: 'tool_execution_start',
+                        sessionId
+                    } as LLMStreamMessage);
+
+                    // Process each tool call
+                    for (const toolCall of chunk.raw.tool_calls) {
+                        // Process arguments
+                        let args = toolCall.function?.arguments;
+                        if (typeof args === 'string') {
+                            try {
+                                args = JSON.parse(args);
+                            } catch (e) {
+                                log.info(`Could not parse tool arguments as JSON: ${e}`);
+                                args = { raw: args };
+                            }
+                        }
+
+                        // Format into a standardized tool execution message
+                        wsService.sendMessageToAllClients({
+                            type: 'tool_result',
+                            sessionId,
+                            toolExecution: {
+                                action: 'executing',
+                                tool: toolCall.function?.name || 'unknown',
+                                toolCallId: toolCall.id,
+                                args: args
+                            }
+                        } as LLMStreamMessage);
+                    }
+                }
+
                 // Signal completion when done
                 if (chunk.done) {
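
The two branches in the RestChatService hunk (direct `chunk.tool_calls` vs `chunk.raw.tool_calls`) are identical apart from where the array comes from. A possible consolidation, sketched with a hypothetical helper (not part of this commit):

    // Broadcast a 'start' notification, then one 'tool_result' per tool call.
    function broadcastToolCalls(
        toolCalls: any[],
        sessionId: string,
        send: (message: object) => void
    ): void {
        send({ type: 'tool_execution_start', sessionId });

        for (const toolCall of toolCalls) {
            let args = toolCall.function?.arguments;
            if (typeof args === 'string') {
                try {
                    args = JSON.parse(args);
                } catch (e) {
                    args = { raw: args };
                }
            }

            send({
                type: 'tool_result',
                sessionId,
                toolExecution: {
                    action: 'executing',
                    tool: toolCall.function?.name || 'unknown',
                    toolCallId: toolCall.id,
                    args
                }
            });
        }
    }

    // Call-site sketch:
    //   const calls = chunk.tool_calls?.length ? chunk.tool_calls : chunk.raw?.tool_calls;
    //   if (Array.isArray(calls)) broadcastToolCalls(calls, sessionId,
    //       m => wsService.sendMessageToAllClients(m as LLMStreamMessage));
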