tool calling is close to working

getting closer to calling tools...

we definitely need this

closer to tool execution...

agentic tool calling is...kind of working?
perf3ct 2025-04-06 20:50:08 +00:00
parent eb353df010
commit 26b1b08129
23 changed files with 1826 additions and 133 deletions

View File

@ -10,11 +10,16 @@
* - Extract multiple intents from a single question
* - Create a multi-stage research plan
* - Track progress through complex information gathering
*
* Integration with pipeline architecture:
* - Can use pipeline stages when available
* - Falls back to direct methods when needed
*/
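// Illustrative example (not from this commit): a question like
// "How did my project notes change between 2023 and 2024?" might decompose
// into sub-queries such as "project notes 2023" and "project notes 2024",
// each tracked as a SubQuery while the research plan progresses.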
import log from '../../log.js';
import { AGENT_TOOL_PROMPTS } from '../constants/llm_prompt_constants.js';
import { QUERY_DECOMPOSITION_STRINGS } from '../constants/query_decomposition_constants.js';
import aiServiceManager from '../ai_service_manager.js';
export interface SubQuery {
id: string;

View File

@ -13,14 +13,9 @@
*/
import log from '../../log.js';
import { VectorSearchStage } from '../pipeline/stages/vector_search_stage.js';
export interface VectorSearchResult {
noteId: string;
title: string;
@ -56,17 +51,20 @@ export interface VectorSearchOptions {
}
export class VectorSearchTool {
private contextService: any = null;
private maxResults: number = 5;
private vectorSearchStage: VectorSearchStage;
constructor() {
// Initialize the vector search stage
this.vectorSearchStage = new VectorSearchStage();
log.info('VectorSearchTool initialized with VectorSearchStage pipeline component');
}
/**
* Set the context service for performing vector searches
*/
setContextService(contextService: any): void {
this.contextService = contextService;
log.info('Context service set in VectorSearchTool');
}
@ -79,49 +77,42 @@ export class VectorSearchTool {
contextNoteId?: string,
searchOptions: VectorSearchOptions = {}
): Promise<VectorSearchResult[]> {
try {
// Set more aggressive defaults to return more content
const options = {
maxResults: searchOptions.limit || 15, // Increased from default
threshold: searchOptions.threshold || 0.5, // Lower threshold to include more results
useEnhancedQueries: true, // Enable query enhancement by default
includeContent: searchOptions.includeContent !== undefined ? searchOptions.includeContent : true,
...searchOptions
};
log.info(`Vector search: "${query.substring(0, 50)}..." with limit=${options.maxResults}, threshold=${options.threshold}`);
// Use the pipeline stage for vector search
const result = await this.vectorSearchStage.execute({
query,
noteId: contextNoteId || null,
options: {
maxResults: options.maxResults,
threshold: options.threshold,
useEnhancedQueries: options.useEnhancedQueries
}
});
const searchResults = result.searchResults;
log.info(`Vector search found ${searchResults.length} relevant notes via pipeline`);
// If includeContent is true but we're missing content for some notes, fetch it
if (options.includeContent) {
for (let i = 0; i < searchResults.length; i++) {
const result = searchResults[i];
try {
// Get content if missing
if (!result.content) {
const noteContent = await import('../context/note_content.js');
const content = await noteContent.getNoteContent(result.noteId);
if (content) {
// Add content directly without recursive calls
result.content = content.substring(0, 2000); // Limit to 2000 chars
log.info(`Added direct content for note ${result.noteId}, length: ${result.content.length} chars`);
}
@ -132,7 +123,18 @@ export class VectorSearchTool {
}
}
// Format results to match the expected VectorSearchResult interface
return searchResults.map(note => ({
noteId: note.noteId,
title: note.title,
contentPreview: note.content
? note.content.length > 200
? note.content.substring(0, 200) + '...'
: note.content
: 'No content available',
similarity: note.similarity,
parentId: note.parentId
}));
} catch (error) {
log.error(`Vector search error: ${error}`);
return [];
@ -148,26 +150,24 @@ export class VectorSearchTool {
similarityThreshold?: number
} = {}): Promise<VectorSearchResult[]> {
try {
// Set defaults
const maxResults = options.maxResults || this.maxResults;
const threshold = options.similarityThreshold || 0.6;
const parentNoteId = options.parentNoteId || null;
// Use the pipeline for consistent search behavior
const result = await this.vectorSearchStage.execute({
query,
noteId: parentNoteId,
options: {
maxResults,
threshold,
useEnhancedQueries: true
}
});
// Format results to match the expected interface
return result.searchResults.map(result => ({
noteId: result.noteId,
title: result.title,
contentPreview: result.content ?

View File

@ -1,6 +1,11 @@
import type { ToolCall } from './tools/tool_interfaces.js';
export interface Message {
role: 'user' | 'assistant' | 'system' | 'tool';
content: string;
name?: string;
tool_call_id?: string;
tool_calls?: ToolCall[] | any[];
}
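// Illustrative message shapes these fields enable (example values only):
// { role: 'assistant', content: '', tool_calls: [{ id: 'call_1', function: { name: 'search_notes', arguments: { query: 'meeting notes' } } }] }
// { role: 'tool', name: 'search_notes', tool_call_id: 'call_1', content: '[{"noteId":"abc123","title":"Weekly sync"}]' }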
// Interface for streaming response chunks
@ -27,6 +32,8 @@ export interface ChatCompletionOptions {
bypassFormatter?: boolean; // Whether to bypass the message formatter entirely
expectsJsonResponse?: boolean; // Whether this request expects a JSON response
stream?: boolean; // Whether to stream the response
enableTools?: boolean; // Whether to enable tool calling
tools?: any[]; // Tools to provide to the LLM
}
export interface ChatResponse {
@ -40,6 +47,8 @@ export interface ChatResponse {
};
// Stream handler - only present when streaming is enabled
stream?: (callback: (chunk: StreamChunk) => Promise<void> | void) => Promise<string>;
// Tool calls from the LLM
tool_calls?: ToolCall[] | any[];
}
export interface AIService {

View File

@ -393,6 +393,11 @@ export class AIServiceManager implements IAIServiceManager {
// Initialize agent tools with this service manager instance
await agentTools.initialize(this);
// Initialize LLM tools - this is the single place where tools are initialized
const toolInitializer = await import('./tools/tool_initializer.js');
await toolInitializer.default.initializeTools();
log.info("LLM tools initialized successfully");
this.initialized = true;
log.info("AI service initialized successfully");
} catch (error: any) {

View File

@ -561,6 +561,69 @@ export class ContextExtractor {
return ContextExtractor.getFullContext(noteId);
}
/**
* Get note hierarchy information in a formatted string
* @param noteId - The ID of the note to get hierarchy information for
* @returns Formatted string with note hierarchy information
*/
static async getNoteHierarchyInfo(noteId: string): Promise<string> {
const note = becca.getNote(noteId);
if (!note) return 'Note not found';
let info = `**Title**: ${note.title}\n`;
// Add attributes if any
const attributes = note.getAttributes();
if (attributes && attributes.length > 0) {
const relevantAttrs = attributes.filter(attr => !attr.name.startsWith('_'));
if (relevantAttrs.length > 0) {
info += `**Attributes**: ${relevantAttrs.map(attr => `${attr.name}=${attr.value}`).join(', ')}\n`;
}
}
// Add parent path
const parents = await ContextExtractor.getParentNotes(noteId);
if (parents && parents.length > 0) {
const path = parents.map(p => p.title).join(' > ');
info += `**Path**: ${path}\n`;
}
// Add child count
const childNotes = note.getChildNotes();
if (childNotes && childNotes.length > 0) {
info += `**Child notes**: ${childNotes.length}\n`;
// List first few child notes
const childList = childNotes.slice(0, 5).map(child => child.title).join(', ');
if (childList) {
info += `**Examples**: ${childList}${childNotes.length > 5 ? '...' : ''}\n`;
}
}
// Add note type
if (note.type) {
info += `**Type**: ${note.type}\n`;
}
// Add creation/modification dates
if (note.utcDateCreated) {
info += `**Created**: ${new Date(note.utcDateCreated).toLocaleString()}\n`;
}
if (note.utcDateModified) {
info += `**Modified**: ${new Date(note.utcDateModified).toLocaleString()}\n`;
}
return info;
}
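// Sample output produced by the formatting above (values illustrative):
// **Title**: Project Plan
// **Attributes**: status=active, priority=high
// **Path**: Work > Projects
// **Child notes**: 7
// **Examples**: Roadmap, Budget, Risks, Timeline, Staffing...
// **Type**: text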
/**
* Get note hierarchy information - instance method
*/
async getNoteHierarchyInfo(noteId: string): Promise<string> {
return ContextExtractor.getNoteHierarchyInfo(noteId);
}
/**
* Get note summary - for backward compatibility
*/

View File

@ -97,28 +97,60 @@ export class ContextService {
}
try {
// Step 1: Generate search queries (skip if tool calling might be enabled)
let searchQueries: string[];
// Check if llmService has tool calling enabled
const isToolsEnabled = llmService &&
typeof llmService === 'object' &&
'constructor' in llmService &&
llmService.constructor.name === 'OllamaService';
if (isToolsEnabled) {
// Skip query generation if tools might be used to avoid race conditions
log.info(`Skipping query enhancement for potential tool-enabled service: ${llmService.constructor.name}`);
searchQueries = [userQuestion]; // Use simple fallback
} else {
try {
searchQueries = await queryEnhancer.generateSearchQueries(userQuestion, llmService);
} catch (error) {
log.error(`Error generating search queries, using fallback: ${error}`);
searchQueries = [userQuestion]; // Fallback to using the original question
}
}
log.info(`Generated search queries: ${JSON.stringify(searchQueries)}`);
// Step 2: Find relevant notes using the pipeline's VectorSearchStage
let relevantNotes: NoteSearchResult[] = [];
try {
log.info(`Using VectorSearchStage pipeline component to find relevant notes`);
// Create or import the vector search stage
const VectorSearchStage = (await import('../../pipeline/stages/vector_search_stage.js')).VectorSearchStage;
const vectorSearchStage = new VectorSearchStage();
// Use multi-query approach through the pipeline
const allResults: Map<string, NoteSearchResult> = new Map();
// Process searches using the pipeline stage
for (const query of searchQueries) {
log.info(`Executing pipeline vector search for query: "${query.substring(0, 50)}..."`);
// Use the pipeline stage directly
const result = await vectorSearchStage.execute({
query,
noteId: contextNoteId,
options: {
maxResults: 5, // Limit per query
useEnhancedQueries: false, // Don't enhance these - we already have enhanced queries
threshold: 0.6,
llmService // Pass the LLM service for potential use
}
});
const results = result.searchResults;
log.info(`Pipeline vector search found ${results.length} results for query "${query.substring(0, 50)}..."`);
// Combine results, avoiding duplicates
for (const result of results) {

View File

@ -107,6 +107,73 @@ class TriliumContextService {
contextNoteId: string | null = null,
limit = 10
): Promise<any[]> {
try {
// Use the VectorSearchStage for all searches to ensure consistency
const VectorSearchStage = (await import('./pipeline/stages/vector_search_stage.js')).VectorSearchStage;
const vectorSearchStage = new VectorSearchStage();
const allResults: Map<string, any> = new Map();
log.info(`Finding relevant notes for ${queries.length} queries in context ${contextNoteId || 'global'}`);
// Process each query in parallel using Promise.all for better performance
const searchPromises = queries.map(query =>
vectorSearchStage.execute({
query,
noteId: contextNoteId,
options: {
maxResults: Math.ceil(limit / queries.length), // Distribute limit among queries
useEnhancedQueries: false, // Don't enhance the queries here, as they're already enhanced
threshold: 0.5 // Lower threshold to get more diverse results
}
})
);
const searchResults = await Promise.all(searchPromises);
// Combine all results
for (let i = 0; i < searchResults.length; i++) {
const results = searchResults[i].searchResults;
log.info(`Query "${queries[i].substring(0, 30)}..." returned ${results.length} results`);
// Combine results, avoiding duplicates
for (const result of results) {
if (!allResults.has(result.noteId)) {
allResults.set(result.noteId, result);
} else {
// If note already exists, update similarity to max of both values
const existing = allResults.get(result.noteId);
if (result.similarity > existing.similarity) {
existing.similarity = result.similarity;
allResults.set(result.noteId, existing);
}
}
}
}
// Convert map to array and limit to top results
const finalResults = Array.from(allResults.values())
.sort((a, b) => b.similarity - a.similarity)
.slice(0, limit);
log.info(`Combined ${queries.length} queries into ${finalResults.length} final results`);
return finalResults;
} catch (error) {
log.error(`Error in findRelevantNotesMultiQuery: ${error}`);
// Fall back to legacy approach if the new approach fails
return this.findRelevantNotesMultiQueryLegacy(queries, contextNoteId, limit);
}
}
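// Worked example of the merging above (illustrative): limit=10 across 3 queries
// gives Math.ceil(10/3) = 4 results per query; duplicates are merged by noteId
// keeping the maximum similarity, then the combined set is sorted by similarity
// and trimmed back to the top 10.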
/**
* Legacy implementation of multi-query search (for fallback)
* @private
*/
private async findRelevantNotesMultiQueryLegacy(
queries: string[],
contextNoteId: string | null = null,
limit = 10
): Promise<any[]> {
log.info(`Using legacy findRelevantNotesMultiQuery implementation for ${queries.length} queries`);
const allResults: Map<string, any> = new Map();
for (const query of queries) {

View File

@ -9,6 +9,7 @@ import {
OLLAMA_CLEANING,
FORMATTER_LOGS
} from '../constants/formatter_constants.js';
import log from '../../log.js';
/**
* Ollama-specific message formatter
@ -31,14 +32,33 @@ export class OllamaMessageFormatter extends BaseMessageFormatter {
formatMessages(messages: Message[], systemPrompt?: string, context?: string, preserveSystemPrompt?: boolean): Message[] {
const formattedMessages: Message[] = [];
// Log the input messages with all their properties
log.info(`Ollama formatter received ${messages.length} messages`);
messages.forEach((msg, index) => {
const msgKeys = Object.keys(msg);
log.info(`Message ${index} - role: ${msg.role}, keys: ${msgKeys.join(', ')}, content length: ${msg.content.length}`);
// Log special properties if present
if (msg.tool_calls) {
log.info(`Message ${index} has ${msg.tool_calls.length} tool_calls`);
}
if (msg.tool_call_id) {
log.info(`Message ${index} has tool_call_id: ${msg.tool_call_id}`);
}
if (msg.name) {
log.info(`Message ${index} has name: ${msg.name}`);
}
});
// First identify user, system, and tool messages
const systemMessages = messages.filter(msg => msg.role === 'system');
const nonSystemMessages = messages.filter(msg => msg.role !== 'system');
// Determine if we should preserve the existing system message
if (preserveSystemPrompt && systemMessages.length > 0) {
// Preserve the existing system message
formattedMessages.push(systemMessages[0]);
log.info(`Preserving existing system message: ${systemMessages[0].content.substring(0, 50)}...`);
} else {
// Use provided systemPrompt or default
const basePrompt = systemPrompt || PROVIDER_PROMPTS.COMMON.DEFAULT_ASSISTANT_INTRO;
@ -46,49 +66,78 @@ export class OllamaMessageFormatter extends BaseMessageFormatter {
role: 'system',
content: basePrompt
});
log.info(`Using new system message: ${basePrompt.substring(0, 50)}...`);
}
// If we have context, inject it into the first user message
if (context && nonSystemMessages.length > 0) {
let injectedContext = false;
for (let i = 0; i < nonSystemMessages.length; i++) {
const msg = nonSystemMessages[i];
if (msg.role === 'user' && !injectedContext) {
// Simple context injection directly in the user's message
const cleanedContext = this.cleanContextContent(context);
// DEBUG: Log the context before and after cleaning
console.log(`[OllamaFormatter] Context (first 500 chars): ${context.substring(0, 500).replace(/\n/g, '\\n')}...`);
console.log(`[OllamaFormatter] Cleaned context (first 500 chars): ${cleanedContext.substring(0, 500).replace(/\n/g, '\\n')}...`);
log.info(`Injecting context (${cleanedContext.length} chars) into user message`);
const formattedContext = PROVIDER_PROMPTS.OLLAMA.CONTEXT_INJECTION(
cleanedContext,
msg.content
);
// DEBUG: Log the final formatted context
console.log(`[OllamaFormatter] Formatted context (first 500 chars): ${formattedContext.substring(0, 500).replace(/\n/g, '\\n')}...`);
// Log what properties we're preserving
const msgKeys = Object.keys(msg);
const preservedKeys = msgKeys.filter(key => key !== 'role' && key !== 'content');
log.info(`Preserving additional properties in user message: ${preservedKeys.join(', ')}`);
// Create a new message with all original properties, but updated content
const newMessage = {
...msg, // Copy all properties
content: formattedContext // Override content with injected context
};
formattedMessages.push(newMessage);
log.info(`Created user message with context, final keys: ${Object.keys(newMessage).join(', ')}`);
injectedContext = true;
} else {
// For other messages, preserve all properties including any tool-related ones
log.info(`Preserving message with role ${msg.role}, keys: ${Object.keys(msg).join(', ')}`);
formattedMessages.push({
...msg // Copy all properties
});
}
}
} else {
// No context, just add all messages as-is
// Make sure to preserve all properties including tool_calls, tool_call_id, etc.
for (const msg of nonSystemMessages) {
log.info(`Adding message with role ${msg.role} without context injection, keys: ${Object.keys(msg).join(', ')}`);
formattedMessages.push({
...msg // Copy all properties
});
}
}
console.log(FORMATTER_LOGS.OLLAMA.PROCESSED(messages.length, formattedMessages.length));
// Log the final formatted messages
log.info(`Ollama formatter produced ${formattedMessages.length} formatted messages`);
formattedMessages.forEach((msg, index) => {
const msgKeys = Object.keys(msg);
log.info(`Formatted message ${index} - role: ${msg.role}, keys: ${msgKeys.join(', ')}, content length: ${msg.content.length}`);
// Log special properties if present
if (msg.tool_calls) {
log.info(`Formatted message ${index} has ${msg.tool_calls.length} tool_calls`);
}
if (msg.tool_call_id) {
log.info(`Formatted message ${index} has tool_call_id: ${msg.tool_call_id}`);
}
if (msg.name) {
log.info(`Formatted message ${index} has name: ${msg.name}`);
}
});
return formattedMessages;
}

View File

@ -7,6 +7,10 @@ import { MessagePreparationStage } from './stages/message_preparation_stage.js';
import { ModelSelectionStage } from './stages/model_selection_stage.js';
import { LLMCompletionStage } from './stages/llm_completion_stage.js';
import { ResponseProcessingStage } from './stages/response_processing_stage.js';
import { ToolCallingStage } from './stages/tool_calling_stage.js';
import { VectorSearchStage } from './stages/vector_search_stage.js';
import toolRegistry from '../tools/tool_registry.js';
import toolInitializer from '../tools/tool_initializer.js';
import log from '../../log.js';
/**
@ -22,6 +26,8 @@ export class ChatPipeline {
modelSelection: ModelSelectionStage;
llmCompletion: LLMCompletionStage;
responseProcessing: ResponseProcessingStage;
toolCalling: ToolCallingStage;
vectorSearch: VectorSearchStage;
};
config: ChatPipelineConfig;
@ -40,7 +46,9 @@ export class ChatPipeline {
messagePreparation: new MessagePreparationStage(),
modelSelection: new ModelSelectionStage(),
llmCompletion: new LLMCompletionStage(),
responseProcessing: new ResponseProcessingStage(),
toolCalling: new ToolCallingStage(),
vectorSearch: new VectorSearchStage()
};
// Set default configuration values
@ -87,6 +95,34 @@ export class ChatPipeline {
contentLength += message.content.length;
}
// Initialize tools if needed
try {
const toolCount = toolRegistry.getAllTools().length;
// If there are no tools registered, initialize them
if (toolCount === 0) {
log.info('No tools found in registry, initializing tools...');
await toolInitializer.initializeTools();
log.info(`Tools initialized, now have ${toolRegistry.getAllTools().length} tools`);
} else {
log.info(`Found ${toolCount} tools already registered`);
}
} catch (error: any) {
log.error(`Error checking/initializing tools: ${error.message || String(error)}`);
}
// First, select the appropriate model based on query complexity and content length
const modelSelectionStartTime = Date.now();
const modelSelection = await this.stages.modelSelection.execute({
options: input.options,
query: input.query,
contentLength
});
this.updateStageMetrics('modelSelection', modelSelectionStartTime);
// Determine if we should use tools or semantic context
const useTools = modelSelection.options.enableTools === true;
// Determine which pipeline flow to use
let context: string | undefined;
@ -102,26 +138,62 @@ export class ChatPipeline {
});
context = agentContext.context;
this.updateStageMetrics('agentToolsContext', contextStartTime);
} else if (!useTools) {
// Only get semantic context if tools are NOT enabled
// When tools are enabled, we'll let the LLM request context via tools instead
log.info('Getting semantic context for note using pipeline stages');
// First use the vector search stage to find relevant notes
const vectorSearchStartTime = Date.now();
log.info(`Executing vector search stage for query: "${input.query?.substring(0, 50)}..."`);
const vectorSearchResult = await this.stages.vectorSearch.execute({
query: input.query || '',
noteId: input.noteId,
options: {
maxResults: 10,
useEnhancedQueries: true,
threshold: 0.6
}
});
this.updateStageMetrics('vectorSearch', vectorSearchStartTime);
log.info(`Vector search found ${vectorSearchResult.searchResults.length} relevant notes`);
// Then pass to the semantic context stage to build the formatted context
const semanticContext = await this.stages.semanticContextExtraction.execute({
noteId: input.noteId,
query: input.query,
messages: input.messages
});
context = semanticContext.context;
this.updateStageMetrics('semanticContextExtraction', contextStartTime);
} else {
log.info('Tools are enabled - using minimal direct context to avoid race conditions');
// Get context from current note directly without semantic search
if (input.noteId) {
try {
const contextExtractor = new (await import('../../llm/context/index.js')).ContextExtractor();
// Just get the direct content of the current note
context = await contextExtractor.extractContext(input.noteId, {
includeContent: true,
includeParents: true,
includeChildren: true,
includeLinks: true,
includeSimilar: false // Skip semantic search to avoid race conditions
});
log.info(`Direct context extracted (${context.length} chars) without semantic search`);
} catch (error: any) {
log.error(`Error extracting direct context: ${error.message}`);
context = ""; // Fallback to empty context if extraction fails
}
} else {
context = ""; // No note ID, so no context
}
}
}
// Prepare messages with context and system prompt
const messagePreparationStartTime = Date.now();
@ -167,17 +239,106 @@ export class ChatPipeline {
});
}
// Process any tool calls in the response
let currentMessages = preparedMessages.messages;
let currentResponse = completion.response;
let needsFollowUp = false;
let toolCallIterations = 0;
const maxToolCallIterations = this.config.maxToolCallIterations;
// Check if tools were enabled in the options
const toolsEnabled = modelSelection.options.enableTools !== false;
log.info(`========== TOOL CALL PROCESSING ==========`);
log.info(`Tools enabled: ${toolsEnabled}`);
log.info(`Tool calls in response: ${currentResponse.tool_calls ? currentResponse.tool_calls.length : 0}`);
log.info(`Current response format: ${typeof currentResponse}`);
log.info(`Response keys: ${Object.keys(currentResponse).join(', ')}`);
// Detailed tool call inspection
if (currentResponse.tool_calls) {
currentResponse.tool_calls.forEach((tool, idx) => {
log.info(`Tool call ${idx+1}: ${JSON.stringify(tool)}`);
});
}
// Process tool calls if present and tools are enabled
if (toolsEnabled && currentResponse.tool_calls && currentResponse.tool_calls.length > 0) {
log.info(`Response contains ${currentResponse.tool_calls.length} tool calls, processing...`);
// Start tool calling loop
log.info(`Starting tool calling loop with max ${maxToolCallIterations} iterations`);
do {
log.info(`Tool calling iteration ${toolCallIterations + 1}`);
// Execute tool calling stage
const toolCallingStartTime = Date.now();
const toolCallingResult = await this.stages.toolCalling.execute({
response: currentResponse,
messages: currentMessages,
options: modelSelection.options
});
this.updateStageMetrics('toolCalling', toolCallingStartTime);
// Update state for next iteration
currentMessages = toolCallingResult.messages;
needsFollowUp = toolCallingResult.needsFollowUp;
// Make another call to the LLM if needed
if (needsFollowUp) {
log.info(`Tool execution completed, making follow-up LLM call (iteration ${toolCallIterations + 1})...`);
// Generate a new LLM response with the updated messages
const followUpStartTime = Date.now();
log.info(`Sending follow-up request to LLM with ${currentMessages.length} messages (including tool results)`);
const followUpCompletion = await this.stages.llmCompletion.execute({
messages: currentMessages,
options: modelSelection.options
});
this.updateStageMetrics('llmCompletion', followUpStartTime);
// Update current response for next iteration
currentResponse = followUpCompletion.response;
// Check for more tool calls
const hasMoreToolCalls = !!(currentResponse.tool_calls && currentResponse.tool_calls.length > 0);
if (hasMoreToolCalls) {
log.info(`Follow-up response contains ${currentResponse.tool_calls?.length || 0} more tool calls`);
} else {
log.info(`Follow-up response contains no more tool calls - completing tool loop`);
}
// Continue loop if there are more tool calls
needsFollowUp = hasMoreToolCalls;
}
// Increment iteration counter
toolCallIterations++;
} while (needsFollowUp && toolCallIterations < maxToolCallIterations);
// If we hit max iterations but still have tool calls, log a warning
if (toolCallIterations >= maxToolCallIterations && needsFollowUp) {
log.error(`Reached maximum tool call iterations (${maxToolCallIterations}), stopping`);
}
log.info(`Completed ${toolCallIterations} tool call iterations`);
}
// For non-streaming responses, process the final response
const processStartTime = Date.now();
const processed = await this.stages.responseProcessing.execute({
response: currentResponse,
options: input.options
});
this.updateStageMetrics('responseProcessing', processStartTime);
// Combine response with processed text, using accumulated text if streamed
const finalResponse: ChatResponse = {
...currentResponse,
text: accumulatedText || processed.text
};

View File

@ -1,4 +1,5 @@
import type { Message, ChatCompletionOptions, ChatResponse, StreamChunk } from '../ai_interface.js';
import type { LLMServiceInterface } from '../interfaces/agent_tool_interfaces.js';
/**
* Base interface for pipeline input
@ -61,6 +62,25 @@ export interface ChatPipelineInput extends PipelineInput {
streamCallback?: StreamCallback;
}
/**
* Options for vector search operations
*/
export interface VectorSearchOptions {
maxResults?: number;
useEnhancedQueries?: boolean;
threshold?: number;
llmService?: LLMServiceInterface;
}
/**
* Input for vector search pipeline stage
*/
export interface VectorSearchInput extends PipelineInput {
query: string;
noteId?: string | null;
options?: VectorSearchOptions;
}
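/**
 * Minimal usage sketch for this input shape (values illustrative):
 *
 * const { searchResults } = await new VectorSearchStage().execute({
 *     query: 'how do I tag notes?',
 *     noteId: null,
 *     options: { maxResults: 5, threshold: 0.6, useEnhancedQueries: false }
 * });
 */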
/**
* Base interface for pipeline stage output
*/
@ -130,6 +150,7 @@ export interface ToolExecutionInput extends PipelineInput {
response: ChatResponse;
messages: Message[];
options?: ChatCompletionOptions;
maxIterations?: number;
}
/**

View File

@ -2,6 +2,7 @@ import { BasePipelineStage } from '../pipeline_stage.js';
import type { LLMCompletionInput } from '../interfaces.js';
import type { ChatResponse } from '../../ai_interface.js';
import aiServiceManager from '../../ai_service_manager.js';
import toolRegistry from '../../tools/tool_registry.js';
import log from '../../../log.js';
/**
@ -18,17 +19,33 @@ export class LLMCompletionStage extends BasePipelineStage<LLMCompletionInput, {
protected async process(input: LLMCompletionInput): Promise<{ response: ChatResponse }> {
const { messages, options, provider } = input;
// Create a copy of options to avoid modifying the original
const updatedOptions = { ...options };
// Check if tools should be enabled
if (updatedOptions.enableTools !== false) {
// Get all available tools from the registry
const toolDefinitions = toolRegistry.getAllToolDefinitions();
if (toolDefinitions.length > 0) {
// Enable tools and add them to the options
updatedOptions.enableTools = true;
updatedOptions.tools = toolDefinitions;
log.info(`Adding ${toolDefinitions.length} tools to LLM request`);
}
}
log.info(`Generating LLM completion, provider: ${provider || 'auto'}, model: ${updatedOptions?.model || 'default'}`);
// If provider is specified, use that specific provider
if (provider && aiServiceManager.isProviderAvailable(provider)) {
const service = aiServiceManager.getService(provider);
const response = await service.generateChatCompletion(messages, updatedOptions);
return { response };
}
// Otherwise use the service manager to select an available provider
const response = await aiServiceManager.generateChatCompletion(messages, updatedOptions);
return { response };
}
}

View File

@ -3,6 +3,7 @@ import type { MessagePreparationInput } from '../interfaces.js';
import type { Message } from '../../ai_interface.js';
import { SYSTEM_PROMPTS } from '../../constants/llm_prompt_constants.js';
import { MessageFormatterFactory } from '../interfaces/message_formatter.js';
import toolRegistry from '../../tools/tool_registry.js';
import log from '../../../log.js';
/**
@ -27,15 +28,31 @@ export class MessagePreparationStage extends BasePipelineStage<MessagePreparatio
provider = providerName;
}
// Check if tools are enabled
const toolsEnabled = options?.enableTools === true;
log.info(`Preparing messages for provider: ${provider}, context: ${!!context}, system prompt: ${!!systemPrompt}, tools: ${toolsEnabled}`);
// Get appropriate formatter for this provider
const formatter = MessageFormatterFactory.getFormatter(provider);
// Determine the system prompt to use
let finalSystemPrompt = systemPrompt || SYSTEM_PROMPTS.DEFAULT_SYSTEM_PROMPT;
// If tools are enabled, enhance system prompt with tools guidance
if (toolsEnabled) {
const toolCount = toolRegistry.getAllTools().length;
const toolsPrompt = `You have access to ${toolCount} tools to help you respond. When you need information that might be in the user's notes, use the search_notes tool to find relevant content or the read_note tool to read a specific note by ID. Use tools when specific information is required rather than making assumptions.`;
// Add tools guidance to system prompt
finalSystemPrompt = finalSystemPrompt + '\n\n' + toolsPrompt;
log.info(`Enhanced system prompt with tools guidance: ${toolCount} tools available`);
}
// Format messages using provider-specific approach
const formattedMessages = formatter.formatMessages(
messages,
finalSystemPrompt,
context
);

View File

@ -28,6 +28,39 @@ export class ModelSelectionStage extends BasePipelineStage<ModelSelectionInput,
// Get default model based on provider precedence
let defaultModel = 'openai:gpt-3.5-turbo'; // Fallback default
// Enable tools by default unless explicitly disabled
updatedOptions.enableTools = updatedOptions.enableTools !== false;
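// i.e. undefined -> true, true -> true, false -> false (tools are opt-out, not opt-in)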
// Add tools if not already provided
if (updatedOptions.enableTools && (!updatedOptions.tools || updatedOptions.tools.length === 0)) {
try {
// Import tool registry and fetch tool definitions
const toolRegistry = (await import('../../tools/tool_registry.js')).default;
const toolDefinitions = toolRegistry.getAllToolDefinitions();
if (toolDefinitions.length > 0) {
updatedOptions.tools = toolDefinitions;
log.info(`Added ${toolDefinitions.length} tools to options`);
} else {
// Try to initialize tools
log.info('No tools found in registry, trying to initialize them');
try {
const toolInitializer = await import('../../tools/tool_initializer.js');
await toolInitializer.default.initializeTools();
// Try again after initialization
const reinitToolDefinitions = toolRegistry.getAllToolDefinitions();
updatedOptions.tools = reinitToolDefinitions;
log.info(`After initialization, added ${reinitToolDefinitions.length} tools to options`);
} catch (initError: any) {
log.error(`Failed to initialize tools: ${initError.message}`);
}
}
} catch (error: any) {
log.error(`Error loading tools: ${error.message}`);
}
}
try {
// Get provider precedence list
const providerPrecedence = await options.getOption('aiProviderPrecedence');
@ -55,7 +88,25 @@ export class ModelSelectionStage extends BasePipelineStage<ModelSelectionInput,
if (model) defaultModel = `anthropic:${model}`;
} else if (firstProvider === 'ollama') {
const model = await options.getOption('ollamaDefaultModel');
if (model) {
defaultModel = `ollama:${model}`;
// Special configuration for Ollama
// Since Ollama models have different requirements for tool calling,
// configure based on the model being used
const modelLower = model.toLowerCase();
if (modelLower.includes('llama3') ||
modelLower.includes('mistral') ||
modelLower.includes('dolphin') ||
modelLower.includes('neural') ||
modelLower.includes('mist') ||
modelLower.includes('wizard')) {
// These models are known to support tool calling
log.info(`Using Ollama model ${model} with tool calling support`);
updatedOptions.enableTools = true;
}
}
}
}
}

View File

@ -2,13 +2,20 @@ import { BasePipelineStage } from '../pipeline_stage.js';
import type { SemanticContextExtractionInput } from '../interfaces.js';
import aiServiceManager from '../../ai_service_manager.js';
import log from '../../../log.js';
import { VectorSearchStage } from './vector_search_stage.js';
import contextFormatter from '../../context/modules/context_formatter.js';
import providerManager from '../../context/modules/provider_manager.js';
/**
* Pipeline stage for extracting semantic context from notes
* This uses the new VectorSearchStage to find relevant content
*/
export class SemanticContextExtractionStage extends BasePipelineStage<SemanticContextExtractionInput, { context: string }> {
private vectorSearchStage: VectorSearchStage;
constructor() {
super('SemanticContextExtraction');
this.vectorSearchStage = new VectorSearchStage();
}
/**
@ -18,9 +25,43 @@ export class SemanticContextExtractionStage extends BasePipelineStage<SemanticCo
const { noteId, query, maxResults = 5, messages = [] } = input;
log.info(`Extracting semantic context from note ${noteId}, query: ${query?.substring(0, 50)}...`);
try {
// Step 1: Use vector search stage to find relevant notes
const vectorSearchResult = await this.vectorSearchStage.execute({
query,
noteId,
options: {
maxResults,
useEnhancedQueries: true,
threshold: 0.6,
llmService: null // Will use default service
}
});
log.info(`Vector search found ${vectorSearchResult.searchResults.length} relevant notes`);
// If no results, return empty context
if (vectorSearchResult.searchResults.length === 0) {
log.info(`No relevant notes found for context extraction`);
return { context: "" };
}
// Step 2: Format search results into a context string
const provider = await providerManager.getPreferredEmbeddingProvider();
const providerId = provider?.name || 'default';
const context = await contextFormatter.buildContextFromNotes(
vectorSearchResult.searchResults,
query,
providerId,
messages
);
log.info(`Built context of ${context.length} chars from ${vectorSearchResult.searchResults.length} notes`);
return { context };
} catch (error) {
log.error(`Error extracting semantic context: ${error}`);
return { context: "" };
}
}
}

View File

@ -0,0 +1,216 @@
import { BasePipelineStage } from '../pipeline_stage.js';
import type { ToolExecutionInput } from '../interfaces.js';
import log from '../../../log.js';
import type { ChatResponse, Message } from '../../ai_interface.js';
import toolRegistry from '../../tools/tool_registry.js';
/**
* Pipeline stage for handling LLM tool calling
* This stage is responsible for:
* 1. Detecting tool calls in LLM responses
* 2. Executing the appropriate tools
* 3. Adding tool results back to the conversation
* 4. Determining if we need to make another call to the LLM
*/
export class ToolCallingStage extends BasePipelineStage<ToolExecutionInput, { response: ChatResponse, needsFollowUp: boolean, messages: Message[] }> {
constructor() {
super('ToolCalling');
}
/**
* Process the LLM response and execute any tool calls
*/
protected async process(input: ToolExecutionInput): Promise<{ response: ChatResponse, needsFollowUp: boolean, messages: Message[] }> {
const { response, messages, options } = input;
// Check if the response has tool calls
if (!response.tool_calls || response.tool_calls.length === 0) {
// No tool calls, return original response and messages
log.info(`No tool calls detected in response from provider: ${response.provider}`);
return { response, needsFollowUp: false, messages };
}
log.info(`LLM requested ${response.tool_calls.length} tool calls from provider: ${response.provider}`);
// Log response details for debugging
if (response.text) {
log.info(`Response text: "${response.text.substring(0, 200)}${response.text.length > 200 ? '...' : ''}"`);
}
// Check if the registry has any tools
const availableTools = toolRegistry.getAllTools();
log.info(`Available tools in registry: ${availableTools.length}`);
if (availableTools.length === 0) {
log.error(`No tools available in registry, cannot execute tool calls`);
// Try to initialize tools as a recovery step
try {
log.info('Attempting to initialize tools as recovery step');
const toolInitializer = await import('../../tools/tool_initializer.js');
await toolInitializer.default.initializeTools();
log.info(`After recovery initialization: ${toolRegistry.getAllTools().length} tools available`);
} catch (error: any) {
log.error(`Failed to initialize tools in recovery step: ${error.message}`);
}
}
// Create a copy of messages to add the assistant message with tool calls
const updatedMessages = [...messages];
// Add the assistant message with the tool calls
updatedMessages.push({
role: 'assistant',
content: response.text || "",
tool_calls: response.tool_calls
});
// Execute each tool call and add results to messages
const toolResults = await Promise.all(response.tool_calls.map(async (toolCall) => {
try {
log.info(`Tool call received - Name: ${toolCall.function.name}, ID: ${toolCall.id || 'unknown'}`);
// Log parameters
const argsStr = typeof toolCall.function.arguments === 'string'
? toolCall.function.arguments
: JSON.stringify(toolCall.function.arguments);
log.info(`Tool parameters: ${argsStr}`);
// Get the tool from registry
const tool = toolRegistry.getTool(toolCall.function.name);
if (!tool) {
throw new Error(`Tool not found: ${toolCall.function.name}`);
}
// Parse arguments (handle both string and object formats)
let args;
// At this stage, arguments should already be processed by the provider-specific service
// But we still need to handle different formats just in case
if (typeof toolCall.function.arguments === 'string') {
log.info(`Received string arguments in tool calling stage: ${toolCall.function.arguments.substring(0, 50)}...`);
try {
// Try to parse as JSON first
args = JSON.parse(toolCall.function.arguments);
log.info(`Parsed JSON arguments: ${Object.keys(args).join(', ')}`);
} catch (e: any) {
// If it's not valid JSON, try to check if it's a stringified object with quotes
log.info(`Failed to parse arguments as JSON, trying alternative parsing: ${e.message}`);
// Sometimes LLMs return stringified JSON with escaped quotes or incorrect quotes
// Try to clean it up
try {
const cleaned = toolCall.function.arguments
.replace(/^['"]|['"]$/g, '') // Remove surrounding quotes
.replace(/\\"/g, '"') // Replace escaped quotes
.replace(/([{,])\s*'([^']+)'\s*:/g, '$1"$2":') // Replace single quotes around property names
.replace(/([{,])\s*(\w+)\s*:/g, '$1"$2":'); // Add quotes around unquoted property names
log.info(`Cleaned argument string: ${cleaned}`);
args = JSON.parse(cleaned);
log.info(`Successfully parsed cleaned arguments: ${Object.keys(args).join(', ')}`);
} catch (cleanError: any) {
// If all parsing fails, treat it as a text argument
log.info(`Failed to parse cleaned arguments: ${cleanError.message}`);
args = { text: toolCall.function.arguments };
log.info(`Using text argument: ${args.text.substring(0, 50)}...`);
}
}
} else {
// Arguments are already an object
args = toolCall.function.arguments;
log.info(`Using object arguments with keys: ${Object.keys(args).join(', ')}`);
}
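// Example of the cleanup path above (illustrative): a wrapped string such as
// "{\"query\": \"meeting notes\"}" has its surrounding quotes stripped and the
// escaped quotes unescaped, yielding {"query": "meeting notes"}, which parses as JSON.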
// Execute the tool
log.info(`================ EXECUTING TOOL: ${toolCall.function.name} ================`);
log.info(`Tool parameters: ${Object.keys(args).join(', ')}`);
log.info(`Parameters values: ${Object.entries(args).map(([k, v]) => `${k}=${typeof v === 'string' ? v : JSON.stringify(v)}`).join(', ')}`);
const executionStart = Date.now();
let result;
try {
log.info(`Starting tool execution for ${toolCall.function.name}...`);
result = await tool.execute(args);
const executionTime = Date.now() - executionStart;
log.info(`================ TOOL EXECUTION COMPLETED in ${executionTime}ms ================`);
} catch (execError: any) {
const executionTime = Date.now() - executionStart;
log.error(`================ TOOL EXECUTION FAILED in ${executionTime}ms: ${execError.message} ================`);
throw execError;
}
// Log execution result
const resultSummary = typeof result === 'string'
? `${result.substring(0, 100)}...`
: `Object with keys: ${Object.keys(result).join(', ')}`;
log.info(`Tool execution completed in ${Date.now() - executionStart}ms - Result: ${resultSummary}`);
// Return result with tool call ID
return {
toolCallId: toolCall.id,
name: toolCall.function.name,
result
};
} catch (error: any) {
log.error(`Error executing tool ${toolCall.function.name}: ${error.message || String(error)}`);
// Return error message as result
return {
toolCallId: toolCall.id,
name: toolCall.function.name,
result: `Error: ${error.message || String(error)}`
};
}
}));
// Add tool results as messages
toolResults.forEach(result => {
// Format the result content based on type
let content: string;
if (typeof result.result === 'string') {
content = result.result;
log.info(`Tool returned string result (${content.length} chars)`);
} else {
// For object results, format as JSON
try {
content = JSON.stringify(result.result, null, 2);
log.info(`Tool returned object result with keys: ${Object.keys(result.result).join(', ')}`);
} catch (error) {
content = String(result.result);
log.info(`Failed to stringify object result: ${error}`);
}
}
log.info(`Adding tool result message - Tool: ${result.name}, ID: ${result.toolCallId || 'unknown'}, Length: ${content.length}`);
// Create a properly formatted tool response message
updatedMessages.push({
role: 'tool',
content: content,
name: result.name,
tool_call_id: result.toolCallId
});
// Log a sample of the content for debugging
const contentPreview = content.substring(0, 100) + (content.length > 100 ? '...' : '');
log.info(`Tool result preview: ${contentPreview}`);
});
log.info(`Added ${toolResults.length} tool results to conversation`);
// If we have tool results, we need a follow-up call to the LLM
const needsFollowUp = toolResults.length > 0;
if (needsFollowUp) {
log.info(`Tool execution complete, LLM follow-up required with ${updatedMessages.length} messages`);
}
return {
response,
needsFollowUp,
messages: updatedMessages
};
}
}

View File

@ -0,0 +1,206 @@
import { BasePipelineStage } from '../pipeline_stage.js';
import type { VectorSearchInput } from '../interfaces.js';
import type { NoteSearchResult } from '../../interfaces/context_interfaces.js';
import log from '../../../log.js';
import queryEnhancer from '../../context/modules/query_enhancer.js';
import semanticSearch from '../../context/modules/semantic_search.js';
import aiServiceManager from '../../ai_service_manager.js';
/**
* Pipeline stage for handling semantic vector search with query enhancement
* This centralizes all semantic search operations into the pipeline
*/
export class VectorSearchStage extends BasePipelineStage<VectorSearchInput, {
searchResults: NoteSearchResult[],
enhancedQueries?: string[]
}> {
constructor() {
super('VectorSearch');
}
/**
* Execute semantic search with optional query enhancement
*/
protected async process(input: VectorSearchInput): Promise<{
searchResults: NoteSearchResult[],
enhancedQueries?: string[]
}> {
const { query, noteId, options = {} } = input;
const {
maxResults = 10,
useEnhancedQueries = true,
threshold = 0.6,
llmService = null
} = options;
log.info(`========== PIPELINE VECTOR SEARCH ==========`);
log.info(`Query: "${query.substring(0, 100)}${query.length > 100 ? '...' : ''}"`);
log.info(`Parameters: noteId=${noteId || 'global'}, maxResults=${maxResults}, useEnhancedQueries=${useEnhancedQueries}, threshold=${threshold}`);
log.info(`LLM Service provided: ${llmService ? 'yes' : 'no'}`);
log.info(`Start timestamp: ${new Date().toISOString()}`);
try {
// STEP 1: Generate enhanced search queries if requested
let searchQueries: string[] = [query];
if (useEnhancedQueries) {
log.info(`PIPELINE VECTOR SEARCH: Generating enhanced queries for: "${query.substring(0, 50)}..."`);
try {
// Get the LLM service to use for query enhancement
let enhancementService = llmService;
// If no service provided, use AI service manager to get the default service
if (!enhancementService) {
log.info(`No LLM service provided, using default from AI service manager`);
const manager = aiServiceManager.getInstance();
const provider = manager.getPreferredProvider();
enhancementService = manager.getService(provider);
log.info(`Using preferred provider "${provider}" with service type ${enhancementService.constructor.name}`);
}
// Create a special service wrapper that prevents recursion
const recursionPreventionService = {
generateChatCompletion: async (messages: any, options: any) => {
// Add flags to prevent recursive calls
const safeOptions = {
...options,
bypassFormatter: true,
_bypassContextProcessing: true,
bypassQueryEnhancement: true, // Critical flag
directToolExecution: true,
enableTools: false // Disable tools for query enhancement
};
// Use the actual service implementation but with safe options
return enhancementService.generateChatCompletion(messages, safeOptions);
}
};
// Call the query enhancer with the safe service
searchQueries = await queryEnhancer.generateSearchQueries(query, recursionPreventionService);
log.info(`PIPELINE VECTOR SEARCH: Generated ${searchQueries.length} enhanced queries`);
} catch (error) {
log.error(`PIPELINE VECTOR SEARCH: Error generating search queries, using original: ${error}`);
searchQueries = [query]; // Fall back to original query
}
} else {
log.info(`PIPELINE VECTOR SEARCH: Using direct query without enhancement: "${query}"`);
}
// STEP 2: Find relevant notes for each query
const allResults = new Map<string, NoteSearchResult>();
log.info(`PIPELINE VECTOR SEARCH: Searching for ${searchQueries.length} queries`);
for (const searchQuery of searchQueries) {
try {
log.info(`PIPELINE VECTOR SEARCH: Processing query: "${searchQuery.substring(0, 50)}..."`);
const results = await semanticSearch.findRelevantNotes(
searchQuery,
noteId || null,
maxResults
);
log.info(`PIPELINE VECTOR SEARCH: Found ${results.length} results for query "${searchQuery.substring(0, 50)}..."`);
// Combine results, avoiding duplicates and keeping the highest similarity score
for (const result of results) {
if (!allResults.has(result.noteId)) {
allResults.set(result.noteId, result);
} else {
// If note already exists, update similarity to max of both values
const existing = allResults.get(result.noteId);
if (existing && result.similarity > existing.similarity) {
existing.similarity = result.similarity;
allResults.set(result.noteId, existing);
}
}
}
} catch (error) {
log.error(`PIPELINE VECTOR SEARCH: Error searching for query "${searchQuery}": ${error}`);
}
}
// STEP 3: Convert to array, filter and sort
const filteredResults = Array.from(allResults.values())
.filter(note => {
// Filter out notes with no content or very minimal content
const hasContent = note.content && note.content.trim().length > 10;
// Apply similarity threshold
const meetsThreshold = note.similarity >= threshold;
if (!hasContent) {
log.info(`PIPELINE VECTOR SEARCH: Filtering out empty/minimal note: "${note.title}" (${note.noteId})`);
}
if (!meetsThreshold) {
log.info(`PIPELINE VECTOR SEARCH: Filtering out low similarity note: "${note.title}" - ${Math.round(note.similarity * 100)}% < ${Math.round(threshold * 100)}%`);
}
return hasContent && meetsThreshold;
})
.sort((a, b) => b.similarity - a.similarity)
.slice(0, maxResults);
log.info(`PIPELINE VECTOR SEARCH: Search complete, returning ${filteredResults.length} results after filtering`);
// Log top results in detail
if (filteredResults.length > 0) {
log.info(`========== VECTOR SEARCH RESULTS ==========`);
log.info(`Found ${filteredResults.length} relevant notes after filtering`);
const topResults = filteredResults.slice(0, 5); // Show top 5 for better diagnostics
topResults.forEach((result, idx) => {
log.info(`Result ${idx+1}:`);
log.info(` Title: "${result.title}"`);
log.info(` NoteID: ${result.noteId}`);
log.info(` Similarity: ${Math.round(result.similarity * 100)}%`);
if (result.content) {
const contentPreview = result.content.length > 150
? `${result.content.substring(0, 150)}...`
: result.content;
log.info(` Content preview: ${contentPreview}`);
log.info(` Content length: ${result.content.length} chars`);
} else {
log.info(` Content: None or not loaded`);
}
});
if (filteredResults.length > 5) {
log.info(`... and ${filteredResults.length - 5} more results not shown`);
}
log.info(`========== END VECTOR SEARCH RESULTS ==========`);
} else {
log.info(`No results found that meet the similarity threshold of ${threshold}`);
}
// Log final statistics
log.info(`Vector search statistics:`);
log.info(` Original query: "${query.substring(0, 50)}${query.length > 50 ? '...' : ''}"`);
if (searchQueries.length > 1) {
log.info(` Enhanced with ${searchQueries.length} search queries`);
searchQueries.forEach((q, i) => {
if (i > 0) { // Skip the original query
log.info(` Query ${i}: "${q.substring(0, 50)}${q.length > 50 ? '...' : ''}"`);
}
});
}
log.info(` Final results: ${filteredResults.length} notes`);
log.info(` End timestamp: ${new Date().toISOString()}`);
log.info(`========== END PIPELINE VECTOR SEARCH ==========`);
return {
searchResults: filteredResults,
enhancedQueries: useEnhancedQueries ? searchQueries : undefined
};
} catch (error: any) {
log.error(`PIPELINE VECTOR SEARCH: Error in vector search stage: ${error.message || String(error)}`);
return {
searchResults: [],
enhancedQueries: undefined
};
}
}
}

View File

@ -3,10 +3,26 @@ import { BaseAIService } from '../base_ai_service.js';
import type { Message, ChatCompletionOptions, ChatResponse } from '../ai_interface.js';
import sanitizeHtml from 'sanitize-html';
import { OllamaMessageFormatter } from '../formatters/ollama_formatter.js';
import log from '../../log.js';
import type { ToolCall } from '../tools/tool_interfaces.js';
import toolRegistry from '../tools/tool_registry.js';
interface OllamaFunctionArguments {
[key: string]: any;
}
interface OllamaFunctionCall {
function: {
name: string;
arguments: OllamaFunctionArguments | string;
};
id?: string;
}
interface OllamaMessage {
role: string;
content: string;
tool_calls?: OllamaFunctionCall[];
}
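// Illustrative shape of a tool call as it appears in an Ollama chat response
// (example values only):
// { role: 'assistant', content: '', tool_calls: [{ function: { name: 'search_notes', arguments: { query: 'meeting notes' } } }] }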
interface OllamaResponse {
@ -14,6 +30,7 @@ interface OllamaResponse {
created_at: string;
message: OllamaMessage;
done: boolean;
done_reason?: string;
total_duration: number;
load_duration: number;
prompt_eval_count: number;
@ -54,7 +71,7 @@ export class OllamaService extends BaseAIService {
if (opts.bypassFormatter) {
// Bypass the formatter entirely - use messages as is
messagesToSend = [...messages];
log.info(`Bypassing formatter for Ollama request with ${messages.length} messages`);
} else {
// Use the formatter to prepare messages
messagesToSend = this.formatter.formatMessages(
@ -63,22 +80,14 @@ export class OllamaService extends BaseAIService {
undefined, // context
opts.preserveSystemPrompt
);
log.info(`Sending to Ollama with formatted messages: ${messagesToSend.length}`);
}
// Check if this is a request that expects JSON response
const expectsJsonResponse = opts.expectsJsonResponse || false;
if (expectsJsonResponse) {
console.log(`Request expects JSON response, adding response_format parameter`);
}
// Build request body
const requestBody: any = {
model,
messages: messagesToSend,
options: {
@ -87,20 +96,140 @@ export class OllamaService extends BaseAIService {
...(expectsJsonResponse ? { response_format: { type: "json_object" } } : {})
},
stream: false
};
// Add tools if enabled - put them at the top level for Ollama
if (opts.enableTools !== false) {
// Get tools from registry if not provided in options
if (!opts.tools || opts.tools.length === 0) {
try {
// Get tool definitions from registry
const tools = toolRegistry.getAllToolDefinitions();
requestBody.tools = tools;
log.info(`Adding ${tools.length} tools to request`);
// If no tools found, reinitialize
if (tools.length === 0) {
log.info('No tools found in registry, re-initializing...');
try {
const toolInitializer = await import('../tools/tool_initializer.js');
await toolInitializer.default.initializeTools();
// Try again
requestBody.tools = toolRegistry.getAllToolDefinitions();
log.info(`After re-initialization: ${requestBody.tools.length} tools available`);
} catch (err: any) {
log.error(`Failed to re-initialize tools: ${err.message}`);
}
}
} catch (error: any) {
log.error(`Error getting tools: ${error.message || String(error)}`);
// Create default empty tools array if we couldn't load the tools
requestBody.tools = [];
}
} else {
requestBody.tools = opts.tools;
}
log.info(`Adding ${requestBody.tools.length} tools to Ollama request`);
} else {
log.info('Tools are explicitly disabled for this request');
}
// Log key request details
log.info(`========== OLLAMA API REQUEST ==========`);
log.info(`Model: ${requestBody.model}, Messages: ${requestBody.messages.length}, Tools: ${requestBody.tools ? requestBody.tools.length : 0}`);
log.info(`Temperature: ${temperature}, Stream: ${requestBody.stream}, JSON response expected: ${expectsJsonResponse}`);
// Check message structure and log detailed information about each message
requestBody.messages.forEach((msg: any, index: number) => {
const keys = Object.keys(msg);
log.info(`Message ${index}, Role: ${msg.role}, Keys: ${keys.join(', ')}`);
// Log message content preview
if (msg.content && typeof msg.content === 'string') {
const contentPreview = msg.content.length > 200
? `${msg.content.substring(0, 200)}...`
: msg.content;
log.info(`Message ${index} content: ${contentPreview}`);
}
// Log tool-related details
if (keys.includes('tool_calls')) {
log.info(`Message ${index} has ${msg.tool_calls.length} tool calls:`);
msg.tool_calls.forEach((call: any, callIdx: number) => {
log.info(` Tool call ${callIdx}: ${call.function?.name || 'unknown'}, ID: ${call.id || 'unspecified'}`);
if (call.function?.arguments) {
const argsPreview = typeof call.function.arguments === 'string'
? call.function.arguments.substring(0, 100)
: JSON.stringify(call.function.arguments).substring(0, 100);
log.info(` Arguments: ${argsPreview}...`);
}
});
}
if (keys.includes('tool_call_id')) {
log.info(`Message ${index} is a tool response for tool call ID: ${msg.tool_call_id}`);
}
if (keys.includes('name') && msg.role === 'tool') {
log.info(`Message ${index} is from tool: ${msg.name}`);
}
});
// Log tool definitions
if (requestBody.tools && requestBody.tools.length > 0) {
log.info(`Sending ${requestBody.tools.length} tool definitions:`);
requestBody.tools.forEach((tool: any, toolIdx: number) => {
log.info(` Tool ${toolIdx}: ${tool.function?.name || 'unnamed'}`);
if (tool.function?.description) {
log.info(` Description: ${tool.function.description.substring(0, 100)}...`);
}
if (tool.function?.parameters) {
const paramNames = tool.function.parameters.properties
? Object.keys(tool.function.parameters.properties)
: [];
log.info(` Parameters: ${paramNames.join(', ')}`);
}
});
}
// Log full request body (this will create large logs but is helpful for debugging)
const requestStr = JSON.stringify(requestBody);
log.info(`Full Ollama request (truncated): ${requestStr.substring(0, 1000)}...`);
log.info(`========== END OLLAMA REQUEST ==========`);
// Make API request
const response = await fetch(`${apiBase}/api/chat`, {
method: 'POST',
headers: {
'Content-Type': 'application/json'
},
body: JSON.stringify(requestBody)
});
if (!response.ok) {
const errorBody = await response.text();
log.error(`Ollama API error: ${response.status} ${response.statusText} - ${errorBody}`);
throw new Error(`Ollama API error: ${response.status} ${response.statusText}`);
}
const data: OllamaResponse = await response.json();
// Log response details
log.info(`========== OLLAMA API RESPONSE ==========`);
log.info(`Model: ${data.model}, Content length: ${data.message.content.length} chars`);
log.info(`Tokens: ${data.prompt_eval_count} prompt, ${data.eval_count} completion, ${data.prompt_eval_count + data.eval_count} total`);
log.info(`Duration: ${data.total_duration}ns total, ${data.prompt_eval_duration}ns prompt, ${data.eval_duration}ns completion`);
log.info(`Done: ${data.done}, Reason: ${data.done_reason || 'not specified'}`);
// Log content preview
const contentPreview = data.message.content.length > 300
? `${data.message.content.substring(0, 300)}...`
: data.message.content;
log.info(`Response content: ${contentPreview}`);
// Handle the response and extract tool calls if present
const chatResponse: ChatResponse = {
text: data.message.content,
model: data.model,
provider: this.getName(),
@ -110,8 +239,97 @@ export class OllamaService extends BaseAIService {
totalTokens: data.prompt_eval_count + data.eval_count
}
};
// Add tool calls if present
if (data.message.tool_calls && data.message.tool_calls.length > 0) {
log.info(`Ollama response includes ${data.message.tool_calls.length} tool calls`);
// Log detailed information about each tool call
const transformedToolCalls: ToolCall[] = [];
// Log detailed information about the tool calls in the response
log.info(`========== OLLAMA TOOL CALLS IN RESPONSE ==========`);
data.message.tool_calls.forEach((toolCall, index) => {
log.info(`Tool call ${index + 1}:`);
log.info(` Name: ${toolCall.function?.name || 'unknown'}`);
log.info(` ID: ${toolCall.id || `auto-${index + 1}`}`);
// Generate a unique ID if none is provided
const id = toolCall.id || `tool-call-${Date.now()}-${index}`;
// Handle arguments based on their type
let processedArguments: Record<string, any> | string;
if (typeof toolCall.function.arguments === 'string') {
// Log raw string arguments in full for debugging
log.info(` Raw string arguments: ${toolCall.function.arguments}`);
// Try to parse JSON string arguments
try {
processedArguments = JSON.parse(toolCall.function.arguments);
log.info(` Successfully parsed arguments to object with keys: ${Object.keys(processedArguments).join(', ')}`);
log.info(` Parsed argument values:`);
Object.entries(processedArguments).forEach(([key, value]) => {
const valuePreview = typeof value === 'string'
? (value.length > 100 ? `${value.substring(0, 100)}...` : value)
: JSON.stringify(value);
log.info(` ${key}: ${valuePreview}`);
});
} catch (e: any) {
// If parsing fails, keep as string and log the error
processedArguments = toolCall.function.arguments;
log.info(` Could not parse arguments as JSON: ${e.message}`);
log.info(` Keeping as string: ${processedArguments.substring(0, 200)}${processedArguments.length > 200 ? '...' : ''}`);
// Try to clean and parse again with more aggressive methods
try {
const cleaned = toolCall.function.arguments
.replace(/^['"]|['"]$/g, '') // Remove surrounding quotes
.replace(/\\"/g, '"') // Replace escaped quotes
.replace(/([{,])\s*'([^']+)'\s*:/g, '$1"$2":') // Replace single quotes around property names
.replace(/([{,])\s*(\w+)\s*:/g, '$1"$2":'); // Add quotes around unquoted property names
log.info(` Attempting to parse cleaned argument: ${cleaned}`);
const reparseArg = JSON.parse(cleaned);
log.info(` Successfully parsed cleaned argument with keys: ${Object.keys(reparseArg).join(', ')}`);
} catch (cleanErr: any) {
log.info(` Failed to parse cleaned arguments: ${cleanErr.message}`);
}
}
} else {
// If it's already an object, use it directly and log details
processedArguments = toolCall.function.arguments;
log.info(` Object arguments with keys: ${Object.keys(processedArguments).join(', ')}`);
log.info(` Argument values:`);
Object.entries(processedArguments).forEach(([key, value]) => {
const valuePreview = typeof value === 'string'
? (value.length > 100 ? `${value.substring(0, 100)}...` : value)
: JSON.stringify(value);
log.info(` ${key}: ${valuePreview}`);
});
}
// Convert to our standard ToolCall format
transformedToolCalls.push({
id,
type: 'function',
function: {
name: toolCall.function.name,
arguments: processedArguments
}
});
});
// Add transformed tool calls to response
chatResponse.tool_calls = transformedToolCalls;
log.info(`Transformed ${transformedToolCalls.length} tool calls for execution`);
log.info(`========== END OLLAMA TOOL CALLS ==========`);
}
log.info(`========== END OLLAMA RESPONSE ==========`);
return chatResponse;
} catch (error: any) {
log.error(`Ollama service error: ${error.message || String(error)}`);
throw error;
}
}
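
The JSON-repair logic above appears twice in this commit (here and in the REST chat service below); a minimal standalone sketch of the same normalization, with the regexes copied from the diff and the helper name hypothetical:

// Hypothetical helper mirroring the argument-cleanup steps above.
function normalizeToolArguments(raw: string): Record<string, any> {
    try {
        return JSON.parse(raw);
    } catch {
        const cleaned = raw
            .replace(/^['"]|['"]$/g, '')                   // remove surrounding quotes
            .replace(/\\"/g, '"')                          // replace escaped quotes
            .replace(/([{,])\s*'([^']+)'\s*:/g, '$1"$2":') // single-quoted property names
            .replace(/([{,])\s*(\w+)\s*:/g, '$1"$2":');    // unquoted property names
        return JSON.parse(cleaned);                        // still throws if unrecoverable
    }
}

// e.g. normalizeToolArguments('{ noteId: "abc123" }') -> { noteId: 'abc123' }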

View File

@ -609,6 +609,56 @@ class RestChatService {
// Use the correct method name: generateChatCompletion
const response = await service.generateChatCompletion(aiMessages, chatOptions);
// Check for tool calls in the response
if (response.tool_calls && response.tool_calls.length > 0) {
log.info(`========== STREAMING TOOL CALLS DETECTED ==========`);
log.info(`Response contains ${response.tool_calls.length} tool calls, executing them...`);
try {
// Execute the tools
const toolResults = await this.executeToolCalls(response);
// Make a follow-up request with the tool results
const toolMessages = [...aiMessages, {
role: 'assistant',
content: response.text || '',
tool_calls: response.tool_calls
}, ...toolResults];
log.info(`Making follow-up request with ${toolResults.length} tool results`);
// Send partial response to let the client know tools are being processed
if (!res.writableEnded) {
res.write(`data: ${JSON.stringify({ content: "Processing tools... " })}\n\n`);
}
// Use non-streaming for the follow-up to get a complete response
const followUpOptions = {...chatOptions, stream: false, enableTools: false}; // Prevent infinite loops
const followUpResponse = await service.generateChatCompletion(toolMessages, followUpOptions);
messageContent = followUpResponse.text || "";
// Send the complete response as a single chunk
if (!res.writableEnded) {
res.write(`data: ${JSON.stringify({ content: messageContent })}\n\n`);
res.write('data: [DONE]\n\n');
res.end();
}
// Store the full response for the session
session.messages.push({
role: 'assistant',
content: messageContent,
timestamp: new Date()
});
return; // Skip the rest of the processing
} catch (toolError) {
log.error(`Error executing tools: ${toolError}`);
// Continue with normal streaming response as fallback
}
}
// Handle streaming if the response includes a stream method
if (response.stream) {
await response.stream((chunk: { text: string; done: boolean }) => {
@ -667,6 +717,113 @@ class RestChatService {
}
}
/**
* Execute tool calls from the LLM response
* @param response The LLM response containing tool calls
*/
private async executeToolCalls(response: any): Promise<Message[]> {
if (!response.tool_calls || response.tool_calls.length === 0) {
return [];
}
log.info(`Executing ${response.tool_calls.length} tool calls from REST chat service`);
// Import tool registry directly to avoid circular dependencies
const toolRegistry = (await import('./tools/tool_registry.js')).default;
// Check if tools are available
const availableTools = toolRegistry.getAllTools();
if (availableTools.length === 0) {
log.error('No tools available in registry for execution');
// Try to initialize tools
try {
const toolInitializer = await import('./tools/tool_initializer.js');
await toolInitializer.default.initializeTools();
log.info(`Initialized ${toolRegistry.getAllTools().length} tools`);
} catch (error) {
log.error(`Failed to initialize tools: ${error}`);
throw new Error('Tool execution failed: No tools available');
}
}
// Execute each tool call and collect results
const toolResults = await Promise.all(response.tool_calls.map(async (toolCall: any) => {
try {
log.info(`Executing tool: ${toolCall.function.name}, ID: ${toolCall.id || 'unknown'}`);
// Get the tool from registry
const tool = toolRegistry.getTool(toolCall.function.name);
if (!tool) {
throw new Error(`Tool not found: ${toolCall.function.name}`);
}
// Parse arguments
let args;
if (typeof toolCall.function.arguments === 'string') {
try {
args = JSON.parse(toolCall.function.arguments);
} catch (e: any) {
log.error(`Failed to parse tool arguments: ${e.message}`);
// Try cleanup and retry
try {
const cleaned = toolCall.function.arguments
.replace(/^['"]|['"]$/g, '') // Remove surrounding quotes
.replace(/\\"/g, '"') // Replace escaped quotes
.replace(/([{,])\s*'([^']+)'\s*:/g, '$1"$2":') // Replace single quotes around property names
.replace(/([{,])\s*(\w+)\s*:/g, '$1"$2":'); // Add quotes around unquoted property names
args = JSON.parse(cleaned);
} catch (cleanErr) {
// If all parsing fails, use as-is
args = { text: toolCall.function.arguments };
}
}
} else {
args = toolCall.function.arguments;
}
// Log what we're about to execute
log.info(`Executing tool with arguments: ${JSON.stringify(args)}`);
// Execute the tool and get result
const startTime = Date.now();
const result = await tool.execute(args);
const executionTime = Date.now() - startTime;
log.info(`Tool execution completed in ${executionTime}ms`);
// Log the result
const resultPreview = typeof result === 'string'
? result.substring(0, 100) + (result.length > 100 ? '...' : '')
: JSON.stringify(result).substring(0, 100) + '...';
log.info(`Tool result: ${resultPreview}`);
// Format result as a proper message
return {
role: 'tool',
content: typeof result === 'string' ? result : JSON.stringify(result),
name: toolCall.function.name,
tool_call_id: toolCall.id || `tool-${Date.now()}-${Math.random().toString(36).substring(2, 9)}`
};
} catch (error: any) {
log.error(`Error executing tool ${toolCall.function.name}: ${error.message}`);
// Return error as tool result
return {
role: 'tool',
content: `Error: ${error.message}`,
name: toolCall.function.name,
tool_call_id: toolCall.id || `tool-${Date.now()}-${Math.random().toString(36).substring(2, 9)}`
};
}
}));
log.info(`Completed execution of ${toolResults.length} tools`);
return toolResults;
}
/**
* Build context from relevant notes
*/
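
For reference, a sketch of the message sequence the follow-up request sends after one tool round-trip; the shape follows the streaming branch earlier in this file, with all values illustrative:

// Illustrative conversation after executing one tool call.
const followUpMessages = [
    { role: 'user', content: 'What does my meeting note say?' },
    {
        role: 'assistant',
        content: '',
        tool_calls: [{
            id: 'tool-call-1',
            type: 'function',
            function: { name: 'read_note', arguments: { noteId: 'abc123' } }
        }]
    },
    {
        role: 'tool',
        name: 'read_note',
        tool_call_id: 'tool-call-1',
        content: '{"title":"Meeting","content":"..."}'
    }
];
// Sent with { stream: false, enableTools: false } to prevent tool-call loops.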

View File

@ -0,0 +1,101 @@
/**
* Read Note Tool
*
* This tool allows the LLM to read the content of a specific note.
*/
import type { Tool, ToolHandler } from './tool_interfaces.js';
import log from '../../log.js';
import becca from '../../../becca/becca.js';
/**
* Definition of the read note tool
*/
export const readNoteToolDefinition: Tool = {
type: 'function',
function: {
name: 'read_note',
description: 'Read the content of a specific note by its ID',
parameters: {
type: 'object',
properties: {
noteId: {
type: 'string',
description: 'The ID of the note to read'
},
includeAttributes: {
type: 'boolean',
description: 'Whether to include note attributes in the response (default: false)'
}
},
required: ['noteId']
}
}
};
/**
* Read note tool implementation
*/
export class ReadNoteTool implements ToolHandler {
public definition: Tool = readNoteToolDefinition;
/**
* Execute the read note tool
*/
public async execute(args: { noteId: string, includeAttributes?: boolean }): Promise<string | object> {
try {
const { noteId, includeAttributes = false } = args;
log.info(`Executing read_note tool - NoteID: "${noteId}", IncludeAttributes: ${includeAttributes}`);
// Get the note from becca
const note = becca.notes[noteId];
if (!note) {
log.info(`Note with ID ${noteId} not found - returning error`);
return `Error: Note with ID ${noteId} not found`;
}
log.info(`Found note: "${note.title}" (Type: ${note.type})`);
// Get note content
const startTime = Date.now();
const content = await note.getContent();
const duration = Date.now() - startTime;
log.info(`Retrieved note content in ${duration}ms, content length: ${content?.length || 0} chars`);
// Prepare the response
const response: any = {
noteId: note.noteId,
title: note.title,
type: note.type,
content: content || ''
};
// Include attributes if requested
if (includeAttributes) {
const attributes = note.getOwnedAttributes();
log.info(`Including ${attributes.length} attributes in response`);
response.attributes = attributes.map(attr => ({
name: attr.name,
value: attr.value,
type: attr.type
}));
if (attributes.length > 0) {
// Log some example attributes
attributes.slice(0, 3).forEach((attr, index) => {
log.info(`Attribute ${index + 1}: ${attr.name}=${attr.value} (${attr.type})`);
});
}
}
return response;
} catch (error: any) {
log.error(`Error executing read_note tool: ${error.message || String(error)}`);
return `Error: ${error.message || String(error)}`;
}
}
}
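
A quick usage sketch for the handler above; the note ID is illustrative:

// Hypothetical direct invocation of ReadNoteTool.
import { ReadNoteTool } from './read_note_tool.js';

async function demoReadNote(): Promise<void> {
    const readNote = new ReadNoteTool();
    const result = await readNote.execute({ noteId: 'abc123', includeAttributes: true });
    // result is either an error string or { noteId, title, type, content, attributes? }
    console.log(result);
}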

View File

@ -0,0 +1,95 @@
/**
* Search Notes Tool
*
* This tool allows the LLM to search for notes using semantic search.
*/
import type { Tool, ToolHandler } from './tool_interfaces.js';
import log from '../../log.js';
import aiServiceManager from '../ai_service_manager.js';
/**
* Definition of the search notes tool
*/
export const searchNotesToolDefinition: Tool = {
type: 'function',
function: {
name: 'search_notes',
description: 'Search for notes in the database using semantic search. Returns notes most semantically related to the query.',
parameters: {
type: 'object',
properties: {
query: {
type: 'string',
description: 'The search query to find semantically related notes'
},
parentNoteId: {
type: 'string',
description: 'Optional parent note ID to restrict search to a specific branch'
},
maxResults: {
type: 'number',
description: 'Maximum number of results to return (default: 5)'
}
},
required: ['query']
}
}
};
/**
* Search notes tool implementation
*/
export class SearchNotesTool implements ToolHandler {
public definition: Tool = searchNotesToolDefinition;
/**
* Execute the search notes tool
*/
public async execute(args: { query: string, parentNoteId?: string, maxResults?: number }): Promise<string | object> {
try {
const { query, parentNoteId, maxResults = 5 } = args;
log.info(`Executing search_notes tool - Query: "${query}", ParentNoteId: ${parentNoteId || 'not specified'}, MaxResults: ${maxResults}`);
// Get the vector search tool from the AI service manager
const vectorSearchTool = aiServiceManager.getVectorSearchTool();
log.info(`Retrieved vector search tool from AI service manager`);
// Execute the search
log.info(`Performing semantic search for: "${query}"`);
const searchStartTime = Date.now();
const results = await vectorSearchTool.searchNotes(query, {
parentNoteId,
maxResults
});
const searchDuration = Date.now() - searchStartTime;
log.info(`Search completed in ${searchDuration}ms, found ${results.length} matching notes`);
if (results.length > 0) {
// Log top results
results.slice(0, 3).forEach((result, index) => {
log.info(`Result ${index + 1}: "${result.title}" (similarity: ${Math.round(result.similarity * 100)}%)`);
});
} else {
log.info(`No matching notes found for query: "${query}"`);
}
// Format the results
return {
count: results.length,
results: results.map(result => ({
noteId: result.noteId,
title: result.title,
preview: result.contentPreview,
similarity: Math.round(result.similarity * 100) / 100,
parentId: result.parentId
}))
};
} catch (error: any) {
log.error(`Error executing search_notes tool: ${error.message || String(error)}`);
return `Error: ${error.message || String(error)}`;
}
}
}
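
For illustration, a tool call an LLM might emit against this definition; the id and argument values are made up:

// Example payload matching searchNotesToolDefinition.
import type { ToolCall } from './tool_interfaces.js';

const exampleToolCall: ToolCall = {
    id: 'tool-call-1',
    type: 'function',
    function: {
        name: 'search_notes',
        arguments: { query: 'project deadlines', maxResults: 3 }
    }
};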

View File

@ -0,0 +1,36 @@
/**
* Tool Initializer
*
* This module initializes all available tools for the LLM to use.
*/
import toolRegistry from './tool_registry.js';
import { SearchNotesTool } from './search_notes_tool.js';
import { ReadNoteTool } from './read_note_tool.js';
import log from '../../log.js';
/**
* Initialize all tools for the LLM
*/
export async function initializeTools(): Promise<void> {
try {
log.info('Initializing LLM tools...');
// Register basic notes tools
toolRegistry.registerTool(new SearchNotesTool());
toolRegistry.registerTool(new ReadNoteTool());
// More tools can be registered here
// Log registered tools
const toolCount = toolRegistry.getAllTools().length;
log.info(`Successfully registered ${toolCount} LLM tools`);
} catch (error: any) {
log.error(`Error initializing LLM tools: ${error.message || String(error)}`);
// Don't throw, just log the error to prevent breaking the pipeline
}
}
export default {
initializeTools
};
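
A hedged sketch of wiring this in at startup; the call site and function name are hypothetical:

// Hypothetical startup hook: register all tools once before serving chat requests.
import toolInitializer from './tool_initializer.js';

export async function startLlmSubsystem(): Promise<void> {
    await toolInitializer.initializeTools();
}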

View File

@ -0,0 +1,57 @@
/**
* Tool Interfaces
*
* This file defines the interfaces for the LLM tool calling system.
*/
/**
* Interface for a tool definition to be sent to the LLM
*/
export interface Tool {
type: 'function';
function: {
name: string;
description: string;
parameters: {
type: 'object';
properties: Record<string, ToolParameter>;
required: string[];
};
};
}
/**
* Interface for a tool parameter
*/
export interface ToolParameter {
type: string;
description: string;
enum?: string[];
}
/**
* Interface for a tool call from the LLM
*/
export interface ToolCall {
id?: string;
type?: string;
function: {
name: string;
arguments: Record<string, any> | string;
};
}
/**
* Interface for a tool handler that executes a tool
*/
export interface ToolHandler {
/**
* Tool definition to be sent to the LLM
*/
definition: Tool;
/**
* Execute the tool with the given arguments
*/
execute(args: Record<string, any>): Promise<string | object>;
}
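
A minimal handler satisfying these interfaces might look like the following; the echo tool is purely illustrative:

// Illustrative ToolHandler implementation against the interfaces above.
import type { Tool, ToolHandler } from './tool_interfaces.js';

export class EchoTool implements ToolHandler {
    public definition: Tool = {
        type: 'function',
        function: {
            name: 'echo',
            description: 'Echo the provided text back to the caller',
            parameters: {
                type: 'object',
                properties: {
                    text: { type: 'string', description: 'Text to echo back' }
                },
                required: ['text']
            }
        }
    };

    public async execute(args: Record<string, any>): Promise<string> {
        return String(args.text ?? '');
    }
}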

View File

@ -0,0 +1,69 @@
/**
* Tool Registry
*
* This file defines the registry for tools that can be called by LLMs.
*/
import type { Tool, ToolHandler } from './tool_interfaces.js';
import log from '../../log.js';
/**
* Registry for tools that can be called by LLMs
*/
export class ToolRegistry {
private static instance: ToolRegistry;
private tools: Map<string, ToolHandler> = new Map();
private constructor() {}
/**
* Get singleton instance of the tool registry
*/
public static getInstance(): ToolRegistry {
if (!ToolRegistry.instance) {
ToolRegistry.instance = new ToolRegistry();
}
return ToolRegistry.instance;
}
/**
* Register a tool with the registry
*/
public registerTool(handler: ToolHandler): void {
const name = handler.definition.function.name;
if (this.tools.has(name)) {
log.info(`Tool '${name}' already registered, replacing...`);
}
this.tools.set(name, handler);
log.info(`Registered tool: ${name}`);
}
/**
* Get a tool by name
*/
public getTool(name: string): ToolHandler | undefined {
return this.tools.get(name);
}
/**
* Get all registered tools
*/
public getAllTools(): ToolHandler[] {
return Array.from(this.tools.values());
}
/**
* Get all tool definitions for sending to LLM
*/
public getAllToolDefinitions(): Tool[] {
const toolDefs = Array.from(this.tools.values()).map(handler => handler.definition);
return toolDefs;
}
}
// Export singleton instance
const toolRegistry = ToolRegistry.getInstance();
export default toolRegistry;
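
Typical registry usage, using the illustrative EchoTool sketched after the interfaces file (import path hypothetical):

// Register a handler, then collect definitions for the LLM request body.
import toolRegistry from './tool_registry.js';
import { EchoTool } from './echo_tool.js';

toolRegistry.registerTool(new EchoTool());
const toolDefinitions = toolRegistry.getAllToolDefinitions();
// -> [{ type: 'function', function: { name: 'echo', ... } }]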