Well, at least query decomposition is working... for now

This commit is contained in:
perf3ct 2025-04-17 17:19:52 +00:00
parent 5e50a2918d
commit 7062e51f2d
No known key found for this signature in database
GPG Key ID: 569C4EEC436F5232
5 changed files with 1002 additions and 795 deletions

View File

@ -24,306 +24,324 @@ import type { LLMServiceInterface } from '../../interfaces/agent_tool_interfaces
// Options for context processing.
// NOTE(review): reconstructed from an interleaved diff rendering — the original
// span contained every member twice; this is the single de-duplicated interface.
export interface ContextOptions {
    // Content options
    summarizeContent?: boolean;      // summarize note content via the LLM before inclusion
    maxResults?: number;             // cap on the number of notes returned as sources
    contextNoteId?: string | null;   // restrict search to a subtree; null = global search

    // Processing options
    useQueryEnhancement?: boolean;   // rewrite the query into multiple search queries
    useQueryDecomposition?: boolean; // break a complex query into sub-queries

    // Debugging options
    showThinking?: boolean;          // include a human-readable reasoning trace in the result
}
export class ContextService {
private initialized = false;
private initPromise: Promise<void> | null = null;
private contextExtractor: ContextExtractor;
private initialized = false;
private initPromise: Promise<void> | null = null;
private contextExtractor: ContextExtractor;
constructor() {
this.contextExtractor = new ContextExtractor();
}
/**
* Initialize the service
*/
async initialize(): Promise<void> {
if (this.initialized) return;
// Use a promise to prevent multiple simultaneous initializations
if (this.initPromise) return this.initPromise;
this.initPromise = (async () => {
try {
// Initialize provider
const provider = await providerManager.getPreferredEmbeddingProvider();
if (!provider) {
throw new Error(`No embedding provider available. Could not initialize context service.`);
}
// Agent tools are already initialized in the AIServiceManager constructor
// No need to initialize them again
this.initialized = true;
log.info(`Context service initialized with provider: ${provider.name}`);
} catch (error: unknown) {
const errorMessage = error instanceof Error ? error.message : String(error);
log.error(`Failed to initialize context service: ${errorMessage}`);
throw error;
} finally {
this.initPromise = null;
}
})();
return this.initPromise;
}
/**
* Process a user query to find relevant context in Trilium notes
*
* @param userQuestion - The user's query
* @param llmService - The LLM service to use
* @param options - Context processing options
* @returns Context information and relevant notes
*/
async processQuery(
userQuestion: string,
llmService: LLMServiceInterface,
options: ContextOptions = {}
): Promise<{
context: string;
sources: NoteSearchResult[];
thinking?: string;
decomposedQuery?: any;
}> {
// Set default options
const {
summarizeContent = false,
maxResults = 10,
contextNoteId = null,
useQueryEnhancement = true,
useQueryDecomposition = false,
showThinking = false
} = options;
log.info(`Processing query: "${userQuestion.substring(0, 50)}..."`);
log.info(`Options: summarize=${summarizeContent}, maxResults=${maxResults}, contextNoteId=${contextNoteId || 'global'}`);
log.info(`Processing: enhancement=${useQueryEnhancement}, decomposition=${useQueryDecomposition}, showThinking=${showThinking}`);
if (!this.initialized) {
try {
await this.initialize();
} catch (error) {
log.error(`Failed to initialize ContextService: ${error}`);
// Return a fallback response if initialization fails
return {
context: CONTEXT_PROMPTS.NO_NOTES_CONTEXT,
sources: [],
thinking: undefined
};
}
constructor() {
this.contextExtractor = new ContextExtractor();
}
try {
let decomposedQuery;
let searchQueries: string[] = [userQuestion];
let relevantNotes: NoteSearchResult[] = [];
/**
* Initialize the service
*/
async initialize(): Promise<void> {
if (this.initialized) return;
// Step 1: Decompose query if requested
if (useQueryDecomposition) {
log.info(`Decomposing query for better understanding`);
decomposedQuery = queryProcessor.decomposeQuery(userQuestion);
// Use a promise to prevent multiple simultaneous initializations
if (this.initPromise) return this.initPromise;
// Extract sub-queries to use for search
if (decomposedQuery.subQueries.length > 0) {
searchQueries = decomposedQuery.subQueries
.map(sq => sq.text)
.filter(text => text !== userQuestion); // Remove the original query to avoid duplication
this.initPromise = (async () => {
try {
// Initialize provider
const provider = await providerManager.getPreferredEmbeddingProvider();
if (!provider) {
throw new Error(`No embedding provider available. Could not initialize context service.`);
}
// Always include the original query
searchQueries.unshift(userQuestion);
// Agent tools are already initialized in the AIServiceManager constructor
// No need to initialize them again
log.info(`Query decomposed into ${searchQueries.length} search queries`);
this.initialized = true;
log.info(`Context service initialized with provider: ${provider.name}`);
} catch (error: unknown) {
const errorMessage = error instanceof Error ? error.message : String(error);
log.error(`Failed to initialize context service: ${errorMessage}`);
throw error;
} finally {
this.initPromise = null;
}
})();
return this.initPromise;
}
/**
* Process a user query to find relevant context in Trilium notes
*
* @param userQuestion - The user's query
* @param llmService - The LLM service to use
* @param options - Context processing options
* @returns Context information and relevant notes
*/
async processQuery(
userQuestion: string,
llmService: LLMServiceInterface,
options: ContextOptions = {}
): Promise<{
context: string;
sources: NoteSearchResult[];
thinking?: string;
decomposedQuery?: any;
}> {
// Set default options
const {
summarizeContent = false,
maxResults = 10,
contextNoteId = null,
useQueryEnhancement = true,
useQueryDecomposition = false,
showThinking = false
} = options;
log.info(`Processing query: "${userQuestion.substring(0, 50)}..."`);
log.info(`Options: summarize=${summarizeContent}, maxResults=${maxResults}, contextNoteId=${contextNoteId || 'global'}`);
log.info(`Processing: enhancement=${useQueryEnhancement}, decomposition=${useQueryDecomposition}, showThinking=${showThinking}`);
if (!this.initialized) {
try {
await this.initialize();
} catch (error) {
log.error(`Failed to initialize ContextService: ${error}`);
// Return a fallback response if initialization fails
return {
context: CONTEXT_PROMPTS.NO_NOTES_CONTEXT,
sources: [],
thinking: undefined
};
}
}
}
// Step 2: Or enhance query if requested
else if (useQueryEnhancement) {
try {
log.info(`Enhancing query for better semantic matching`);
searchQueries = await queryProcessor.generateSearchQueries(userQuestion, llmService);
log.info(`Generated ${searchQueries.length} enhanced search queries`);
let decomposedQuery;
let searchQueries: string[] = [userQuestion];
let relevantNotes: NoteSearchResult[] = [];
// Step 1: Decompose query if requested
if (useQueryDecomposition) {
log.info(`Decomposing query for better understanding`);
try {
// Use the async version with the LLM service
decomposedQuery = await queryProcessor.decomposeQuery(userQuestion, undefined, llmService);
log.info(`Successfully decomposed query complexity: ${decomposedQuery.complexity}/10 with ${decomposedQuery.subQueries.length} sub-queries`);
} catch (error) {
log.error(`Error in query decomposition, using fallback: ${error}`);
// Fallback to simpler decomposition
decomposedQuery = {
originalQuery: userQuestion,
subQueries: [{
id: `sq_fallback_${Date.now()}`,
text: userQuestion,
reason: "Fallback to original query due to decomposition error",
isAnswered: false
}],
status: 'pending',
complexity: 1
};
}
// Extract sub-queries to use for search
if (decomposedQuery.subQueries.length > 0) {
searchQueries = decomposedQuery.subQueries
.map(sq => sq.text)
.filter(text => text !== userQuestion); // Remove the original query to avoid duplication
// Always include the original query
searchQueries.unshift(userQuestion);
log.info(`Query decomposed into ${searchQueries.length} search queries`);
}
}
// Step 2: Or enhance query if requested
else if (useQueryEnhancement) {
try {
log.info(`Enhancing query for better semantic matching`);
searchQueries = await queryProcessor.generateSearchQueries(userQuestion, llmService);
log.info(`Generated ${searchQueries.length} enhanced search queries`);
} catch (error) {
log.error(`Error generating search queries, using fallback: ${error}`);
searchQueries = [userQuestion]; // Fallback to using the original question
}
}
// Step 3: Find relevant notes using vector search
const allResults = new Map<string, NoteSearchResult>();
for (const query of searchQueries) {
try {
log.info(`Searching for: "${query.substring(0, 50)}..."`);
// Use the unified vector search service
const results = await vectorSearchService.findRelevantNotes(
query,
contextNoteId,
{
maxResults: maxResults,
summarizeContent: summarizeContent,
llmService: summarizeContent ? llmService : null
}
);
log.info(`Found ${results.length} results for query "${query.substring(0, 30)}..."`);
// Combine results, avoiding duplicates
for (const result of results) {
if (!allResults.has(result.noteId)) {
allResults.set(result.noteId, result);
} else {
// If note already exists, update similarity to max of both values
const existing = allResults.get(result.noteId);
if (existing && result.similarity > existing.similarity) {
existing.similarity = result.similarity;
allResults.set(result.noteId, existing);
}
}
}
} catch (error) {
log.error(`Error searching for query "${query}": ${error}`);
}
}
// Convert to array and sort by similarity
relevantNotes = Array.from(allResults.values())
.sort((a, b) => b.similarity - a.similarity)
.slice(0, maxResults);
log.info(`Final combined results: ${relevantNotes.length} relevant notes`);
// Step 4: Build context from the notes
const provider = await providerManager.getPreferredEmbeddingProvider();
const providerId = provider?.name || 'default';
const context = await contextFormatter.buildContextFromNotes(
relevantNotes,
userQuestion,
providerId
);
// Step 5: Add agent tools context if requested
let enhancedContext = context;
let thinkingProcess: string | undefined = undefined;
if (showThinking) {
thinkingProcess = this.generateThinkingProcess(
userQuestion,
searchQueries,
relevantNotes,
decomposedQuery
);
}
return {
context: enhancedContext,
sources: relevantNotes,
thinking: thinkingProcess,
decomposedQuery
};
} catch (error) {
log.error(`Error generating search queries, using fallback: ${error}`);
searchQueries = [userQuestion]; // Fallback to using the original question
log.error(`Error processing query: ${error}`);
return {
context: CONTEXT_PROMPTS.NO_NOTES_CONTEXT,
sources: [],
thinking: undefined
};
}
}
}
// Step 3: Find relevant notes using vector search
const allResults = new Map<string, NoteSearchResult>();
/**
* Generate a thinking process for debugging and transparency
*/
private generateThinkingProcess(
originalQuery: string,
searchQueries: string[],
relevantNotes: NoteSearchResult[],
decomposedQuery?: any
): string {
let thinking = `## Query Processing\n\n`;
thinking += `Original query: "${originalQuery}"\n\n`;
for (const query of searchQueries) {
try {
log.info(`Searching for: "${query.substring(0, 50)}..."`);
// Add decomposition analysis if available
if (decomposedQuery) {
thinking += `Query complexity: ${decomposedQuery.complexity}/10\n\n`;
thinking += `### Decomposed into ${decomposedQuery.subQueries.length} sub-queries:\n`;
// Use the unified vector search service
const results = await vectorSearchService.findRelevantNotes(
decomposedQuery.subQueries.forEach((sq: any, i: number) => {
thinking += `${i + 1}. ${sq.text}\n Reason: ${sq.reason}\n\n`;
});
}
// Add search queries
thinking += `### Search Queries Used:\n`;
searchQueries.forEach((q, i) => {
thinking += `${i + 1}. "${q}"\n`;
});
// Add found sources
thinking += `\n## Sources Retrieved (${relevantNotes.length})\n\n`;
relevantNotes.slice(0, 5).forEach((note, i) => {
thinking += `${i + 1}. "${note.title}" (Score: ${Math.round(note.similarity * 100)}%)\n`;
thinking += ` ID: ${note.noteId}\n`;
// Check if parentPath exists before using it
if ('parentPath' in note && note.parentPath) {
thinking += ` Path: ${note.parentPath}\n`;
}
if (note.content) {
const contentPreview = note.content.length > 100
? note.content.substring(0, 100) + '...'
: note.content;
thinking += ` Preview: ${contentPreview}\n`;
}
thinking += '\n';
});
if (relevantNotes.length > 5) {
thinking += `... and ${relevantNotes.length - 5} more sources\n`;
}
return thinking;
}
/**
* Find notes semantically related to a query
* (Shorthand method that directly uses vectorSearchService)
*/
async findRelevantNotes(
query: string,
contextNoteId: string | null = null,
options: {
maxResults?: number,
summarize?: boolean,
llmService?: LLMServiceInterface | null
} = {}
): Promise<NoteSearchResult[]> {
return vectorSearchService.findRelevantNotes(
query,
contextNoteId,
{
maxResults: maxResults,
summarizeContent: summarizeContent,
llmService: summarizeContent ? llmService : null
maxResults: options.maxResults,
summarizeContent: options.summarize,
llmService: options.llmService
}
);
log.info(`Found ${results.length} results for query "${query.substring(0, 30)}..."`);
// Combine results, avoiding duplicates
for (const result of results) {
if (!allResults.has(result.noteId)) {
allResults.set(result.noteId, result);
} else {
// If note already exists, update similarity to max of both values
const existing = allResults.get(result.noteId);
if (existing && result.similarity > existing.similarity) {
existing.similarity = result.similarity;
allResults.set(result.noteId, existing);
}
}
}
} catch (error) {
log.error(`Error searching for query "${query}": ${error}`);
}
}
// Convert to array and sort by similarity
relevantNotes = Array.from(allResults.values())
.sort((a, b) => b.similarity - a.similarity)
.slice(0, maxResults);
log.info(`Final combined results: ${relevantNotes.length} relevant notes`);
// Step 4: Build context from the notes
const provider = await providerManager.getPreferredEmbeddingProvider();
const providerId = provider?.name || 'default';
const context = await contextFormatter.buildContextFromNotes(
relevantNotes,
userQuestion,
providerId
);
// Step 5: Add agent tools context if requested
let enhancedContext = context;
let thinkingProcess: string | undefined = undefined;
if (showThinking) {
thinkingProcess = this.generateThinkingProcess(
userQuestion,
searchQueries,
relevantNotes,
decomposedQuery
);
}
return {
context: enhancedContext,
sources: relevantNotes,
thinking: thinkingProcess,
decomposedQuery
};
} catch (error) {
log.error(`Error processing query: ${error}`);
return {
context: CONTEXT_PROMPTS.NO_NOTES_CONTEXT,
sources: [],
thinking: undefined
};
}
}
/**
* Generate a thinking process for debugging and transparency
*/
private generateThinkingProcess(
originalQuery: string,
searchQueries: string[],
relevantNotes: NoteSearchResult[],
decomposedQuery?: any
): string {
let thinking = `## Query Processing\n\n`;
thinking += `Original query: "${originalQuery}"\n\n`;
// Add decomposition analysis if available
if (decomposedQuery) {
thinking += `Query complexity: ${decomposedQuery.complexity}/10\n\n`;
thinking += `### Decomposed into ${decomposedQuery.subQueries.length} sub-queries:\n`;
decomposedQuery.subQueries.forEach((sq: any, i: number) => {
thinking += `${i+1}. ${sq.text}\n Reason: ${sq.reason}\n\n`;
});
}
// Add search queries
thinking += `### Search Queries Used:\n`;
searchQueries.forEach((q, i) => {
thinking += `${i+1}. "${q}"\n`;
});
// Add found sources
thinking += `\n## Sources Retrieved (${relevantNotes.length})\n\n`;
relevantNotes.slice(0, 5).forEach((note, i) => {
thinking += `${i+1}. "${note.title}" (Score: ${Math.round(note.similarity * 100)}%)\n`;
thinking += ` ID: ${note.noteId}\n`;
// Check if parentPath exists before using it
if ('parentPath' in note && note.parentPath) {
thinking += ` Path: ${note.parentPath}\n`;
}
if (note.content) {
const contentPreview = note.content.length > 100
? note.content.substring(0, 100) + '...'
: note.content;
thinking += ` Preview: ${contentPreview}\n`;
}
thinking += '\n';
});
if (relevantNotes.length > 5) {
thinking += `... and ${relevantNotes.length - 5} more sources\n`;
}
return thinking;
}
/**
* Find notes semantically related to a query
* (Shorthand method that directly uses vectorSearchService)
*/
async findRelevantNotes(
query: string,
contextNoteId: string | null = null,
options: {
maxResults?: number,
summarize?: boolean,
llmService?: LLMServiceInterface | null
} = {}
): Promise<NoteSearchResult[]> {
return vectorSearchService.findRelevantNotes(
query,
contextNoteId,
{
maxResults: options.maxResults,
summarizeContent: options.summarize,
llmService: options.llmService
}
);
}
}
// Export a singleton instance

File diff suppressed because it is too large Load Diff

View File

@ -12,83 +12,129 @@ import type { SubQuery, DecomposedQuery } from '../context/services/query_proces
export type { SubQuery, DecomposedQuery };
export class QueryDecompositionTool {
/**
* Break down a complex query into smaller, more manageable sub-queries
*
* @param query The original user query
* @param context Optional context about the current note being viewed
* @returns A decomposed query object with sub-queries
*/
decomposeQuery(query: string, context?: string): DecomposedQuery {
log.info('Using compatibility layer for QueryDecompositionTool.decomposeQuery');
return queryProcessor.decomposeQuery(query, context);
}
/**
* Break down a complex query into smaller, more manageable sub-queries
*
* @param query The original user query
* @param context Optional context about the current note being viewed
* @returns A decomposed query object with sub-queries
*/
decomposeQuery(query: string, context?: string): DecomposedQuery {
log.info('Using compatibility layer for QueryDecompositionTool.decomposeQuery');
/**
* Update a sub-query with its answer
*
* @param decomposedQuery The decomposed query object
* @param subQueryId The ID of the sub-query to update
* @param answer The answer to the sub-query
* @returns The updated decomposed query
*/
updateSubQueryAnswer(
decomposedQuery: DecomposedQuery,
subQueryId: string,
answer: string
): DecomposedQuery {
log.info('Using compatibility layer for QueryDecompositionTool.updateSubQueryAnswer');
return queryProcessor.updateSubQueryAnswer(decomposedQuery, subQueryId, answer);
}
// Since the main implementation is now async but we need to maintain a sync interface,
// we'll use a simpler approach that doesn't require LLM
/**
* Synthesize all sub-query answers into a comprehensive response
*
* @param decomposedQuery The decomposed query with all sub-queries answered
* @returns A synthesized answer to the original query
*/
synthesizeAnswer(decomposedQuery: DecomposedQuery): string {
log.info('Using compatibility layer for QueryDecompositionTool.synthesizeAnswer');
return queryProcessor.synthesizeAnswer(decomposedQuery);
}
// Get the complexity to determine approach
const complexity = queryProcessor.assessQueryComplexity(query);
/**
* Generate a status report on the progress of answering a complex query
*
* @param decomposedQuery The decomposed query
* @returns A status report string
*/
getQueryStatus(decomposedQuery: DecomposedQuery): string {
log.info('Using compatibility layer for QueryDecompositionTool.getQueryStatus');
// This method doesn't exist directly in the new implementation
// We'll implement a simple fallback
if (!query || query.trim().length === 0) {
return {
originalQuery: query,
subQueries: [],
status: 'pending',
complexity: 0
};
}
const answeredCount = decomposedQuery.subQueries.filter(sq => sq.isAnswered).length;
const totalCount = decomposedQuery.subQueries.length;
// Create a baseline decomposed query
let subQueries = [];
let status = `Progress: ${answeredCount}/${totalCount} sub-queries answered\n\n`;
// For compatibility, we'll use the basic SubQuery generation
// This avoids the async LLM call which would break the sync interface
const mainSubQuery = {
id: `sq_${Date.now()}_sync_0`,
text: query,
reason: "Main question (for direct matching)",
isAnswered: false
};
for (const sq of decomposedQuery.subQueries) {
status += `${sq.isAnswered ? '✓' : '○'} ${sq.text}\n`;
if (sq.isAnswered && sq.answer) {
status += `Answer: ${sq.answer.substring(0, 100)}${sq.answer.length > 100 ? '...' : ''}\n`;
}
status += '\n';
subQueries.push(mainSubQuery);
// Add a generic exploration query for context
const genericQuery = {
id: `sq_${Date.now()}_sync_1`,
text: `What information is related to ${query}?`,
reason: "General exploration to find related content",
isAnswered: false
};
subQueries.push(genericQuery);
// Simplified implementation that doesn't require async/LLM calls
return {
originalQuery: query,
subQueries: subQueries,
status: 'pending',
complexity
};
}
return status;
}
/**
* Update a sub-query with its answer
*
* @param decomposedQuery The decomposed query object
* @param subQueryId The ID of the sub-query to update
* @param answer The answer to the sub-query
* @returns The updated decomposed query
*/
updateSubQueryAnswer(
decomposedQuery: DecomposedQuery,
subQueryId: string,
answer: string
): DecomposedQuery {
log.info('Using compatibility layer for QueryDecompositionTool.updateSubQueryAnswer');
return queryProcessor.updateSubQueryAnswer(decomposedQuery, subQueryId, answer);
}
/**
* Assess the complexity of a query on a scale of 1-10
*
* @param query The query to assess
* @returns A complexity score from 1-10
*/
assessQueryComplexity(query: string): number {
log.info('Using compatibility layer for QueryDecompositionTool.assessQueryComplexity');
return queryProcessor.assessQueryComplexity(query);
}
/**
* Synthesize all sub-query answers into a comprehensive response
*
* @param decomposedQuery The decomposed query with all sub-queries answered
* @returns A synthesized answer to the original query
*/
synthesizeAnswer(decomposedQuery: DecomposedQuery): string {
log.info('Using compatibility layer for QueryDecompositionTool.synthesizeAnswer');
return queryProcessor.synthesizeAnswer(decomposedQuery);
}
/**
* Generate a status report on the progress of answering a complex query
*
* @param decomposedQuery The decomposed query
* @returns A status report string
*/
getQueryStatus(decomposedQuery: DecomposedQuery): string {
log.info('Using compatibility layer for QueryDecompositionTool.getQueryStatus');
// This method doesn't exist directly in the new implementation
// We'll implement a simple fallback
const answeredCount = decomposedQuery.subQueries.filter(sq => sq.isAnswered).length;
const totalCount = decomposedQuery.subQueries.length;
let status = `Progress: ${answeredCount}/${totalCount} sub-queries answered\n\n`;
for (const sq of decomposedQuery.subQueries) {
status += `${sq.isAnswered ? '✓' : '○'} ${sq.text}\n`;
if (sq.isAnswered && sq.answer) {
status += `Answer: ${sq.answer.substring(0, 100)}${sq.answer.length > 100 ? '...' : ''}\n`;
}
status += '\n';
}
return status;
}
/**
* Assess the complexity of a query on a scale of 1-10
*
* @param query The query to assess
* @returns A complexity score from 1-10
*/
assessQueryComplexity(query: string): number {
log.info('Using compatibility layer for QueryDecompositionTool.assessQueryComplexity');
return queryProcessor.assessQueryComplexity(query);
}
}
// Export default instance for compatibility

View File

@ -19,13 +19,6 @@ export interface LLMServiceInterface {
stream?: boolean;
systemPrompt?: string;
}): Promise<ChatResponse>;
/**
* Generate search queries by decomposing a complex query into simpler ones
* @param query The original user query to decompose
* @returns An array of decomposed search queries
*/
generateSearchQueries?(query: string): Promise<string[]>;
}
/**

View File

@ -168,16 +168,28 @@ export class ChatPipeline {
log.info(`========== STAGE 2: QUERY DECOMPOSITION ==========`);
log.info('Performing query decomposition to generate effective search queries');
const llmService = await this.getLLMService();
let searchQueries = [userQuery]; // Default to original query
let searchQueries = [userQuery];
if (llmService && llmService.generateSearchQueries) {
if (llmService) {
try {
const decompositionResult = await llmService.generateSearchQueries(userQuery);
if (decompositionResult && decompositionResult.length > 0) {
searchQueries = decompositionResult;
log.info(`Generated ${searchQueries.length} search queries: ${JSON.stringify(searchQueries)}`);
// Import the query processor and use its decomposeQuery method
const queryProcessor = (await import('../context/services/query_processor.js')).default;
// Use the enhanced query processor with the LLM service
const decomposedQuery = await queryProcessor.decomposeQuery(userQuery, undefined, llmService);
if (decomposedQuery && decomposedQuery.subQueries && decomposedQuery.subQueries.length > 0) {
// Extract search queries from the decomposed query
searchQueries = decomposedQuery.subQueries.map(sq => sq.text);
// Always include the original query if it's not already included
if (!searchQueries.includes(userQuery)) {
searchQueries.unshift(userQuery);
}
log.info(`Query decomposed with complexity ${decomposedQuery.complexity}/10 into ${searchQueries.length} search queries`);
} else {
log.info('Query decomposition returned no results, using original query');
log.info('Query decomposition returned no sub-queries, using original query');
}
} catch (error: any) {
log.error(`Error in query decomposition: ${error.message || String(error)}`);