Mirror of https://github.com/TriliumNext/Notes.git (synced 2025-08-09 09:42:28 +08:00)
it errors, but works
This commit is contained in:
parent cf0e9242a0
commit ef6ecdc42d
@@ -133,8 +133,20 @@ export default class LlmChatPanel extends BasicWidget {
         try {
             const useAdvancedContext = this.useAdvancedContextCheckbox.checked;

-            // Setup streaming
-            const source = new EventSource(`./api/llm/messages?sessionId=${this.sessionId}&format=stream`);
+            // Create the message parameters
+            const messageParams = {
+                content,
+                contextNoteId: this.currentNoteId,
+                useAdvancedContext
+            };
+
+            // First, send the message via POST request
+            await server.post<any>(`llm/sessions/${this.sessionId}/messages`, messageParams);
+
+            // Then set up streaming via EventSource
+            const streamUrl = `./api/llm/sessions/${this.sessionId}/messages?format=stream&useAdvancedContext=${useAdvancedContext}`;
+            const source = new EventSource(streamUrl);

             let assistantResponse = '';

             // Handle streaming response
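For reference, a minimal sketch of how a client can consume this stream, assuming the `data: {"content": ...}` chunks and `data: [DONE]` sentinel emitted by the server hunks further down; the handler wiring is illustrative, not the widget's actual code:

    const source = new EventSource(streamUrl);
    let assistantResponse = '';

    source.onmessage = (event: MessageEvent) => {
        if (event.data === '[DONE]') {
            // Server signalled the end of the stream; stop listening.
            source.close();
            return;
        }
        const payload = JSON.parse(event.data) as { content?: string };
        if (payload.content) {
            assistantResponse += payload.content; // accumulate the partial answer
        }
    };

    source.onerror = () => {
        // EventSource auto-reconnects by default; close explicitly on error here.
        source.close();
    };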
@@ -171,18 +183,6 @@ export default class LlmChatPanel extends BasicWidget {
                 toastService.showError('Error connecting to the LLM service. Please try again.');
             };

-            // Send the actual message
-            const response = await server.post<any>('llm/messages', {
-                sessionId: this.sessionId,
-                content,
-                contextNoteId: this.currentNoteId,
-                useAdvancedContext
-            });
-
-            // Handle sources if returned in non-streaming response
-            if (response && response.sources && response.sources.length > 0) {
-                this.showSources(response.sources);
-            }
         } catch (error) {
             this.hideLoadingIndicator();
             toastService.showError('Error sending message: ' + (error as Error).message);
@@ -449,26 +449,57 @@ Now, based on the above notes, please answer: ${query}`;
  */
 async function sendMessage(req: Request, res: Response) {
     try {
-        // Extract the content from the request body
-        const { content, sessionId, useAdvancedContext = false } = req.body || {};
+        // Extract parameters differently based on the request method
+        let content, useAdvancedContext, sessionId;

-        // Validate the content
-        if (!content || typeof content !== 'string' || content.trim().length === 0) {
+        if (req.method === 'POST') {
+            // For POST requests, get content from the request body
+            const requestBody = req.body || {};
+            content = requestBody.content;
+            useAdvancedContext = requestBody.useAdvancedContext || false;
+        } else if (req.method === 'GET') {
+            // For GET (streaming) requests, get format from query params
+            // The content should have been sent in a previous POST request
+            useAdvancedContext = req.query.useAdvancedContext === 'true';
+            content = ''; // We don't need content for GET requests
+        }
+
+        // Get sessionId from URL params since it's part of the route
+        sessionId = req.params.sessionId;
+
+        // Get the Accept header once at the start
+        const acceptHeader = req.get('Accept');
+        const isStreamingRequest = acceptHeader && acceptHeader.includes('text/event-stream');
+
+        // For GET requests, ensure we have the format=stream parameter
+        if (req.method === 'GET' && (!req.query.format || req.query.format !== 'stream')) {
+            throw new Error('Stream format parameter is required for GET requests');
+        }
+
+        // For POST requests, validate the content
+        if (req.method === 'POST' && (!content || typeof content !== 'string' || content.trim().length === 0)) {
             throw new Error('Content cannot be empty');
         }

-        // Get or create the session
-        let session: ChatSession;
-        if (sessionId && sessions.has(sessionId)) {
-            session = sessions.get(sessionId)!;
-            session.lastActive = new Date();
-        } else {
-            const result = await createSession(req, res);
-            if (!result?.id) {
-                throw new Error('Failed to create a new session');
-            }
-            session = sessions.get(result.id)!;
+        // Get session
+        if (!sessionId || !sessions.has(sessionId)) {
+            throw new Error('Session not found');
+        }
+
+        const session = sessions.get(sessionId)!;
+        session.lastActive = new Date();
+
+        // For POST requests, store the user message
+        if (req.method === 'POST' && content) {
+            // Add message to session
+            session.messages.push({
+                role: 'user',
+                content,
+                timestamp: new Date()
+            });
+
+            // Log a preview of the message
+            log.info(`Processing LLM message: "${content.substring(0, 50)}${content.length > 50 ? '...' : ''}"`);
         }

         // Check if AI services are available
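Taken together with the client hunk above, the protocol is now two-step: a POST stores the message in the session, and a follow-up GET opens the SSE channel (the browser's EventSource API can only issue GET requests, which is why the content has to travel in the POST). A sketch of the flow with plain fetch, assuming the default `/api` prefix:

    // 1. POST stores the user message in the session (no streaming yet).
    await fetch(`/api/llm/sessions/${sessionId}/messages`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ content: 'What do my notes say about X?', useAdvancedContext: true })
    });

    // 2. GET with format=stream opens the SSE channel that streams the answer back.
    const source = new EventSource(
        `/api/llm/sessions/${sessionId}/messages?format=stream&useAdvancedContext=true`
    );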
@@ -495,184 +526,225 @@ async function sendMessage(req: Request, res: Response) {
             throw new Error('No AI service is available');
         }

-        // Create user message
-        const userMessage: Message = {
-            role: 'user',
-            content
-        };
-
-        // Add message to session
-        session.messages.push({
-            role: 'user',
-            content,
-            timestamp: new Date()
-        });
-
-        // Log a preview of the message
-        log.info(`Processing LLM message: "${content.substring(0, 50)}${content.length > 50 ? '...' : ''}"`);
-
         // Information to return to the client
         let aiResponse = '';
         let sourceNotes: NoteSource[] = [];

-        // If Advanced Context is enabled, we use the improved method
-        if (useAdvancedContext) {
-            // Use the Trilium-specific approach
-            const contextNoteId = session.noteContext || null;
-            const results = await triliumContextService.processQuery(content, service, contextNoteId);
-
-            // Get the generated context
-            const context = results.context;
-            sourceNotes = results.notes;
-
-            // Add system message with the context
-            const contextMessage: Message = {
-                role: 'system',
-                content: context
-            };
-
-            // Format all messages for the AI
-            const aiMessages: Message[] = [
-                contextMessage,
-                ...session.messages.slice(-10).map(msg => ({
-                    role: msg.role,
-                    content: msg.content
-                }))
-            ];
-
-            // Configure chat options from session metadata
-            const chatOptions: ChatCompletionOptions = {
-                temperature: session.metadata.temperature || 0.7,
-                maxTokens: session.metadata.maxTokens,
-                model: session.metadata.model
-                // 'provider' property has been removed as it's not in the ChatCompletionOptions type
-            };
-
-            // Get streaming response if requested
-            const acceptHeader = req.get('Accept');
-            if (acceptHeader && acceptHeader.includes('text/event-stream')) {
-                res.setHeader('Content-Type', 'text/event-stream');
-                res.setHeader('Cache-Control', 'no-cache');
-                res.setHeader('Connection', 'keep-alive');
-
-                let messageContent = '';
-
-                // Stream the response
-                await service.sendChatCompletion(
-                    aiMessages,
-                    chatOptions,
-                    (chunk: string) => {
-                        messageContent += chunk;
-                        res.write(`data: ${JSON.stringify({ content: chunk })}\n\n`);
-                    }
-                );
-
-                // Close the stream
-                res.write('data: [DONE]\n\n');
-                res.end();
-
-                // Store the full response
-                aiResponse = messageContent;
-            } else {
-                // Non-streaming approach
-                aiResponse = await service.sendChatCompletion(aiMessages, chatOptions);
-            }
-        } else {
-            // Original approach - find relevant notes through direct embedding comparison
-            const relevantNotes = await findRelevantNotes(
-                content,
-                session.noteContext || null,
-                5
-            );
-
-            sourceNotes = relevantNotes;
-
-            // Build context from relevant notes
-            const context = buildContextFromNotes(relevantNotes, content);
-
-            // Add system message with the context
-            const contextMessage: Message = {
-                role: 'system',
-                content: context
-            };
-
-            // Format all messages for the AI
-            const aiMessages: Message[] = [
-                contextMessage,
-                ...session.messages.slice(-10).map(msg => ({
-                    role: msg.role,
-                    content: msg.content
-                }))
-            ];
-
-            // Configure chat options from session metadata
-            const chatOptions: ChatCompletionOptions = {
-                temperature: session.metadata.temperature || 0.7,
-                maxTokens: session.metadata.maxTokens,
-                model: session.metadata.model
-                // 'provider' property has been removed as it's not in the ChatCompletionOptions type
-            };
-
-            // Get streaming response if requested
-            const acceptHeader = req.get('Accept');
-            if (acceptHeader && acceptHeader.includes('text/event-stream')) {
-                res.setHeader('Content-Type', 'text/event-stream');
-                res.setHeader('Cache-Control', 'no-cache');
-                res.setHeader('Connection', 'keep-alive');
-
-                let messageContent = '';
-
-                // Stream the response
-                await service.sendChatCompletion(
-                    aiMessages,
-                    chatOptions,
-                    (chunk: string) => {
-                        messageContent += chunk;
-                        res.write(`data: ${JSON.stringify({ content: chunk })}\n\n`);
-                    }
-                );
-
-                // Close the stream
-                res.write('data: [DONE]\n\n');
-                res.end();
-
-                // Store the full response
-                aiResponse = messageContent;
-            } else {
-                // Non-streaming approach
-                aiResponse = await service.sendChatCompletion(aiMessages, chatOptions);
-            }
-        }
-
-        // Only store the assistant's message if we're not streaming (otherwise we already did)
-        const acceptHeader = req.get('Accept');
-        if (!acceptHeader || !acceptHeader.includes('text/event-stream')) {
-            // Store the assistant's response in the session
-            session.messages.push({
-                role: 'assistant',
-                content: aiResponse,
-                timestamp: new Date()
-            });
-
-            // Return the response
-            return {
-                content: aiResponse,
-                sources: sourceNotes.map(note => ({
-                    noteId: note.noteId,
-                    title: note.title,
-                    similarity: note.similarity,
-                    branchId: note.branchId
-                }))
-            };
-        } else {
-            // For streaming responses, we've already sent the data
-            // But we still need to add the message to the session
-            session.messages.push({
-                role: 'assistant',
-                content: aiResponse,
-                timestamp: new Date()
-            });
-        }
+        // For POST requests, we need to process the message
+        // For GET (streaming) requests, we use the latest user message from the session
+        if (req.method === 'POST' || isStreamingRequest) {
+            // Get the latest user message for context
+            const latestUserMessage = session.messages
+                .filter(msg => msg.role === 'user')
+                .pop();
+
+            if (!latestUserMessage && req.method === 'GET') {
+                throw new Error('No user message found in session');
+            }
+
+            // Use the latest message content for GET requests
+            const messageContent = req.method === 'POST' ? content : latestUserMessage!.content;
+
+            // If Advanced Context is enabled, we use the improved method
+            if (useAdvancedContext) {
+                // Use the Trilium-specific approach
+                const contextNoteId = session.noteContext || null;
+                const results = await triliumContextService.processQuery(messageContent, service, contextNoteId);
+
+                // Get the generated context
+                const context = results.context;
+                sourceNotes = results.notes;
+
+                // Add system message with the context
+                const contextMessage: Message = {
+                    role: 'system',
+                    content: context
+                };
+
+                // Format all messages for the AI
+                const aiMessages: Message[] = [
+                    contextMessage,
+                    ...session.messages.slice(-10).map(msg => ({
+                        role: msg.role,
+                        content: msg.content
+                    }))
+                ];
+
+                // Configure chat options from session metadata
+                const chatOptions: ChatCompletionOptions = {
+                    temperature: session.metadata.temperature || 0.7,
+                    maxTokens: session.metadata.maxTokens,
+                    model: session.metadata.model,
+                    stream: isStreamingRequest ? true : undefined
+                };
+
+                // Process based on whether this is a streaming request
+                if (isStreamingRequest) {
+                    res.setHeader('Content-Type', 'text/event-stream');
+                    res.setHeader('Cache-Control', 'no-cache');
+                    res.setHeader('Connection', 'keep-alive');
+
+                    let messageContent = '';
+
+                    // Use the correct method name: generateChatCompletion
+                    const response = await service.generateChatCompletion(aiMessages, chatOptions);
+
+                    // Handle streaming if the response includes a stream method
+                    if (response.stream) {
+                        await response.stream((chunk: { text: string; done: boolean }) => {
+                            if (chunk.text) {
+                                messageContent += chunk.text;
+                                res.write(`data: ${JSON.stringify({ content: chunk.text })}\n\n`);
+                            }
+
+                            if (chunk.done) {
+                                // Signal the end of the stream when done
+                                res.write('data: [DONE]\n\n');
+                                res.end();
+                            }
+                        });
+                    } else {
+                        // If no streaming available, send the response as a single chunk
+                        messageContent = response.text;
+                        res.write(`data: ${JSON.stringify({ content: messageContent })}\n\n`);
+                        res.write('data: [DONE]\n\n');
+                        res.end();
+                    }
+
+                    // Store the full response for the session
+                    aiResponse = messageContent;
+
+                    // Store the assistant's response in the session
+                    session.messages.push({
+                        role: 'assistant',
+                        content: aiResponse,
+                        timestamp: new Date()
+                    });
+                } else {
+                    // Non-streaming approach for POST requests
+                    const response = await service.generateChatCompletion(aiMessages, chatOptions);
+                    aiResponse = response.text; // Extract the text from the response
+
+                    // Store the assistant's response in the session
+                    session.messages.push({
+                        role: 'assistant',
+                        content: aiResponse,
+                        timestamp: new Date()
+                    });
+
+                    // Return the response for POST requests
+                    return {
+                        content: aiResponse,
+                        sources: sourceNotes.map(note => ({
+                            noteId: note.noteId,
+                            title: note.title,
+                            similarity: note.similarity,
+                            branchId: note.branchId
+                        }))
+                    };
+                }
+            } else {
+                // Original approach - find relevant notes through direct embedding comparison
+                const relevantNotes = await findRelevantNotes(
+                    content,
+                    session.noteContext || null,
+                    5
+                );
+
+                sourceNotes = relevantNotes;
+
+                // Build context from relevant notes
+                const context = buildContextFromNotes(relevantNotes, content);
+
+                // Add system message with the context
+                const contextMessage: Message = {
+                    role: 'system',
+                    content: context
+                };
+
+                // Format all messages for the AI
+                const aiMessages: Message[] = [
+                    contextMessage,
+                    ...session.messages.slice(-10).map(msg => ({
+                        role: msg.role,
+                        content: msg.content
+                    }))
+                ];
+
+                // Configure chat options from session metadata
+                const chatOptions: ChatCompletionOptions = {
+                    temperature: session.metadata.temperature || 0.7,
+                    maxTokens: session.metadata.maxTokens,
+                    model: session.metadata.model,
+                    stream: isStreamingRequest ? true : undefined
+                };
+
+                if (isStreamingRequest) {
+                    res.setHeader('Content-Type', 'text/event-stream');
+                    res.setHeader('Cache-Control', 'no-cache');
+                    res.setHeader('Connection', 'keep-alive');
+
+                    let messageContent = '';
+
+                    // Use the correct method name: generateChatCompletion
+                    const response = await service.generateChatCompletion(aiMessages, chatOptions);
+
+                    // Handle streaming if the response includes a stream method
+                    if (response.stream) {
+                        await response.stream((chunk: { text: string; done: boolean }) => {
+                            if (chunk.text) {
+                                messageContent += chunk.text;
+                                res.write(`data: ${JSON.stringify({ content: chunk.text })}\n\n`);
+                            }
+
+                            if (chunk.done) {
+                                // Signal the end of the stream when done
+                                res.write('data: [DONE]\n\n');
+                                res.end();
+                            }
+                        });
+                    } else {
+                        // If no streaming available, send the response as a single chunk
+                        messageContent = response.text;
+                        res.write(`data: ${JSON.stringify({ content: messageContent })}\n\n`);
+                        res.write('data: [DONE]\n\n');
+                        res.end();
+                    }
+
+                    // Store the full response for the session
+                    aiResponse = messageContent;
+
+                    // Store the assistant's response in the session
+                    session.messages.push({
+                        role: 'assistant',
+                        content: aiResponse,
+                        timestamp: new Date()
+                    });
+                } else {
+                    // Non-streaming approach for POST requests
+                    const response = await service.generateChatCompletion(aiMessages, chatOptions);
+                    aiResponse = response.text; // Extract the text from the response
+
+                    // Store the assistant's response in the session
+                    session.messages.push({
+                        role: 'assistant',
+                        content: aiResponse,
+                        timestamp: new Date()
+                    });
+
+                    // Return the response for POST requests
+                    return {
+                        content: aiResponse,
+                        sources: sourceNotes.map(note => ({
+                            noteId: note.noteId,
+                            title: note.title,
+                            similarity: note.similarity,
+                            branchId: note.branchId
+                        }))
+                    };
+                }
+            }
+        }
     } catch (error: any) {
         log.error(`Error sending message to LLM: ${error.message}`);
         throw new Error(`Failed to send message: ${error.message}`);
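The handler above leans on a response object that exposes an optional `stream` method feeding `{ text, done }` chunks. Roughly, the contract it assumes looks like this (a sketch; the project's actual response type may differ):

    interface ChatResponse {
        // Full text of the completion, used directly on the non-streaming path.
        text: string;
        // Optional: present when the provider supports streaming; invokes the
        // callback once per chunk and resolves after the final (done) chunk.
        stream?: (callback: (chunk: { text: string; done: boolean }) => void) => Promise<void>;
    }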
@@ -387,6 +387,7 @@ function register(app: express.Application) {
     apiRoute(PATCH, "/api/llm/sessions/:sessionId", llmRoute.updateSession);
     apiRoute(DEL, "/api/llm/sessions/:sessionId", llmRoute.deleteSession);
     apiRoute(PST, "/api/llm/sessions/:sessionId/messages", llmRoute.sendMessage);
+    route(GET, "/api/llm/sessions/:sessionId/messages", [auth.checkApiAuth, csrfMiddleware], llmRoute.sendMessage, apiResultHandler);

     // Ollama API endpoints
     route(PST, "/api/ollama/list-models", [auth.checkApiAuth, csrfMiddleware], ollamaRoute.listModels, apiResultHandler);
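The extra GET registration exists because the browser's EventSource API can only issue GET requests: the POST route accepts the message, while the GET route reaches the same sendMessage handler for the streaming leg.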
@@ -40,7 +40,20 @@ export class AIServiceManager {

         if (customOrder) {
             try {
-                const parsed = JSON.parse(customOrder);
+                // Try to parse as JSON first
+                let parsed;
+
+                // Handle both array in JSON format and simple string format
+                if (customOrder.startsWith('[') && customOrder.endsWith(']')) {
+                    parsed = JSON.parse(customOrder);
+                } else if (typeof customOrder === 'string') {
+                    // If it's a simple string (like "ollama"), convert to single-item array
+                    parsed = [customOrder];
+                } else {
+                    // Fallback to default
+                    parsed = defaultOrder;
+                }
+
                 // Validate that all providers are valid
                 if (Array.isArray(parsed) &&
                     parsed.every(p => Object.keys(this.services).includes(p))) {
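The practical effect of the new parsing, sketched with assumed option values (previously `JSON.parse('ollama')` would throw, so a plain-string setting was silently discarded):

    // '["openai","ollama"]' -> bracketed, so JSON.parse -> ['openai', 'ollama']
    // 'ollama'              -> plain string, wrapped    -> ['ollama']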
@@ -102,12 +102,13 @@ Example: ["exact topic mentioned", "related concept 1", "related concept 2"]`;
             maxTokens: 300
         };

-        // Get the response from the LLM
-        const response = await llmService.sendTextCompletion(messages, options);
+        // Get the response from the LLM using the correct method name
+        const response = await llmService.generateChatCompletion(messages, options);
+        const responseText = response.text; // Extract the text from the response object

         try {
             // Parse the JSON response
-            const jsonStr = response.trim().replace(/```json|```/g, '').trim();
+            const jsonStr = responseText.trim().replace(/```json|```/g, '').trim();
             const queries = JSON.parse(jsonStr);

             if (Array.isArray(queries) && queries.length > 0) {
@@ -117,7 +118,7 @@ Example: ["exact topic mentioned", "related concept 1", "related concept 2"]`;
             }
         } catch (parseError) {
             // Fallback: if JSON parsing fails, try to extract queries line by line
-            const lines = response.split('\n')
+            const lines = responseText.split('\n')
                 .map((line: string) => line.trim())
                 .filter((line: string) => line.length > 0 && !line.startsWith('```'));

@@ -176,8 +177,8 @@ Example: ["exact topic mentioned", "related concept 1", "related concept 2"]`;

             // Process each query
             for (const query of queries) {
-                // Get embeddings for this query
-                const queryEmbedding = await this.provider.getEmbedding(query);
+                // Get embeddings for this query using the correct method name
+                const queryEmbedding = await this.provider.generateEmbeddings(query);

                 // Find notes similar to this query
                 let results;
@@ -192,8 +193,8 @@ Example: ["exact topic mentioned", "related concept 1", "related concept 2"]`;
                     // Search all notes
                     results = await vectorStore.findSimilarNotes(
                         queryEmbedding,
-                        this.provider.id,
-                        this.provider.modelId,
+                        this.provider.name, // Use name property instead of id
+                        this.provider.getConfig().model, // Use getConfig().model instead of modelId
                         Math.min(limit, 5), // Limit per query
                         0.5 // Lower threshold to get more diverse results
                     );
@@ -265,8 +266,8 @@ Example: ["exact topic mentioned", "related concept 1", "related concept 2"]`;
             for (const noteId of subtreeNoteIds) {
                 const noteEmbedding = await vectorStore.getEmbeddingForNote(
                     noteId,
-                    this.provider.id,
-                    this.provider.modelId
+                    this.provider.name, // Use name property instead of id
+                    this.provider.getConfig().model // Use getConfig().model instead of modelId
                 );

                 if (noteEmbedding) {
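These two hunks assume a provider surface along these lines — a sketch for orientation, not the project's actual interface:

    interface EmbeddingProvider {
        name: string;                                         // e.g. "ollama"; replaces the old `id`
        getConfig(): { model: string };                       // replaces the old `modelId` field
        generateEmbeddings(text: string): Promise<number[]>;  // replaces getEmbedding()
    }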
@@ -338,7 +339,10 @@ Example: ["exact topic mentioned", "related concept 1", "related concept 2"]`;
      */
     buildContextFromNotes(sources: any[], query: string): string {
         if (!sources || sources.length === 0) {
-            return "";
+            // Return a default context instead of empty string
+            return "I am an AI assistant helping you with your Trilium notes. " +
+                "I couldn't find any specific notes related to your query, but I'll try to assist you " +
+                "with general knowledge about Trilium or other topics you're interested in.";
         }

         let context = `The following are relevant notes from your knowledge base that may help answer the query: "${query}"\n\n`;
@@ -382,28 +386,62 @@ Example: ["exact topic mentioned", "related concept 1", "related concept 2"]`;
      */
     async processQuery(userQuestion: string, llmService: any, contextNoteId: string | null = null) {
         if (!this.initialized) {
-            await this.initialize();
+            try {
+                await this.initialize();
+            } catch (error) {
+                log.error(`Failed to initialize TriliumContextService: ${error}`);
+                // Return a fallback response if initialization fails
+                return {
+                    context: "I am an AI assistant helping you with your Trilium notes. " +
+                        "I'll try to assist you with general knowledge about your query.",
+                    notes: [],
+                    queries: [userQuestion]
+                };
+            }
         }

-        // Step 1: Generate search queries
-        const searchQueries = await this.generateSearchQueries(userQuestion, llmService);
-        log.info(`Generated search queries: ${JSON.stringify(searchQueries)}`);
+        try {
+            // Step 1: Generate search queries
+            let searchQueries: string[];
+            try {
+                searchQueries = await this.generateSearchQueries(userQuestion, llmService);
+            } catch (error) {
+                log.error(`Error generating search queries, using fallback: ${error}`);
+                searchQueries = [userQuestion]; // Fallback to using the original question
+            }
+            log.info(`Generated search queries: ${JSON.stringify(searchQueries)}`);

-        // Step 2: Find relevant notes using those queries
-        const relevantNotes = await this.findRelevantNotesMultiQuery(
-            searchQueries,
-            contextNoteId,
-            8 // Get more notes since we're using multiple queries
-        );
+            // Step 2: Find relevant notes using those queries
+            let relevantNotes: any[] = [];
+            try {
+                relevantNotes = await this.findRelevantNotesMultiQuery(
+                    searchQueries,
+                    contextNoteId,
+                    8 // Get more notes since we're using multiple queries
+                );
+            } catch (error) {
+                log.error(`Error finding relevant notes: ${error}`);
+                // Continue with empty notes list
+            }

-        // Step 3: Build context from the notes
-        const context = this.buildContextFromNotes(relevantNotes, userQuestion);
+            // Step 3: Build context from the notes
+            const context = this.buildContextFromNotes(relevantNotes, userQuestion);

-        return {
-            context,
-            notes: relevantNotes,
-            queries: searchQueries
-        };
+            return {
+                context,
+                notes: relevantNotes,
+                queries: searchQueries
+            };
+        } catch (error) {
+            log.error(`Error in processQuery: ${error}`);
+            // Return a fallback response if anything fails
+            return {
+                context: "I am an AI assistant helping you with your Trilium notes. " +
+                    "I encountered an error while processing your query, but I'll try to assist you anyway.",
+                notes: [],
+                queries: [userQuestion]
+            };
+        }
     }
 }