refactor(llm): simplify chat handling by removing session store and directly integrating chat storage service

perf3ct 2025-06-02 22:09:59 +00:00
parent d8d5318ace
commit ed64a5b4f7
No known key found for this signature in database
GPG Key ID: 569C4EEC436F5232
5 changed files with 151 additions and 653 deletions
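In rough terms, the change swaps the in-memory session map for direct reads and writes against the chat note. A hedged before/after sketch of the pattern, using only the method names that appear in the diffs below (the function names and exact signatures here are illustrative, not taken from the codebase):

// Before (removed in this commit): state lived in an in-memory map keyed by chatNoteId.
// import SessionsStore from './sessions_store.js';
function recordUserMessageBefore(chatNoteId: string, content: string) {
    const session = SessionsStore.getSession(chatNoteId)
        ?? SessionsStore.createSession({ chatNoteId });
    session.messages.push({ role: 'user', content, timestamp: new Date() });
}

// After: the chat note is the single source of truth and every mutation is written back.
// import chatStorageService from '../chat_storage_service.js';
async function recordUserMessageAfter(chatNoteId: string, content: string) {
    let chat = await chatStorageService.getChat(chatNoteId);
    if (!chat) {
        chat = await chatStorageService.createChat('New Chat');
    }
    chat.messages.push({ role: 'user', content });
    await chatStorageService.updateChat(chat.id, chat.messages, chat.title);
}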

View File

@@ -5,7 +5,6 @@ import options from "../../services/options.js";
// Import the index service for knowledge base management
import indexService from "../../services/llm/index_service.js";
import restChatService from "../../services/llm/rest_chat_service.js";
import chatService from '../../services/llm/chat_service.js';
import chatStorageService from '../../services/llm/chat_storage_service.js';
// Define basic interfaces
@@ -190,23 +189,26 @@ async function getSession(req: Request, res: Response) {
* tags: ["llm"]
*/
async function updateSession(req: Request, res: Response) {
// Get the chat using ChatService
// Get the chat using chatStorageService directly
const chatNoteId = req.params.chatNoteId;
const updates = req.body;
try {
// Get the chat
const session = await chatService.getOrCreateSession(chatNoteId);
const chat = await chatStorageService.getChat(chatNoteId);
if (!chat) {
throw new Error(`Chat with ID ${chatNoteId} not found`);
}
// Update title if provided
if (updates.title) {
await chatStorageService.updateChat(chatNoteId, session.messages, updates.title);
await chatStorageService.updateChat(chatNoteId, chat.messages, updates.title);
}
// Return the updated chat
return {
id: chatNoteId,
title: updates.title || session.title,
title: updates.title || chat.title,
updatedAt: new Date()
};
} catch (error) {
@@ -248,18 +250,18 @@ async function updateSession(req: Request, res: Response) {
* tags: ["llm"]
*/
async function listSessions(req: Request, res: Response) {
// Get all sessions using ChatService
// Get all sessions using chatStorageService directly
try {
const sessions = await chatService.getAllSessions();
const chats = await chatStorageService.getAllChats();
// Format the response
return {
sessions: sessions.map(session => ({
id: session.id,
title: session.title,
createdAt: new Date(), // Since we don't have this in chat sessions
lastActive: new Date(), // Since we don't have this in chat sessions
messageCount: session.messages.length
sessions: chats.map(chat => ({
id: chat.id,
title: chat.title,
createdAt: chat.createdAt || new Date(),
lastActive: chat.updatedAt || new Date(),
messageCount: chat.messages.length
}))
};
} catch (error) {
@@ -814,16 +816,14 @@ async function streamMessage(req: Request, res: Response) {
throw new Error('Content cannot be empty');
}
// Get or create session from Chat Note
// This will check the sessions store first, and if not found, create from the Chat Note
const session = await restChatService.getOrCreateSessionFromChatNote(chatNoteId, true);
if (!session) {
throw new Error('Chat not found and could not be created from note');
// Get or create chat directly from storage (simplified approach)
let chat = await chatStorageService.getChat(chatNoteId);
if (!chat) {
// Create a new chat if it doesn't exist
chat = await chatStorageService.createChat('New Chat');
log.info(`Created new chat with ID: ${chat.id} for stream request`);
}
// Update last active timestamp
session.lastActive = new Date();
// Process mentions if provided
let enhancedContent = content;
if (mentions && Array.isArray(mentions) && mentions.length > 0) {
@@ -858,13 +858,15 @@
}
}
// Add user message to the session (with enhanced content for processing)
session.messages.push({
// Add user message to the chat (without timestamp since Message interface doesn't support it)
chat.messages.push({
role: 'user',
content: enhancedContent,
timestamp: new Date()
content: enhancedContent
});
// Save the updated chat
await chatStorageService.updateChat(chat.id, chat.messages, chat.title);
// Create request parameters for the pipeline
const requestParams = {
chatNoteId: chatNoteId,
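For orientation, the request fields this handler actually reads, written out as a sketch (field names come from the hunk above; the interface itself is not part of the codebase):

// Inferred request body for streamMessage; optionality is assumed.
interface StreamMessageBodySketch {
    content: string;        // rejected when empty
    mentions?: unknown[];   // merged into enhancedContent before the pipeline request is built
}

The chat is loaded or created on demand, the user message is saved with chatStorageService.updateChat(), and only then is requestParams assembled for the pipeline.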

View File

@@ -3,7 +3,6 @@
*/
import log from "../../../log.js";
import type { Message } from "../../ai_interface.js";
import SessionsStore from "../sessions_store.js";
/**
* Handles the execution of LLM tools
@@ -101,11 +100,6 @@ export class ToolHandler {
: JSON.stringify(result).substring(0, 100) + '...';
log.info(`Tool result: ${resultPreview}`);
// Record tool execution in session if chatNoteId is provided
if (chatNoteId) {
SessionsStore.recordToolExecution(chatNoteId, toolCall, typeof result === 'string' ? result : JSON.stringify(result));
}
// Format result as a proper message
return {
role: 'tool',
@@ -116,11 +110,6 @@
} catch (error: any) {
log.error(`Error executing tool ${toolCall.function.name}: ${error.message}`);
// Record error in session if chatNoteId is provided
if (chatNoteId) {
SessionsStore.recordToolExecution(chatNoteId, toolCall, '', error.message);
}
// Return error as tool result
return {
role: 'tool',
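With SessionsStore.recordToolExecution gone, tool output exists only as ordinary 'tool' messages in the conversation. A hedged sketch of how such a message would now be persisted (the helper name and chat shape are illustrative; the storage call mirrors the other diffs in this commit):

// import chatStorageService from '../chat_storage_service.js';
async function persistToolResult(
    chat: { id: string; title: string; messages: Array<{ role: string; content: string }> },
    result: unknown
) {
    // Keep the same string/JSON handling the tool handler uses for its result preview.
    chat.messages.push({
        role: 'tool',
        content: typeof result === 'string' ? result : JSON.stringify(result)
    });
    await chatStorageService.updateChat(chat.id, chat.messages, chat.title);
}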

View File

@@ -2,7 +2,6 @@
* Chat module export
*/
import restChatService from './rest_chat_service.js';
import sessionsStore from './sessions_store.js';
import { ContextHandler } from './handlers/context_handler.js';
import { ToolHandler } from './handlers/tool_handler.js';
import { StreamHandler } from './handlers/stream_handler.js';
@@ -13,7 +12,6 @@ import type { LLMStreamMessage } from '../interfaces/chat_ws_messages.js';
// Export components
export {
restChatService as default,
sessionsStore,
ContextHandler,
ToolHandler,
StreamHandler,

View File

@@ -1,5 +1,6 @@
/**
* Service to handle chat API interactions
* Simplified service to handle chat API interactions
* Works directly with ChatStorageService - no complex session management
*/
import log from "../../log.js";
import type { Request, Response } from "express";
@@ -8,27 +9,16 @@ import { AIServiceManager } from "../ai_service_manager.js";
import { ChatPipeline } from "../pipeline/chat_pipeline.js";
import type { ChatPipelineInput } from "../pipeline/interfaces.js";
import options from "../../options.js";
import { SEARCH_CONSTANTS } from '../constants/search_constants.js';
// Import our refactored modules
import { ContextHandler } from "./handlers/context_handler.js";
import { ToolHandler } from "./handlers/tool_handler.js";
import { StreamHandler } from "./handlers/stream_handler.js";
import SessionsStore from "./sessions_store.js";
import * as MessageFormatter from "./utils/message_formatter.js";
import type { NoteSource } from "../interfaces/chat_session.js";
import type { LLMStreamMessage } from "../interfaces/chat_ws_messages.js";
import type { ChatMessage } from '../interfaces/chat_session.js';
import type { ChatSession } from '../interfaces/chat_session.js';
import chatStorageService from '../chat_storage_service.js';
import {
isAIEnabled,
getFirstValidModelConfig,
getDefaultModelForProvider,
getPreferredProvider
} from '../config/configuration_helpers.js';
/**
* Service to handle chat API interactions
* Simplified service to handle chat API interactions
*/
class RestChatService {
/**
@@ -47,35 +37,15 @@ class RestChatService {
* Check if AI services are available
*/
safelyUseAIManager(): boolean {
// Only use AI manager if database is initialized
if (!this.isDatabaseInitialized()) {
log.info("AI check failed: Database is not initialized");
return false;
}
// Try to access the manager - will create instance only if needed
try {
// Create local instance to avoid circular references
const aiManager = new AIServiceManager();
if (!aiManager) {
log.info("AI check failed: AI manager module is not available");
return false;
}
const isAvailable = aiManager.isAnyServiceAvailable();
log.info(`AI service availability check result: ${isAvailable}`);
if (isAvailable) {
// Additional diagnostics
try {
const providers = aiManager.getAvailableProviders();
log.info(`Available AI providers: ${providers.join(', ')}`);
} catch (err) {
log.info(`Could not get available providers: ${err}`);
}
}
return isAvailable;
} catch (error) {
log.error(`Error accessing AI service manager: ${error}`);
@@ -85,299 +55,163 @@ class RestChatService {
/**
* Handle a message sent to an LLM and get a response
* Simplified to work directly with chat storage
*/
async handleSendMessage(req: Request, res: Response) {
log.info("=== Starting handleSendMessage ===");
log.info("=== Starting simplified handleSendMessage ===");
try {
// Extract parameters differently based on the request method
// Extract parameters
let content, useAdvancedContext, showThinking, chatNoteId;
if (req.method === 'POST') {
// For POST requests, get content from the request body
const requestBody = req.body || {};
content = requestBody.content;
useAdvancedContext = requestBody.useAdvancedContext || false;
showThinking = requestBody.showThinking || false;
// Add logging for POST requests
log.info(`LLM POST message: chatNoteId=${req.params.chatNoteId}, useAdvancedContext=${useAdvancedContext}, showThinking=${showThinking}, contentLength=${content ? content.length : 0}`);
log.info(`LLM POST message: chatNoteId=${req.params.chatNoteId}, contentLength=${content ? content.length : 0}`);
} else if (req.method === 'GET') {
// For GET (streaming) requests, get parameters from query params and body
// For streaming requests, we need the content from the body
useAdvancedContext = req.query.useAdvancedContext === 'true' || (req.body && req.body.useAdvancedContext === true);
showThinking = req.query.showThinking === 'true' || (req.body && req.body.showThinking === true);
content = req.body && req.body.content ? req.body.content : '';
// Add detailed logging for GET requests
log.info(`LLM GET stream: chatNoteId=${req.params.chatNoteId}, useAdvancedContext=${useAdvancedContext}, showThinking=${showThinking}`);
log.info(`Parameters from query: useAdvancedContext=${req.query.useAdvancedContext}, showThinking=${req.query.showThinking}`);
log.info(`Parameters from body: useAdvancedContext=${req.body?.useAdvancedContext}, showThinking=${req.body?.showThinking}, content=${content ? `${content.substring(0, 20)}...` : 'none'}`);
log.info(`LLM GET stream: chatNoteId=${req.params.chatNoteId}`);
}
// Get chatNoteId from URL params
chatNoteId = req.params.chatNoteId;
// For GET requests, ensure we have the stream parameter
// Validate inputs
if (req.method === 'GET' && req.query.stream !== 'true') {
throw new Error('Stream parameter must be set to true for GET/streaming requests');
}
// For POST requests, validate the content
if (req.method === 'POST' && (!content || typeof content !== 'string' || content.trim().length === 0)) {
throw new Error('Content cannot be empty');
}
// Get or create session from Chat Note
let session = await this.getOrCreateSessionFromChatNote(chatNoteId, req.method === 'POST');
// Check if AI is enabled
const aiEnabled = await options.getOptionBool('aiEnabled');
if (!aiEnabled) {
return { error: "AI features are disabled. Please enable them in the settings." };
}
// If no session found and we're not allowed to create one (GET request)
if (!session && req.method === 'GET') {
if (!this.safelyUseAIManager()) {
return { error: "AI services are currently unavailable. Please check your configuration." };
}
// Load or create chat directly from storage
let chat = await chatStorageService.getChat(chatNoteId);
if (!chat && req.method === 'GET') {
throw new Error('Chat Note not found, cannot create session for streaming');
}
// For POST requests, if no Chat Note exists, create a new one
if (!session && req.method === 'POST') {
log.info(`No Chat Note found for ${chatNoteId}, creating a new Chat Note and session`);
// Use the new Chat Note's ID for the session
session = SessionsStore.createSession({
chatNoteId: chatNoteId
});
// Update the session ID to match the Chat Note ID
session.id = chatNoteId;
log.info(`Created new Chat Note and session with ID: ${session.id}`);
// Update the parameter to use the new ID
chatNoteId = session.id;
if (!chat && req.method === 'POST') {
log.info(`Creating new chat note with ID: ${chatNoteId}`);
chat = await chatStorageService.createChat('New Chat');
// Update the chat ID to match the requested ID if possible
// In practice, we'll use the generated ID
chatNoteId = chat.id;
}
// At this point, session should never be null
// TypeScript doesn't know this, so we'll add a check
if (!session) {
// This should never happen due to our logic above
throw new Error('Failed to create or retrieve session');
if (!chat) {
throw new Error('Failed to create or retrieve chat');
}
// Update session last active timestamp
SessionsStore.touchSession(session.id);
// For POST requests, store the user message
if (req.method === 'POST' && content && session) {
// Add message to session
session.messages.push({
// For POST requests, add the user message
if (req.method === 'POST' && content) {
chat.messages.push({
role: 'user',
content,
timestamp: new Date()
content
});
// Log a preview of the message
log.info(`Processing LLM message: "${content.substring(0, 50)}${content.length > 50 ? '...' : ''}"`);
}
// Check if AI services are enabled before proceeding
const aiEnabled = await options.getOptionBool('aiEnabled');
log.info(`AI enabled setting: ${aiEnabled}`);
if (!aiEnabled) {
log.info("AI services are disabled by configuration");
return {
error: "AI features are disabled. Please enable them in the settings."
};
}
// Check if AI services are available
log.info("Checking if AI services are available...");
if (!this.safelyUseAIManager()) {
log.info("AI services are not available - checking for specific issues");
try {
// Create a direct instance to avoid circular references
const aiManager = new AIServiceManager();
if (!aiManager) {
log.error("AI service manager is not initialized");
return {
error: "AI service is not properly initialized. Please check your configuration."
};
}
const availableProviders = aiManager.getAvailableProviders();
if (availableProviders.length === 0) {
log.error("No AI providers are available");
return {
error: "No AI providers are configured or available. Please check your AI settings."
};
}
} catch (err) {
log.error(`Detailed AI service check failed: ${err}`);
}
return {
error: "AI services are currently unavailable. Please check your configuration."
};
}
// Create direct instance to avoid circular references
const aiManager = new AIServiceManager();
// Get the default service - just use the first available one
const availableProviders = aiManager.getAvailableProviders();
if (availableProviders.length === 0) {
log.error("No AI providers are available after manager check");
return {
error: "No AI providers are configured or available. Please check your AI settings."
};
}
// Use the first available provider
const providerName = availableProviders[0];
log.info(`Using AI provider: ${providerName}`);
// We know the manager has a 'services' property from our code inspection,
// but TypeScript doesn't know that from the interface.
// This is a workaround to access it
const service = (aiManager as any).services[providerName];
if (!service) {
log.error(`AI service for provider ${providerName} not found`);
return {
error: `Selected AI provider (${providerName}) is not available. Please check your configuration.`
};
}
// Initialize tools
log.info("Initializing LLM agent tools...");
// Ensure tools are initialized to prevent tool execution issues
await ToolHandler.ensureToolsInitialized();
// Create and use the chat pipeline instead of direct processing
// Create and use the chat pipeline
const pipeline = new ChatPipeline({
enableStreaming: req.method === 'GET',
enableMetrics: true,
maxToolCallIterations: 5
});
log.info("Executing chat pipeline...");
// Get user's preferred model
const preferredModel = await this.getPreferredModel();
// Create options object for better tracking
const pipelineOptions = {
// Force useAdvancedContext to be a boolean, no matter what
useAdvancedContext: useAdvancedContext === true,
systemPrompt: session?.messages.find(m => m.role === 'system')?.content,
temperature: session?.metadata.temperature,
maxTokens: session?.metadata.maxTokens,
// Get the user's preferred model if session model is 'default' or not set
model: await this.getPreferredModel(session?.metadata.model),
// Set stream based on request type, but ensure it's explicitly a boolean value
// GET requests or format=stream parameter indicates streaming should be used
systemPrompt: chat.messages.find(m => m.role === 'system')?.content,
model: preferredModel,
stream: !!(req.method === 'GET' || req.query.format === 'stream' || req.query.stream === 'true'),
// Include chatNoteId for tracking tool executions
chatNoteId: chatNoteId
};
// Log the options to verify what's being sent to the pipeline
log.info(`Pipeline input options: ${JSON.stringify({
useAdvancedContext: pipelineOptions.useAdvancedContext,
stream: pipelineOptions.stream
})}`);
log.info(`Pipeline options: ${JSON.stringify({ useAdvancedContext: pipelineOptions.useAdvancedContext, stream: pipelineOptions.stream })}`);
// Import the WebSocket service for direct access
// Import WebSocket service for streaming
const wsService = await import('../../ws.js');
let accumulatedContent = '';
// Create a stream callback wrapper
// This will ensure we properly handle all streaming messages
let messageContent = '';
// Prepare the pipeline input
const pipelineInput: ChatPipelineInput = {
messages: session.messages.map(msg => ({
messages: chat.messages.map(msg => ({
role: msg.role as 'user' | 'assistant' | 'system',
content: msg.content
})),
query: content || '', // Ensure query is always a string, even if content is null/undefined
noteId: session.noteContext ?? undefined,
query: content || '',
noteId: undefined, // TODO: Add context note support if needed
showThinking: showThinking,
options: pipelineOptions,
streamCallback: req.method === 'GET' ? (data, done, rawChunk) => {
try {
// Use WebSocket service to send messages
this.handleStreamCallback(
data, done, rawChunk,
wsService.default, chatNoteId,
messageContent, session, res
);
} catch (error) {
log.error(`Error in stream callback: ${error}`);
// Try to send error message
try {
wsService.default.sendMessageToAllClients({
type: 'llm-stream',
chatNoteId: chatNoteId,
error: `Stream error: ${error instanceof Error ? error.message : 'Unknown error'}`,
done: true
});
// End the response
res.write(`data: ${JSON.stringify({ error: 'Stream error', done: true })}\n\n`);
res.end();
} catch (e) {
log.error(`Failed to send error message: ${e}`);
}
}
this.handleStreamCallback(data, done, rawChunk, wsService.default, chatNoteId, res);
if (data) accumulatedContent += data;
} : undefined
};
// Execute the pipeline
const response = await pipeline.execute(pipelineInput);
// Handle the response
if (req.method === 'POST') {
// Add assistant message to session
session.messages.push({
// Add assistant response to chat
chat.messages.push({
role: 'assistant',
content: response.text || '',
timestamp: new Date()
content: response.text || ''
});
// Extract sources if they're available
// Save the updated chat back to storage (single source of truth)
await chatStorageService.updateChat(chat.id, chat.messages, chat.title);
// Extract sources if available
const sources = (response as any).sources || [];
// Store sources in the session metadata if they're present
if (sources.length > 0) {
session.metadata.sources = sources;
log.info(`Stored ${sources.length} sources in session metadata`);
}
// Return the response with complete metadata
return {
content: response.text || '',
sources: sources,
metadata: {
model: response.model || session.metadata.model,
provider: response.provider || session.metadata.provider,
temperature: session.metadata.temperature,
maxTokens: session.metadata.maxTokens,
lastUpdated: new Date().toISOString(),
toolExecutions: session.metadata.toolExecutions || []
model: response.model,
provider: response.provider,
lastUpdated: new Date().toISOString()
}
};
} else {
// For streaming requests, we've already sent the response
// For streaming, response is already sent via WebSocket/SSE
// Save the accumulated content
if (accumulatedContent) {
chat.messages.push({
role: 'assistant',
content: accumulatedContent
});
await chatStorageService.updateChat(chat.id, chat.messages, chat.title);
}
return null;
}
} catch (processingError: any) {
log.error(`Error processing message: ${processingError}`);
return {
error: `Error processing your request: ${processingError.message}`
};
} catch (error: any) {
log.error(`Error processing message: ${error}`);
return { error: `Error processing your request: ${error.message}` };
}
}
/**
* Handle stream callback for WebSocket communication
* Simplified stream callback handler
*/
private handleStreamCallback(
data: string | null,
@@ -385,122 +219,72 @@ class RestChatService {
rawChunk: any,
wsService: any,
chatNoteId: string,
messageContent: string,
session: any,
res: Response
) {
// Only accumulate content that's actually text (not tool execution or thinking info)
if (data) {
messageContent += data;
}
// Create a message object with all necessary fields
const message: LLMStreamMessage = {
type: 'llm-stream',
chatNoteId: chatNoteId
chatNoteId: chatNoteId,
done: done
};
// Add content if available - either the new chunk or full content on completion
if (data) {
message.content = data;
}
// Add thinking info if available in the raw chunk
if (rawChunk && 'thinking' in rawChunk && rawChunk.thinking) {
message.thinking = rawChunk.thinking as string;
}
// Add tool execution info if available in the raw chunk
if (rawChunk && 'toolExecution' in rawChunk && rawChunk.toolExecution) {
// Transform the toolExecution to match the expected format
const toolExec = rawChunk.toolExecution;
message.toolExecution = {
// Use optional chaining for all properties
tool: typeof toolExec.tool === 'string'
? toolExec.tool
: toolExec.tool?.name,
tool: typeof toolExec.tool === 'string' ? toolExec.tool : toolExec.tool?.name,
result: toolExec.result,
// Map arguments to args
args: 'arguments' in toolExec ?
(typeof toolExec.arguments === 'object' ?
toolExec.arguments as Record<string, unknown> : {}) : {},
// Add additional properties if they exist
(typeof toolExec.arguments === 'object' ? toolExec.arguments as Record<string, unknown> : {}) : {},
action: 'action' in toolExec ? toolExec.action as string : undefined,
toolCallId: 'toolCallId' in toolExec ? toolExec.toolCallId as string : undefined,
error: 'error' in toolExec ? toolExec.error as string : undefined
};
}
// Set done flag explicitly
message.done = done;
// On final message, include the complete content too
if (done) {
// Store the response in the session when done
session.messages.push({
role: 'assistant',
content: messageContent,
timestamp: new Date()
});
}
// Send message to all clients
// Send WebSocket message
wsService.sendMessageToAllClients(message);
// Log what was sent (first message and completion)
if (message.thinking || done) {
log.info(
`[WS-SERVER] Sending LLM stream message: chatNoteId=${chatNoteId}, content=${!!message.content}, contentLength=${message.content?.length || 0}, thinking=${!!message.thinking}, toolExecution=${!!message.toolExecution}, done=${done}`
);
}
// For GET requests, also send as server-sent events
// Prepare response data for JSON event
const responseData: any = {
content: data,
done
};
// Add tool execution if available
// Send SSE response
const responseData: any = { content: data, done };
if (rawChunk?.toolExecution) {
responseData.toolExecution = rawChunk.toolExecution;
}
// Send the data as a JSON event
res.write(`data: ${JSON.stringify(responseData)}\n\n`);
if (done) {
res.end();
}
}
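The payloads the simplified callback emits, written out as sketch types read directly off the code above (the canonical LLMStreamMessage definition lives in ../interfaces/chat_ws_messages.js; these are not the authoritative types):

// Tool execution details forwarded with a stream chunk.
interface StreamToolExecutionSketch {
    tool?: string;
    result?: unknown;
    args: Record<string, unknown>;
    action?: string;
    toolCallId?: string;
    error?: string;
}

// Broadcast to every WebSocket client per chunk.
interface LlmStreamMessageSketch {
    type: 'llm-stream';
    chatNoteId: string;
    done: boolean;
    content?: string;                          // present for text chunks
    thinking?: string;                         // present when the raw chunk carries thinking info
    toolExecution?: StreamToolExecutionSketch;
}

// Written to the SSE response as `data: <json>\n\n`; the response ends when done is true.
interface SseChunkSketch {
    content: string | null;
    done: boolean;
    toolExecution?: unknown;
}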
/**
* Create a new chat session
* Create a new chat
*/
async createSession(req: Request, res: Response) {
try {
const options: any = req.body || {};
const title = options.title || 'Chat Session';
// Determine the note ID for the chat
let noteId = options.noteId || options.chatNoteId; // Accept either name for backward compatibility
let noteId = options.noteId || options.chatNoteId;
// If currentNoteId is provided, check if it's already an AI Chat note
// Check if currentNoteId is already an AI Chat note
if (!noteId && options.currentNoteId) {
// Import becca to check note type
const becca = (await import('../../../becca/becca.js')).default;
const note = becca.notes[options.currentNoteId];
// Check if this is an AI Chat note by looking at its content structure
if (note) {
try {
const content = note.getContent();
if (content) {
const contentStr = typeof content === 'string' ? content : content.toString();
const parsedContent = JSON.parse(contentStr);
// AI Chat notes have a messages array and noteId in their content
if (parsedContent.messages && Array.isArray(parsedContent.messages) && parsedContent.noteId) {
// This looks like an AI Chat note - use it directly
if (parsedContent.messages && Array.isArray(parsedContent.messages)) {
noteId = options.currentNoteId;
log.info(`Using existing AI Chat note ${noteId} as session`);
}
@@ -509,106 +293,69 @@ class RestChatService {
// Not JSON content, so not an AI Chat note
}
}
if (!noteId) {
log.info(`Creating new chat note from context of note ${options.currentNoteId}`);
// Don't use the currentNoteId as the chat note ID - create a new one
}
}
// If we don't have a noteId, create a new Chat Note
// Create new chat if needed
if (!noteId) {
// Create a new Chat Note via the storage service
const chatStorageService = (await import('../../llm/chat_storage_service.js')).default;
const newChat = await chatStorageService.createChat(title);
noteId = newChat.id;
log.info(`Created new Chat Note with ID: ${noteId}`);
} else {
// We have a noteId - this means we're working with an existing aiChat note
// Don't create another note, just use the existing one
log.info(`Using existing Chat Note with ID: ${noteId}`);
}
// Create a new session through our session store using the note ID
const session = SessionsStore.createSession({
chatNoteId: noteId, // This is really the noteId of the chat note
title,
systemPrompt: options.systemPrompt,
contextNoteId: options.contextNoteId,
maxTokens: options.maxTokens,
model: options.model,
provider: options.provider,
temperature: options.temperature
});
return {
id: session.id, // This will be the same as noteId
title: session.title,
createdAt: session.createdAt,
noteId: noteId // Return the note ID for clarity
id: noteId,
title: title,
createdAt: new Date(),
noteId: noteId
};
} catch (error: any) {
log.error(`Error creating LLM session: ${error.message || 'Unknown error'}`);
throw new Error(`Failed to create LLM session: ${error.message || 'Unknown error'}`);
log.error(`Error creating chat session: ${error.message || 'Unknown error'}`);
throw new Error(`Failed to create chat session: ${error.message || 'Unknown error'}`);
}
}
/**
* Get a specific chat session by ID
* Get a chat by ID
*/
async getSession(req: Request, res: Response) {
async getSession(req: Request, res: Response): Promise<any> {
try {
const { sessionId } = req.params;
// Check if session exists
const session = SessionsStore.getSession(sessionId);
if (!session) {
// Instead of throwing an error, return a structured 404 response
// that the frontend can handle gracefully
const chat = await chatStorageService.getChat(sessionId);
if (!chat) {
res.status(404).json({
error: true,
message: `Session with ID ${sessionId} not found`,
code: 'session_not_found',
sessionId
});
return null; // Return null to prevent further processing
return null;
}
// Return session with metadata and additional fields
return {
id: session.id,
title: session.title,
createdAt: session.createdAt,
lastActive: session.lastActive,
messages: session.messages,
noteContext: session.noteContext,
// Include additional fields for the frontend
sources: session.metadata.sources || [],
metadata: {
model: session.metadata.model,
provider: session.metadata.provider,
temperature: session.metadata.temperature,
maxTokens: session.metadata.maxTokens,
lastUpdated: session.lastActive.toISOString(),
// Include simplified tool executions if available
toolExecutions: session.metadata.toolExecutions || []
}
id: chat.id,
title: chat.title,
createdAt: chat.createdAt,
lastActive: chat.updatedAt,
messages: chat.messages,
metadata: chat.metadata || {}
};
} catch (error: any) {
log.error(`Error getting LLM session: ${error.message || 'Unknown error'}`);
log.error(`Error getting chat session: ${error.message || 'Unknown error'}`);
throw new Error(`Failed to get session: ${error.message || 'Unknown error'}`);
}
}
/**
* Delete a chat session
* Delete a chat
*/
async deleteSession(req: Request, res: Response) {
try {
const { sessionId } = req.params;
// Delete the session
const success = SessionsStore.deleteSession(sessionId);
const success = await chatStorageService.deleteChat(sessionId);
if (!success) {
throw new Error(`Session with ID ${sessionId} not found`);
}
@@ -618,116 +365,46 @@ class RestChatService {
message: `Session ${sessionId} deleted successfully`
};
} catch (error: any) {
log.error(`Error deleting LLM session: ${error.message || 'Unknown error'}`);
log.error(`Error deleting chat session: ${error.message || 'Unknown error'}`);
throw new Error(`Failed to delete session: ${error.message || 'Unknown error'}`);
}
}
/**
* Get all sessions
* Get all chats
*/
getSessions() {
return SessionsStore.getAllSessions();
}
/**
* Create an in-memory session from a Chat Note
* This treats the Chat Note as the source of truth, using its ID as the session ID
*/
async createSessionFromChatNote(noteId: string): Promise<ChatSession | null> {
async getAllSessions() {
try {
log.info(`Creating in-memory session for Chat Note ID ${noteId}`);
// Import chat storage service
const chatStorageService = (await import('../../llm/chat_storage_service.js')).default;
// Try to get the Chat Note data
const chatNote = await chatStorageService.getChat(noteId);
if (!chatNote) {
log.error(`Chat Note ${noteId} not found, cannot create session`);
return null;
}
log.info(`Found Chat Note ${noteId}, creating in-memory session`);
// Convert Message[] to ChatMessage[] by ensuring the role is compatible
const chatMessages: ChatMessage[] = chatNote.messages.map(msg => ({
role: msg.role === 'tool' ? 'assistant' : msg.role, // Map 'tool' role to 'assistant'
content: msg.content,
timestamp: new Date()
}));
// Create a new session with the same ID as the Chat Note
const session: ChatSession = {
id: chatNote.id, // Use Chat Note ID as the session ID
title: chatNote.title,
messages: chatMessages,
createdAt: chatNote.createdAt || new Date(),
lastActive: new Date(),
metadata: chatNote.metadata || {}
const chats = await chatStorageService.getAllChats();
return {
sessions: chats.map(chat => ({
id: chat.id,
title: chat.title,
createdAt: chat.createdAt,
lastActive: chat.updatedAt,
messageCount: chat.messages.length
}))
};
// Add the session to the in-memory store
SessionsStore.getAllSessions().set(noteId, session);
log.info(`Successfully created in-memory session for Chat Note ${noteId}`);
return session;
} catch (error) {
log.error(`Failed to create session from Chat Note: ${error}`);
return null;
} catch (error: any) {
log.error(`Error listing sessions: ${error}`);
throw new Error(`Failed to list sessions: ${error}`);
}
}
/**
* Get an existing session or create a new one from a Chat Note
* This treats the Chat Note as the source of truth, using its ID as the session ID
* Get the user's preferred model
*/
async getOrCreateSessionFromChatNote(noteId: string, createIfNotFound: boolean = true): Promise<ChatSession | null> {
// First check if we already have this session in memory
let session = SessionsStore.getSession(noteId);
if (session) {
log.info(`Found existing in-memory session for Chat Note ${noteId}`);
return session;
}
// If not in memory, try to create from Chat Note
log.info(`Session not found in memory for Chat Note ${noteId}, attempting to create it`);
// Only try to create if allowed
if (!createIfNotFound) {
log.info(`Not creating new session for ${noteId} as createIfNotFound=false`);
return null;
}
// Create from Chat Note
return await this.createSessionFromChatNote(noteId);
}
/**
* Get the user's preferred model using the new configuration system
*/
async getPreferredModel(sessionModel?: string): Promise<string | undefined> {
// If the session already has a valid model (not 'default'), use it
if (sessionModel && sessionModel !== 'default') {
return sessionModel;
}
async getPreferredModel(): Promise<string | undefined> {
try {
// Use the new configuration system - no string parsing!
const validConfig = await getFirstValidModelConfig();
if (!validConfig) {
log.error('No valid AI model configuration found. Please configure your AI settings.');
return undefined; // Don't provide fallback defaults
log.error('No valid AI model configuration found');
return undefined;
}
log.info(`Selected user's preferred model: ${validConfig.model} from provider: ${validConfig.provider}`);
return validConfig.model;
} catch (error) {
log.error(`Error getting user's preferred model: ${error}`);
return undefined; // Don't provide fallback defaults, let the caller handle it
log.error(`Error getting preferred model: ${error}`);
return undefined;
}
}
}
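Taken together, the service now depends on a small storage surface. The shapes below are inferred from how chat is used in this file, not copied from chat_storage_service.ts, so treat them as a sketch:

type ChatRoleSketch = 'user' | 'assistant' | 'system' | 'tool';

// Inferred from field access in this file (id, title, messages, createdAt, updatedAt, metadata).
interface StoredChatSketch {
    id: string;
    title: string;
    messages: Array<{ role: ChatRoleSketch; content: string }>;
    createdAt?: Date;
    updatedAt?: Date;
    metadata?: Record<string, unknown>;
}

// Inferred from the calls made above; return types are assumptions.
interface ChatStorageServiceSketch {
    getChat(chatNoteId: string): Promise<StoredChatSketch | null>;
    createChat(title: string): Promise<StoredChatSketch>;
    updateChat(id: string, messages: StoredChatSketch['messages'], title?: string): Promise<unknown>;
    deleteChat(id: string): Promise<boolean>;
    getAllChats(): Promise<StoredChatSketch[]>;
}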

View File

@@ -1,168 +0,0 @@
/**
* In-memory storage for chat sessions
*/
import log from "../../log.js";
import { LLM_CONSTANTS } from '../constants/provider_constants.js';
import { SEARCH_CONSTANTS } from '../constants/search_constants.js';
import type { ChatSession, ChatMessage } from '../interfaces/chat_session.js';
// In-memory storage for sessions
const sessions = new Map<string, ChatSession>();
// Flag to track if cleanup timer has been initialized
let cleanupInitialized = false;
/**
* Provides methods to manage chat sessions
*/
class SessionsStore {
/**
* Initialize the session cleanup timer to remove old/inactive sessions
*/
initializeCleanupTimer(): void {
if (cleanupInitialized) {
return;
}
// Clean sessions that have expired based on the constants
function cleanupOldSessions() {
const expiryTime = new Date(Date.now() - LLM_CONSTANTS.SESSION.SESSION_EXPIRY_MS);
for (const [sessionId, session] of sessions.entries()) {
if (session.lastActive < expiryTime) {
sessions.delete(sessionId);
}
}
}
// Run cleanup at the configured interval
setInterval(cleanupOldSessions, LLM_CONSTANTS.SESSION.CLEANUP_INTERVAL_MS);
cleanupInitialized = true;
log.info("Session cleanup timer initialized");
}
/**
* Get all sessions
*/
getAllSessions(): Map<string, ChatSession> {
return sessions;
}
/**
* Get a specific session by ID
*/
getSession(sessionId: string): ChatSession | undefined {
return sessions.get(sessionId);
}
/**
* Create a new session
*/
createSession(options: {
chatNoteId: string;
title?: string;
systemPrompt?: string;
contextNoteId?: string;
maxTokens?: number;
model?: string;
provider?: string;
temperature?: number;
}): ChatSession {
this.initializeCleanupTimer();
const title = options.title || 'Chat Session';
const sessionId = options.chatNoteId;
const now = new Date();
// Initial system message if provided
const messages: ChatMessage[] = [];
if (options.systemPrompt) {
messages.push({
role: 'system',
content: options.systemPrompt,
timestamp: now
});
}
// Create and store the session
const session: ChatSession = {
id: sessionId,
title,
messages,
createdAt: now,
lastActive: now,
noteContext: options.contextNoteId,
metadata: {
temperature: options.temperature || SEARCH_CONSTANTS.TEMPERATURE.DEFAULT,
maxTokens: options.maxTokens,
model: options.model,
provider: options.provider,
sources: [],
toolExecutions: [],
lastUpdated: now.toISOString()
}
};
sessions.set(sessionId, session);
log.info(`Created in-memory session for Chat Note ID: ${sessionId}`);
return session;
}
/**
* Update a session's last active timestamp
*/
touchSession(sessionId: string): boolean {
const session = sessions.get(sessionId);
if (!session) {
return false;
}
session.lastActive = new Date();
return true;
}
/**
* Delete a session
*/
deleteSession(sessionId: string): boolean {
return sessions.delete(sessionId);
}
/**
* Record a tool execution in the session metadata
*/
recordToolExecution(chatNoteId: string, tool: any, result: string, error?: string): void {
if (!chatNoteId) return;
const session = sessions.get(chatNoteId);
if (!session) return;
try {
const toolExecutions = session.metadata.toolExecutions || [];
// Format tool execution record
const execution = {
id: tool.id || `tool-${Date.now()}-${Math.random().toString(36).substring(2, 7)}`,
name: tool.function?.name || 'unknown',
arguments: typeof tool.function?.arguments === 'string'
? (() => { try { return JSON.parse(tool.function.arguments); } catch { return tool.function.arguments; } })()
: tool.function?.arguments || {},
result: result,
error: error,
timestamp: new Date().toISOString()
};
// Add to tool executions
toolExecutions.push(execution);
session.metadata.toolExecutions = toolExecutions;
log.info(`Recorded tool execution for ${execution.name} in session ${chatNoteId}`);
} catch (err) {
log.error(`Failed to record tool execution: ${err}`);
}
}
}
// Create singleton instance
const sessionsStore = new SessionsStore();
export default sessionsStore;