closer to decoupling sessions...

decouple sessions, again

closer...

uhhh

closer....

closer...
This commit is contained in:
perf3ct 2025-04-16 21:53:12 +00:00
parent 2b14f73ad4
commit 5e1fc5625d
No known key found for this signature in database
GPG Key ID: 569C4EEC436F5232
10 changed files with 355 additions and 390 deletions

View File

@ -7,10 +7,11 @@ import type { SessionResponse } from "./types.js";
/**
* Create a new chat session
*/
export async function createChatSession(): Promise<{chatNoteId: string | null, noteId: string | null}> {
export async function createChatSession(currentNoteId?: string): Promise<{chatNoteId: string | null, noteId: string | null}> {
try {
const resp = await server.post<SessionResponse>('llm/chat', {
title: 'Note Chat'
title: 'Note Chat',
currentNoteId: currentNoteId // Pass the current note ID if available
});
if (resp && resp.id) {
@ -36,6 +37,13 @@ export async function createChatSession(): Promise<{chatNoteId: string | null, n
*/
export async function checkSessionExists(chatNoteId: string): Promise<boolean> {
try {
// Validate that we have a proper note ID format, not a session ID
// Note IDs in Trilium are typically longer or in a different format
if (chatNoteId && chatNoteId.length === 16 && /^[A-Za-z0-9]+$/.test(chatNoteId)) {
console.warn(`Invalid note ID format detected: ${chatNoteId} appears to be a legacy session ID`);
return false;
}
const sessionCheck = await server.getWithSilentNotFound<any>(`llm/chat/${chatNoteId}`);
return !!(sessionCheck && sessionCheck.id);
} catch (error: any) {
@ -56,6 +64,13 @@ export async function setupStreamingResponse(
onComplete: () => void,
onError: (error: Error) => void
): Promise<void> {
// Validate that we have a proper note ID format, not a session ID
if (chatNoteId && chatNoteId.length === 16 && /^[A-Za-z0-9]+$/.test(chatNoteId)) {
console.error(`Invalid note ID format: ${chatNoteId} appears to be a legacy session ID`);
onError(new Error("Invalid note ID format - using a legacy session ID"));
return;
}
return new Promise((resolve, reject) => {
let assistantResponse = '';
let postToolResponse = ''; // Separate accumulator for post-tool execution content
@ -74,6 +89,32 @@ export async function setupStreamingResponse(
const responseId = `llm-stream-${Date.now()}-${Math.floor(Math.random() * 1000)}`;
console.log(`[${responseId}] Setting up WebSocket streaming for chat note ${chatNoteId}`);
// Send the initial request to initiate streaming
(async () => {
try {
const streamResponse = await server.post<any>(`llm/chat/${chatNoteId}/messages/stream`, {
content: messageParams.content,
includeContext: messageParams.useAdvancedContext,
options: {
temperature: 0.7,
maxTokens: 2000
}
});
if (!streamResponse || !streamResponse.success) {
console.error(`[${responseId}] Failed to initiate streaming`);
reject(new Error('Failed to initiate streaming'));
return;
}
console.log(`[${responseId}] Streaming initiated successfully`);
} catch (error) {
console.error(`[${responseId}] Error initiating streaming:`, error);
reject(error);
return;
}
})();
// Function to safely perform cleanup
const performCleanup = () => {
if (cleanupTimeoutId) {
@ -116,8 +157,7 @@ export async function setupStreamingResponse(
const message = customEvent.detail;
// Only process messages for our chat note
// Note: The WebSocket messages still use sessionId property for backward compatibility
if (!message || message.sessionId !== chatNoteId) {
if (!message || message.chatNoteId !== chatNoteId) {
return;
}
@ -402,31 +442,6 @@ export async function setupStreamingResponse(
reject(new Error('WebSocket connection not established'));
}
}, 10000);
// Send the streaming request to start the process
console.log(`[${responseId}] Sending HTTP POST request to initiate streaming: /llm/chat/${chatNoteId}/messages/stream`);
server.post(`llm/chat/${chatNoteId}/messages/stream`, {
...messageParams,
stream: true // Explicitly indicate this is a streaming request
}).catch(err => {
console.error(`[${responseId}] HTTP error sending streaming request for chat note ${chatNoteId}:`, err);
// Clean up timeouts
if (initialTimeoutId !== null) {
window.clearTimeout(initialTimeoutId);
initialTimeoutId = null;
}
if (timeoutId !== null) {
window.clearTimeout(timeoutId);
timeoutId = null;
}
// Clean up event listener
cleanupEventListener(eventListener);
reject(err);
});
});
}
@ -449,6 +464,12 @@ function cleanupEventListener(listener: ((event: Event) => void) | null): void {
*/
export async function getDirectResponse(chatNoteId: string, messageParams: any): Promise<any> {
try {
// Validate that we have a proper note ID format, not a session ID
if (chatNoteId && chatNoteId.length === 16 && /^[A-Za-z0-9]+$/.test(chatNoteId)) {
console.error(`Invalid note ID format: ${chatNoteId} appears to be a legacy session ID`);
throw new Error("Invalid note ID format - using a legacy session ID");
}
const postResponse = await server.post<any>(`llm/chat/${chatNoteId}/messages`, {
message: messageParams.content,
includeContext: messageParams.useAdvancedContext,

View File

@ -321,35 +321,13 @@ export default class LlmChatPanel extends BasicWidget {
}
// Load Chat Note ID if available
if (savedData.chatNoteId) {
console.log(`Setting Chat Note ID from saved data: ${savedData.chatNoteId}`);
this.chatNoteId = savedData.chatNoteId;
// Set the noteId as well - this could be different from the chatNoteId
// If we have a separate noteId stored, use it, otherwise default to the chatNoteId
if (savedData.noteId) {
this.noteId = savedData.noteId;
console.log(`Using stored Chat Note ID: ${this.noteId}`);
} else {
// For compatibility with older data, use the chatNoteId as the noteId
this.noteId = savedData.chatNoteId;
console.log(`No Chat Note ID found, using Chat Note ID: ${this.chatNoteId}`);
}
// No need to check if session exists on server since the Chat Note
// is now the source of truth - if we have the Note, we have the session
if (savedData.noteId) {
console.log(`Using noteId as Chat Note ID: ${savedData.noteId}`);
this.chatNoteId = savedData.noteId;
this.noteId = savedData.noteId;
} else {
// For backward compatibility, try to get sessionId
if ((savedData as any).sessionId) {
console.log(`Using legacy sessionId as Chat Note ID: ${(savedData as any).sessionId}`);
this.chatNoteId = (savedData as any).sessionId;
this.noteId = savedData.noteId || (savedData as any).sessionId;
} else {
// No saved Chat Note ID, create a new one
this.chatNoteId = null;
this.noteId = null;
await this.createChatSession();
}
console.log(`No noteId found in saved data, cannot load chat session`);
return false;
}
return true;
@ -491,50 +469,30 @@ export default class LlmChatPanel extends BasicWidget {
}
}
/**
* Create a new chat session
*/
private async createChatSession() {
try {
// Create a new Chat Note to represent this chat session
// The function now returns both chatNoteId and noteId
const result = await createChatSession();
// Create a new chat session, passing the current note ID if it exists
const { chatNoteId, noteId } = await createChatSession(
this.currentNoteId ? this.currentNoteId : undefined
);
if (!result.chatNoteId) {
toastService.showError('Failed to create chat session');
return;
}
if (chatNoteId) {
// If we got back an ID from the API, use it
this.chatNoteId = chatNoteId;
console.log(`Created new chat session with ID: ${result.chatNoteId}`);
this.chatNoteId = result.chatNoteId;
// For new sessions, the noteId should equal the chatNoteId
// This ensures we're using the note ID consistently
this.noteId = noteId || chatNoteId;
// If the API returned a noteId directly, use it
if (result.noteId) {
this.noteId = result.noteId;
console.log(`Using noteId from API response: ${this.noteId}`);
console.log(`Created new chat session with noteId: ${this.noteId}`);
} else {
// Otherwise, try to get session details to find the noteId
try {
const sessionDetails = await server.get<any>(`llm/chat/${this.chatNoteId}`);
if (sessionDetails && sessionDetails.noteId) {
this.noteId = sessionDetails.noteId;
console.log(`Using noteId from session details: ${this.noteId}`);
} else {
// As a last resort, use the current note ID
console.warn(`No noteId found in session details, using parent note ID: ${this.currentNoteId}`);
this.noteId = this.currentNoteId;
}
} catch (detailsError) {
console.error('Could not fetch session details:', detailsError);
// Use current note ID as a fallback
this.noteId = this.currentNoteId;
console.warn(`Using current note ID as fallback: ${this.noteId}`);
}
throw new Error("Failed to create chat session - no ID returned");
}
// Verify that the noteId is valid
if (this.noteId !== this.currentNoteId) {
console.log(`Note ID verification - session's noteId: ${this.noteId}, current note: ${this.currentNoteId}`);
}
// Save the session ID and data
// Save the note ID as the session identifier
await this.saveCurrentData();
} catch (error) {
console.error('Error creating chat session:', error);
@ -818,7 +776,7 @@ export default class LlmChatPanel extends BasicWidget {
similarity?: number;
content?: string;
}>;
}>(`llm/sessions/${this.chatNoteId}`)
}>(`llm/chat/${this.chatNoteId}`)
.then((sessionData) => {
console.log("Got updated session data:", sessionData);

View File

@ -15,40 +15,14 @@ interface ChatMessage {
timestamp?: Date;
}
interface ChatSession {
id: string;
title: string;
messages: ChatMessage[];
createdAt: Date;
lastActive: Date;
noteContext?: string; // Optional noteId that provides context
metadata: Record<string, any>;
}
interface NoteSource {
noteId: string;
title: string;
content?: string;
similarity?: number;
branchId?: string;
}
interface SessionOptions {
title?: string;
systemPrompt?: string;
temperature?: number;
maxTokens?: number;
model?: string;
provider?: string;
contextNoteId?: string;
}
/**
* @swagger
* /api/llm/chat:
* /api/llm/sessions:
* post:
* summary: Create a new LLM chat
* operationId: llm-create-chat
* summary: Create a new LLM chat session
* operationId: llm-create-session
* requestBody:
* required: true
* content:
@ -58,7 +32,7 @@ interface SessionOptions {
* properties:
* title:
* type: string
* description: Title for the chat
* description: Title for the chat session
* systemPrompt:
* type: string
* description: System message to set the behavior of the assistant
@ -76,16 +50,16 @@ interface SessionOptions {
* description: LLM provider to use (e.g., 'openai', 'anthropic', 'ollama')
* contextNoteId:
* type: string
* description: Note ID to use as context for the chat
* description: Note ID to use as context for the session
* responses:
* '200':
* description: Successfully created chat
* description: Successfully created session
* content:
* application/json:
* schema:
* type: object
* properties:
* chatNoteId:
* sessionId:
* type: string
* title:
* type: string
@ -96,25 +70,25 @@ interface SessionOptions {
* - session: []
* tags: ["llm"]
*/
async function createChat(req: Request, res: Response) {
async function createSession(req: Request, res: Response) {
return restChatService.createSession(req, res);
}
/**
* @swagger
* /api/llm/chat/{chatNoteId}:
* /api/llm/sessions/{sessionId}:
* get:
* summary: Retrieve a specific chat by ID
* operationId: llm-get-chat
* summary: Retrieve a specific chat session
* operationId: llm-get-session
* parameters:
* - name: chatNoteId
* - name: sessionId
* in: path
* required: true
* schema:
* type: string
* responses:
* '200':
* description: Chat details
* description: Chat session details
* content:
* application/json:
* schema:
@ -144,12 +118,12 @@ async function createChat(req: Request, res: Response) {
* type: string
* format: date-time
* '404':
* description: Chat not found
* description: Session not found
* security:
* - session: []
* tags: ["llm"]
*/
async function getChat(req: Request, res: Response) {
async function getSession(req: Request, res: Response) {
return restChatService.getSession(req, res);
}
@ -165,6 +139,7 @@ async function getChat(req: Request, res: Response) {
* required: true
* schema:
* type: string
* description: The ID of the chat note (formerly sessionId)
* requestBody:
* required: true
* content:
@ -174,7 +149,7 @@ async function getChat(req: Request, res: Response) {
* properties:
* title:
* type: string
* description: Updated title for the chat
* description: Updated title for the session
* systemPrompt:
* type: string
* description: Updated system prompt
@ -195,7 +170,7 @@ async function getChat(req: Request, res: Response) {
* description: Updated note ID for context
* responses:
* '200':
* description: Chat successfully updated
* description: Session successfully updated
* content:
* application/json:
* schema:
@ -209,12 +184,12 @@ async function getChat(req: Request, res: Response) {
* type: string
* format: date-time
* '404':
* description: Chat not found
* description: Session not found
* security:
* - session: []
* tags: ["llm"]
*/
async function updateChat(req: Request, res: Response) {
async function updateSession(req: Request, res: Response) {
// Get the chat using ChatService
const chatNoteId = req.params.chatNoteId;
const updates = req.body;
@ -242,13 +217,13 @@ async function updateChat(req: Request, res: Response) {
/**
* @swagger
* /api/llm/chat:
* /api/llm/sessions:
* get:
* summary: List all chats
* operationId: llm-list-chats
* summary: List all chat sessions
* operationId: llm-list-sessions
* responses:
* '200':
* description: List of chats
* description: List of chat sessions
* content:
* application/json:
* schema:
@ -272,14 +247,14 @@ async function updateChat(req: Request, res: Response) {
* - session: []
* tags: ["llm"]
*/
async function listChats(req: Request, res: Response) {
// Get all chats using ChatService
async function listSessions(req: Request, res: Response) {
// Get all sessions using ChatService
try {
const sessions = await chatService.getAllSessions();
// Format the response
return {
chats: sessions.map(session => ({
sessions: sessions.map(session => ({
id: session.id,
title: session.title,
createdAt: new Date(), // Since we don't have this in chat sessions
@ -288,33 +263,33 @@ async function listChats(req: Request, res: Response) {
}))
};
} catch (error) {
log.error(`Error listing chats: ${error}`);
throw new Error(`Failed to list chats: ${error}`);
log.error(`Error listing sessions: ${error}`);
throw new Error(`Failed to list sessions: ${error}`);
}
}
/**
* @swagger
* /api/llm/chat/{chatNoteId}:
* /api/llm/sessions/{sessionId}:
* delete:
* summary: Delete a chat
* operationId: llm-delete-chat
* summary: Delete a chat session
* operationId: llm-delete-session
* parameters:
* - name: chatNoteId
* - name: sessionId
* in: path
* required: true
* schema:
* type: string
* responses:
* '200':
* description: Chat successfully deleted
* description: Session successfully deleted
* '404':
* description: Chat not found
* description: Session not found
* security:
* - session: []
* tags: ["llm"]
*/
async function deleteChat(req: Request, res: Response) {
async function deleteSession(req: Request, res: Response) {
return restChatService.deleteSession(req, res);
}
@ -330,6 +305,7 @@ async function deleteChat(req: Request, res: Response) {
* required: true
* schema:
* type: string
* description: The ID of the chat note (formerly sessionId)
* requestBody:
* required: true
* content:
@ -357,7 +333,7 @@ async function deleteChat(req: Request, res: Response) {
* description: Whether to include relevant notes as context
* useNoteContext:
* type: boolean
* description: Whether to use the chat's context note
* description: Whether to use the session's context note
* responses:
* '200':
* description: LLM response
@ -379,10 +355,10 @@ async function deleteChat(req: Request, res: Response) {
* type: string
* similarity:
* type: number
* chatNoteId:
* sessionId:
* type: string
* '404':
* description: Chat not found
* description: Session not found
* '500':
* description: Error processing request
* security:
@ -393,175 +369,6 @@ async function sendMessage(req: Request, res: Response) {
return restChatService.handleSendMessage(req, res);
}
/**
* @swagger
* /api/llm/chat/{chatNoteId}/messages/stream:
* post:
* summary: Start a streaming response via WebSockets
* operationId: llm-stream-message
* parameters:
* - name: chatNoteId
* in: path
* required: true
* schema:
* type: string
* requestBody:
* required: true
* content:
* application/json:
* schema:
* type: object
* properties:
* content:
* type: string
* description: The user message to send to the LLM
* useAdvancedContext:
* type: boolean
* description: Whether to use advanced context extraction
* showThinking:
* type: boolean
* description: Whether to show thinking process in the response
* responses:
* '200':
* description: Streaming started successfully
* '404':
* description: Chat not found
* '500':
* description: Error processing request
* security:
* - session: []
* tags: ["llm"]
*/
async function streamMessage(req: Request, res: Response) {
log.info("=== Starting streamMessage ===");
try {
const chatNoteId = req.params.chatNoteId;
const { content, useAdvancedContext, showThinking } = req.body;
if (!content || typeof content !== 'string' || content.trim().length === 0) {
throw new Error('Content cannot be empty');
}
// Check if session exists in memory
let session = restChatService.getSessions().get(chatNoteId);
// If session doesn't exist in memory, try to create it from the Chat Note
if (!session) {
log.info(`Session not found in memory for Chat Note ${chatNoteId}, attempting to create from Chat Note`);
const restoredSession = await restChatService.createSessionFromChatNote(chatNoteId);
if (!restoredSession) {
// If we can't find the Chat Note, then it's truly not found
log.error(`Chat Note ${chatNoteId} not found, cannot create session`);
throw new Error('Chat Note not found, cannot create session for streaming');
}
session = restoredSession;
}
// Update last active timestamp
session.lastActive = new Date();
// Add user message to the session
session.messages.push({
role: 'user',
content,
timestamp: new Date()
});
// Create request parameters for the pipeline
const requestParams = {
chatNoteId,
content,
useAdvancedContext: useAdvancedContext === true,
showThinking: showThinking === true,
stream: true // Always stream for this endpoint
};
// Create a fake request/response pair to pass to the handler
const fakeReq = {
...req,
method: 'GET', // Set to GET to indicate streaming
query: {
stream: 'true', // Set stream param - don't use format: 'stream' to avoid confusion
useAdvancedContext: String(useAdvancedContext === true),
showThinking: String(showThinking === true)
},
params: {
chatNoteId
},
// Make sure the original content is available to the handler
body: {
content,
useAdvancedContext: useAdvancedContext === true,
showThinking: showThinking === true
}
} as unknown as Request;
// Log to verify correct parameters
log.info(`WebSocket stream settings - useAdvancedContext=${useAdvancedContext === true}, in query=${fakeReq.query.useAdvancedContext}, in body=${fakeReq.body.useAdvancedContext}`);
// Extra safety to ensure the parameters are passed correctly
if (useAdvancedContext === true) {
log.info(`Enhanced context IS enabled for this request`);
} else {
log.info(`Enhanced context is NOT enabled for this request`);
}
// Process the request in the background
Promise.resolve().then(async () => {
try {
await restChatService.handleSendMessage(fakeReq, res);
} catch (error) {
log.error(`Background message processing error: ${error}`);
// Import the WebSocket service
const wsService = (await import('../../services/ws.js')).default;
// Define LLMStreamMessage interface
interface LLMStreamMessage {
type: 'llm-stream';
sessionId: string; // Keep this as sessionId for WebSocket compatibility
content?: string;
thinking?: string;
toolExecution?: any;
done?: boolean;
error?: string;
raw?: unknown;
}
// Send error to client via WebSocket
wsService.sendMessageToAllClients({
type: 'llm-stream',
sessionId: chatNoteId, // Use sessionId property, but pass the chatNoteId
error: `Error processing message: ${error}`,
done: true
} as LLMStreamMessage);
}
});
// Import the WebSocket service
const wsService = (await import('../../services/ws.js')).default;
// Let the client know streaming has started via WebSocket (helps client confirm connection is working)
wsService.sendMessageToAllClients({
type: 'llm-stream',
sessionId: chatNoteId,
thinking: 'Initializing streaming LLM response...'
});
// Let the client know streaming has started via HTTP response
return {
success: true,
message: 'Streaming started',
sessionId: chatNoteId // Keep using sessionId for API response compatibility
};
} catch (error: any) {
log.error(`Error starting message stream: ${error.message}`);
throw error;
}
}
/**
* @swagger
* /api/llm/indexes/stats:
@ -957,13 +764,171 @@ async function indexNote(req: Request, res: Response) {
}
}
/**
* @swagger
* /api/llm/chat/{chatNoteId}/messages/stream:
* post:
* summary: Stream a message to an LLM via WebSocket
* operationId: llm-stream-message
* parameters:
* - name: chatNoteId
* in: path
* required: true
* schema:
* type: string
* description: The ID of the chat note to stream messages to (formerly sessionId)
* requestBody:
* required: true
* content:
* application/json:
* schema:
* type: object
* properties:
* content:
* type: string
* description: The user message to send to the LLM
* useAdvancedContext:
* type: boolean
* description: Whether to use advanced context extraction
* showThinking:
* type: boolean
* description: Whether to show thinking process in the response
* responses:
* '200':
* description: Streaming started successfully
* '404':
* description: Session not found
* '500':
* description: Error processing request
* security:
* - session: []
* tags: ["llm"]
*/
async function streamMessage(req: Request, res: Response) {
log.info("=== Starting streamMessage ===");
try {
const chatNoteId = req.params.chatNoteId;
const { content, useAdvancedContext, showThinking } = req.body;
if (!content || typeof content !== 'string' || content.trim().length === 0) {
throw new Error('Content cannot be empty');
}
// Check if session exists
const session = restChatService.getSessions().get(chatNoteId);
if (!session) {
throw new Error('Chat not found');
}
// Update last active timestamp
session.lastActive = new Date();
// Add user message to the session
session.messages.push({
role: 'user',
content,
timestamp: new Date()
});
// Create request parameters for the pipeline
const requestParams = {
chatNoteId: chatNoteId,
content,
useAdvancedContext: useAdvancedContext === true,
showThinking: showThinking === true,
stream: true // Always stream for this endpoint
};
// Create a fake request/response pair to pass to the handler
const fakeReq = {
...req,
method: 'GET', // Set to GET to indicate streaming
query: {
stream: 'true', // Set stream param - don't use format: 'stream' to avoid confusion
useAdvancedContext: String(useAdvancedContext === true),
showThinking: String(showThinking === true)
},
params: {
chatNoteId: chatNoteId
},
// Make sure the original content is available to the handler
body: {
content,
useAdvancedContext: useAdvancedContext === true,
showThinking: showThinking === true
}
} as unknown as Request;
// Log to verify correct parameters
log.info(`WebSocket stream settings - useAdvancedContext=${useAdvancedContext === true}, in query=${fakeReq.query.useAdvancedContext}, in body=${fakeReq.body.useAdvancedContext}`);
// Extra safety to ensure the parameters are passed correctly
if (useAdvancedContext === true) {
log.info(`Enhanced context IS enabled for this request`);
} else {
log.info(`Enhanced context is NOT enabled for this request`);
}
// Process the request in the background
Promise.resolve().then(async () => {
try {
await restChatService.handleSendMessage(fakeReq, res);
} catch (error) {
log.error(`Background message processing error: ${error}`);
// Import the WebSocket service
const wsService = (await import('../../services/ws.js')).default;
// Define LLMStreamMessage interface
interface LLMStreamMessage {
type: 'llm-stream';
chatNoteId: string;
content?: string;
thinking?: string;
toolExecution?: any;
done?: boolean;
error?: string;
raw?: unknown;
}
// Send error to client via WebSocket
wsService.sendMessageToAllClients({
type: 'llm-stream',
chatNoteId: chatNoteId,
error: `Error processing message: ${error}`,
done: true
} as LLMStreamMessage);
}
});
// Import the WebSocket service
const wsService = (await import('../../services/ws.js')).default;
// Let the client know streaming has started via WebSocket (helps client confirm connection is working)
wsService.sendMessageToAllClients({
type: 'llm-stream',
chatNoteId: chatNoteId,
thinking: 'Initializing streaming LLM response...'
});
// Let the client know streaming has started via HTTP response
return {
success: true,
message: 'Streaming started',
chatNoteId: chatNoteId
};
} catch (error: any) {
log.error(`Error starting message stream: ${error.message}`);
throw error;
}
}
export default {
// Chat management
createChat,
getChat,
updateChat,
listChats,
deleteChat,
// Chat session management
createSession,
getSession,
updateSession,
listSessions,
deleteSession,
sendMessage,
streamMessage,

View File

@ -392,15 +392,14 @@ function register(app: express.Application) {
etapiSpecRoute.register(router);
etapiBackupRoute.register(router);
// LLM chat session management endpoints
apiRoute(PST, "/api/llm/sessions", llmRoute.createSession);
apiRoute(GET, "/api/llm/sessions", llmRoute.listSessions);
apiRoute(GET, "/api/llm/sessions/:sessionId", llmRoute.getSession);
apiRoute(PATCH, "/api/llm/sessions/:sessionId", llmRoute.updateSession);
apiRoute(DEL, "/api/llm/sessions/:sessionId", llmRoute.deleteSession);
apiRoute(PST, "/api/llm/sessions/:sessionId/messages", llmRoute.sendMessage);
apiRoute(GET, "/api/llm/sessions/:sessionId/messages", llmRoute.sendMessage);
apiRoute(PST, "/api/llm/sessions/:sessionId/messages/stream", llmRoute.streamMessage);
// LLM Chat API
apiRoute(PST, "/api/llm/chat", llmRoute.createSession);
apiRoute(GET, "/api/llm/chat", llmRoute.listSessions);
apiRoute(GET, "/api/llm/chat/:sessionId", llmRoute.getSession);
apiRoute(PATCH, "/api/llm/chat/:sessionId", llmRoute.updateSession);
apiRoute(DEL, "/api/llm/chat/:chatNoteId", llmRoute.deleteSession);
apiRoute(PST, "/api/llm/chat/:chatNoteId/messages", llmRoute.sendMessage);
apiRoute(PST, "/api/llm/chat/:chatNoteId/messages/stream", llmRoute.streamMessage);
// LLM index management endpoints - reorganized for REST principles
apiRoute(GET, "/api/llm/indexes/stats", llmRoute.getIndexStats);

View File

@ -38,13 +38,13 @@ export class StreamHandler {
const wsService = (await import('../../../ws.js')).default;
let messageContent = '';
const sessionId = session.id;
const chatNoteId = session.id;
// Immediately send an initial message to confirm WebSocket connection is working
// This helps prevent timeouts on the client side
wsService.sendMessageToAllClients({
type: 'llm-stream',
sessionId,
chatNoteId,
thinking: 'Preparing response...'
} as LLMStreamMessage);
@ -64,19 +64,19 @@ export class StreamHandler {
// Send thinking state notification via WebSocket
wsService.sendMessageToAllClients({
type: 'llm-stream',
sessionId,
chatNoteId,
thinking: 'Analyzing tools needed for this request...'
} as LLMStreamMessage);
try {
// Execute the tools
const toolResults = await ToolHandler.executeToolCalls(response, sessionId);
const toolResults = await ToolHandler.executeToolCalls(response, chatNoteId);
// For each tool execution, send progress update via WebSocket
for (const toolResult of toolResults) {
wsService.sendMessageToAllClients({
type: 'llm-stream',
sessionId,
chatNoteId,
toolExecution: {
action: 'complete',
tool: toolResult.name,
@ -108,7 +108,7 @@ export class StreamHandler {
await this.processStreamedResponse(
followUpResponse,
wsService,
sessionId,
chatNoteId,
session,
toolMessages,
followUpOptions,
@ -120,7 +120,7 @@ export class StreamHandler {
// Send error via WebSocket with done flag
wsService.sendMessageToAllClients({
type: 'llm-stream',
sessionId,
chatNoteId,
error: `Error executing tools: ${toolError instanceof Error ? toolError.message : 'Unknown error'}`,
done: true
} as LLMStreamMessage);
@ -137,7 +137,7 @@ export class StreamHandler {
await this.processStreamedResponse(
response,
wsService,
sessionId,
chatNoteId,
session
);
} else {
@ -149,7 +149,7 @@ export class StreamHandler {
// Send via WebSocket - include both content and done flag in same message
wsService.sendMessageToAllClients({
type: 'llm-stream',
sessionId,
chatNoteId,
content: messageContent,
done: true
} as LLMStreamMessage);
@ -172,14 +172,14 @@ export class StreamHandler {
// Send error via WebSocket
wsService.sendMessageToAllClients({
type: 'llm-stream',
sessionId,
chatNoteId,
error: `Error generating response: ${streamingError instanceof Error ? streamingError.message : 'Unknown error'}`
} as LLMStreamMessage);
// Signal completion
wsService.sendMessageToAllClients({
type: 'llm-stream',
sessionId,
chatNoteId,
done: true
} as LLMStreamMessage);
}
@ -191,7 +191,7 @@ export class StreamHandler {
private static async processStreamedResponse(
response: any,
wsService: any,
sessionId: string,
chatNoteId: string,
session: ChatSession,
toolMessages?: any[],
followUpOptions?: any,
@ -213,7 +213,7 @@ export class StreamHandler {
// Send each individual chunk via WebSocket as it arrives
wsService.sendMessageToAllClients({
type: 'llm-stream',
sessionId,
chatNoteId,
content: chunk.text,
done: !!chunk.done, // Include done flag with each chunk
// Include any raw data from the provider that might contain thinking/tool info
@ -230,7 +230,7 @@ export class StreamHandler {
if (chunk.raw?.thinking) {
wsService.sendMessageToAllClients({
type: 'llm-stream',
sessionId,
chatNoteId,
thinking: chunk.raw.thinking
} as LLMStreamMessage);
}
@ -239,7 +239,7 @@ export class StreamHandler {
if (chunk.raw?.toolExecution) {
wsService.sendMessageToAllClients({
type: 'llm-stream',
sessionId,
chatNoteId,
toolExecution: chunk.raw.toolExecution
} as LLMStreamMessage);
}
@ -251,7 +251,7 @@ export class StreamHandler {
// Send tool execution notification
wsService.sendMessageToAllClients({
type: 'tool_execution_start',
sessionId
chatNoteId
} as LLMStreamMessage);
// Process each tool call
@ -270,7 +270,7 @@ export class StreamHandler {
// Format into a standardized tool execution message
wsService.sendMessageToAllClients({
type: 'tool_result',
sessionId,
chatNoteId,
toolExecution: {
action: 'executing',
tool: toolCall.function?.name || 'unknown',
@ -300,7 +300,7 @@ export class StreamHandler {
};
// Execute the next round of tools
const nextToolResults = await ToolHandler.executeToolCalls(response, sessionId);
const nextToolResults = await ToolHandler.executeToolCalls(response, chatNoteId);
// Create a new messages array with the latest tool results
const nextToolMessages = [...toolMessages, assistantMessage, ...nextToolResults];
@ -320,7 +320,7 @@ export class StreamHandler {
await this.processStreamedResponse(
nextResponse,
wsService,
sessionId,
chatNoteId,
session,
nextToolMessages,
nextFollowUpOptions,
@ -335,7 +335,7 @@ export class StreamHandler {
// Send final message with done flag only (no content)
wsService.sendMessageToAllClients({
type: 'llm-stream',
sessionId,
chatNoteId,
done: true
} as LLMStreamMessage);
}
@ -357,7 +357,7 @@ export class StreamHandler {
// Report the error to the client
wsService.sendMessageToAllClients({
type: 'llm-stream',
sessionId,
chatNoteId,
error: `Error during streaming: ${streamError instanceof Error ? streamError.message : 'Unknown error'}`,
done: true
} as LLMStreamMessage);

View File

@ -12,9 +12,9 @@ export class ToolHandler {
/**
* Execute tool calls from the LLM response
* @param response The LLM response containing tool calls
* @param sessionId Optional session ID for tracking
* @param chatNoteId Optional chat note ID for tracking
*/
static async executeToolCalls(response: any, sessionId?: string): Promise<Message[]> {
static async executeToolCalls(response: any, chatNoteId?: string): Promise<Message[]> {
log.info(`========== TOOL EXECUTION FLOW ==========`);
if (!response.tool_calls || response.tool_calls.length === 0) {
log.info(`No tool calls to execute, returning early`);
@ -101,9 +101,9 @@ export class ToolHandler {
: JSON.stringify(result).substring(0, 100) + '...';
log.info(`Tool result: ${resultPreview}`);
// Record tool execution in session if session ID is provided
if (sessionId) {
SessionsStore.recordToolExecution(sessionId, toolCall, typeof result === 'string' ? result : JSON.stringify(result));
// Record tool execution in session if chatNoteId is provided
if (chatNoteId) {
SessionsStore.recordToolExecution(chatNoteId, toolCall, typeof result === 'string' ? result : JSON.stringify(result));
}
// Format result as a proper message
@ -116,9 +116,9 @@ export class ToolHandler {
} catch (error: any) {
log.error(`Error executing tool ${toolCall.function.name}: ${error.message}`);
// Record error in session if session ID is provided
if (sessionId) {
SessionsStore.recordToolExecution(sessionId, toolCall, '', error.message);
// Record error in session if chatNoteId is provided
if (chatNoteId) {
SessionsStore.recordToolExecution(chatNoteId, toolCall, '', error.message);
}
// Return error as tool result

View File

@ -108,8 +108,8 @@ class RestChatService {
log.info(`Parameters from body: useAdvancedContext=${req.body?.useAdvancedContext}, showThinking=${req.body?.showThinking}, content=${content ? `${content.substring(0, 20)}...` : 'none'}`);
}
// Get chatNoteId from URL params since it's part of the route
chatNoteId = req.params.chatNoteId || req.params.sessionId; // Support both names for backward compatibility
// Get chatNoteId from URL params
chatNoteId = req.params.chatNoteId;
// For GET requests, ensure we have the stream parameter
if (req.method === 'GET' && req.query.stream !== 'true') {
@ -134,16 +134,17 @@ class RestChatService {
log.info(`No Chat Note found for ${chatNoteId}, creating a new Chat Note and session`);
// Create a new Chat Note via the storage service
const chatStorageService = (await import('../../llm/chat_storage_service.js')).default;
const newChat = await chatStorageService.createChat('New Chat');
//const chatStorageService = (await import('../../llm/chat_storage_service.js')).default;
//const newChat = await chatStorageService.createChat('New Chat');
// Use the new Chat Note's ID for the session
session = SessionsStore.createSession({
title: newChat.title
//title: newChat.title,
chatNoteId: chatNoteId
});
// Update the session ID to match the Chat Note ID
session.id = newChat.id;
session.id = chatNoteId;
log.info(`Created new Chat Note and session with ID: ${session.id}`);
@ -271,7 +272,7 @@ class RestChatService {
// GET requests or format=stream parameter indicates streaming should be used
stream: !!(req.method === 'GET' || req.query.format === 'stream' || req.query.stream === 'true'),
// Include chatNoteId for tracking tool executions
sessionId: chatNoteId // Use sessionId property for backward compatibility
chatNoteId: chatNoteId
};
// Log the options to verify what's being sent to the pipeline
@ -312,7 +313,7 @@ class RestChatService {
try {
wsService.default.sendMessageToAllClients({
type: 'llm-stream',
sessionId: chatNoteId, // Use sessionId property for backward compatibility
chatNoteId: chatNoteId,
error: `Stream error: ${error instanceof Error ? error.message : 'Unknown error'}`,
done: true
});
@ -394,7 +395,7 @@ class RestChatService {
// Create a message object with all necessary fields
const message: LLMStreamMessage = {
type: 'llm-stream',
sessionId: chatNoteId // Use sessionId property for backward compatibility
chatNoteId: chatNoteId
};
// Add content if available - either the new chunk or full content on completion
@ -479,8 +480,27 @@ class RestChatService {
const options: any = req.body || {};
const title = options.title || 'Chat Session';
// Use the currentNoteId as the chatNoteId if provided
let chatNoteId = options.chatNoteId;
// If currentNoteId is provided but chatNoteId is not, use currentNoteId
if (!chatNoteId && options.currentNoteId) {
chatNoteId = options.currentNoteId;
log.info(`Using provided currentNoteId ${chatNoteId} as chatNoteId`);
}
// If we still don't have a chatNoteId, create a new Chat Note
if (!chatNoteId) {
// Create a new Chat Note via the storage service
const chatStorageService = (await import('../../llm/chat_storage_service.js')).default;
const newChat = await chatStorageService.createChat(title);
chatNoteId = newChat.id;
log.info(`Created new Chat Note with ID: ${chatNoteId}`);
}
// Create a new session through our session store
const session = SessionsStore.createSession({
chatNoteId,
title,
systemPrompt: options.systemPrompt,
contextNoteId: options.contextNoteId,
@ -493,7 +513,8 @@ class RestChatService {
return {
id: session.id,
title: session.title,
createdAt: session.createdAt
createdAt: session.createdAt,
noteId: chatNoteId // Return the note ID explicitly
};
} catch (error: any) {
log.error(`Error creating LLM session: ${error.message || 'Unknown error'}`);

View File

@ -59,6 +59,7 @@ class SessionsStore {
* Create a new session
*/
createSession(options: {
chatNoteId: string;
title?: string;
systemPrompt?: string;
contextNoteId?: string;
@ -70,7 +71,7 @@ class SessionsStore {
this.initializeCleanupTimer();
const title = options.title || 'Chat Session';
const sessionId = randomString(16);
const sessionId = options.chatNoteId;
const now = new Date();
// Initial system message if provided
@ -103,7 +104,7 @@ class SessionsStore {
};
sessions.set(sessionId, session);
log.info(`Created new session with ID: ${sessionId}`);
log.info(`Created in-memory session for Chat Note ID: ${sessionId}`);
return session;
}
@ -131,10 +132,10 @@ class SessionsStore {
/**
* Record a tool execution in the session metadata
*/
recordToolExecution(sessionId: string, tool: any, result: string, error?: string): void {
if (!sessionId) return;
recordToolExecution(chatNoteId: string, tool: any, result: string, error?: string): void {
if (!chatNoteId) return;
const session = sessions.get(sessionId);
const session = sessions.get(chatNoteId);
if (!session) return;
try {
@ -156,7 +157,7 @@ class SessionsStore {
toolExecutions.push(execution);
session.metadata.toolExecutions = toolExecutions;
log.info(`Recorded tool execution for ${execution.name} in session ${sessionId}`);
log.info(`Recorded tool execution for ${execution.name} in session ${chatNoteId}`);
} catch (err) {
log.error(`Failed to record tool execution: ${err}`);
}

View File

@ -7,7 +7,7 @@
*/
export interface LLMStreamMessage {
type: 'llm-stream' | 'tool_execution_start' | 'tool_result' | 'tool_execution_error' | 'tool_completion_processing';
sessionId: string;
chatNoteId: string;
content?: string;
thinking?: string;
toolExecution?: {

View File

@ -58,7 +58,7 @@ interface Message {
filePath?: string;
// LLM streaming specific fields
sessionId?: string;
chatNoteId?: string;
content?: string;
thinking?: string;
toolExecution?: {
@ -133,7 +133,7 @@ function sendMessageToAllClients(message: Message) {
if (webSocketServer) {
// Special logging for LLM streaming messages
if (message.type === "llm-stream") {
log.info(`[WS-SERVER] Sending LLM stream message: sessionId=${message.sessionId}, content=${!!message.content}, thinking=${!!message.thinking}, toolExecution=${!!message.toolExecution}, done=${!!message.done}`);
log.info(`[WS-SERVER] Sending LLM stream message: chatNoteId=${message.chatNoteId}, content=${!!message.content}, thinking=${!!message.thinking}, toolExecution=${!!message.toolExecution}, done=${!!message.done}`);
} else if (message.type !== "sync-failed" && message.type !== "api-log-messages") {
log.info(`Sending message to all clients: ${jsonStr}`);
}