import type { Request, Response } from "express";
import log from "../../services/log.js";
import options from "../../services/options.js";

// Import the index service for knowledge base management
import indexService from "../../services/llm/index_service.js";
import restChatService from "../../services/llm/rest_chat_service.js";
// Define basic interfaces

/** A single message exchanged within a chat session. */
interface ChatMessage {
    role: 'user' | 'assistant' | 'system';
    content: string;
    timestamp?: Date;
}
/** Server-side state for one chat conversation. */
interface ChatSession {
    id: string;
    title: string;
    messages: ChatMessage[];
    createdAt: Date;
    lastActive: Date;
    noteContext?: string; // Optional noteId that provides context
    metadata: Record<string, any>;
}
/** A note surfaced as a context/similarity source for an LLM query. */
interface NoteSource {
    noteId: string;
    title: string;
    content?: string;
    similarity?: number;
    branchId?: string;
}
/** Optional settings accepted when creating or updating a chat session. */
interface SessionOptions {
    title?: string;
    systemPrompt?: string;
    temperature?: number;
    maxTokens?: number;
    model?: string;
    provider?: string;
    contextNoteId?: string;
}
// In-memory storage for sessions
// In a production app, this should be stored in a database
2025-04-02 19:14:26 +00:00
const sessions = restChatService . getSessions ( ) ;
2025-03-09 02:19:26 +00:00
// Flag to track if cleanup timer has been initialized
let cleanupInitialized = false ;
/ * *
2025-03-11 23:04:51 +00:00
* Initialize the session cleanup timer to remove old / inactive sessions
2025-03-09 02:19:26 +00:00
* Only call this after database is initialized
* /
function initializeCleanupTimer() {
2025-04-02 19:14:26 +00:00
restChatService . initializeCleanupTimer ( ) ;
2025-03-09 02:19:26 +00:00
cleanupInitialized = true ;
}
/ * *
* Check if the database is initialized
* /
function isDatabaseInitialized ( ) : boolean {
2025-04-02 19:14:26 +00:00
return restChatService . isDatabaseInitialized ( ) ;
2025-03-09 02:19:26 +00:00
}
/ * *
* Get the AI service manager in a way that doesn ' t crash at startup
* /
function safelyUseAIManager ( ) : boolean {
2025-04-02 19:14:26 +00:00
return restChatService . safelyUseAIManager ( ) ;
2025-03-09 02:19:26 +00:00
}
/ * *
2025-03-26 19:19:19 +00:00
* @swagger
* / a p i / l l m / s e s s i o n s :
* post :
* summary : Create a new LLM chat session
* operationId : llm - create - session
* requestBody :
* required : true
* content :
* application / json :
* schema :
* type : object
* properties :
* title :
* type : string
* description : Title for the chat session
* systemPrompt :
* type : string
* description : System message to set the behavior of the assistant
* temperature :
* type : number
* description : Temperature parameter for the LLM ( 0.0 - 1.0 )
* maxTokens :
* type : integer
* description : Maximum tokens to generate in responses
* model :
* type : string
* description : Specific model to use ( depends on provider )
* provider :
* type : string
* description : LLM provider to use ( e . g . , 'openai' , 'anthropic' , 'ollama' )
* contextNoteId :
* type : string
* description : Note ID to use as context for the session
* responses :
* '200' :
* description : Successfully created session
* content :
* application / json :
* schema :
* type : object
* properties :
* sessionId :
* type : string
* title :
* type : string
* createdAt :
* type : string
* format : date - time
* security :
* - session : [ ]
* tags : [ "llm" ]
2025-03-09 02:19:26 +00:00
* /
async function createSession ( req : Request , res : Response ) {
2025-04-02 19:14:26 +00:00
return restChatService . createSession ( req , res ) ;
2025-03-09 02:19:26 +00:00
}
/ * *
2025-03-26 19:19:19 +00:00
* @swagger
* / a p i / l l m / s e s s i o n s / { s e s s i o n I d } :
* get :
* summary : Retrieve a specific chat session by ID
* operationId : llm - get - session
* parameters :
* - name : sessionId
* in : path
* required : true
* schema :
* type : string
* responses :
* '200' :
* description : Chat session details
* content :
* application / json :
* schema :
* type : object
* properties :
* id :
* type : string
* title :
* type : string
* messages :
* type : array
* items :
* type : object
* properties :
* role :
* type : string
* enum : [ user , assistant , system ]
* content :
* type : string
* timestamp :
* type : string
* format : date - time
* createdAt :
* type : string
* format : date - time
* lastActive :
* type : string
* format : date - time
* '404' :
* description : Session not found
* security :
* - session : [ ]
* tags : [ "llm" ]
2025-03-09 02:19:26 +00:00
* /
async function getSession ( req : Request , res : Response ) {
2025-04-02 19:14:26 +00:00
return restChatService . getSession ( req , res ) ;
2025-03-09 02:19:26 +00:00
}
/ * *
2025-03-26 19:19:19 +00:00
* @swagger
* / a p i / l l m / s e s s i o n s / { s e s s i o n I d } :
* put :
* summary : Update a chat session ' s settings
* operationId : llm - update - session
* parameters :
* - name : sessionId
* in : path
* required : true
* schema :
* type : string
* requestBody :
* required : true
* content :
* application / json :
* schema :
* type : object
* properties :
* title :
* type : string
* description : Updated title for the session
* systemPrompt :
* type : string
* description : Updated system prompt
* temperature :
* type : number
* description : Updated temperature setting
* maxTokens :
* type : integer
* description : Updated maximum tokens setting
* model :
* type : string
* description : Updated model selection
* provider :
* type : string
* description : Updated provider selection
* contextNoteId :
* type : string
* description : Updated note ID for context
* responses :
* '200' :
* description : Session successfully updated
* content :
* application / json :
* schema :
* type : object
* properties :
* id :
* type : string
* title :
* type : string
* updatedAt :
* type : string
* format : date - time
* '404' :
* description : Session not found
* security :
* - session : [ ]
* tags : [ "llm" ]
2025-03-09 02:19:26 +00:00
* /
async function updateSession ( req : Request , res : Response ) {
2025-04-02 19:14:26 +00:00
return restChatService . updateSession ( req , res ) ;
2025-03-09 02:19:26 +00:00
}
/ * *
2025-03-26 19:19:19 +00:00
* @swagger
* / a p i / l l m / s e s s i o n s :
* get :
* summary : List all chat sessions
* operationId : llm - list - sessions
* responses :
* '200' :
* description : List of chat sessions
* content :
* application / json :
* schema :
* type : array
* items :
* type : object
* properties :
* id :
* type : string
* title :
* type : string
* createdAt :
* type : string
* format : date - time
* lastActive :
* type : string
* format : date - time
* messageCount :
* type : integer
* security :
* - session : [ ]
* tags : [ "llm" ]
2025-03-09 02:19:26 +00:00
* /
async function listSessions ( req : Request , res : Response ) {
2025-04-02 19:14:26 +00:00
return restChatService . listSessions ( req , res ) ;
2025-03-09 02:19:26 +00:00
}
/ * *
2025-03-26 19:19:19 +00:00
* @swagger
* / a p i / l l m / s e s s i o n s / { s e s s i o n I d } :
* delete :
* summary : Delete a chat session
* operationId : llm - delete - session
* parameters :
* - name : sessionId
* in : path
* required : true
* schema :
* type : string
* responses :
* '200' :
* description : Session successfully deleted
* '404' :
* description : Session not found
* security :
* - session : [ ]
* tags : [ "llm" ]
2025-03-09 02:19:26 +00:00
* /
async function deleteSession ( req : Request , res : Response ) {
2025-04-02 19:14:26 +00:00
return restChatService . deleteSession ( req , res ) ;
2025-03-09 02:19:26 +00:00
}
/ * *
2025-03-10 03:34:48 +00:00
* Find relevant notes based on search query
2025-03-09 02:19:26 +00:00
* /
2025-03-10 03:34:48 +00:00
async function findRelevantNotes ( content : string , contextNoteId : string | null = null , limit = 5 ) : Promise < NoteSource [ ] > {
2025-04-02 19:14:26 +00:00
return restChatService . findRelevantNotes ( content , contextNoteId , limit ) ;
2025-03-09 02:19:26 +00:00
}
/ * *
2025-03-20 00:06:56 +00:00
* Build a prompt with context from relevant notes
2025-03-09 02:19:26 +00:00
* /
function buildContextFromNotes ( sources : NoteSource [ ] , query : string ) : string {
2025-04-02 19:14:26 +00:00
return restChatService . buildContextFromNotes ( sources , query ) ;
2025-03-09 02:19:26 +00:00
}
/ * *
2025-03-26 19:19:19 +00:00
* @swagger
* / a p i / l l m / s e s s i o n s / { s e s s i o n I d } / m e s s a g e s :
* post :
* summary : Send a message to an LLM and get a response
* operationId : llm - send - message
* parameters :
* - name : sessionId
* in : path
* required : true
* schema :
* type : string
* requestBody :
* required : true
* content :
* application / json :
* schema :
* type : object
* properties :
* message :
* type : string
* description : The user message to send to the LLM
* options :
* type : object
* description : Optional parameters for this specific message
* properties :
* temperature :
* type : number
* maxTokens :
* type : integer
* model :
* type : string
* provider :
* type : string
* includeContext :
* type : boolean
* description : Whether to include relevant notes as context
* useNoteContext :
* type : boolean
* description : Whether to use the session ' s context note
* responses :
* '200' :
* description : LLM response
* content :
* application / json :
* schema :
* type : object
* properties :
* response :
* type : string
* sources :
* type : array
* items :
* type : object
* properties :
* noteId :
* type : string
* title :
* type : string
* similarity :
* type : number
* sessionId :
* type : string
* '404' :
* description : Session not found
* '500' :
* description : Error processing request
* security :
* - session : [ ]
* tags : [ "llm" ]
2025-03-09 02:19:26 +00:00
* /
async function sendMessage ( req : Request , res : Response ) {
2025-04-02 19:14:26 +00:00
return restChatService . handleSendMessage ( req , res ) ;
2025-03-09 02:19:26 +00:00
}
2025-03-11 23:26:47 +00:00
/ * *
2025-03-26 19:19:19 +00:00
* @swagger
2025-04-01 10:55:20 -07:00
* / a p i / l l m / i n d e x e s / s t a t s :
2025-03-26 19:19:19 +00:00
* get :
2025-04-01 10:55:20 -07:00
* summary : Get stats about the LLM knowledge base indexing status
2025-03-26 19:19:19 +00:00
* operationId : llm - index - stats
* responses :
* '200' :
2025-04-01 10:55:20 -07:00
* description : Index stats successfully retrieved
2025-03-26 19:19:19 +00:00
* security :
* - session : [ ]
* tags : [ "llm" ]
2025-03-11 23:26:47 +00:00
* /
async function getIndexStats ( req : Request , res : Response ) {
try {
2025-04-01 10:55:20 -07:00
// Check if AI is enabled
const aiEnabled = await options . getOptionBool ( 'aiEnabled' ) ;
if ( ! aiEnabled ) {
return {
success : false ,
message : "AI features are disabled"
} ;
2025-03-11 23:26:47 +00:00
}
2025-04-01 10:55:20 -07:00
// Return indexing stats
2025-03-11 23:26:47 +00:00
const stats = await indexService . getIndexingStats ( ) ;
2025-04-01 10:55:20 -07:00
return {
success : true ,
. . . stats
} ;
2025-03-11 23:26:47 +00:00
} catch ( error : any ) {
log . error ( ` Error getting index stats: ${ error . message || 'Unknown error' } ` ) ;
throw new Error ( ` Failed to get index stats: ${ error . message || 'Unknown error' } ` ) ;
}
}
/ * *
2025-03-26 19:19:19 +00:00
* @swagger
2025-04-01 10:55:20 -07:00
* / a p i / l l m / i n d e x e s :
2025-03-26 19:19:19 +00:00
* post :
2025-04-01 10:55:20 -07:00
* summary : Start or continue indexing the knowledge base
2025-03-26 19:19:19 +00:00
* operationId : llm - start - indexing
* requestBody :
* required : false
* content :
* application / json :
* schema :
* type : object
* properties :
2025-04-01 10:55:20 -07:00
* force :
2025-03-26 19:19:19 +00:00
* type : boolean
* description : Whether to force reindexing of all notes
* responses :
* '200' :
2025-04-01 10:55:20 -07:00
* description : Indexing started successfully
2025-03-26 19:19:19 +00:00
* security :
* - session : [ ]
* tags : [ "llm" ]
2025-03-11 23:26:47 +00:00
* /
async function startIndexing ( req : Request , res : Response ) {
try {
2025-04-01 10:55:20 -07:00
// Check if AI is enabled
const aiEnabled = await options . getOptionBool ( 'aiEnabled' ) ;
if ( ! aiEnabled ) {
2025-03-11 23:26:47 +00:00
return {
2025-04-01 10:55:20 -07:00
success : false ,
message : "AI features are disabled"
2025-03-11 23:26:47 +00:00
} ;
}
2025-04-01 10:55:20 -07:00
const { force = false } = req . body ;
// Start indexing
await indexService . startFullIndexing ( force ) ;
return {
success : true ,
message : "Indexing started"
} ;
2025-03-11 23:26:47 +00:00
} catch ( error : any ) {
log . error ( ` Error starting indexing: ${ error . message || 'Unknown error' } ` ) ;
throw new Error ( ` Failed to start indexing: ${ error . message || 'Unknown error' } ` ) ;
}
}
/ * *
2025-03-26 19:19:19 +00:00
* @swagger
2025-04-01 10:55:20 -07:00
* / a p i / l l m / i n d e x e s / f a i l e d :
2025-03-26 19:19:19 +00:00
* get :
2025-04-01 10:55:20 -07:00
* summary : Get list of notes that failed to index
2025-03-26 19:19:19 +00:00
* operationId : llm - failed - indexes
2025-04-01 10:55:20 -07:00
* parameters :
* - name : limit
* in : query
* required : false
* schema :
* type : integer
* default : 100
2025-03-26 19:19:19 +00:00
* responses :
* '200' :
2025-04-01 10:55:20 -07:00
* description : Failed indexes successfully retrieved
2025-03-26 19:19:19 +00:00
* security :
* - session : [ ]
* tags : [ "llm" ]
2025-03-11 23:26:47 +00:00
* /
async function getFailedIndexes ( req : Request , res : Response ) {
try {
2025-04-01 10:55:20 -07:00
// Check if AI is enabled
const aiEnabled = await options . getOptionBool ( 'aiEnabled' ) ;
if ( ! aiEnabled ) {
return {
success : false ,
message : "AI features are disabled"
} ;
2025-03-11 23:26:47 +00:00
}
2025-04-01 10:55:20 -07:00
const limit = parseInt ( req . query . limit as string || "100" , 10 ) ;
// Get failed indexes
const failed = await indexService . getFailedIndexes ( limit ) ;
2025-03-12 00:02:02 +00:00
2025-03-11 23:26:47 +00:00
return {
2025-04-01 10:55:20 -07:00
success : true ,
failed
2025-03-11 23:26:47 +00:00
} ;
} catch ( error : any ) {
log . error ( ` Error getting failed indexes: ${ error . message || 'Unknown error' } ` ) ;
throw new Error ( ` Failed to get failed indexes: ${ error . message || 'Unknown error' } ` ) ;
}
}
/ * *
2025-03-26 19:19:19 +00:00
* @swagger
2025-04-01 10:55:20 -07:00
* / a p i / l l m / i n d e x e s / n o t e s / { n o t e I d } :
* put :
* summary : Retry indexing a specific note that previously failed
* operationId : llm - retry - index
2025-03-26 19:19:19 +00:00
* parameters :
* - name : noteId
* in : path
* required : true
* schema :
* type : string
* responses :
* '200' :
2025-04-01 10:55:20 -07:00
* description : Index retry successfully initiated
2025-03-26 19:19:19 +00:00
* security :
* - session : [ ]
* tags : [ "llm" ]
2025-03-11 23:26:47 +00:00
* /
async function retryFailedIndex ( req : Request , res : Response ) {
try {
2025-04-01 10:55:20 -07:00
// Check if AI is enabled
const aiEnabled = await options . getOptionBool ( 'aiEnabled' ) ;
if ( ! aiEnabled ) {
return {
success : false ,
message : "AI features are disabled"
} ;
2025-03-11 23:26:47 +00:00
}
const { noteId } = req . params ;
2025-04-01 10:55:20 -07:00
// Retry indexing the note
const result = await indexService . retryFailedNote ( noteId ) ;
2025-03-12 00:02:02 +00:00
2025-03-11 23:26:47 +00:00
return {
2025-04-01 10:55:20 -07:00
success : true ,
message : result ? "Note queued for indexing" : "Failed to queue note for indexing"
2025-03-11 23:26:47 +00:00
} ;
} catch ( error : any ) {
log . error ( ` Error retrying failed index: ${ error . message || 'Unknown error' } ` ) ;
throw new Error ( ` Failed to retry index: ${ error . message || 'Unknown error' } ` ) ;
}
}
/ * *
2025-03-26 19:19:19 +00:00
* @swagger
2025-04-01 10:55:20 -07:00
* / a p i / l l m / i n d e x e s / f a i l e d :
* put :
2025-03-26 19:19:19 +00:00
* summary : Retry indexing all failed notes
2025-04-01 10:55:20 -07:00
* operationId : llm - retry - all - indexes
2025-03-26 19:19:19 +00:00
* responses :
* '200' :
2025-04-01 10:55:20 -07:00
* description : Retry of all failed indexes successfully initiated
2025-03-26 19:19:19 +00:00
* security :
* - session : [ ]
* tags : [ "llm" ]
2025-03-11 23:26:47 +00:00
* /
async function retryAllFailedIndexes ( req : Request , res : Response ) {
try {
2025-04-01 10:55:20 -07:00
// Check if AI is enabled
const aiEnabled = await options . getOptionBool ( 'aiEnabled' ) ;
if ( ! aiEnabled ) {
return {
success : false ,
message : "AI features are disabled"
} ;
2025-03-11 23:26:47 +00:00
}
2025-04-01 10:55:20 -07:00
// Retry all failed notes
2025-03-11 23:26:47 +00:00
const count = await indexService . retryAllFailedNotes ( ) ;
2025-03-12 00:02:02 +00:00
2025-03-11 23:26:47 +00:00
return {
success : true ,
2025-04-01 10:55:20 -07:00
message : ` ${ count } notes queued for reprocessing `
2025-03-11 23:26:47 +00:00
} ;
} catch ( error : any ) {
log . error ( ` Error retrying all failed indexes: ${ error . message || 'Unknown error' } ` ) ;
2025-04-01 10:55:20 -07:00
throw new Error ( ` Failed to retry all indexes: ${ error . message || 'Unknown error' } ` ) ;
2025-03-11 23:26:47 +00:00
}
}
/ * *
2025-03-26 19:19:19 +00:00
* @swagger
2025-04-01 10:55:20 -07:00
* / a p i / l l m / i n d e x e s / n o t e s / s i m i l a r :
* get :
* summary : Find notes similar to a query string
* operationId : llm - find - similar - notes
* parameters :
* - name : query
* in : query
* required : true
* schema :
* type : string
* - name : contextNoteId
* in : query
* required : false
* schema :
* type : string
* - name : limit
* in : query
* required : false
* schema :
* type : integer
* default : 5
2025-03-26 19:19:19 +00:00
* responses :
* '200' :
2025-04-01 10:55:20 -07:00
* description : Similar notes found successfully
2025-03-26 19:19:19 +00:00
* security :
* - session : [ ]
* tags : [ "llm" ]
2025-03-11 23:26:47 +00:00
* /
async function findSimilarNotes ( req : Request , res : Response ) {
try {
2025-04-01 10:55:20 -07:00
// Check if AI is enabled
const aiEnabled = await options . getOptionBool ( 'aiEnabled' ) ;
if ( ! aiEnabled ) {
return {
success : false ,
message : "AI features are disabled"
} ;
2025-03-11 23:26:47 +00:00
}
2025-04-01 10:55:20 -07:00
const query = req . query . query as string ;
const contextNoteId = req . query . contextNoteId as string | undefined ;
const limit = parseInt ( req . query . limit as string || "5" , 10 ) ;
2025-03-12 00:02:02 +00:00
2025-04-01 10:55:20 -07:00
if ( ! query ) {
return {
success : false ,
message : "Query is required"
} ;
2025-03-11 23:26:47 +00:00
}
2025-04-01 10:55:20 -07:00
// Find similar notes
const similar = await indexService . findSimilarNotes ( query , contextNoteId , limit ) ;
2025-03-12 00:02:02 +00:00
2025-03-11 23:26:47 +00:00
return {
2025-04-01 10:55:20 -07:00
success : true ,
similar
2025-03-11 23:26:47 +00:00
} ;
} catch ( error : any ) {
log . error ( ` Error finding similar notes: ${ error . message || 'Unknown error' } ` ) ;
throw new Error ( ` Failed to find similar notes: ${ error . message || 'Unknown error' } ` ) ;
}
}
/ * *
2025-03-26 19:19:19 +00:00
* @swagger
2025-04-01 10:55:20 -07:00
* / a p i / l l m / i n d e x e s / c o n t e x t :
* get :
* summary : Generate context for an LLM query based on the knowledge base
2025-03-26 19:19:19 +00:00
* operationId : llm - generate - context
2025-04-01 10:55:20 -07:00
* parameters :
* - name : query
* in : query
* required : true
* schema :
* type : string
* - name : contextNoteId
* in : query
* required : false
* schema :
* type : string
* - name : depth
* in : query
* required : false
* schema :
* type : integer
* default : 2
2025-03-26 19:19:19 +00:00
* responses :
* '200' :
2025-04-01 10:55:20 -07:00
* description : Context generated successfully
2025-03-26 19:19:19 +00:00
* security :
* - session : [ ]
* tags : [ "llm" ]
2025-03-11 23:26:47 +00:00
* /
async function generateQueryContext ( req : Request , res : Response ) {
try {
2025-04-01 10:55:20 -07:00
// Check if AI is enabled
const aiEnabled = await options . getOptionBool ( 'aiEnabled' ) ;
if ( ! aiEnabled ) {
return {
success : false ,
message : "AI features are disabled"
} ;
2025-03-11 23:26:47 +00:00
}
2025-04-01 10:55:20 -07:00
const query = req . query . query as string ;
const contextNoteId = req . query . contextNoteId as string | undefined ;
const depth = parseInt ( req . query . depth as string || "2" , 10 ) ;
2025-03-12 00:02:02 +00:00
2025-04-01 10:55:20 -07:00
if ( ! query ) {
return {
success : false ,
message : "Query is required"
} ;
2025-03-11 23:26:47 +00:00
}
2025-04-01 10:55:20 -07:00
// Generate context
const context = await indexService . generateQueryContext ( query , contextNoteId , depth ) ;
2025-03-12 00:02:02 +00:00
2025-03-11 23:26:47 +00:00
return {
2025-04-01 10:55:20 -07:00
success : true ,
context
2025-03-11 23:26:47 +00:00
} ;
} catch ( error : any ) {
log . error ( ` Error generating query context: ${ error . message || 'Unknown error' } ` ) ;
throw new Error ( ` Failed to generate query context: ${ error . message || 'Unknown error' } ` ) ;
}
}
/ * *
2025-03-26 19:19:19 +00:00
* @swagger
2025-04-01 10:55:20 -07:00
* / a p i / l l m / i n d e x e s / n o t e s / { n o t e I d } :
2025-03-26 19:19:19 +00:00
* post :
2025-04-01 10:55:20 -07:00
* summary : Index a specific note for LLM knowledge base
2025-03-26 19:19:19 +00:00
* operationId : llm - index - note
* parameters :
* - name : noteId
* in : path
* required : true
* schema :
* type : string
* responses :
* '200' :
2025-04-01 10:55:20 -07:00
* description : Note indexed successfully
2025-03-26 19:19:19 +00:00
* security :
* - session : [ ]
* tags : [ "llm" ]
2025-03-11 23:26:47 +00:00
* /
async function indexNote ( req : Request , res : Response ) {
try {
2025-04-01 10:55:20 -07:00
// Check if AI is enabled
const aiEnabled = await options . getOptionBool ( 'aiEnabled' ) ;
if ( ! aiEnabled ) {
return {
success : false ,
message : "AI features are disabled"
} ;
2025-03-11 23:26:47 +00:00
}
const { noteId } = req . params ;
2025-04-01 10:55:20 -07:00
if ( ! noteId ) {
return {
success : false ,
message : "Note ID is required"
} ;
2025-03-11 23:26:47 +00:00
}
2025-04-01 10:55:20 -07:00
// Index the note
const result = await indexService . generateNoteIndex ( noteId ) ;
2025-03-12 00:02:02 +00:00
2025-03-11 23:26:47 +00:00
return {
2025-04-01 10:55:20 -07:00
success : true ,
message : result ? "Note indexed successfully" : "Failed to index note"
2025-03-11 23:26:47 +00:00
} ;
} catch ( error : any ) {
log . error ( ` Error indexing note: ${ error . message || 'Unknown error' } ` ) ;
throw new Error ( ` Failed to index note: ${ error . message || 'Unknown error' } ` ) ;
}
}
2025-04-10 21:00:12 +00:00
/ * *
* @swagger
* / a p i / l l m / s e s s i o n s / { s e s s i o n I d } / m e s s a g e s / s t r e a m :
* post :
* summary : Start a streaming response session via WebSockets
* operationId : llm - stream - message
* parameters :
* - name : sessionId
* in : path
* required : true
* schema :
* type : string
* requestBody :
* required : true
* content :
* application / json :
* schema :
* type : object
* properties :
* content :
* type : string
* description : The user message to send to the LLM
* useAdvancedContext :
* type : boolean
* description : Whether to use advanced context extraction
* showThinking :
* type : boolean
* description : Whether to show thinking process in the response
* responses :
* '200' :
* description : Streaming started successfully
* '404' :
* description : Session not found
* '500' :
* description : Error processing request
* security :
* - session : [ ]
* tags : [ "llm" ]
* /
async function streamMessage ( req : Request , res : Response ) {
log . info ( "=== Starting streamMessage ===" ) ;
try {
const sessionId = req . params . sessionId ;
const { content , useAdvancedContext , showThinking } = req . body ;
if ( ! content || typeof content !== 'string' || content . trim ( ) . length === 0 ) {
throw new Error ( 'Content cannot be empty' ) ;
}
// Check if session exists
const session = restChatService . getSessions ( ) . get ( sessionId ) ;
if ( ! session ) {
throw new Error ( 'Session not found' ) ;
}
// Update last active timestamp
session . lastActive = new Date ( ) ;
// Add user message to the session
session . messages . push ( {
role : 'user' ,
content ,
timestamp : new Date ( )
} ) ;
// Create request parameters for the pipeline
const requestParams = {
sessionId ,
content ,
useAdvancedContext : useAdvancedContext === true ,
showThinking : showThinking === true ,
stream : true // Always stream for this endpoint
} ;
// Create a fake request/response pair to pass to the handler
const fakeReq = {
. . . req ,
method : 'GET' , // Set to GET to indicate streaming
query : {
stream : 'true' , // Set stream param - don't use format: 'stream' to avoid confusion
useAdvancedContext : String ( useAdvancedContext === true ) ,
showThinking : String ( showThinking === true )
} ,
params : {
sessionId
} ,
// Make sure the original content is available to the handler
body : {
content ,
useAdvancedContext : useAdvancedContext === true ,
showThinking : showThinking === true
}
} as unknown as Request ;
// Log to verify correct parameters
log . info ( ` WebSocket stream settings - useAdvancedContext= ${ useAdvancedContext === true } , in query= ${ fakeReq . query . useAdvancedContext } , in body= ${ fakeReq . body . useAdvancedContext } ` ) ;
// Extra safety to ensure the parameters are passed correctly
if ( useAdvancedContext === true ) {
log . info ( ` Enhanced context IS enabled for this request ` ) ;
} else {
log . info ( ` Enhanced context is NOT enabled for this request ` ) ;
}
// Process the request in the background
Promise . resolve ( ) . then ( async ( ) = > {
try {
await restChatService . handleSendMessage ( fakeReq , res ) ;
} catch ( error ) {
log . error ( ` Background message processing error: ${ error } ` ) ;
// Import the WebSocket service
const wsService = ( await import ( '../../services/ws.js' ) ) . default ;
// Define LLMStreamMessage interface
interface LLMStreamMessage {
type : 'llm-stream' ;
sessionId : string ;
content? : string ;
thinking? : string ;
toolExecution? : any ;
done? : boolean ;
error? : string ;
raw? : unknown ;
}
// Send error to client via WebSocket
wsService . sendMessageToAllClients ( {
type : 'llm-stream' ,
sessionId ,
error : ` Error processing message: ${ error } ` ,
done : true
} as LLMStreamMessage ) ;
}
} ) ;
// Import the WebSocket service
const wsService = ( await import ( '../../services/ws.js' ) ) . default ;
// Let the client know streaming has started via WebSocket (helps client confirm connection is working)
wsService . sendMessageToAllClients ( {
type : 'llm-stream' ,
sessionId ,
thinking : 'Initializing streaming LLM response...'
} ) ;
// Let the client know streaming has started via HTTP response
return {
success : true ,
message : 'Streaming started' ,
sessionId
} ;
} catch ( error : any ) {
log . error ( ` Error starting message stream: ${ error . message } ` ) ;
throw error ;
}
}
2025-03-09 02:19:26 +00:00
export default {
2025-03-11 23:26:47 +00:00
// Chat session management
2025-03-09 02:19:26 +00:00
createSession ,
getSession ,
updateSession ,
listSessions ,
deleteSession ,
2025-03-11 23:26:47 +00:00
sendMessage ,
2025-04-10 21:00:12 +00:00
streamMessage , // Add new streaming endpoint
2025-03-12 00:02:02 +00:00
2025-03-11 23:26:47 +00:00
// Knowledge base index management
getIndexStats ,
startIndexing ,
getFailedIndexes ,
retryFailedIndex ,
retryAllFailedIndexes ,
findSimilarNotes ,
generateQueryContext ,
indexNote
2025-03-09 02:19:26 +00:00
} ;