Merge branch 'develop' into dateNote

commit 9bfadd7799
Elian Doran, 2025-06-04 22:54:10 +03:00, committed by GitHub
GPG Key ID: B5690EEEBB952194
64 changed files with 4455 additions and 1578 deletions


@@ -37,7 +37,7 @@ jobs:
shell: bash
forge_platform: darwin
- name: linux
image: ubuntu-latest
image: ubuntu-22.04
shell: bash
forge_platform: linux
- name: windows
@@ -102,7 +102,7 @@ jobs:
arch: [x64, arm64]
include:
- arch: x64
runs-on: ubuntu-latest
runs-on: ubuntu-22.04
- arch: arm64
runs-on: ubuntu-24.04-arm
runs-on: ${{ matrix.runs-on }}


@@ -73,7 +73,7 @@ jobs:
arch: [x64, arm64]
include:
- arch: x64
runs-on: ubuntu-latest
runs-on: ubuntu-22.04
- arch: arm64
runs-on: ubuntu-24.04-arm
runs-on: ${{ matrix.runs-on }}


@@ -36,6 +36,7 @@ See [screenshots](https://triliumnext.github.io/Docs/Wiki/screenshot-tour) for q
* [Evernote](https://triliumnext.github.io/Docs/Wiki/evernote-import) and [Markdown import & export](https://triliumnext.github.io/Docs/Wiki/markdown)
* [Web Clipper](https://triliumnext.github.io/Docs/Wiki/web-clipper) for easy saving of web content
* Customizable UI (sidebar buttons, user-defined widgets, ...)
* [Metrics](./docs/User%20Guide/User%20Guide/Advanced%20Usage/Metrics.md), along with a [Grafana Dashboard](./docs/User%20Guide/User%20Guide/Advanced%20Usage/Metrics/grafana-dashboard.json)
✨ Check out the following third-party resources/communities for more TriliumNext related goodies:


@@ -66,7 +66,7 @@
"@types/react": "19.1.6",
"@types/react-dom": "19.1.5",
"copy-webpack-plugin": "13.0.0",
"happy-dom": "17.6.1",
"happy-dom": "17.6.3",
"script-loader": "0.7.2",
"vite-plugin-static-copy": "3.0.0"
},


@@ -8,7 +8,7 @@ interface Entity {
export interface EntityChange {
id?: number | null;
noteId?: string;
entityName: EntityRowNames;
entityName: EntityType;
entityId: string;
entity?: Entity;
positions?: Record<string, number>;
@@ -22,3 +22,5 @@ export interface EntityChange {
changeId?: string | null;
instanceId?: string | null;
}
export type EntityType = "notes" | "branches" | "attributes" | "note_reordering" | "revisions" | "options" | "attachments" | "blobs" | "etapi_tokens" | "note_embeddings";


@@ -35,8 +35,8 @@ async function processEntityChanges(entityChanges: EntityChange[]) {
loadResults.addOption(attributeEntity.name);
} else if (ec.entityName === "attachments") {
processAttachment(loadResults, ec);
} else if (ec.entityName === "blobs" || ec.entityName === "etapi_tokens") {
// NOOP
} else if (ec.entityName === "blobs" || ec.entityName === "etapi_tokens" || ec.entityName === "note_embeddings") {
// NOOP - these entities are handled at the backend level and don't require frontend processing
} else {
throw new Error(`Unknown entityName '${ec.entityName}'`);
}


@@ -16,4 +16,24 @@ describe("Link", () => {
const output = parseNavigationStateFromUrl(`#root/WWaBNf3SSA1b/mQ2tIzLVFKHL`);
expect(output).toMatchObject({ notePath: "root/WWaBNf3SSA1b/mQ2tIzLVFKHL", noteId: "mQ2tIzLVFKHL" });
});
it("parses notePath with spaces", () => {
const output = parseNavigationStateFromUrl(` #root/WWaBNf3SSA1b/mQ2tIzLVFKHL`);
expect(output).toMatchObject({ notePath: "root/WWaBNf3SSA1b/mQ2tIzLVFKHL", noteId: "mQ2tIzLVFKHL" });
});
it("ignores external URL with internal hash anchor", () => {
const output = parseNavigationStateFromUrl(`https://en.wikipedia.org/wiki/Bearded_Collie#Health`);
expect(output).toMatchObject({});
});
it("ignores malformed but hash-containing external URL", () => {
const output = parseNavigationStateFromUrl("https://abc.com/#drop?searchString=firefox");
expect(output).toStrictEqual({});
});
it("ignores non-hash internal path", () => {
const output = parseNavigationStateFromUrl("/root/abc123");
expect(output).toStrictEqual({});
});
});


@@ -204,11 +204,17 @@ export function parseNavigationStateFromUrl(url: string | undefined) {
return {};
}
url = url.trim();
const hashIdx = url.indexOf("#");
if (hashIdx === -1) {
return {};
}
// Exclude external links that contain #
if (hashIdx !== 0 && !url.includes("/#root") && !url.includes("/#?searchString")) {
return {};
}
const hash = url.substr(hashIdx + 1); // strip also the initial '#'
let [notePath, paramString] = hash.split("?");


@@ -44,9 +44,17 @@ interface OptionRow {}
interface NoteReorderingRow {}
interface ContentNoteIdToComponentIdRow {
interface NoteEmbeddingRow {
embedId: string;
noteId: string;
componentId: string;
providerId: string;
modelId: string;
dimension: number;
version: number;
dateCreated: string;
utcDateCreated: string;
dateModified: string;
utcDateModified: string;
}
type EntityRowMappings = {
@@ -56,6 +64,7 @@ type EntityRowMappings = {
options: OptionRow;
revisions: RevisionRow;
note_reordering: NoteReorderingRow;
note_embeddings: NoteEmbeddingRow;
};
export type EntityRowNames = keyof EntityRowMappings;


@@ -70,6 +70,7 @@
--scrollbar-border-color: #666;
--scrollbar-background-color: #333;
--selection-background-color: #3399FF70;
--tooltip-background-color: #333;
--link-color: lightskyblue;


@@ -74,6 +74,7 @@ html {
--scrollbar-border-color: #ddd;
--scrollbar-background-color: #ddd;
--selection-background-color: #3399FF70;
--tooltip-background-color: #f8f8f8;
--link-color: blue;


@@ -6,8 +6,10 @@ import type { SessionResponse } from "./types.js";
/**
* Create a new chat session
* @param currentNoteId - Optional current note ID for context
* @returns The noteId of the created chat note
*/
export async function createChatSession(currentNoteId?: string): Promise<{chatNoteId: string | null, noteId: string | null}> {
export async function createChatSession(currentNoteId?: string): Promise<string | null> {
try {
const resp = await server.post<SessionResponse>('llm/chat', {
title: 'Note Chat',
@@ -15,48 +17,42 @@ export async function createChatSession(currentNoteId?: string): Promise<{chatNo
});
if (resp && resp.id) {
// The backend might provide the noteId separately from the chatNoteId
// If noteId is provided, use it; otherwise, we'll need to query for it separately
return {
chatNoteId: resp.id,
noteId: resp.noteId || null
};
// Backend returns the chat note ID as 'id'
return resp.id;
}
} catch (error) {
console.error('Failed to create chat session:', error);
}
return {
chatNoteId: null,
noteId: null
};
return null;
}
/**
* Check if a session exists
* Check if a chat note exists
* @param noteId - The ID of the chat note
*/
export async function checkSessionExists(chatNoteId: string): Promise<boolean> {
export async function checkSessionExists(noteId: string): Promise<boolean> {
try {
// Validate that we have a proper note ID format, not a session ID
// Note IDs in Trilium are typically longer or in a different format
if (chatNoteId && chatNoteId.length === 16 && /^[A-Za-z0-9]+$/.test(chatNoteId)) {
console.warn(`Invalid note ID format detected: ${chatNoteId} appears to be a legacy session ID`);
return false;
}
const sessionCheck = await server.getWithSilentNotFound<any>(`llm/chat/${chatNoteId}`);
const sessionCheck = await server.getWithSilentNotFound<any>(`llm/chat/${noteId}`);
return !!(sessionCheck && sessionCheck.id);
} catch (error: any) {
console.log(`Error checking chat note ${chatNoteId}:`, error);
console.log(`Error checking chat note ${noteId}:`, error);
return false;
}
}
/**
* Set up streaming response via WebSocket
* @param noteId - The ID of the chat note
* @param messageParams - Message parameters
* @param onContentUpdate - Callback for content updates
* @param onThinkingUpdate - Callback for thinking updates
* @param onToolExecution - Callback for tool execution
* @param onComplete - Callback for completion
* @param onError - Callback for errors
*/
export async function setupStreamingResponse(
chatNoteId: string,
noteId: string,
messageParams: any,
onContentUpdate: (content: string, isDone?: boolean) => void,
onThinkingUpdate: (thinking: string) => void,
@@ -64,35 +60,24 @@ export async function setupStreamingResponse(
onComplete: () => void,
onError: (error: Error) => void
): Promise<void> {
// Validate that we have a proper note ID format, not a session ID
if (chatNoteId && chatNoteId.length === 16 && /^[A-Za-z0-9]+$/.test(chatNoteId)) {
console.error(`Invalid note ID format: ${chatNoteId} appears to be a legacy session ID`);
onError(new Error("Invalid note ID format - using a legacy session ID"));
return;
}
return new Promise((resolve, reject) => {
let assistantResponse = '';
let postToolResponse = ''; // Separate accumulator for post-tool execution content
let receivedAnyContent = false;
let receivedPostToolContent = false; // Track if we've started receiving post-tool content
let timeoutId: number | null = null;
let initialTimeoutId: number | null = null;
let cleanupTimeoutId: number | null = null;
let receivedAnyMessage = false;
let toolsExecuted = false; // Flag to track if tools were executed in this session
let toolExecutionCompleted = false; // Flag to track if tool execution is completed
let eventListener: ((event: Event) => void) | null = null;
let lastMessageTimestamp = 0;
// Create a unique identifier for this response process
const responseId = `llm-stream-${Date.now()}-${Math.floor(Math.random() * 1000)}`;
console.log(`[${responseId}] Setting up WebSocket streaming for chat note ${chatNoteId}`);
console.log(`[${responseId}] Setting up WebSocket streaming for chat note ${noteId}`);
// Send the initial request to initiate streaming
(async () => {
try {
const streamResponse = await server.post<any>(`llm/chat/${chatNoteId}/messages/stream`, {
const streamResponse = await server.post<any>(`llm/chat/${noteId}/messages/stream`, {
content: messageParams.content,
useAdvancedContext: messageParams.useAdvancedContext,
showThinking: messageParams.showThinking,
@@ -129,28 +114,14 @@ export async function setupStreamingResponse(
resolve();
};
// Function to schedule cleanup with ability to cancel
const scheduleCleanup = (delay: number) => {
// Clear any existing cleanup timeout
if (cleanupTimeoutId) {
window.clearTimeout(cleanupTimeoutId);
// Set initial timeout to catch cases where no message is received at all
initialTimeoutId = window.setTimeout(() => {
if (!receivedAnyMessage) {
console.error(`[${responseId}] No initial message received within timeout`);
performCleanup();
reject(new Error('No response received from server'));
}
console.log(`[${responseId}] Scheduling listener cleanup in ${delay}ms`);
// Set new cleanup timeout
cleanupTimeoutId = window.setTimeout(() => {
// Only clean up if no messages received recently (in last 2 seconds)
const timeSinceLastMessage = Date.now() - lastMessageTimestamp;
if (timeSinceLastMessage > 2000) {
performCleanup();
} else {
console.log(`[${responseId}] Received message recently, delaying cleanup`);
// Reschedule cleanup
scheduleCleanup(2000);
}
}, delay);
};
}, 10000);
// Create a message handler for CustomEvents
eventListener = (event: Event) => {
@@ -158,7 +129,7 @@ export async function setupStreamingResponse(
const message = customEvent.detail;
// Only process messages for our chat note
if (!message || message.chatNoteId !== chatNoteId) {
if (!message || message.chatNoteId !== noteId) {
return;
}
@@ -172,12 +143,12 @@ export async function setupStreamingResponse(
cleanupTimeoutId = null;
}
console.log(`[${responseId}] LLM Stream message received via CustomEvent: chatNoteId=${chatNoteId}, content=${!!message.content}, contentLength=${message.content?.length || 0}, thinking=${!!message.thinking}, toolExecution=${!!message.toolExecution}, done=${!!message.done}, type=${message.type || 'llm-stream'}`);
console.log(`[${responseId}] LLM Stream message received: content=${!!message.content}, contentLength=${message.content?.length || 0}, thinking=${!!message.thinking}, toolExecution=${!!message.toolExecution}, done=${!!message.done}`);
// Mark first message received
if (!receivedAnyMessage) {
receivedAnyMessage = true;
console.log(`[${responseId}] First message received for chat note ${chatNoteId}`);
console.log(`[${responseId}] First message received for chat note ${noteId}`);
// Clear the initial timeout since we've received a message
if (initialTimeoutId !== null) {
@@ -186,109 +157,33 @@ export async function setupStreamingResponse(
}
}
// Handle specific message types
if (message.type === 'tool_execution_start') {
toolsExecuted = true; // Mark that tools were executed
onThinkingUpdate('Executing tools...');
// Also trigger tool execution UI with a specific format
onToolExecution({
action: 'start',
tool: 'tools',
result: 'Executing tools...'
});
return; // Skip accumulating content from this message
// Handle error
if (message.error) {
console.error(`[${responseId}] Stream error: ${message.error}`);
performCleanup();
reject(new Error(message.error));
return;
}
if (message.type === 'tool_result' && message.toolExecution) {
toolsExecuted = true; // Mark that tools were executed
console.log(`[${responseId}] Processing tool result: ${JSON.stringify(message.toolExecution)}`);
// Handle thinking updates - only show if showThinking is enabled
if (message.thinking && messageParams.showThinking) {
console.log(`[${responseId}] Received thinking: ${message.thinking.substring(0, 100)}...`);
onThinkingUpdate(message.thinking);
}
// If tool execution doesn't have an action, add 'result' as the default
if (!message.toolExecution.action) {
message.toolExecution.action = 'result';
}
// First send a 'start' action to ensure the container is created
onToolExecution({
action: 'start',
tool: 'tools',
result: 'Tool execution initialized'
});
// Then send the actual tool execution data
// Handle tool execution updates
if (message.toolExecution) {
console.log(`[${responseId}] Tool execution update:`, message.toolExecution);
onToolExecution(message.toolExecution);
// Mark tool execution as completed if this is a result or error
if (message.toolExecution.action === 'result' || message.toolExecution.action === 'complete' || message.toolExecution.action === 'error') {
toolExecutionCompleted = true;
console.log(`[${responseId}] Tool execution completed`);
}
return; // Skip accumulating content from this message
}
if (message.type === 'tool_execution_error' && message.toolExecution) {
toolsExecuted = true; // Mark that tools were executed
toolExecutionCompleted = true; // Mark tool execution as completed
onToolExecution({
...message.toolExecution,
action: 'error',
error: message.toolExecution.error || 'Unknown error during tool execution'
});
return; // Skip accumulating content from this message
}
if (message.type === 'tool_completion_processing') {
toolsExecuted = true; // Mark that tools were executed
toolExecutionCompleted = true; // Tools are done, now processing the result
onThinkingUpdate('Generating response with tool results...');
// Also trigger tool execution UI with a specific format
onToolExecution({
action: 'generating',
tool: 'tools',
result: 'Generating response with tool results...'
});
return; // Skip accumulating content from this message
}
// Handle content updates
if (message.content) {
console.log(`[${responseId}] Received content chunk of length ${message.content.length}, preview: "${message.content.substring(0, 50)}${message.content.length > 50 ? '...' : ''}"`);
// If tools were executed and completed, and we're now getting new content,
// this is likely the final response after tool execution from Anthropic
if (toolsExecuted && toolExecutionCompleted && message.content) {
console.log(`[${responseId}] Post-tool execution content detected`);
// If this is the first post-tool chunk, indicate we're starting a new response
if (!receivedPostToolContent) {
receivedPostToolContent = true;
postToolResponse = ''; // Clear any previous post-tool response
console.log(`[${responseId}] First post-tool content chunk, starting fresh accumulation`);
}
// Accumulate post-tool execution content
postToolResponse += message.content;
console.log(`[${responseId}] Accumulated post-tool content, now ${postToolResponse.length} chars`);
// Update the UI with the accumulated post-tool content
// This replaces the pre-tool content with our accumulated post-tool content
onContentUpdate(postToolResponse, message.done || false);
} else {
// Standard content handling for non-tool cases or initial tool response
// Check if this is a duplicated message containing the same content we already have
if (message.done && assistantResponse.includes(message.content)) {
console.log(`[${responseId}] Ignoring duplicated content in done message`);
} else {
// Add to our accumulated response
assistantResponse += message.content;
}
// Update the UI immediately with each chunk
onContentUpdate(assistantResponse, message.done || false);
}
// Simply append the new content - no complex deduplication
assistantResponse += message.content;
// Update the UI immediately with each chunk
onContentUpdate(assistantResponse, message.done || false);
receivedAnyContent = true;
// Reset timeout since we got content
@@ -298,151 +193,33 @@ export async function setupStreamingResponse(
// Set new timeout
timeoutId = window.setTimeout(() => {
console.warn(`[${responseId}] Stream timeout for chat note ${chatNoteId}`);
// Clean up
console.warn(`[${responseId}] Stream timeout for chat note ${noteId}`);
performCleanup();
reject(new Error('Stream timeout'));
}, 30000);
}
// Handle tool execution updates (legacy format and standard format with llm-stream type)
if (message.toolExecution) {
// Only process if we haven't already handled this message via specific message types
if (message.type === 'llm-stream' || !message.type) {
console.log(`[${responseId}] Received tool execution update: action=${message.toolExecution.action || 'unknown'}`);
toolsExecuted = true; // Mark that tools were executed
// Mark tool execution as completed if this is a result or error
if (message.toolExecution.action === 'result' ||
message.toolExecution.action === 'complete' ||
message.toolExecution.action === 'error') {
toolExecutionCompleted = true;
console.log(`[${responseId}] Tool execution completed via toolExecution message`);
}
onToolExecution(message.toolExecution);
}
}
// Handle tool calls from the raw data or direct in message (OpenAI format)
const toolCalls = message.tool_calls || (message.raw && message.raw.tool_calls);
if (toolCalls && Array.isArray(toolCalls)) {
console.log(`[${responseId}] Received tool calls: ${toolCalls.length} tools`);
toolsExecuted = true; // Mark that tools were executed
// First send a 'start' action to ensure the container is created
onToolExecution({
action: 'start',
tool: 'tools',
result: 'Tool execution initialized'
});
// Then process each tool call
for (const toolCall of toolCalls) {
let args = toolCall.function?.arguments || {};
// Try to parse arguments if they're a string
if (typeof args === 'string') {
try {
args = JSON.parse(args);
} catch (e) {
console.log(`[${responseId}] Could not parse tool arguments as JSON: ${e}`);
args = { raw: args };
}
}
onToolExecution({
action: 'executing',
tool: toolCall.function?.name || 'unknown',
toolCallId: toolCall.id,
args: args
});
}
}
// Handle thinking state updates
if (message.thinking) {
console.log(`[${responseId}] Received thinking update: ${message.thinking.substring(0, 50)}...`);
onThinkingUpdate(message.thinking);
}
// Handle completion
if (message.done) {
console.log(`[${responseId}] Stream completed for chat note ${chatNoteId}, has content: ${!!message.content}, content length: ${message.content?.length || 0}, current response: ${assistantResponse.length} chars`);
console.log(`[${responseId}] Stream completed for chat note ${noteId}, final response: ${assistantResponse.length} chars`);
// Dump message content to console for debugging
if (message.content) {
console.log(`[${responseId}] CONTENT IN DONE MESSAGE (first 200 chars): "${message.content.substring(0, 200)}..."`);
// Check if the done message contains the exact same content as our accumulated response
// We normalize by removing whitespace to avoid false negatives due to spacing differences
const normalizedMessage = message.content.trim();
const normalizedResponse = assistantResponse.trim();
if (normalizedMessage === normalizedResponse) {
console.log(`[${responseId}] Final message is identical to accumulated response, no need to update`);
}
// If the done message is longer but contains our accumulated response, use the done message
else if (normalizedMessage.includes(normalizedResponse) && normalizedMessage.length > normalizedResponse.length) {
console.log(`[${responseId}] Final message is more complete than accumulated response, using it`);
assistantResponse = message.content;
}
// If the done message is different and not already included, append it to avoid duplication
else if (!normalizedResponse.includes(normalizedMessage) && normalizedMessage.length > 0) {
console.log(`[${responseId}] Final message has unique content, using it`);
assistantResponse = message.content;
}
// Otherwise, we already have the content accumulated, so no need to update
else {
console.log(`[${responseId}] Already have this content accumulated, not updating`);
}
}
// Clear timeout if set
// Clear all timeouts
if (timeoutId !== null) {
window.clearTimeout(timeoutId);
timeoutId = null;
}
// Always mark as done when we receive the done flag
onContentUpdate(assistantResponse, true);
// Set a longer delay before cleanup to allow for post-tool execution messages
// Especially important for Anthropic which may send final message after tool execution
const cleanupDelay = toolsExecuted ? 15000 : 1000; // 15 seconds if tools were used, otherwise 1 second
console.log(`[${responseId}] Setting cleanup delay of ${cleanupDelay}ms since toolsExecuted=${toolsExecuted}`);
scheduleCleanup(cleanupDelay);
// Schedule cleanup after a brief delay to ensure all processing is complete
cleanupTimeoutId = window.setTimeout(() => {
performCleanup();
}, 100);
}
};
// Register event listener for the custom event
try {
window.addEventListener('llm-stream-message', eventListener);
console.log(`[${responseId}] Event listener added for llm-stream-message events`);
} catch (err) {
console.error(`[${responseId}] Error setting up event listener:`, err);
reject(err);
return;
}
// Register the event listener for WebSocket messages
window.addEventListener('llm-stream-message', eventListener);
// Set initial timeout for receiving any message
initialTimeoutId = window.setTimeout(() => {
console.warn(`[${responseId}] No messages received for initial period in chat note ${chatNoteId}`);
if (!receivedAnyMessage) {
console.error(`[${responseId}] WebSocket connection not established for chat note ${chatNoteId}`);
if (timeoutId !== null) {
window.clearTimeout(timeoutId);
}
// Clean up
cleanupEventListener(eventListener);
// Show error message to user
reject(new Error('WebSocket connection not established'));
}
}, 10000);
console.log(`[${responseId}] Event listener registered, waiting for messages...`);
});
}
@@ -463,15 +240,9 @@ function cleanupEventListener(listener: ((event: Event) => void) | null): void {
/**
* Get a direct response from the server without streaming
*/
export async function getDirectResponse(chatNoteId: string, messageParams: any): Promise<any> {
export async function getDirectResponse(noteId: string, messageParams: any): Promise<any> {
try {
// Validate that we have a proper note ID format, not a session ID
if (chatNoteId && chatNoteId.length === 16 && /^[A-Za-z0-9]+$/.test(chatNoteId)) {
console.error(`Invalid note ID format: ${chatNoteId} appears to be a legacy session ID`);
throw new Error("Invalid note ID format - using a legacy session ID");
}
const postResponse = await server.post<any>(`llm/chat/${chatNoteId}/messages`, {
const postResponse = await server.post<any>(`llm/chat/${noteId}/messages`, {
message: messageParams.content,
includeContext: messageParams.useAdvancedContext,
options: {


@@ -37,9 +37,10 @@ export default class LlmChatPanel extends BasicWidget {
private thinkingBubble!: HTMLElement;
private thinkingText!: HTMLElement;
private thinkingToggle!: HTMLElement;
private chatNoteId: string | null = null;
private noteId: string | null = null; // The actual noteId for the Chat Note
private currentNoteId: string | null = null;
// Simplified to just use noteId - this represents the AI Chat note we're working with
private noteId: string | null = null;
private currentNoteId: string | null = null; // The note providing context (for regular notes)
private _messageHandlerId: number | null = null;
private _messageHandler: any = null;
@@ -68,7 +69,6 @@ export default class LlmChatPanel extends BasicWidget {
totalTokens?: number;
};
} = {
model: 'default',
temperature: 0.7,
toolExecutions: []
};
@@ -90,12 +90,21 @@ export default class LlmChatPanel extends BasicWidget {
this.messages = messages;
}
public getChatNoteId(): string | null {
return this.chatNoteId;
public getNoteId(): string | null {
return this.noteId;
}
public setChatNoteId(chatNoteId: string | null): void {
this.chatNoteId = chatNoteId;
public setNoteId(noteId: string | null): void {
this.noteId = noteId;
}
// Deprecated - keeping for backward compatibility but mapping to noteId
public getChatNoteId(): string | null {
return this.noteId;
}
public setChatNoteId(noteId: string | null): void {
this.noteId = noteId;
}
public getNoteContextChatMessages(): HTMLElement {
@@ -307,16 +316,22 @@
}
}
const dataToSave: ChatData = {
// Only save if we have a valid note ID
if (!this.noteId) {
console.warn('Cannot save chat data: no noteId available');
return;
}
const dataToSave = {
messages: this.messages,
chatNoteId: this.chatNoteId,
noteId: this.noteId,
chatNoteId: this.noteId, // For backward compatibility
toolSteps: toolSteps,
// Add sources if we have them
sources: this.sources || [],
// Add metadata
metadata: {
model: this.metadata?.model || 'default',
model: this.metadata?.model || undefined,
provider: this.metadata?.provider || undefined,
temperature: this.metadata?.temperature || 0.7,
lastUpdated: new Date().toISOString(),
@@ -325,7 +340,7 @@
}
};
console.log(`Saving chat data with chatNoteId: ${this.chatNoteId}, noteId: ${this.noteId}, ${toolSteps.length} tool steps, ${this.sources?.length || 0} sources, ${toolExecutions.length} tool executions`);
console.log(`Saving chat data with noteId: ${this.noteId}, ${toolSteps.length} tool steps, ${this.sources?.length || 0} sources, ${toolExecutions.length} tool executions`);
// Save the data to the note attribute via the callback
// This is the ONLY place we should save data, letting the container widget handle persistence
@@ -347,16 +362,52 @@
const savedData = await this.onGetData() as ChatData;
if (savedData?.messages?.length > 0) {
// Check if we actually have new content to avoid unnecessary UI rebuilds
const currentMessageCount = this.messages.length;
const savedMessageCount = savedData.messages.length;
// If message counts are the same, check if content is different
const hasNewContent = savedMessageCount > currentMessageCount ||
JSON.stringify(this.messages) !== JSON.stringify(savedData.messages);
if (!hasNewContent) {
console.log("No new content detected, skipping UI rebuild");
return true;
}
console.log(`Loading saved data: ${currentMessageCount} -> ${savedMessageCount} messages`);
// Store current scroll position if we need to preserve it
const shouldPreserveScroll = savedMessageCount > currentMessageCount && currentMessageCount > 0;
const currentScrollTop = shouldPreserveScroll ? this.chatContainer.scrollTop : 0;
const currentScrollHeight = shouldPreserveScroll ? this.chatContainer.scrollHeight : 0;
// Load messages
const oldMessages = [...this.messages];
this.messages = savedData.messages;
// Clear and rebuild the chat UI
this.noteContextChatMessages.innerHTML = '';
// Only rebuild UI if we have significantly different content
if (savedMessageCount > currentMessageCount) {
// We have new messages - just add the new ones instead of rebuilding everything
const newMessages = savedData.messages.slice(currentMessageCount);
console.log(`Adding ${newMessages.length} new messages to UI`);
this.messages.forEach(message => {
const role = message.role as 'user' | 'assistant';
this.addMessageToChat(role, message.content);
});
newMessages.forEach(message => {
const role = message.role as 'user' | 'assistant';
this.addMessageToChat(role, message.content);
});
} else {
// Content changed but count is same - need to rebuild
console.log("Message content changed, rebuilding UI");
// Clear and rebuild the chat UI
this.noteContextChatMessages.innerHTML = '';
this.messages.forEach(message => {
const role = message.role as 'user' | 'assistant';
this.addMessageToChat(role, message.content);
});
}
// Restore tool execution steps if they exist
if (savedData.toolSteps && Array.isArray(savedData.toolSteps) && savedData.toolSteps.length > 0) {
@@ -400,13 +451,33 @@
// Load Chat Note ID if available
if (savedData.noteId) {
console.log(`Using noteId as Chat Note ID: ${savedData.noteId}`);
this.chatNoteId = savedData.noteId;
this.noteId = savedData.noteId;
} else {
console.log(`No noteId found in saved data, cannot load chat session`);
return false;
}
// Restore scroll position if we were preserving it
if (shouldPreserveScroll) {
// Calculate the new scroll position to maintain relative position
const newScrollHeight = this.chatContainer.scrollHeight;
const scrollDifference = newScrollHeight - currentScrollHeight;
const newScrollTop = currentScrollTop + scrollDifference;
// Only scroll down if we're near the bottom, otherwise preserve exact position
const wasNearBottom = (currentScrollTop + this.chatContainer.clientHeight) >= (currentScrollHeight - 50);
if (wasNearBottom) {
// User was at bottom, scroll to new bottom
this.chatContainer.scrollTop = newScrollHeight;
console.log("User was at bottom, scrolling to new bottom");
} else {
// User was not at bottom, try to preserve their position
this.chatContainer.scrollTop = newScrollTop;
console.log(`Preserving scroll position: ${currentScrollTop} -> ${newScrollTop}`);
}
}
return true;
}
} catch (error) {
@@ -550,6 +621,15 @@
// Get current note context if needed
const currentActiveNoteId = appContext.tabManager.getActiveContext()?.note?.noteId || null;
// For AI Chat notes, the note itself IS the chat session
// So currentNoteId and noteId should be the same
if (this.noteId && currentActiveNoteId === this.noteId) {
// We're in an AI Chat note - don't reset, just load saved data
console.log(`Refreshing AI Chat note ${this.noteId} - loading saved data`);
await this.loadSavedData();
return;
}
// If we're switching to a different note, we need to reset
if (this.currentNoteId !== currentActiveNoteId) {
console.log(`Note ID changed from ${this.currentNoteId} to ${currentActiveNoteId}, resetting chat panel`);
@@ -557,7 +637,6 @@
// Reset the UI and data
this.noteContextChatMessages.innerHTML = '';
this.messages = [];
this.chatNoteId = null;
this.noteId = null; // Also reset the chat note ID
this.hideSources(); // Hide any sources from previous note
@@ -569,7 +648,7 @@
const hasSavedData = await this.loadSavedData();
// Only create a new session if we don't have a session or saved data
if (!this.chatNoteId || !this.noteId || !hasSavedData) {
if (!this.noteId || !hasSavedData) {
// Create a new chat session
await this.createChatSession();
}
@@ -580,19 +659,15 @@
*/
private async createChatSession() {
try {
// Create a new chat session, passing the current note ID if it exists
const { chatNoteId, noteId } = await createChatSession(
this.currentNoteId ? this.currentNoteId : undefined
);
// If we already have a noteId (for AI Chat notes), use it
const contextNoteId = this.noteId || this.currentNoteId;
if (chatNoteId) {
// If we got back an ID from the API, use it
this.chatNoteId = chatNoteId;
// For new sessions, the noteId should equal the chatNoteId
// This ensures we're using the note ID consistently
this.noteId = noteId || chatNoteId;
// Create a new chat session, passing the context note ID
const noteId = await createChatSession(contextNoteId ? contextNoteId : undefined);
if (noteId) {
// Set the note ID for this chat
this.noteId = noteId;
console.log(`Created new chat session with noteId: ${this.noteId}`);
} else {
throw new Error("Failed to create chat session - no ID returned");
@@ -645,7 +720,7 @@
const showThinking = this.showThinkingCheckbox.checked;
// Add logging to verify parameters
console.log(`Sending message with: useAdvancedContext=${useAdvancedContext}, showThinking=${showThinking}, noteId=${this.currentNoteId}, sessionId=${this.chatNoteId}`);
console.log(`Sending message with: useAdvancedContext=${useAdvancedContext}, showThinking=${showThinking}, noteId=${this.currentNoteId}, sessionId=${this.noteId}`);
// Create the message parameters
const messageParams = {
@@ -695,11 +770,11 @@
await validateEmbeddingProviders(this.validationWarning);
// Make sure we have a valid session
if (!this.chatNoteId) {
if (!this.noteId) {
// If no session ID, create a new session
await this.createChatSession();
if (!this.chatNoteId) {
if (!this.noteId) {
// If still no session ID, show error and return
console.error("Failed to create chat session");
toastService.showError("Failed to create chat session");
@@ -730,7 +805,7 @@
await this.saveCurrentData();
// Add logging to verify parameters
console.log(`Sending message with: useAdvancedContext=${useAdvancedContext}, showThinking=${showThinking}, noteId=${this.currentNoteId}, sessionId=${this.chatNoteId}`);
console.log(`Sending message with: useAdvancedContext=${useAdvancedContext}, showThinking=${showThinking}, noteId=${this.currentNoteId}, sessionId=${this.noteId}`);
// Create the message parameters
const messageParams = {
@@ -767,12 +842,12 @@
*/
private async handleDirectResponse(messageParams: any): Promise<boolean> {
try {
if (!this.chatNoteId) return false;
if (!this.noteId) return false;
console.log(`Getting direct response using sessionId: ${this.chatNoteId} (noteId: ${this.noteId})`);
console.log(`Getting direct response using sessionId: ${this.noteId} (noteId: ${this.noteId})`);
// Get a direct response from the server
const postResponse = await getDirectResponse(this.chatNoteId, messageParams);
const postResponse = await getDirectResponse(this.noteId, messageParams);
// If the POST request returned content directly, display it
if (postResponse && postResponse.content) {
@@ -845,11 +920,11 @@
* Set up streaming response via WebSocket
*/
private async setupStreamingResponse(messageParams: any): Promise<void> {
if (!this.chatNoteId) {
if (!this.noteId) {
throw new Error("No session ID available");
}
console.log(`Setting up streaming response using sessionId: ${this.chatNoteId} (noteId: ${this.noteId})`);
console.log(`Setting up streaming response using sessionId: ${this.noteId} (noteId: ${this.noteId})`);
// Store tool executions captured during streaming
const toolExecutionsCache: Array<{
@@ -862,7 +937,7 @@
}> = [];
return setupStreamingResponse(
this.chatNoteId,
this.noteId,
messageParams,
// Content update handler
(content: string, isDone: boolean = false) => {
@@ -898,7 +973,7 @@
similarity?: number;
content?: string;
}>;
}>(`llm/chat/${this.chatNoteId}`)
}>(`llm/chat/${this.noteId}`)
.then((sessionData) => {
console.log("Got updated session data:", sessionData);
@@ -933,9 +1008,9 @@
}
}
// Save the updated data to the note
this.saveCurrentData()
.catch(err => console.error("Failed to save data after streaming completed:", err));
// DON'T save here - let the server handle saving the complete conversation
// to avoid race conditions between client and server saves
console.log("Updated metadata after streaming completion, server should save");
})
.catch(err => console.error("Error fetching session data after streaming:", err));
}
@@ -973,11 +1048,9 @@
console.log(`Cached tool execution for ${toolData.tool} to be saved later`);
// Save immediately after receiving a tool execution
// This ensures we don't lose tool execution data if streaming fails
this.saveCurrentData().catch(err => {
console.error("Failed to save tool execution data:", err);
});
// DON'T save immediately during streaming - let the server handle saving
// to avoid race conditions between client and server saves
console.log(`Tool execution cached, will be saved by server`);
}
},
// Complete handler
@@ -995,23 +1068,19 @@
* Update the UI with streaming content
*/
private updateStreamingUI(assistantResponse: string, isDone: boolean = false) {
// Parse and handle thinking content if present
if (!isDone) {
const thinkingContent = this.parseThinkingContent(assistantResponse);
if (thinkingContent) {
this.updateThinkingText(thinkingContent);
// Don't display the raw response with think tags in the chat
return;
}
}
// Get the existing assistant message or create a new one
let assistantMessageEl = this.noteContextChatMessages.querySelector('.assistant-message:last-child');
if (!assistantMessageEl) {
// If no assistant message yet, create one
// Track if we have a streaming message in progress
const hasStreamingMessage = !!this.noteContextChatMessages.querySelector('.assistant-message.streaming');
// Create a new message element or use the existing streaming one
let assistantMessageEl: HTMLElement;
if (hasStreamingMessage) {
// Use the existing streaming message
assistantMessageEl = this.noteContextChatMessages.querySelector('.assistant-message.streaming')!;
} else {
// Create a new message element
assistantMessageEl = document.createElement('div');
assistantMessageEl.className = 'assistant-message message mb-3';
assistantMessageEl.className = 'assistant-message message mb-3 streaming';
this.noteContextChatMessages.appendChild(assistantMessageEl);
// Add assistant profile icon
@@ -1026,60 +1095,37 @@
assistantMessageEl.appendChild(messageContent);
}
// Clean the response to remove thinking tags before displaying
const cleanedResponse = this.removeThinkingTags(assistantResponse);
// Update the content
// Update the content with the current response
const messageContent = assistantMessageEl.querySelector('.message-content') as HTMLElement;
messageContent.innerHTML = formatMarkdown(cleanedResponse);
messageContent.innerHTML = formatMarkdown(assistantResponse);
// Apply syntax highlighting if this is the final update
// When the response is complete
if (isDone) {
// Remove the streaming class to mark this message as complete
assistantMessageEl.classList.remove('streaming');
// Apply syntax highlighting
formatCodeBlocks($(assistantMessageEl as HTMLElement));
// Hide the thinking display when response is complete
this.hideThinkingDisplay();
// Update message in the data model for storage
// Find the last assistant message to update, or add a new one if none exists
const assistantMessages = this.messages.filter(msg => msg.role === 'assistant');
const lastAssistantMsgIndex = assistantMessages.length > 0 ?
this.messages.lastIndexOf(assistantMessages[assistantMessages.length - 1]) : -1;
if (lastAssistantMsgIndex >= 0) {
// Update existing message with cleaned content
this.messages[lastAssistantMsgIndex].content = cleanedResponse;
} else {
// Add new message with cleaned content
this.messages.push({
role: 'assistant',
content: cleanedResponse
});
}
// Hide loading indicator
hideLoadingIndicator(this.loadingIndicator);
// Save the final state to the Chat Note
this.saveCurrentData().catch(err => {
console.error("Failed to save assistant response to note:", err);
// Always add a new message to the data model
// This ensures we preserve all distinct assistant messages
this.messages.push({
role: 'assistant',
content: assistantResponse,
timestamp: new Date()
});
// Save the updated message list
this.saveCurrentData();
}
// Scroll to bottom
this.chatContainer.scrollTop = this.chatContainer.scrollHeight;
}
/**
* Remove thinking tags from response content
*/
private removeThinkingTags(content: string): string {
if (!content) return content;
// Remove <think>...</think> blocks from the content
return content.replace(/<think>[\s\S]*?<\/think>/gi, '').trim();
}
/**
* Handle general errors in the send message flow
*/


@@ -11,7 +11,7 @@ export interface ChatResponse {
export interface SessionResponse {
id: string;
title: string;
noteId?: string;
noteId: string; // The ID of the chat note
}
export interface ToolExecutionStep {
@@ -33,8 +33,8 @@ export interface MessageData {
export interface ChatData {
messages: MessageData[];
chatNoteId: string | null;
noteId?: string | null;
noteId: string; // The ID of the chat note
chatNoteId?: string; // Deprecated - kept for backward compatibility, should equal noteId
toolSteps: ToolExecutionStep[];
sources?: Array<{
noteId: string;


@@ -94,6 +94,11 @@ export default class AiChatTypeWidget extends TypeWidget {
this.llmChatPanel.clearNoteContextChatMessages();
this.llmChatPanel.setMessages([]);
// Set the note ID for the chat panel
if (note) {
this.llmChatPanel.setNoteId(note.noteId);
}
// This will load saved data via the getData callback
await this.llmChatPanel.refresh();
this.isInitialized = true;
@@ -130,7 +135,7 @@
// Reset the chat panel UI
this.llmChatPanel.clearNoteContextChatMessages();
this.llmChatPanel.setMessages([]);
this.llmChatPanel.setChatNoteId(null);
this.llmChatPanel.setNoteId(this.note.noteId);
}
// Call the parent method to refresh
@@ -152,6 +157,7 @@
// Make sure the chat panel has the current note ID
if (this.note) {
this.llmChatPanel.setCurrentNoteId(this.note.noteId);
this.llmChatPanel.setNoteId(this.note.noteId);
}
this.initPromise = (async () => {
@@ -186,7 +192,7 @@
// Format the data properly - this is the canonical format of the data
const formattedData = {
messages: data.messages || [],
chatNoteId: data.chatNoteId || this.note.noteId,
noteId: this.note.noteId, // Always use the note's own ID
toolSteps: data.toolSteps || [],
sources: data.sources || [],
metadata: {


@@ -7,8 +7,11 @@ import tray from "@triliumnext/server/src/services/tray.js";
import options from "@triliumnext/server/src/services/options.js";
import electronDebug from "electron-debug";
import electronDl from "electron-dl";
import { deferred } from "@triliumnext/server/src/services/utils.js";
async function main() {
const serverInitializedPromise = deferred<void>();
// Prevent Trilium starting twice on first install and on uninstall for the Windows installer.
if ((require("electron-squirrel-startup")).default) {
process.exit(0);
@@ -37,7 +40,11 @@
}
});
electron.app.on("ready", onReady);
electron.app.on("ready", async () => {
await serverInitializedPromise;
console.log("Starting Electron...");
await onReady();
});
electron.app.on("will-quit", () => {
electron.globalShortcut.unregisterAll();
@@ -47,7 +54,10 @@
process.env["ELECTRON_DISABLE_SECURITY_WARNINGS"] = "true";
await initializeTranslations();
await import("@triliumnext/server/src/main.js");
const startTriliumServer = (await import("@triliumnext/server/src/www.js")).default;
await startTriliumServer();
console.log("Server loaded");
serverInitializedPromise.resolve();
}
async function onReady() {

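The change above gates Electron's "ready" handler on a deferred promise that is resolved only after the Trilium server has started listening, so the main window can no longer be created against a half-initialized server. The real helper is imported from @triliumnext/server/src/services/utils.js; as a minimal sketch (an assumed shape, not the actual implementation), a deferred promise is just a Promise with its resolve/reject functions exposed:

// Hypothetical sketch of deferred<T>(): a Promise whose resolve/reject
// functions are exposed so other code can settle it later.
function deferred<T>() {
    let resolve!: (value: T) => void;
    let reject!: (reason?: unknown) => void;
    const promise = new Promise<T>((res, rej) => {
        resolve = res;
        reject = rej;
    });
    return Object.assign(promise, { resolve, reject });
}

// Mirroring the flow in the diff: the "ready" handler awaits the promise,
// and the startup path resolves it once the server is up.
const serverInitializedPromise = deferred<void>();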

@@ -38,7 +38,8 @@ export function startElectron(callback: () => void): DeferredPromise<void> {
console.log("Electron is ready!");
// Start the server.
await import("@triliumnext/server/src/main.js");
const startTriliumServer = (await import("@triliumnext/server/src/www.js")).default;
await startTriliumServer();
// Create the main window.
await windowService.createMainWindow(electron.app);


@@ -0,0 +1,6 @@
TRILIUM_ENV=dev
TRILIUM_DATA_DIR=./apps/server/spec/db
TRILIUM_RESOURCE_DIR=./apps/server/dist
TRILIUM_PUBLIC_SERVER=http://localhost:4200
TRILIUM_PORT=8086
TRILIUM_INTEGRATION_TEST=edit


@@ -129,6 +129,23 @@
"runBuildTargetDependencies": false
}
},
"edit-integration-db": {
"executor": "@nx/js:node",
"dependsOn": [
{
"projects": [
"client"
],
"target": "serve"
},
"build-without-client"
],
"continuous": true,
"options": {
"buildTarget": "server:build-without-client:development",
"runBuildTargetDependencies": false
}
},
"package": {
"dependsOn": [
"build"

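The new edit-integration-db target mirrors the existing integration-test targets in this file: it serves the client, builds the server without the client, and runs it continuously against the test database configured by the .env file above. Assuming the Nx project is named "server" (as the buildTarget string suggests), it would presumably be invoked as:

npx nx run server:edit-integration-db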
Binary file not shown

File diff suppressed because one or more lines are too long

Binary image file added (548 KiB)

Binary image file added (52 KiB)


@@ -79,4 +79,24 @@ trilium_notes_total 1234 1701432000
<li><code>400</code> - Invalid format parameter</li>
<li><code>401</code> - Missing or invalid ETAPI token</li>
<li><code>500</code> - Internal server error</li>
</ul>
</ul>
<p>&nbsp;</p>
<h2><strong>Grafana Dashboard</strong></h2>
<figure class="image">
<img style="aspect-ratio:2594/1568;" src="1_Metrics_image.png" width="2594"
height="1568">
</figure>
<p>&nbsp;</p>
<p>You can also use the Grafana Dashboard that has been created for TriliumNext
- just take the JSON from&nbsp;<a class="reference-link" href="#root/pOsGYCXsbNQG/tC7s2alapj8V/uYF7pmepw27K/_help_bOP3TB56fL1V">grafana-dashboard.json</a>&nbsp;and
then import the dashboard, following these screenshots:</p>
<figure class="image">
<img style="aspect-ratio:1881/282;" src="2_Metrics_image.png" width="1881"
height="282">
</figure>
<p>Then paste the JSON, and hit load:</p>
<figure class="image">
<img style="aspect-ratio:1055/830;" src="Metrics_image.png" width="1055"
height="830">
</figure>
<p>&nbsp;</p>
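Judging from the status codes listed above (401 for a missing or invalid ETAPI token), the metrics endpoint is authenticated with an ETAPI token and returns plain text in the Prometheus exposition format, as in the trilium_notes_total sample. A minimal TypeScript sketch of scraping it, assuming the endpoint lives at /etapi/metrics and takes the token as a raw Authorization header (both assumptions, not verified by this page):

// Fetch Trilium metrics with an ETAPI token (path and header shape assumed).
const response = await fetch("http://localhost:8080/etapi/metrics", {
    headers: { Authorization: process.env.ETAPI_TOKEN ?? "" }
});
if (!response.ok) {
    // Per the list above: 400 invalid format parameter, 401 bad token, 500 server error.
    throw new Error(`Metrics request failed: ${response.status}`);
}
console.log(await response.text()); // e.g. "trilium_notes_total 1234 1701432000"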

File diff suppressed because it is too large

Binary image file added (73 KiB)


@@ -1,12 +1,18 @@
{
"keyboard_actions": {
"back-in-note-history": "Navigate to previous note in history",
"forward-in-note-history": "Navigate to next note in history",
"open-jump-to-note-dialog": "Open \"Jump to note\" dialog",
"scroll-to-active-note": "Scroll note tree to active note",
"quick-search": "Activate quick search bar",
"search-in-subtree": "Search for notes in the active note's subtree",
"expand-subtree": "Expand subtree of current note",
"collapse-tree": "Collapses the complete note tree",
"collapse-subtree": "Collapses subtree of current note",
"sort-child-notes": "Sort child notes",
"creating-and-moving-notes": "Creating and moving notes",
"create-note-after": "Create note after active note",
"create-note-into": "Create note as child of active note",
"create-note-into-inbox": "Create a note in the inbox (if defined) or day note",
"delete-note": "Delete note",
"move-note-up": "Move note up",
@@ -14,40 +20,44 @@
"move-note-up-in-hierarchy": "Move note up in hierarchy",
"move-note-down-in-hierarchy": "Move note down in hierarchy",
"edit-note-title": "Jump from tree to the note detail and edit title",
"edit-branch-prefix": "Show Edit branch prefix dialog",
"edit-branch-prefix": "Show \"Edit branch prefix\" dialog",
"cloneNotesTo": "Clone selected notes",
"moveNotesTo": "Move selected notes",
"note-clipboard": "Note clipboard",
"copy-notes-to-clipboard": "Copy selected notes to the clipboard",
"paste-notes-from-clipboard": "Paste notes from the clipboard into active note",
"cut-notes-to-clipboard": "Cut selected notes to the clipboard",
"select-all-notes-in-parent": "Select all notes from the current note level",
"add-note-above-to-the-selection": "Add note above to the selection",
"add-note-below-to-selection": "Add note above to the selection",
"add-note-below-to-selection": "Add note below to the selection",
"duplicate-subtree": "Duplicate subtree",
"tabs-and-windows": "Tabs & Windows",
"open-new-tab": "Opens new tab",
"close-active-tab": "Closes active tab",
"reopen-last-tab": "Reopens the last closed tab",
"activate-next-tab": "Activates tab on the right",
"activate-previous-tab": "Activates tab on the left",
"open-new-tab": "Open new tab",
"close-active-tab": "Close active tab",
"reopen-last-tab": "Reopen the last closed tab",
"activate-next-tab": "Activate tab on the right",
"activate-previous-tab": "Activate tab on the left",
"open-new-window": "Open new empty window",
"toggle-tray": "Shows/hides the application from the system tray",
"first-tab": "Activates the first tab in the list",
"second-tab": "Activates the second tab in the list",
"third-tab": "Activates the third tab in the list",
"fourth-tab": "Activates the fourth tab in the list",
"fifth-tab": "Activates the fifth tab in the list",
"sixth-tab": "Activates the sixth tab in the list",
"seventh-tab": "Activates the seventh tab in the list",
"eight-tab": "Activates the eighth tab in the list",
"ninth-tab": "Activates the ninth tab in the list",
"last-tab": "Activates the last tab in the list",
"toggle-tray": "Show/hide the application from the system tray",
"first-tab": "Activate the first tab in the list",
"second-tab": "Activate the second tab in the list",
"third-tab": "Activate the third tab in the list",
"fourth-tab": "Activate the fourth tab in the list",
"fifth-tab": "Activate the fifth tab in the list",
"sixth-tab": "Activate the sixth tab in the list",
"seventh-tab": "Activate the seventh tab in the list",
"eight-tab": "Activate the eighth tab in the list",
"ninth-tab": "Activate the ninth tab in the list",
"last-tab": "Activate the last tab in the list",
"dialogs": "Dialogs",
"show-note-source": "Shows Note Source dialog",
"show-options": "Shows Options dialog",
"show-revisions": "Shows Note Revisions dialog",
"show-recent-changes": "Shows Recent Changes dialog",
"show-sql-console": "Shows SQL Console dialog",
"show-backend-log": "Shows Backend Log dialog",
"show-note-source": "Show \"Note Source\" dialog",
"show-options": "Open \"Options\" page",
"show-revisions": "Show \"Note Revisions\" dialog",
"show-recent-changes": "Show \"Recent Changes\" dialog",
"show-sql-console": "Open \"SQL Console\" page",
"show-backend-log": "Open \"Backend Log\" page",
"show-help": "Open the built-in User Guide",
"show-cheatsheet": "Show a modal with common keyboard operations",
"text-note-operations": "Text note operations",
"add-link-to-text": "Open dialog to add link to the text",
"follow-link-under-cursor": "Follow link within which the caret is placed",
@@ -76,10 +86,11 @@
"open-note-externally": "Open note as a file with default application",
"render-active-note": "Render (re-render) active note",
"run-active-note": "Run active JavaScript (frontend/backend) code note",
"toggle-note-hoisting": "Toggles note hoisting of active note",
"toggle-note-hoisting": "Toggle note hoisting of active note",
"unhoist": "Unhoist from anywhere",
"reload-frontend-app": "Reload frontend App",
"open-dev-tools": "Open dev tools",
"reload-frontend-app": "Reload frontend",
"open-dev-tools": "Open developer tools",
"find-in-text": "Toggle search panel",
"toggle-left-note-tree-panel": "Toggle left (note tree) panel",
"toggle-full-screen": "Toggle full screen",
"zoom-out": "Zoom Out",
@@ -88,11 +99,9 @@
"reset-zoom-level": "Reset zoom level",
"copy-without-formatting": "Copy selected text without formatting",
"force-save-revision": "Force creating / saving new note revision of the active note",
"show-help": "Shows the built-in User Guide",
"toggle-book-properties": "Toggle Book Properties",
"toggle-classic-editor-toolbar": "Toggle the Formatting tab for the editor with fixed toolbar",
"export-as-pdf": "Exports the current note as a PDF",
"show-cheatsheet": "Shows a modal with common keyboard operations",
"export-as-pdf": "Export the current note as a PDF",
"toggle-zen-mode": "Enables/disables the zen mode (minimal UI for more focused editing)"
},
"login": {


@@ -118,6 +118,15 @@
<% if (themeCssUrl) { %>
<link href="<%= themeCssUrl %>" rel="stylesheet">
<% } %>
<% if (themeUseNextAsBase === "next") { %>
<link href="<%= assetPath %>/stylesheets/theme-next.css" rel="stylesheet">
<% } else if (themeUseNextAsBase === "next-dark") { %>
<link href="<%= assetPath %>/stylesheets/theme-next-dark.css" rel="stylesheet">
<% } else if (themeUseNextAsBase === "next-light") { %>
<link href="<%= assetPath %>/stylesheets/theme-next-light.css" rel="stylesheet">
<% } %>
<link href="<%= assetPath %>/stylesheets/style.css" rel="stylesheet">
<link href="<%= assetPath %>/stylesheets/print.css" rel="stylesheet" media="print">


@@ -12,6 +12,7 @@ import type { AttachmentRow, BlobRow, RevisionRow } from "@triliumnext/commons";
import BBlob from "./entities/bblob.js";
import BRecentNote from "./entities/brecent_note.js";
import type AbstractBeccaEntity from "./entities/abstract_becca_entity.js";
import type BNoteEmbedding from "./entities/bnote_embedding.js";
interface AttachmentOpts {
includeContentLength?: boolean;
@@ -32,6 +33,7 @@ export default class Becca {
attributeIndex!: Record<string, BAttribute[]>;
options!: Record<string, BOption>;
etapiTokens!: Record<string, BEtapiToken>;
noteEmbeddings!: Record<string, BNoteEmbedding>;
allNoteSetCache: NoteSet | null;
@@ -48,6 +50,7 @@
this.attributeIndex = {};
this.options = {};
this.etapiTokens = {};
this.noteEmbeddings = {};
this.dirtyNoteSetCache();


@@ -9,9 +9,10 @@ import BBranch from "./entities/bbranch.js";
import BAttribute from "./entities/battribute.js";
import BOption from "./entities/boption.js";
import BEtapiToken from "./entities/betapi_token.js";
import BNoteEmbedding from "./entities/bnote_embedding.js";
import cls from "../services/cls.js";
import entityConstructor from "../becca/entity_constructor.js";
import type { AttributeRow, BranchRow, EtapiTokenRow, NoteRow, OptionRow } from "@triliumnext/commons";
import type { AttributeRow, BranchRow, EtapiTokenRow, NoteRow, OptionRow, NoteEmbeddingRow } from "@triliumnext/commons";
import type AbstractBeccaEntity from "./entities/abstract_becca_entity.js";
import ws from "../services/ws.js";
@@ -63,6 +64,18 @@ function load() {
for (const row of sql.getRows<EtapiTokenRow>(/*sql*/`SELECT etapiTokenId, name, tokenHash, utcDateCreated, utcDateModified FROM etapi_tokens WHERE isDeleted = 0`)) {
new BEtapiToken(row);
}
try {
for (const row of sql.getRows<NoteEmbeddingRow>(/*sql*/`SELECT embedId, noteId, providerId, modelId, dimension, embedding, version, dateCreated, dateModified, utcDateCreated, utcDateModified FROM note_embeddings`)) {
new BNoteEmbedding(row).init();
}
} catch (e: unknown) {
if (e && typeof e === "object" && "message" in e && typeof e.message === "string" && e.message.includes("no such table")) {
// Can be ignored.
} else {
throw e;
}
}
});
for (const noteId in becca.notes) {
@@ -85,7 +98,7 @@ eventService.subscribeBeccaLoader([eventService.ENTITY_CHANGE_SYNCED], ({ entity
return;
}
if (["notes", "branches", "attributes", "etapi_tokens", "options"].includes(entityName)) {
if (["notes", "branches", "attributes", "etapi_tokens", "options", "note_embeddings"].includes(entityName)) {
const EntityClass = entityConstructor.getEntityFromEntityName(entityName);
const primaryKeyName = EntityClass.primaryKeyName;
@@ -143,6 +156,8 @@ eventService.subscribeBeccaLoader([eventService.ENTITY_DELETED, eventService.ENT
attributeDeleted(entityId);
} else if (entityName === "etapi_tokens") {
etapiTokenDeleted(entityId);
} else if (entityName === "note_embeddings") {
noteEmbeddingDeleted(entityId);
}
});
@@ -278,6 +293,10 @@ function etapiTokenDeleted(etapiTokenId: string) {
delete becca.etapiTokens[etapiTokenId];
}
function noteEmbeddingDeleted(embedId: string) {
delete becca.noteEmbeddings[embedId];
}
eventService.subscribeBeccaLoader(eventService.ENTER_PROTECTED_SESSION, () => {
try {
becca.decryptProtectedNotes();


@@ -32,6 +32,12 @@ class BNoteEmbedding extends AbstractBeccaEntity<BNoteEmbedding> {
}
}
init() {
if (this.embedId) {
this.becca.noteEmbeddings[this.embedId] = this;
}
}
updateFromRow(row: NoteEmbeddingRow): void {
this.embedId = row.embedId;
this.noteId = row.noteId;
@@ -44,6 +50,10 @@
this.dateModified = row.dateModified;
this.utcDateCreated = row.utcDateCreated;
this.utcDateModified = row.utcDateModified;
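// Re-register in the becca cache so synced or updated rows stay consistent with the in-memory state.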
if (this.embedId) {
this.becca.noteEmbeddings[this.embedId] = this;
}
}
override beforeSaving() {

View File

@ -7,7 +7,8 @@ import { initializeTranslations } from "./services/i18n.js";
async function startApplication() {
await initializeTranslations();
await import("./www.js");
const startTriliumServer = (await import("./www.js")).default;
await startTriliumServer();
}
startApplication();

View File

@ -5,7 +5,6 @@ import options from "../../services/options.js";
// Import the index service for knowledge base management
import indexService from "../../services/llm/index_service.js";
import restChatService from "../../services/llm/rest_chat_service.js";
import chatService from '../../services/llm/chat_service.js';
import chatStorageService from '../../services/llm/chat_storage_service.js';
// Define basic interfaces
@ -190,23 +189,26 @@ async function getSession(req: Request, res: Response) {
* tags: ["llm"]
*/
async function updateSession(req: Request, res: Response) {
// Get the chat using ChatService
// Get the chat using chatStorageService directly
const chatNoteId = req.params.chatNoteId;
const updates = req.body;
try {
// Get the chat
const session = await chatService.getOrCreateSession(chatNoteId);
const chat = await chatStorageService.getChat(chatNoteId);
if (!chat) {
throw new Error(`Chat with ID ${chatNoteId} not found`);
}
// Update title if provided
if (updates.title) {
await chatStorageService.updateChat(chatNoteId, session.messages, updates.title);
await chatStorageService.updateChat(chatNoteId, chat.messages, updates.title);
}
// Return the updated chat
return {
id: chatNoteId,
title: updates.title || session.title,
title: updates.title || chat.title,
updatedAt: new Date()
};
} catch (error) {
@ -248,18 +250,18 @@ async function updateSession(req: Request, res: Response) {
* tags: ["llm"]
*/
async function listSessions(req: Request, res: Response) {
// Get all sessions using ChatService
// Get all sessions using chatStorageService directly
try {
const sessions = await chatService.getAllSessions();
const chats = await chatStorageService.getAllChats();
// Format the response
return {
sessions: sessions.map(session => ({
id: session.id,
title: session.title,
createdAt: new Date(), // Since we don't have this in chat sessions
lastActive: new Date(), // Since we don't have this in chat sessions
messageCount: session.messages.length
sessions: chats.map(chat => ({
id: chat.id,
title: chat.title,
createdAt: chat.createdAt || new Date(),
lastActive: chat.updatedAt || new Date(),
messageCount: chat.messages.length
}))
};
} catch (error) {
@ -811,17 +813,38 @@ async function streamMessage(req: Request, res: Response) {
const { content, useAdvancedContext, showThinking, mentions } = req.body;
if (!content || typeof content !== 'string' || content.trim().length === 0) {
throw new Error('Content cannot be empty');
return res.status(400).json({
success: false,
error: 'Content cannot be empty'
});
}
// IMPORTANT: Immediately send a success response to the initial POST request
// The client is waiting for this to confirm streaming has been initiated
res.status(200).json({
success: true,
message: 'Streaming initiated successfully'
});
log.info(`Sent immediate success response for streaming setup`);
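// The HTTP request/response cycle is now complete; everything below communicates with the client over WebSocket.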
// Create a new response object for streaming through WebSocket only
// We won't use HTTP streaming since we've already sent the HTTP response
// Check if session exists
const session = restChatService.getSessions().get(chatNoteId);
if (!session) {
throw new Error('Chat not found');
// Get or create chat directly from storage (simplified approach)
let chat = await chatStorageService.getChat(chatNoteId);
if (!chat) {
// Create a new chat if it doesn't exist
chat = await chatStorageService.createChat('New Chat');
log.info(`Created new chat with ID: ${chat.id} for stream request`);
}
// Update last active timestamp
session.lastActive = new Date();
// Add the user message to the chat immediately
chat.messages.push({
role: 'user',
content
});
// Save the chat to ensure the user message is recorded
await chatStorageService.updateChat(chat.id, chat.messages, chat.title);
// Process mentions if provided
let enhancedContent = content;
@ -830,7 +853,6 @@ async function streamMessage(req: Request, res: Response) {
// Import note service to get note content
const becca = (await import('../../becca/becca.js')).default;
const mentionContexts: string[] = [];
for (const mention of mentions) {
@ -857,102 +879,94 @@ async function streamMessage(req: Request, res: Response) {
}
}
// Add user message to the session (with enhanced content for processing)
session.messages.push({
role: 'user',
content: enhancedContent,
timestamp: new Date()
});
// Create request parameters for the pipeline
const requestParams = {
chatNoteId: chatNoteId,
content: enhancedContent,
useAdvancedContext: useAdvancedContext === true,
showThinking: showThinking === true,
stream: true // Always stream for this endpoint
};
// Create a fake request/response pair to pass to the handler
const fakeReq = {
...req,
method: 'GET', // Set to GET to indicate streaming
query: {
stream: 'true', // Set stream param - don't use format: 'stream' to avoid confusion
useAdvancedContext: String(useAdvancedContext === true),
showThinking: String(showThinking === true)
},
params: {
chatNoteId: chatNoteId
},
// Make sure the enhanced content is available to the handler
body: {
content: enhancedContent,
useAdvancedContext: useAdvancedContext === true,
showThinking: showThinking === true
}
} as unknown as Request;
// Log to verify correct parameters
log.info(`WebSocket stream settings - useAdvancedContext=${useAdvancedContext === true}, in query=${fakeReq.query.useAdvancedContext}, in body=${fakeReq.body.useAdvancedContext}`);
// Extra safety to ensure the parameters are passed correctly
if (useAdvancedContext === true) {
log.info(`Enhanced context IS enabled for this request`);
} else {
log.info(`Enhanced context is NOT enabled for this request`);
}
// Process the request in the background
Promise.resolve().then(async () => {
try {
await restChatService.handleSendMessage(fakeReq, res);
} catch (error) {
log.error(`Background message processing error: ${error}`);
// Import the WebSocket service
const wsService = (await import('../../services/ws.js')).default;
// Define LLMStreamMessage interface
interface LLMStreamMessage {
type: 'llm-stream';
chatNoteId: string;
content?: string;
thinking?: string;
toolExecution?: any;
done?: boolean;
error?: string;
raw?: unknown;
}
// Send error to client via WebSocket
wsService.sendMessageToAllClients({
type: 'llm-stream',
chatNoteId: chatNoteId,
error: `Error processing message: ${error}`,
done: true
} as LLMStreamMessage);
}
});
// Import the WebSocket service
// Import the WebSocket service to send immediate feedback
const wsService = (await import('../../services/ws.js')).default;
// Let the client know streaming has started via WebSocket (helps client confirm connection is working)
// Let the client know streaming has started
wsService.sendMessageToAllClients({
type: 'llm-stream',
chatNoteId: chatNoteId,
thinking: 'Initializing streaming LLM response...'
thinking: showThinking ? 'Initializing streaming LLM response...' : undefined
});
// Let the client know streaming has started via HTTP response
return {
success: true,
message: 'Streaming started',
chatNoteId: chatNoteId
};
// Instead of trying to reimplement the streaming logic ourselves,
// delegate to restChatService but set up the correct protocol:
// 1. We've already sent a success response to the initial POST
// 2. Now we'll have restChatService process the actual streaming through WebSocket
try {
// Import the WebSocket service for sending messages
const wsService = (await import('../../services/ws.js')).default;
// Create a simple pass-through response object that won't write to the HTTP response
// but will allow restChatService to send WebSocket messages
const dummyResponse = {
writableEnded: false,
// Implement methods that would normally be used by restChatService
write: (_chunk: string) => {
// Silent no-op - we're only using WebSocket
return true;
},
end: (_chunk?: string) => {
// Log when streaming is complete via WebSocket
log.info(`[${chatNoteId}] Completed HTTP response handling during WebSocket streaming`);
return dummyResponse;
},
setHeader: (name: string, _value: string) => {
// Only log for content-type to reduce noise
if (name.toLowerCase() === 'content-type') {
log.info(`[${chatNoteId}] Setting up streaming for WebSocket only`);
}
return dummyResponse;
}
};
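// Design note: restChatService expects an Express Response to write to; this stub satisfies that
// contract while the actual output travels over WebSocket only.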
// Process the streaming now through WebSocket only
try {
log.info(`[${chatNoteId}] Processing LLM streaming through WebSocket after successful initiation at ${new Date().toISOString()}`);
// Call restChatService with our enhanced request and dummy response
// The important part is setting method to GET to indicate streaming mode
await restChatService.handleSendMessage({
...req,
method: 'GET', // Indicate streaming mode
query: {
...req.query,
stream: 'true' // Add the required stream parameter
},
body: {
content: enhancedContent,
useAdvancedContext: useAdvancedContext === true,
showThinking: showThinking === true
},
params: { chatNoteId }
} as unknown as Request, dummyResponse as unknown as Response);
log.info(`[${chatNoteId}] WebSocket streaming completed at ${new Date().toISOString()}`);
} catch (streamError) {
log.error(`[${chatNoteId}] Error during WebSocket streaming: ${streamError}`);
// Send error message through WebSocket
wsService.sendMessageToAllClients({
type: 'llm-stream',
chatNoteId: chatNoteId,
error: `Error during streaming: ${streamError}`,
done: true
});
}
} catch (error) {
log.error(`Error during streaming: ${error}`);
// Send error to client via WebSocket
wsService.sendMessageToAllClients({
type: 'llm-stream',
chatNoteId: chatNoteId,
error: `Error processing message: ${error}`,
done: true
});
}
} catch (error: any) {
log.error(`Error starting message stream: ${error.message}`);
throw error;
log.error(`Error starting message stream, can't communicate via WebSocket: ${error.message}`);
}
}

View File

@ -81,13 +81,13 @@ async function listModels(req: Request, res: Response) {
// Filter and categorize models
const allModels = response.data || [];
// Separate models into chat models and embedding models
// Include all models as chat models, without filtering by specific model names
// This allows models from providers like OpenRouter to be displayed
const chatModels = allModels
.filter((model) =>
// Include GPT models for chat
model.id.includes('gpt') ||
// Include Claude models via Azure OpenAI
model.id.includes('claude')
.filter((model) =>
// Exclude models that are explicitly for embeddings
!model.id.includes('embedding') &&
!model.id.includes('embed')
)
.map((model) => ({
id: model.id,

View File

@ -799,6 +799,7 @@ class ConsistencyChecks {
this.runEntityChangeChecks("attributes", "attributeId");
this.runEntityChangeChecks("etapi_tokens", "etapiTokenId");
this.runEntityChangeChecks("options", "name");
this.runEntityChangeChecks("note_embeddings", "embedId");
}
findWronglyNamedAttributes() {

View File

@ -19,12 +19,14 @@ function getDefaultKeyboardActions() {
actionName: "backInNoteHistory",
// Mac has different history navigation shortcuts - https://github.com/zadam/trilium/issues/376
defaultShortcuts: isMac ? ["CommandOrControl+Left"] : ["Alt+Left"],
description: t("keyboard_actions.back-in-note-history"),
scope: "window"
},
{
actionName: "forwardInNoteHistory",
// Mac has different history navigation shortcuts - https://github.com/zadam/trilium/issues/376
defaultShortcuts: isMac ? ["CommandOrControl+Right"] : ["Alt+Right"],
description: t("keyboard_actions.forward-in-note-history"),
scope: "window"
},
{
@ -36,11 +38,13 @@ function getDefaultKeyboardActions() {
{
actionName: "scrollToActiveNote",
defaultShortcuts: ["CommandOrControl+."],
description: t("keyboard_actions.scroll-to-active-note"),
scope: "window"
},
{
actionName: "quickSearch",
defaultShortcuts: ["CommandOrControl+S"],
description: t("keyboard_actions.quick-search"),
scope: "window"
},
{
@ -80,11 +84,13 @@ function getDefaultKeyboardActions() {
{
actionName: "createNoteAfter",
defaultShortcuts: ["CommandOrControl+O"],
description: t("keyboard_actions.create-note-after"),
scope: "window"
},
{
actionName: "createNoteInto",
defaultShortcuts: ["CommandOrControl+P"],
description: t("keyboard_actions.create-note-into"),
scope: "window"
},
{
@ -138,11 +144,13 @@ function getDefaultKeyboardActions() {
{
actionName: "cloneNotesTo",
defaultShortcuts: ["CommandOrControl+Shift+C"],
description: t("keyboard_actions.clone-notes-to"),
scope: "window"
},
{
actionName: "moveNotesTo",
defaultShortcuts: ["CommandOrControl+Shift+X"],
description: t("keyboard_actions.move-notes-to"),
scope: "window"
},
@ -566,6 +574,7 @@ function getDefaultKeyboardActions() {
{
actionName: "findInText",
defaultShortcuts: isElectron ? ["CommandOrControl+F"] : [],
description: t("keyboard_actions.find-in-text"),
scope: "window"
},
{

View File

@ -18,6 +18,19 @@ import type {
} from './interfaces/ai_service_interfaces.js';
import type { NoteSearchResult } from './interfaces/context_interfaces.js';
// Import new configuration system
import {
getProviderPrecedence,
getPreferredProvider,
getEmbeddingProviderPrecedence,
parseModelIdentifier,
isAIEnabled,
getDefaultModelForProvider,
clearConfigurationCache,
validateConfiguration
} from './config/configuration_helpers.js';
import type { ProviderType } from './interfaces/configuration_interfaces.js';
/**
* Interface representing relevant note context
*/
@ -36,7 +49,7 @@ export class AIServiceManager implements IAIServiceManager {
ollama: new OllamaService()
};
private providerOrder: ServiceProviders[] = ['openai', 'anthropic', 'ollama']; // Default order
private providerOrder: ServiceProviders[] = []; // Will be populated from configuration
private initialized = false;
constructor() {
@ -71,7 +84,24 @@ export class AIServiceManager implements IAIServiceManager {
}
/**
* Update the provider precedence order from saved options
* Update the provider precedence order using the new configuration system
*/
async updateProviderOrderAsync(): Promise<void> {
try {
const providers = await getProviderPrecedence();
this.providerOrder = providers as ServiceProviders[];
this.initialized = true;
log.info(`Updated provider order: ${providers.join(', ')}`);
} catch (error) {
log.error(`Failed to get provider precedence: ${error}`);
// Keep empty order, will be handled gracefully by other methods
this.providerOrder = [];
this.initialized = true;
}
}
/**
* Update the provider precedence order (legacy sync version)
* Returns true if successful, false if options not available yet
*/
updateProviderOrder(): boolean {
@ -79,146 +109,48 @@ export class AIServiceManager implements IAIServiceManager {
return true;
}
try {
// Default precedence: openai, anthropic, ollama
const defaultOrder: ServiceProviders[] = ['openai', 'anthropic', 'ollama'];
// Use async version but don't wait
this.updateProviderOrderAsync().catch(error => {
log.error(`Error in async provider order update: ${error}`);
});
// Get custom order from options
const customOrder = options.getOption('aiProviderPrecedence');
if (customOrder) {
try {
// Try to parse as JSON first
let parsed;
// Handle both array in JSON format and simple string format
if (customOrder.startsWith('[') && customOrder.endsWith(']')) {
parsed = JSON.parse(customOrder);
} else if (typeof customOrder === 'string') {
// If it's a string with commas, split it
if (customOrder.includes(',')) {
parsed = customOrder.split(',').map(p => p.trim());
} else {
// If it's a simple string (like "ollama"), convert to single-item array
parsed = [customOrder];
}
} else {
// Fallback to default
parsed = defaultOrder;
}
// Validate that all providers are valid
if (Array.isArray(parsed) &&
parsed.every(p => Object.keys(this.services).includes(p))) {
this.providerOrder = parsed as ServiceProviders[];
} else {
log.info('Invalid AI provider precedence format, using defaults');
this.providerOrder = defaultOrder;
}
} catch (e) {
log.error(`Failed to parse AI provider precedence: ${e}`);
this.providerOrder = defaultOrder;
}
} else {
this.providerOrder = defaultOrder;
}
this.initialized = true;
// Remove the validateEmbeddingProviders call since we now do validation on the client
// this.validateEmbeddingProviders();
return true;
} catch (error) {
// If options table doesn't exist yet, use defaults
// This happens during initial database creation
this.providerOrder = ['openai', 'anthropic', 'ollama'];
return false;
}
return true;
}
/**
* Validate embedding providers configuration
* - Check if embedding default provider is in provider precedence list
* - Check if all providers in precedence list and default provider are enabled
*
* @returns A warning message if there are issues, or null if everything is fine
* Validate AI configuration using the new configuration system
*/
async validateEmbeddingProviders(): Promise<string | null> {
async validateConfiguration(): Promise<string | null> {
try {
// Check if AI is enabled, if not, skip validation
const aiEnabled = await options.getOptionBool('aiEnabled');
if (!aiEnabled) {
return null;
const result = await validateConfiguration();
if (!result.isValid) {
let message = 'There are issues with your AI configuration:';
for (const error of result.errors) {
message += `\n• ${error}`;
}
if (result.warnings.length > 0) {
message += '\n\nWarnings:';
for (const warning of result.warnings) {
message += `\n• ${warning}`;
}
}
message += '\n\nPlease check your AI settings.';
return message;
}
// Get precedence list from options
let precedenceList: string[] = ['openai']; // Default to openai if not set
const precedenceOption = await options.getOption('aiProviderPrecedence');
if (precedenceOption) {
try {
if (precedenceOption.startsWith('[') && precedenceOption.endsWith(']')) {
precedenceList = JSON.parse(precedenceOption);
} else if (typeof precedenceOption === 'string') {
if (precedenceOption.includes(',')) {
precedenceList = precedenceOption.split(',').map(p => p.trim());
} else {
precedenceList = [precedenceOption];
}
}
} catch (e) {
log.error(`Error parsing precedence list: ${e}`);
if (result.warnings.length > 0) {
let message = 'AI configuration warnings:';
for (const warning of result.warnings) {
message += `\n• ${warning}`;
}
}
// Check for configuration issues with providers in the precedence list
const configIssues: string[] = [];
// Check each provider in the precedence list for proper configuration
for (const provider of precedenceList) {
if (provider === 'openai') {
// Check OpenAI configuration
const apiKey = await options.getOption('openaiApiKey');
if (!apiKey) {
configIssues.push(`OpenAI API key is missing`);
}
} else if (provider === 'anthropic') {
// Check Anthropic configuration
const apiKey = await options.getOption('anthropicApiKey');
if (!apiKey) {
configIssues.push(`Anthropic API key is missing`);
}
} else if (provider === 'ollama') {
// Check Ollama configuration
const baseUrl = await options.getOption('ollamaBaseUrl');
if (!baseUrl) {
configIssues.push(`Ollama Base URL is missing`);
}
}
// Add checks for other providers as needed
}
// Return warning message if there are configuration issues
if (configIssues.length > 0) {
let message = 'There are issues with your AI provider configuration:';
for (const issue of configIssues) {
message += `\n• ${issue}`;
}
message += '\n\nPlease check your AI settings.';
// Log warning to console
log.error('AI Provider Configuration Warning: ' + message);
return message;
log.info(message);
}
return null;
} catch (error) {
log.error(`Error validating embedding providers: ${error}`);
return null;
log.error(`Error validating AI configuration: ${error}`);
return `Configuration validation failed: ${error}`;
}
}
@ -279,18 +211,20 @@ export class AIServiceManager implements IAIServiceManager {
// If a specific provider is requested and available, use it
if (options.model && options.model.includes(':')) {
const [providerName, modelName] = options.model.split(':');
// Use the new configuration system to parse model identifier
const modelIdentifier = parseModelIdentifier(options.model);
if (availableProviders.includes(providerName as ServiceProviders)) {
if (modelIdentifier.provider && availableProviders.includes(modelIdentifier.provider as ServiceProviders)) {
try {
const modifiedOptions = { ...options, model: modelName };
log.info(`[AIServiceManager] Using provider ${providerName} from model prefix with modifiedOptions.stream: ${modifiedOptions.stream}`);
return await this.services[providerName as ServiceProviders].generateChatCompletion(messages, modifiedOptions);
const modifiedOptions = { ...options, model: modelIdentifier.modelId };
log.info(`[AIServiceManager] Using provider ${modelIdentifier.provider} from model prefix with modifiedOptions.stream: ${modifiedOptions.stream}`);
return await this.services[modelIdentifier.provider as ServiceProviders].generateChatCompletion(messages, modifiedOptions);
} catch (error) {
log.error(`Error with specified provider ${providerName}: ${error}`);
log.error(`Error with specified provider ${modelIdentifier.provider}: ${error}`);
// If the specified provider fails, continue with the fallback providers
}
}
// If not a provider prefix, treat the entire string as a model name and continue with normal provider selection
}
// Try each provider in order until one succeeds
@ -390,39 +324,33 @@ export class AIServiceManager implements IAIServiceManager {
}
/**
* Get whether AI features are enabled from options
* Get whether AI features are enabled using the new configuration system
*/
async getAIEnabledAsync(): Promise<boolean> {
return isAIEnabled();
}
/**
* Get whether AI features are enabled (sync version for compatibility)
*/
getAIEnabled(): boolean {
// For synchronous compatibility, use the old method
// In a full refactor, this should be async
return options.getOptionBool('aiEnabled');
}
/**
* Set up embeddings provider for AI features
* Set up embeddings provider using the new configuration system
*/
async setupEmbeddingsProvider(): Promise<void> {
try {
if (!this.getAIEnabled()) {
const aiEnabled = await isAIEnabled();
if (!aiEnabled) {
log.info('AI features are disabled');
return;
}
// Get provider precedence list
const precedenceOption = await options.getOption('embeddingProviderPrecedence');
let precedenceList: string[] = [];
if (precedenceOption) {
if (precedenceOption.startsWith('[') && precedenceOption.endsWith(']')) {
precedenceList = JSON.parse(precedenceOption);
} else if (typeof precedenceOption === 'string') {
if (precedenceOption.includes(',')) {
precedenceList = precedenceOption.split(',').map(p => p.trim());
} else {
precedenceList = [precedenceOption];
}
}
}
// Check if we have enabled providers
// Use the new configuration system - no string parsing!
const enabledProviders = await getEnabledEmbeddingProviders();
if (enabledProviders.length === 0) {
@ -439,20 +367,23 @@ export class AIServiceManager implements IAIServiceManager {
}
/**
* Initialize the AI Service
* Initialize the AI Service using the new configuration system
*/
async initialize(): Promise<void> {
try {
log.info("Initializing AI service...");
// Check if AI is enabled in options
const isAIEnabled = this.getAIEnabled();
// Check if AI is enabled using the new helper
const aiEnabled = await isAIEnabled();
if (!isAIEnabled) {
if (!aiEnabled) {
log.info("AI features are disabled in options");
return;
}
// Update provider order from configuration
await this.updateProviderOrderAsync();
// Set up embeddings provider if AI is enabled
await this.setupEmbeddingsProvider();
@ -586,7 +517,25 @@ export class AIServiceManager implements IAIServiceManager {
}
/**
* Get the preferred provider based on configuration
* Get the preferred provider based on configuration using the new system
*/
async getPreferredProviderAsync(): Promise<string> {
try {
const preferredProvider = await getPreferredProvider();
if (preferredProvider === null) {
// No providers configured, fallback to first available
log.info('No providers configured in precedence, using first available provider');
return this.providerOrder[0];
}
return preferredProvider;
} catch (error) {
log.error(`Error getting preferred provider: ${error}`);
return this.providerOrder[0];
}
}
/**
* Get the preferred provider based on configuration (sync version for compatibility)
*/
getPreferredProvider(): string {
this.ensureInitialized();
@ -669,7 +618,7 @@ export default {
},
// Add validateEmbeddingProviders method
async validateEmbeddingProviders(): Promise<string | null> {
return getInstance().validateEmbeddingProviders();
return getInstance().validateConfiguration();
},
// Context and index related methods
getContextExtractor() {

View File

@ -3,7 +3,6 @@
*/
import log from "../../../log.js";
import type { Message } from "../../ai_interface.js";
import SessionsStore from "../sessions_store.js";
/**
* Handles the execution of LLM tools
@ -101,11 +100,6 @@ export class ToolHandler {
: JSON.stringify(result).substring(0, 100) + '...';
log.info(`Tool result: ${resultPreview}`);
// Record tool execution in session if chatNoteId is provided
if (chatNoteId) {
SessionsStore.recordToolExecution(chatNoteId, toolCall, typeof result === 'string' ? result : JSON.stringify(result));
}
// Format result as a proper message
return {
role: 'tool',
@ -116,11 +110,6 @@ export class ToolHandler {
} catch (error: any) {
log.error(`Error executing tool ${toolCall.function.name}: ${error.message}`);
// Record error in session if chatNoteId is provided
if (chatNoteId) {
SessionsStore.recordToolExecution(chatNoteId, toolCall, '', error.message);
}
// Return error as tool result
return {
role: 'tool',

View File

@ -2,7 +2,6 @@
* Chat module export
*/
import restChatService from './rest_chat_service.js';
import sessionsStore from './sessions_store.js';
import { ContextHandler } from './handlers/context_handler.js';
import { ToolHandler } from './handlers/tool_handler.js';
import { StreamHandler } from './handlers/stream_handler.js';
@ -13,7 +12,6 @@ import type { LLMStreamMessage } from '../interfaces/chat_ws_messages.js';
// Export components
export {
restChatService as default,
sessionsStore,
ContextHandler,
ToolHandler,
StreamHandler,

View File

@ -1,5 +1,6 @@
/**
* Service to handle chat API interactions
* Simplified service to handle chat API interactions
* Works directly with ChatStorageService - no complex session management
*/
import log from "../../log.js";
import type { Request, Response } from "express";
@ -8,21 +9,16 @@ import { AIServiceManager } from "../ai_service_manager.js";
import { ChatPipeline } from "../pipeline/chat_pipeline.js";
import type { ChatPipelineInput } from "../pipeline/interfaces.js";
import options from "../../options.js";
import { SEARCH_CONSTANTS } from '../constants/search_constants.js';
// Import our refactored modules
import { ContextHandler } from "./handlers/context_handler.js";
import { ToolHandler } from "./handlers/tool_handler.js";
import { StreamHandler } from "./handlers/stream_handler.js";
import SessionsStore from "./sessions_store.js";
import * as MessageFormatter from "./utils/message_formatter.js";
import type { NoteSource } from "../interfaces/chat_session.js";
import type { LLMStreamMessage } from "../interfaces/chat_ws_messages.js";
import type { ChatMessage } from '../interfaces/chat_session.js';
import type { ChatSession } from '../interfaces/chat_session.js';
import chatStorageService from '../chat_storage_service.js';
import {
isAIEnabled,
getFirstValidModelConfig,
} from '../config/configuration_helpers.js';
/**
* Service to handle chat API interactions
* Simplified service to handle chat API interactions
*/
class RestChatService {
/**
@ -41,35 +37,15 @@ class RestChatService {
* Check if AI services are available
*/
safelyUseAIManager(): boolean {
// Only use AI manager if database is initialized
if (!this.isDatabaseInitialized()) {
log.info("AI check failed: Database is not initialized");
return false;
}
// Try to access the manager - will create instance only if needed
try {
// Create local instance to avoid circular references
const aiManager = new AIServiceManager();
if (!aiManager) {
log.info("AI check failed: AI manager module is not available");
return false;
}
const isAvailable = aiManager.isAnyServiceAvailable();
log.info(`AI service availability check result: ${isAvailable}`);
if (isAvailable) {
// Additional diagnostics
try {
const providers = aiManager.getAvailableProviders();
log.info(`Available AI providers: ${providers.join(', ')}`);
} catch (err) {
log.info(`Could not get available providers: ${err}`);
}
}
return isAvailable;
} catch (error) {
log.error(`Error accessing AI service manager: ${error}`);
@ -79,505 +55,330 @@ class RestChatService {
/**
* Handle a message sent to an LLM and get a response
* Simplified to work directly with chat storage
*/
async handleSendMessage(req: Request, res: Response) {
log.info("=== Starting handleSendMessage ===");
log.info("=== Starting simplified handleSendMessage ===");
try {
// Extract parameters differently based on the request method
// Extract parameters
let content, useAdvancedContext, showThinking, chatNoteId;
if (req.method === 'POST') {
// For POST requests, get content from the request body
const requestBody = req.body || {};
content = requestBody.content;
useAdvancedContext = requestBody.useAdvancedContext || false;
showThinking = requestBody.showThinking || false;
// Add logging for POST requests
log.info(`LLM POST message: chatNoteId=${req.params.chatNoteId}, useAdvancedContext=${useAdvancedContext}, showThinking=${showThinking}, contentLength=${content ? content.length : 0}`);
log.info(`LLM POST message: chatNoteId=${req.params.chatNoteId}, contentLength=${content ? content.length : 0}`);
} else if (req.method === 'GET') {
// For GET (streaming) requests, get parameters from query params and body
// For streaming requests, we need the content from the body
useAdvancedContext = req.query.useAdvancedContext === 'true' || (req.body && req.body.useAdvancedContext === true);
showThinking = req.query.showThinking === 'true' || (req.body && req.body.showThinking === true);
content = req.body && req.body.content ? req.body.content : '';
// Add detailed logging for GET requests
log.info(`LLM GET stream: chatNoteId=${req.params.chatNoteId}, useAdvancedContext=${useAdvancedContext}, showThinking=${showThinking}`);
log.info(`Parameters from query: useAdvancedContext=${req.query.useAdvancedContext}, showThinking=${req.query.showThinking}`);
log.info(`Parameters from body: useAdvancedContext=${req.body?.useAdvancedContext}, showThinking=${req.body?.showThinking}, content=${content ? `${content.substring(0, 20)}...` : 'none'}`);
log.info(`LLM GET stream: chatNoteId=${req.params.chatNoteId}`);
}
// Get chatNoteId from URL params
chatNoteId = req.params.chatNoteId;
// For GET requests, ensure we have the stream parameter
// Validate inputs
if (req.method === 'GET' && req.query.stream !== 'true') {
throw new Error('Stream parameter must be set to true for GET/streaming requests');
}
// For POST requests, validate the content
if (req.method === 'POST' && (!content || typeof content !== 'string' || content.trim().length === 0)) {
throw new Error('Content cannot be empty');
}
// Get or create session from Chat Note
let session = await this.getOrCreateSessionFromChatNote(chatNoteId, req.method === 'POST');
// Check if AI is enabled
const aiEnabled = await options.getOptionBool('aiEnabled');
if (!aiEnabled) {
return { error: "AI features are disabled. Please enable them in the settings." };
}
// If no session found and we're not allowed to create one (GET request)
if (!session && req.method === 'GET') {
if (!this.safelyUseAIManager()) {
return { error: "AI services are currently unavailable. Please check your configuration." };
}
// Load or create chat directly from storage
let chat = await chatStorageService.getChat(chatNoteId);
if (!chat && req.method === 'GET') {
throw new Error('Chat Note not found, cannot create session for streaming');
}
// For POST requests, if no Chat Note exists, create a new one
if (!session && req.method === 'POST') {
log.info(`No Chat Note found for ${chatNoteId}, creating a new Chat Note and session`);
// Create a new Chat Note via the storage service
//const chatStorageService = (await import('../../llm/chat_storage_service.js')).default;
//const newChat = await chatStorageService.createChat('New Chat');
// Use the new Chat Note's ID for the session
session = SessionsStore.createSession({
//title: newChat.title,
chatNoteId: chatNoteId
});
// Update the session ID to match the Chat Note ID
session.id = chatNoteId;
log.info(`Created new Chat Note and session with ID: ${session.id}`);
// Update the parameter to use the new ID
chatNoteId = session.id;
if (!chat && req.method === 'POST') {
log.info(`Creating new chat note with ID: ${chatNoteId}`);
chat = await chatStorageService.createChat('New Chat');
// Ideally the chat ID would match the requested one; in practice we use the ID generated by storage
chatNoteId = chat.id;
}
// At this point, session should never be null
// TypeScript doesn't know this, so we'll add a check
if (!session) {
// This should never happen due to our logic above
throw new Error('Failed to create or retrieve session');
if (!chat) {
throw new Error('Failed to create or retrieve chat');
}
// Update session last active timestamp
SessionsStore.touchSession(session.id);
// For POST requests, store the user message
if (req.method === 'POST' && content && session) {
// Add message to session
session.messages.push({
// For POST requests, add the user message to the chat immediately
// This ensures user messages are always saved
if (req.method === 'POST' && content) {
chat.messages.push({
role: 'user',
content,
timestamp: new Date()
content
});
// Log a preview of the message
log.info(`Processing LLM message: "${content.substring(0, 50)}${content.length > 50 ? '...' : ''}"`);
}
// Check if AI services are enabled before proceeding
const aiEnabled = await options.getOptionBool('aiEnabled');
log.info(`AI enabled setting: ${aiEnabled}`);
if (!aiEnabled) {
log.info("AI services are disabled by configuration");
return {
error: "AI features are disabled. Please enable them in the settings."
};
}
// Check if AI services are available
log.info("Checking if AI services are available...");
if (!this.safelyUseAIManager()) {
log.info("AI services are not available - checking for specific issues");
try {
// Create a direct instance to avoid circular references
const aiManager = new AIServiceManager();
if (!aiManager) {
log.error("AI service manager is not initialized");
return {
error: "AI service is not properly initialized. Please check your configuration."
};
}
const availableProviders = aiManager.getAvailableProviders();
if (availableProviders.length === 0) {
log.error("No AI providers are available");
return {
error: "No AI providers are configured or available. Please check your AI settings."
};
}
} catch (err) {
log.error(`Detailed AI service check failed: ${err}`);
}
return {
error: "AI services are currently unavailable. Please check your configuration."
};
}
// Create direct instance to avoid circular references
const aiManager = new AIServiceManager();
// Get the default service - just use the first available one
const availableProviders = aiManager.getAvailableProviders();
if (availableProviders.length === 0) {
log.error("No AI providers are available after manager check");
return {
error: "No AI providers are configured or available. Please check your AI settings."
};
}
// Use the first available provider
const providerName = availableProviders[0];
log.info(`Using AI provider: ${providerName}`);
// We know the manager has a 'services' property from our code inspection,
// but TypeScript doesn't know that from the interface.
// This is a workaround to access it
const service = (aiManager as any).services[providerName];
if (!service) {
log.error(`AI service for provider ${providerName} not found`);
return {
error: `Selected AI provider (${providerName}) is not available. Please check your configuration.`
};
// Save immediately to ensure user message is saved
await chatStorageService.updateChat(chat.id, chat.messages, chat.title);
log.info(`Added and saved user message: "${content.substring(0, 50)}${content.length > 50 ? '...' : ''}"`);
}
// Initialize tools
log.info("Initializing LLM agent tools...");
// Ensure tools are initialized to prevent tool execution issues
await ToolHandler.ensureToolsInitialized();
// Create and use the chat pipeline instead of direct processing
// Create and use the chat pipeline
const pipeline = new ChatPipeline({
enableStreaming: req.method === 'GET',
enableMetrics: true,
maxToolCallIterations: 5
});
log.info("Executing chat pipeline...");
// Get user's preferred model
const preferredModel = await this.getPreferredModel();
// Create options object for better tracking
const pipelineOptions = {
// Force useAdvancedContext to be a boolean, no matter what
useAdvancedContext: useAdvancedContext === true,
systemPrompt: session?.messages.find(m => m.role === 'system')?.content,
temperature: session?.metadata.temperature,
maxTokens: session?.metadata.maxTokens,
model: session?.metadata.model,
// Set stream based on request type, but ensure it's explicitly a boolean value
// GET requests or format=stream parameter indicates streaming should be used
systemPrompt: chat.messages.find(m => m.role === 'system')?.content,
model: preferredModel,
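// GET requests and explicit stream/format query flags all select streaming; coerce to a strict boolean for the pipeline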
stream: !!(req.method === 'GET' || req.query.format === 'stream' || req.query.stream === 'true'),
// Include chatNoteId for tracking tool executions
chatNoteId: chatNoteId
};
// Log the options to verify what's being sent to the pipeline
log.info(`Pipeline input options: ${JSON.stringify({
useAdvancedContext: pipelineOptions.useAdvancedContext,
stream: pipelineOptions.stream
})}`);
log.info(`Pipeline options: ${JSON.stringify({ useAdvancedContext: pipelineOptions.useAdvancedContext, stream: pipelineOptions.stream })}`);
// Import the WebSocket service for direct access
// Import WebSocket service for streaming
const wsService = await import('../../ws.js');
const accumulatedContentRef = { value: '' };
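// Accumulates the streamed text chunks so the complete assistant reply can be persisted to chat storage once done=true.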
// Create a stream callback wrapper
// This will ensure we properly handle all streaming messages
let messageContent = '';
// Prepare the pipeline input
const pipelineInput: ChatPipelineInput = {
messages: session.messages.map(msg => ({
messages: chat.messages.map(msg => ({
role: msg.role as 'user' | 'assistant' | 'system',
content: msg.content
})),
query: content || '', // Ensure query is always a string, even if content is null/undefined
noteId: session.noteContext ?? undefined,
query: content || '',
noteId: undefined, // TODO: Add context note support if needed
showThinking: showThinking,
options: pipelineOptions,
streamCallback: req.method === 'GET' ? (data, done, rawChunk) => {
try {
// Use WebSocket service to send messages
this.handleStreamCallback(
data, done, rawChunk,
wsService.default, chatNoteId,
messageContent, session, res
);
} catch (error) {
log.error(`Error in stream callback: ${error}`);
// Try to send error message
try {
wsService.default.sendMessageToAllClients({
type: 'llm-stream',
chatNoteId: chatNoteId,
error: `Stream error: ${error instanceof Error ? error.message : 'Unknown error'}`,
done: true
});
// End the response
res.write(`data: ${JSON.stringify({ error: 'Stream error', done: true })}\n\n`);
res.end();
} catch (e) {
log.error(`Failed to send error message: ${e}`);
}
}
this.handleStreamCallback(data, done, rawChunk, wsService.default, chatNoteId, res, accumulatedContentRef, chat);
} : undefined
};
// Execute the pipeline
const response = await pipeline.execute(pipelineInput);
// Handle the response
if (req.method === 'POST') {
// Add assistant message to session
session.messages.push({
// Add assistant response to chat
chat.messages.push({
role: 'assistant',
content: response.text || '',
timestamp: new Date()
content: response.text || ''
});
// Extract sources if they're available
// Save the updated chat back to storage (single source of truth)
await chatStorageService.updateChat(chat.id, chat.messages, chat.title);
log.info(`Saved non-streaming assistant response: ${(response.text || '').length} characters`);
// Extract sources if available
const sources = (response as any).sources || [];
// Store sources in the session metadata if they're present
if (sources.length > 0) {
session.metadata.sources = sources;
log.info(`Stored ${sources.length} sources in session metadata`);
}
// Return the response with complete metadata
return {
content: response.text || '',
sources: sources,
metadata: {
model: response.model || session.metadata.model,
provider: response.provider || session.metadata.provider,
temperature: session.metadata.temperature,
maxTokens: session.metadata.maxTokens,
lastUpdated: new Date().toISOString(),
toolExecutions: session.metadata.toolExecutions || []
model: response.model,
provider: response.provider,
lastUpdated: new Date().toISOString()
}
};
} else {
// For streaming requests, we've already sent the response
// For streaming, response is already sent via WebSocket/SSE
// The accumulatedContentRef will have been saved in handleStreamCallback when done=true
return null;
}
} catch (processingError: any) {
log.error(`Error processing message: ${processingError}`);
return {
error: `Error processing your request: ${processingError.message}`
};
} catch (error: any) {
log.error(`Error processing message: ${error}`);
return { error: `Error processing your request: ${error.message}` };
}
}
/**
* Handle stream callback for WebSocket communication
* Simplified stream callback handler
*/
private handleStreamCallback(
private async handleStreamCallback(
data: string | null,
done: boolean,
rawChunk: any,
wsService: any,
chatNoteId: string,
messageContent: string,
session: any,
res: Response
res: Response,
accumulatedContentRef: { value: string },
chat: { id: string; messages: Message[]; title: string }
) {
// Only accumulate content that's actually text (not tool execution or thinking info)
if (data) {
messageContent += data;
}
// Create a message object with all necessary fields
const message: LLMStreamMessage = {
type: 'llm-stream',
chatNoteId: chatNoteId
chatNoteId: chatNoteId,
done: done
};
// Add content if available - either the new chunk or full content on completion
if (data) {
message.content = data;
// Simple accumulation - just append the new data
accumulatedContentRef.value += data;
}
// Add thinking info if available in the raw chunk
// Only include thinking if explicitly present in rawChunk
if (rawChunk && 'thinking' in rawChunk && rawChunk.thinking) {
message.thinking = rawChunk.thinking as string;
}
// Add tool execution info if available in the raw chunk
// Only include tool execution if explicitly present in rawChunk
if (rawChunk && 'toolExecution' in rawChunk && rawChunk.toolExecution) {
// Transform the toolExecution to match the expected format
const toolExec = rawChunk.toolExecution;
message.toolExecution = {
// Use optional chaining for all properties
tool: typeof toolExec.tool === 'string'
? toolExec.tool
: toolExec.tool?.name,
tool: typeof toolExec.tool === 'string' ? toolExec.tool : toolExec.tool?.name,
result: toolExec.result,
// Map arguments to args
args: 'arguments' in toolExec ?
(typeof toolExec.arguments === 'object' ?
toolExec.arguments as Record<string, unknown> : {}) : {},
// Add additional properties if they exist
(typeof toolExec.arguments === 'object' ? toolExec.arguments as Record<string, unknown> : {}) : {},
action: 'action' in toolExec ? toolExec.action as string : undefined,
toolCallId: 'toolCallId' in toolExec ? toolExec.toolCallId as string : undefined,
error: 'error' in toolExec ? toolExec.error as string : undefined
};
}
// Set done flag explicitly
message.done = done;
// On final message, include the complete content too
if (done) {
// Store the response in the session when done
session.messages.push({
role: 'assistant',
content: messageContent,
timestamp: new Date()
});
}
// Send message to all clients
// Send WebSocket message
wsService.sendMessageToAllClients(message);
// Log what was sent (first message and completion)
if (message.thinking || done) {
log.info(
`[WS-SERVER] Sending LLM stream message: chatNoteId=${chatNoteId}, content=${!!message.content}, contentLength=${message.content?.length || 0}, thinking=${!!message.thinking}, toolExecution=${!!message.toolExecution}, done=${done}`
);
}
// For GET requests, also send as server-sent events
// Prepare response data for JSON event
const responseData: any = {
content: data,
done
};
// Add tool execution if available
// Send SSE response for compatibility
const responseData: any = { content: data, done };
if (rawChunk?.toolExecution) {
responseData.toolExecution = rawChunk.toolExecution;
}
// Send the data as a JSON event
res.write(`data: ${JSON.stringify(responseData)}\n\n`);
// When streaming is complete, save the accumulated content to the chat note
if (done) {
try {
// Only save if we have accumulated content
if (accumulatedContentRef.value) {
// Add assistant response to chat
chat.messages.push({
role: 'assistant',
content: accumulatedContentRef.value
});
// Save the updated chat back to storage
await chatStorageService.updateChat(chat.id, chat.messages, chat.title);
log.info(`Saved streaming assistant response: ${accumulatedContentRef.value.length} characters`);
}
} catch (error) {
// Log error but don't break the response flow
log.error(`Error saving streaming response: ${error}`);
}
// End the response
res.end();
}
}
/**
* Create a new chat session
* Create a new chat
*/
async createSession(req: Request, res: Response) {
try {
const options: any = req.body || {};
const title = options.title || 'Chat Session';
// Use the currentNoteId as the chatNoteId if provided
let chatNoteId = options.chatNoteId;
let noteId = options.noteId || options.chatNoteId;
// If currentNoteId is provided but chatNoteId is not, use currentNoteId
if (!chatNoteId && options.currentNoteId) {
chatNoteId = options.currentNoteId;
log.info(`Using provided currentNoteId ${chatNoteId} as chatNoteId`);
// Check if currentNoteId is already an AI Chat note
if (!noteId && options.currentNoteId) {
const becca = (await import('../../../becca/becca.js')).default;
const note = becca.notes[options.currentNoteId];
if (note) {
try {
const content = note.getContent();
if (content) {
const contentStr = typeof content === 'string' ? content : content.toString();
const parsedContent = JSON.parse(contentStr);
if (parsedContent.messages && Array.isArray(parsedContent.messages)) {
noteId = options.currentNoteId;
log.info(`Using existing AI Chat note ${noteId} as session`);
}
}
} catch (_) {
// Not JSON content, so not an AI Chat note
}
}
}
// If we still don't have a chatNoteId, create a new Chat Note
if (!chatNoteId) {
// Create a new Chat Note via the storage service
const chatStorageService = (await import('../../llm/chat_storage_service.js')).default;
// Create new chat if needed
if (!noteId) {
const newChat = await chatStorageService.createChat(title);
chatNoteId = newChat.id;
log.info(`Created new Chat Note with ID: ${chatNoteId}`);
noteId = newChat.id;
log.info(`Created new Chat Note with ID: ${noteId}`);
} else {
log.info(`Using existing Chat Note with ID: ${noteId}`);
}
// Create a new session through our session store
const session = SessionsStore.createSession({
chatNoteId,
title,
systemPrompt: options.systemPrompt,
contextNoteId: options.contextNoteId,
maxTokens: options.maxTokens,
model: options.model,
provider: options.provider,
temperature: options.temperature
});
return {
id: session.id,
title: session.title,
createdAt: session.createdAt,
noteId: chatNoteId // Return the note ID explicitly
id: noteId,
title: title,
createdAt: new Date(),
noteId: noteId
};
} catch (error: any) {
log.error(`Error creating LLM session: ${error.message || 'Unknown error'}`);
throw new Error(`Failed to create LLM session: ${error.message || 'Unknown error'}`);
log.error(`Error creating chat session: ${error.message || 'Unknown error'}`);
throw new Error(`Failed to create chat session: ${error.message || 'Unknown error'}`);
}
}
/**
* Get a specific chat session by ID
* Get a chat by ID
*/
async getSession(req: Request, res: Response) {
async getSession(req: Request, res: Response): Promise<any> {
try {
const { sessionId } = req.params;
// Check if session exists
const session = SessionsStore.getSession(sessionId);
if (!session) {
// Instead of throwing an error, return a structured 404 response
// that the frontend can handle gracefully
const chat = await chatStorageService.getChat(sessionId);
if (!chat) {
res.status(404).json({
error: true,
message: `Session with ID ${sessionId} not found`,
code: 'session_not_found',
sessionId
});
return null; // Return null to prevent further processing
return null;
}
// Return session with metadata and additional fields
return {
id: session.id,
title: session.title,
createdAt: session.createdAt,
lastActive: session.lastActive,
messages: session.messages,
noteContext: session.noteContext,
// Include additional fields for the frontend
sources: session.metadata.sources || [],
metadata: {
model: session.metadata.model,
provider: session.metadata.provider,
temperature: session.metadata.temperature,
maxTokens: session.metadata.maxTokens,
lastUpdated: session.lastActive.toISOString(),
// Include simplified tool executions if available
toolExecutions: session.metadata.toolExecutions || []
}
id: chat.id,
title: chat.title,
createdAt: chat.createdAt,
lastActive: chat.updatedAt,
messages: chat.messages,
metadata: chat.metadata || {}
};
} catch (error: any) {
log.error(`Error getting LLM session: ${error.message || 'Unknown error'}`);
log.error(`Error getting chat session: ${error.message || 'Unknown error'}`);
throw new Error(`Failed to get session: ${error.message || 'Unknown error'}`);
}
}
/**
* Delete a chat session
* Delete a chat
*/
async deleteSession(req: Request, res: Response) {
try {
const { sessionId } = req.params;
// Delete the session
const success = SessionsStore.deleteSession(sessionId);
const success = await chatStorageService.deleteChat(sessionId);
if (!success) {
throw new Error(`Session with ID ${sessionId} not found`);
}
@ -587,91 +388,47 @@ class RestChatService {
message: `Session ${sessionId} deleted successfully`
};
} catch (error: any) {
log.error(`Error deleting LLM session: ${error.message || 'Unknown error'}`);
log.error(`Error deleting chat session: ${error.message || 'Unknown error'}`);
throw new Error(`Failed to delete session: ${error.message || 'Unknown error'}`);
}
}
/**
* Get all sessions
* Get all chats
*/
getSessions() {
return SessionsStore.getAllSessions();
}
/**
* Create an in-memory session from a Chat Note
* This treats the Chat Note as the source of truth, using its ID as the session ID
*/
async createSessionFromChatNote(noteId: string): Promise<ChatSession | null> {
async getAllSessions() {
try {
log.info(`Creating in-memory session for Chat Note ID ${noteId}`);
// Import chat storage service
const chatStorageService = (await import('../../llm/chat_storage_service.js')).default;
// Try to get the Chat Note data
const chatNote = await chatStorageService.getChat(noteId);
if (!chatNote) {
log.error(`Chat Note ${noteId} not found, cannot create session`);
return null;
}
log.info(`Found Chat Note ${noteId}, creating in-memory session`);
// Convert Message[] to ChatMessage[] by ensuring the role is compatible
const chatMessages: ChatMessage[] = chatNote.messages.map(msg => ({
role: msg.role === 'tool' ? 'assistant' : msg.role, // Map 'tool' role to 'assistant'
content: msg.content,
timestamp: new Date()
}));
// Create a new session with the same ID as the Chat Note
const session: ChatSession = {
id: chatNote.id, // Use Chat Note ID as the session ID
title: chatNote.title,
messages: chatMessages,
createdAt: chatNote.createdAt || new Date(),
lastActive: new Date(),
metadata: chatNote.metadata || {}
const chats = await chatStorageService.getAllChats();
return {
sessions: chats.map(chat => ({
id: chat.id,
title: chat.title,
createdAt: chat.createdAt,
lastActive: chat.updatedAt,
messageCount: chat.messages.length
}))
};
// Add the session to the in-memory store
SessionsStore.getAllSessions().set(noteId, session);
log.info(`Successfully created in-memory session for Chat Note ${noteId}`);
return session;
} catch (error) {
log.error(`Failed to create session from Chat Note: ${error}`);
return null;
} catch (error: any) {
log.error(`Error listing sessions: ${error}`);
throw new Error(`Failed to list sessions: ${error}`);
}
}
/**
* Get an existing session or create a new one from a Chat Note
* This treats the Chat Note as the source of truth, using its ID as the session ID
* Get the user's preferred model
*/
async getOrCreateSessionFromChatNote(noteId: string, createIfNotFound: boolean = true): Promise<ChatSession | null> {
// First check if we already have this session in memory
let session = SessionsStore.getSession(noteId);
if (session) {
log.info(`Found existing in-memory session for Chat Note ${noteId}`);
return session;
async getPreferredModel(): Promise<string | undefined> {
try {
const validConfig = await getFirstValidModelConfig();
if (!validConfig) {
log.error('No valid AI model configuration found');
return undefined;
}
return validConfig.model;
} catch (error) {
log.error(`Error getting preferred model: ${error}`);
return undefined;
}
// If not in memory, try to create from Chat Note
log.info(`Session not found in memory for Chat Note ${noteId}, attempting to create it`);
// Only try to create if allowed
if (!createIfNotFound) {
log.info(`Not creating new session for ${noteId} as createIfNotFound=false`);
return null;
}
// Create from Chat Note
return await this.createSessionFromChatNote(noteId);
}
}

View File

@ -1,169 +0,0 @@
/**
* In-memory storage for chat sessions
*/
import log from "../../log.js";
import { LLM_CONSTANTS } from '../constants/provider_constants.js';
import { SEARCH_CONSTANTS } from '../constants/search_constants.js';
import { randomString } from "../../utils.js";
import type { ChatSession, ChatMessage } from '../interfaces/chat_session.js';
// In-memory storage for sessions
const sessions = new Map<string, ChatSession>();
// Flag to track if cleanup timer has been initialized
let cleanupInitialized = false;
/**
* Provides methods to manage chat sessions
*/
class SessionsStore {
/**
* Initialize the session cleanup timer to remove old/inactive sessions
*/
initializeCleanupTimer(): void {
if (cleanupInitialized) {
return;
}
// Clean sessions that have expired based on the constants
function cleanupOldSessions() {
const expiryTime = new Date(Date.now() - LLM_CONSTANTS.SESSION.SESSION_EXPIRY_MS);
for (const [sessionId, session] of sessions.entries()) {
if (session.lastActive < expiryTime) {
sessions.delete(sessionId);
}
}
}
// Run cleanup at the configured interval
setInterval(cleanupOldSessions, LLM_CONSTANTS.SESSION.CLEANUP_INTERVAL_MS);
cleanupInitialized = true;
log.info("Session cleanup timer initialized");
}
/**
* Get all sessions
*/
getAllSessions(): Map<string, ChatSession> {
return sessions;
}
/**
* Get a specific session by ID
*/
getSession(sessionId: string): ChatSession | undefined {
return sessions.get(sessionId);
}
/**
* Create a new session
*/
createSession(options: {
chatNoteId: string;
title?: string;
systemPrompt?: string;
contextNoteId?: string;
maxTokens?: number;
model?: string;
provider?: string;
temperature?: number;
}): ChatSession {
this.initializeCleanupTimer();
const title = options.title || 'Chat Session';
const sessionId = options.chatNoteId;
const now = new Date();
// Initial system message if provided
const messages: ChatMessage[] = [];
if (options.systemPrompt) {
messages.push({
role: 'system',
content: options.systemPrompt,
timestamp: now
});
}
// Create and store the session
const session: ChatSession = {
id: sessionId,
title,
messages,
createdAt: now,
lastActive: now,
noteContext: options.contextNoteId,
metadata: {
temperature: options.temperature || SEARCH_CONSTANTS.TEMPERATURE.DEFAULT,
maxTokens: options.maxTokens,
model: options.model,
provider: options.provider,
sources: [],
toolExecutions: [],
lastUpdated: now.toISOString()
}
};
sessions.set(sessionId, session);
log.info(`Created in-memory session for Chat Note ID: ${sessionId}`);
return session;
}
/**
* Update a session's last active timestamp
*/
touchSession(sessionId: string): boolean {
const session = sessions.get(sessionId);
if (!session) {
return false;
}
session.lastActive = new Date();
return true;
}
/**
* Delete a session
*/
deleteSession(sessionId: string): boolean {
return sessions.delete(sessionId);
}
/**
* Record a tool execution in the session metadata
*/
recordToolExecution(chatNoteId: string, tool: any, result: string, error?: string): void {
if (!chatNoteId) return;
const session = sessions.get(chatNoteId);
if (!session) return;
try {
const toolExecutions = session.metadata.toolExecutions || [];
// Format tool execution record
const execution = {
id: tool.id || `tool-${Date.now()}-${Math.random().toString(36).substring(2, 7)}`,
name: tool.function?.name || 'unknown',
arguments: typeof tool.function?.arguments === 'string'
? (() => { try { return JSON.parse(tool.function.arguments); } catch { return tool.function.arguments; } })()
: tool.function?.arguments || {},
result: result,
error: error,
timestamp: new Date().toISOString()
};
// Add to tool executions
toolExecutions.push(execution);
session.metadata.toolExecutions = toolExecutions;
log.info(`Recorded tool execution for ${execution.name} in session ${chatNoteId}`);
} catch (err) {
log.error(`Failed to record tool execution: ${err}`);
}
}
}
// Create singleton instance
const sessionsStore = new SessionsStore();
export default sessionsStore;

View File

@ -0,0 +1,179 @@
import configurationManager from './configuration_manager.js';
import type {
ProviderType,
ModelIdentifier,
ModelConfig,
ProviderPrecedenceConfig,
EmbeddingProviderPrecedenceConfig
} from '../interfaces/configuration_interfaces.js';
/**
* Helper functions for accessing AI configuration without string parsing
* Use these throughout the codebase instead of parsing strings directly
*/
/**
* Get the ordered list of AI providers
*/
export async function getProviderPrecedence(): Promise<ProviderType[]> {
const config = await configurationManager.getProviderPrecedence();
return config.providers;
}
/**
* Get the default/preferred AI provider
*/
export async function getPreferredProvider(): Promise<ProviderType | null> {
const config = await configurationManager.getProviderPrecedence();
if (config.providers.length === 0) {
return null; // No providers configured
}
return config.defaultProvider || config.providers[0];
}
/**
* Get the ordered list of embedding providers
*/
export async function getEmbeddingProviderPrecedence(): Promise<string[]> {
const config = await configurationManager.getEmbeddingProviderPrecedence();
return config.providers;
}
/**
* Get the default embedding provider
*/
export async function getPreferredEmbeddingProvider(): Promise<string | null> {
const config = await configurationManager.getEmbeddingProviderPrecedence();
if (config.providers.length === 0) {
return null; // No providers configured
}
return config.defaultProvider || config.providers[0];
}
/**
* Parse a model identifier (handles "provider:model" format)
*/
export function parseModelIdentifier(modelString: string): ModelIdentifier {
return configurationManager.parseModelIdentifier(modelString);
}
/**
* Create a model configuration from a model string
*/
export function createModelConfig(modelString: string, defaultProvider?: ProviderType): ModelConfig {
return configurationManager.createModelConfig(modelString, defaultProvider);
}
/**
* Get the default model for a specific provider
*/
export async function getDefaultModelForProvider(provider: ProviderType): Promise<string | undefined> {
const config = await configurationManager.getAIConfig();
return config.defaultModels[provider]; // This can now be undefined
}
/**
* Get provider settings for a specific provider
*/
export async function getProviderSettings(provider: ProviderType) {
const config = await configurationManager.getAIConfig();
return config.providerSettings[provider];
}
/**
* Check if AI is enabled
*/
export async function isAIEnabled(): Promise<boolean> {
const config = await configurationManager.getAIConfig();
return config.enabled;
}
/**
* Check if a provider has required configuration
*/
export async function isProviderConfigured(provider: ProviderType): Promise<boolean> {
const settings = await getProviderSettings(provider);
switch (provider) {
case 'openai':
return Boolean((settings as any)?.apiKey);
case 'anthropic':
return Boolean((settings as any)?.apiKey);
case 'ollama':
return Boolean((settings as any)?.baseUrl);
default:
return false;
}
}
/**
* Get the first available (configured) provider from the precedence list
*/
export async function getFirstAvailableProvider(): Promise<ProviderType | null> {
const providers = await getProviderPrecedence();
if (providers.length === 0) {
return null; // No providers configured
}
for (const provider of providers) {
if (await isProviderConfigured(provider)) {
return provider;
}
}
return null; // No providers are properly configured
}
/**
* Validate the current AI configuration
*/
export async function validateConfiguration() {
return configurationManager.validateConfig();
}
/**
* Clear cached configuration (use when settings change)
*/
export function clearConfigurationCache(): void {
configurationManager.clearCache();
}
/**
* Get a model configuration with validation that no defaults are assumed
*/
export async function getValidModelConfig(provider: ProviderType): Promise<{ model: string; provider: ProviderType } | null> {
const defaultModel = await getDefaultModelForProvider(provider);
if (!defaultModel) {
// No default model configured for this provider
return null;
}
const isConfigured = await isProviderConfigured(provider);
if (!isConfigured) {
// Provider is not properly configured
return null;
}
return {
model: defaultModel,
provider
};
}
/**
* Get the first valid model configuration from the provider precedence list
*/
export async function getFirstValidModelConfig(): Promise<{ model: string; provider: ProviderType } | null> {
const providers = await getProviderPrecedence();
for (const provider of providers) {
const config = await getValidModelConfig(provider);
if (config) {
return config;
}
}
return null; // No valid model configuration found
}
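Taken together, these helpers let call sites work with typed results instead of re-parsing option strings. A minimal usage sketch (the import path mirrors this file; the surrounding startup check is hypothetical):

```ts
import {
    getFirstValidModelConfig,
    getPreferredProvider,
    isAIEnabled
} from './config/configuration_helpers.js';

// Hypothetical startup check: resolve a usable provider/model pair once,
// instead of parsing option strings at every call site.
async function resolveChatModel(): Promise<{ model: string; provider: string }> {
    if (!(await isAIEnabled())) {
        throw new Error('AI features are disabled');
    }
    // Walks the precedence list and returns the first provider that is both
    // configured and has a default model set, or null if none qualifies.
    const config = await getFirstValidModelConfig();
    if (!config) {
        const preferred = await getPreferredProvider();
        throw new Error(`No valid model configuration for provider ${preferred ?? 'none'}`);
    }
    return config;
}
```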

View File

@ -0,0 +1,378 @@
import options from '../../options.js';
import log from '../../log.js';
import type {
AIConfig,
ProviderPrecedenceConfig,
EmbeddingProviderPrecedenceConfig,
ModelIdentifier,
ModelConfig,
ProviderType,
EmbeddingProviderType,
ConfigValidationResult,
ProviderSettings,
OpenAISettings,
AnthropicSettings,
OllamaSettings
} from '../interfaces/configuration_interfaces.js';
/**
* Configuration manager that handles conversion from string-based options
* to proper typed configuration objects.
*
* This is the ONLY place where string parsing should happen for LLM configurations.
*/
export class ConfigurationManager {
private static instance: ConfigurationManager | null = null;
private cachedConfig: AIConfig | null = null;
private lastConfigUpdate: number = 0;
// Cache for 5 minutes to avoid excessive option reads
private static readonly CACHE_DURATION = 5 * 60 * 1000;
private constructor() {}
public static getInstance(): ConfigurationManager {
if (!ConfigurationManager.instance) {
ConfigurationManager.instance = new ConfigurationManager();
}
return ConfigurationManager.instance;
}
/**
* Get the complete AI configuration
*/
public async getAIConfig(): Promise<AIConfig> {
const now = Date.now();
if (this.cachedConfig && (now - this.lastConfigUpdate) < ConfigurationManager.CACHE_DURATION) {
return this.cachedConfig;
}
try {
const config: AIConfig = {
enabled: await this.getAIEnabled(),
providerPrecedence: await this.getProviderPrecedence(),
embeddingProviderPrecedence: await this.getEmbeddingProviderPrecedence(),
defaultModels: await this.getDefaultModels(),
providerSettings: await this.getProviderSettings()
};
this.cachedConfig = config;
this.lastConfigUpdate = now;
return config;
} catch (error) {
log.error(`Error loading AI configuration: ${error}`);
return this.getDefaultConfig();
}
}
/**
* Parse provider precedence from string option
*/
public async getProviderPrecedence(): Promise<ProviderPrecedenceConfig> {
try {
const precedenceOption = await options.getOption('aiProviderPrecedence');
const providers = this.parseProviderList(precedenceOption);
return {
providers: providers as ProviderType[],
defaultProvider: providers.length > 0 ? providers[0] as ProviderType : undefined
};
} catch (error) {
log.error(`Error parsing provider precedence: ${error}`);
// Only return known providers if they exist, don't assume defaults
return {
providers: [],
defaultProvider: undefined
};
}
}
/**
* Parse embedding provider precedence from string option
*/
public async getEmbeddingProviderPrecedence(): Promise<EmbeddingProviderPrecedenceConfig> {
try {
const precedenceOption = await options.getOption('embeddingProviderPrecedence');
const providers = this.parseProviderList(precedenceOption);
return {
providers: providers as EmbeddingProviderType[],
defaultProvider: providers.length > 0 ? providers[0] as EmbeddingProviderType : undefined
};
} catch (error) {
log.error(`Error parsing embedding provider precedence: ${error}`);
// Don't assume defaults, return empty configuration
return {
providers: [],
defaultProvider: undefined
};
}
}
/**
* Parse model identifier with optional provider prefix
* Handles formats like "gpt-4", "openai:gpt-4", "ollama:llama2:7b"
*/
public parseModelIdentifier(modelString: string): ModelIdentifier {
if (!modelString) {
return {
modelId: '',
fullIdentifier: ''
};
}
const parts = modelString.split(':');
if (parts.length === 1) {
// No provider prefix, just model name
return {
modelId: modelString,
fullIdentifier: modelString
};
}
// Check if first part is a known provider
const potentialProvider = parts[0].toLowerCase();
const knownProviders: ProviderType[] = ['openai', 'anthropic', 'ollama'];
if (knownProviders.includes(potentialProvider as ProviderType)) {
// Provider prefix format
const provider = potentialProvider as ProviderType;
const modelId = parts.slice(1).join(':'); // Rejoin in case model has colons
return {
provider,
modelId,
fullIdentifier: modelString
};
}
// Not a provider prefix, treat whole string as model name
return {
modelId: modelString,
fullIdentifier: modelString
};
}
/**
* Create model configuration from string
*/
public createModelConfig(modelString: string, defaultProvider?: ProviderType): ModelConfig {
const identifier = this.parseModelIdentifier(modelString);
const provider = identifier.provider || defaultProvider || 'openai';
return {
provider,
modelId: identifier.modelId,
displayName: identifier.fullIdentifier
};
}
/**
* Get default models for each provider - ONLY from user configuration
*/
public async getDefaultModels(): Promise<Record<ProviderType, string | undefined>> {
try {
const [openaiModel, anthropicModel, ollamaModel] = await Promise.all([
options.getOption('openaiDefaultModel'),
options.getOption('anthropicDefaultModel'),
options.getOption('ollamaDefaultModel')
]);
return {
openai: openaiModel || undefined,
anthropic: anthropicModel || undefined,
ollama: ollamaModel || undefined
};
} catch (error) {
log.error(`Error loading default models: ${error}`);
// Return undefined for all providers if we can't load config
return {
openai: undefined,
anthropic: undefined,
ollama: undefined
};
}
}
/**
* Get provider-specific settings
*/
public async getProviderSettings(): Promise<ProviderSettings> {
try {
const [
openaiApiKey, openaiBaseUrl, openaiDefaultModel,
anthropicApiKey, anthropicBaseUrl, anthropicDefaultModel,
ollamaBaseUrl, ollamaDefaultModel
] = await Promise.all([
options.getOption('openaiApiKey'),
options.getOption('openaiBaseUrl'),
options.getOption('openaiDefaultModel'),
options.getOption('anthropicApiKey'),
options.getOption('anthropicBaseUrl'),
options.getOption('anthropicDefaultModel'),
options.getOption('ollamaBaseUrl'),
options.getOption('ollamaDefaultModel')
]);
const settings: ProviderSettings = {};
if (openaiApiKey || openaiBaseUrl || openaiDefaultModel) {
settings.openai = {
apiKey: openaiApiKey,
baseUrl: openaiBaseUrl,
defaultModel: openaiDefaultModel
};
}
if (anthropicApiKey || anthropicBaseUrl || anthropicDefaultModel) {
settings.anthropic = {
apiKey: anthropicApiKey,
baseUrl: anthropicBaseUrl,
defaultModel: anthropicDefaultModel
};
}
if (ollamaBaseUrl || ollamaDefaultModel) {
settings.ollama = {
baseUrl: ollamaBaseUrl,
defaultModel: ollamaDefaultModel
};
}
return settings;
} catch (error) {
log.error(`Error loading provider settings: ${error}`);
return {};
}
}
/**
* Validate configuration
*/
public async validateConfig(): Promise<ConfigValidationResult> {
const result: ConfigValidationResult = {
isValid: true,
errors: [],
warnings: []
};
try {
const config = await this.getAIConfig();
if (!config.enabled) {
result.warnings.push('AI features are disabled');
return result;
}
// Validate provider precedence
if (config.providerPrecedence.providers.length === 0) {
result.errors.push('No providers configured in precedence list');
result.isValid = false;
}
// Validate provider settings
for (const provider of config.providerPrecedence.providers) {
const providerConfig = config.providerSettings[provider];
if (provider === 'openai') {
const openaiConfig = providerConfig as OpenAISettings | undefined;
if (!openaiConfig?.apiKey) {
result.warnings.push('OpenAI API key is not configured');
}
}
if (provider === 'anthropic') {
const anthropicConfig = providerConfig as AnthropicSettings | undefined;
if (!anthropicConfig?.apiKey) {
result.warnings.push('Anthropic API key is not configured');
}
}
if (provider === 'ollama') {
const ollamaConfig = providerConfig as OllamaSettings | undefined;
if (!ollamaConfig?.baseUrl) {
result.warnings.push('Ollama base URL is not configured');
}
}
}
} catch (error) {
result.errors.push(`Configuration validation error: ${error}`);
result.isValid = false;
}
return result;
}
/**
* Clear cached configuration (force reload on next access)
*/
public clearCache(): void {
this.cachedConfig = null;
this.lastConfigUpdate = 0;
}
// Private helper methods
private async getAIEnabled(): Promise<boolean> {
try {
return await options.getOptionBool('aiEnabled');
} catch {
return false;
}
}
private parseProviderList(precedenceOption: string | null): string[] {
if (!precedenceOption) {
// Don't assume any defaults - return empty array
return [];
}
try {
// Handle JSON array format
if (precedenceOption.startsWith('[') && precedenceOption.endsWith(']')) {
const parsed = JSON.parse(precedenceOption);
if (Array.isArray(parsed)) {
return parsed.map(p => String(p).trim());
}
}
// Handle comma-separated format
if (precedenceOption.includes(',')) {
return precedenceOption.split(',').map(p => p.trim());
}
// Handle single provider
return [precedenceOption.trim()];
} catch (error) {
log.error(`Error parsing provider list "${precedenceOption}": ${error}`);
// Don't assume defaults on parse error
return [];
}
}
private getDefaultConfig(): AIConfig {
return {
enabled: false,
providerPrecedence: {
providers: [],
defaultProvider: undefined
},
embeddingProviderPrecedence: {
providers: [],
defaultProvider: undefined
},
defaultModels: {
openai: undefined,
anthropic: undefined,
ollama: undefined
},
providerSettings: {}
};
}
}
// Export singleton instance
export default ConfigurationManager.getInstance();
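The identifier formats named in the comments above behave as follows; a small illustration (the import path is an assumption, and the expected values follow from the parsing code itself):

```ts
import configurationManager from './config/configuration_manager.js';

// "gpt-4"            -> { modelId: 'gpt-4', fullIdentifier: 'gpt-4' }
// "openai:gpt-4"     -> { provider: 'openai', modelId: 'gpt-4', fullIdentifier: 'openai:gpt-4' }
// "ollama:llama2:7b" -> { provider: 'ollama', modelId: 'llama2:7b', ... } (colons after the prefix are preserved)
// "custom:model"     -> { modelId: 'custom:model', ... } ('custom' is not a known provider)
const id = configurationManager.parseModelIdentifier('ollama:llama2:7b');
console.log(id.provider, id.modelId); // ollama llama2:7b
```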

View File

@ -42,7 +42,6 @@ export class AgentToolsManager {
}
try {
log.info("Initializing agent tools");
// Initialize the context service first
try {

View File

@ -1,4 +1,4 @@
import sql from "../../sql.js";
import sql from '../../sql.js'
import { randomString } from "../../../services/utils.js";
import dateUtils from "../../../services/date_utils.js";
import log from "../../log.js";
@ -11,6 +11,7 @@ import { SEARCH_CONSTANTS } from '../constants/search_constants.js';
import type { NoteEmbeddingContext } from "./embeddings_interface.js";
import becca from "../../../becca/becca.js";
import { isNoteExcludedFromAIById } from "../utils/ai_exclusion_utils.js";
import { getEmbeddingProviderPrecedence } from '../config/configuration_helpers.js';
interface Similarity {
noteId: string;
@ -271,44 +272,28 @@ export async function findSimilarNotes(
}
}
} else {
// Use dedicated embedding provider precedence from options for other strategies
let preferredProviders: string[] = [];
const embeddingPrecedence = await options.getOption('embeddingProviderPrecedence');
// Try providers using the new configuration system
if (useFallback) {
log.info('No embeddings found for specified provider, trying fallback providers...');
if (embeddingPrecedence) {
// For "comma,separated,values"
if (embeddingPrecedence.includes(',')) {
preferredProviders = embeddingPrecedence.split(',').map(p => p.trim());
}
// For JSON array ["value1", "value2"]
else if (embeddingPrecedence.startsWith('[') && embeddingPrecedence.endsWith(']')) {
try {
preferredProviders = JSON.parse(embeddingPrecedence);
} catch (e) {
log.error(`Error parsing embedding precedence: ${e}`);
preferredProviders = [embeddingPrecedence]; // Fallback to using as single value
// Use the new configuration system - no string parsing!
const preferredProviders = await getEmbeddingProviderPrecedence();
log.info(`Using provider precedence: ${preferredProviders.join(', ')}`);
// Try providers in precedence order
for (const provider of preferredProviders) {
const providerEmbeddings = availableEmbeddings.filter(e => e.providerId === provider);
if (providerEmbeddings.length > 0) {
// Choose the model with the most embeddings
const bestModel = providerEmbeddings.sort((a, b) => b.count - a.count)[0];
log.info(`Found fallback provider: ${provider}, model: ${bestModel.modelId}, dimension: ${bestModel.dimension}`);
// The 'regenerate' strategy would go here if needed
// We're no longer supporting the 'adapt' strategy
}
}
// For a single value
else {
preferredProviders = [embeddingPrecedence];
}
}
log.info(`Using provider precedence: ${preferredProviders.join(', ')}`);
// Try providers in precedence order
for (const provider of preferredProviders) {
const providerEmbeddings = availableEmbeddings.filter(e => e.providerId === provider);
if (providerEmbeddings.length > 0) {
// Choose the model with the most embeddings
const bestModel = providerEmbeddings.sort((a, b) => b.count - a.count)[0];
log.info(`Found fallback provider: ${provider}, model: ${bestModel.modelId}, dimension: ${bestModel.dimension}`);
// The 'regenerate' strategy would go here if needed
// We're no longer supporting the 'adapt' strategy
}
}
}
}
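A condensed sketch of the fallback walk above, assuming the embedding stats shape (`providerId`, `modelId`, `dimension`, `count`) implied by the surrounding code:

```ts
interface EmbeddingStats {
    providerId: string;
    modelId: string;
    dimension: number;
    count: number;
}

// Pick the first provider in precedence order that has any stored embeddings,
// preferring the model with the most embeddings within that provider.
function pickFallback(preferredProviders: string[], availableEmbeddings: EmbeddingStats[]): EmbeddingStats | null {
    for (const provider of preferredProviders) {
        const providerEmbeddings = availableEmbeddings
            .filter(e => e.providerId === provider)
            .sort((a, b) => b.count - a.count);
        if (providerEmbeddings.length > 0) {
            return providerEmbeddings[0];
        }
    }
    return null;
}
```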

View File

@ -0,0 +1,108 @@
/**
* Configuration interfaces for LLM services
* These interfaces replace string parsing with proper typed objects
*/
/**
* Provider precedence configuration
*/
export interface ProviderPrecedenceConfig {
providers: ProviderType[];
defaultProvider?: ProviderType;
}
/**
* Model configuration with provider information
*/
export interface ModelConfig {
provider: ProviderType;
modelId: string;
displayName?: string;
capabilities?: ModelCapabilities;
}
/**
* Embedding provider precedence configuration
*/
export interface EmbeddingProviderPrecedenceConfig {
providers: EmbeddingProviderType[];
defaultProvider?: EmbeddingProviderType;
}
/**
* Model capabilities
*/
export interface ModelCapabilities {
contextWindow?: number;
supportsTools?: boolean;
supportsVision?: boolean;
supportsStreaming?: boolean;
maxTokens?: number;
temperature?: number;
}
/**
* Complete AI configuration
*/
export interface AIConfig {
enabled: boolean;
providerPrecedence: ProviderPrecedenceConfig;
embeddingProviderPrecedence: EmbeddingProviderPrecedenceConfig;
defaultModels: Record<ProviderType, string | undefined>;
providerSettings: ProviderSettings;
}
/**
* Provider-specific settings
*/
export interface ProviderSettings {
openai?: OpenAISettings;
anthropic?: AnthropicSettings;
ollama?: OllamaSettings;
}
export interface OpenAISettings {
apiKey?: string;
baseUrl?: string;
defaultModel?: string;
}
export interface AnthropicSettings {
apiKey?: string;
baseUrl?: string;
defaultModel?: string;
}
export interface OllamaSettings {
baseUrl?: string;
defaultModel?: string;
timeout?: number;
}
/**
* Valid provider types
*/
export type ProviderType = 'openai' | 'anthropic' | 'ollama';
/**
* Valid embedding provider types
*/
export type EmbeddingProviderType = 'openai' | 'ollama' | 'local';
/**
* Model identifier with provider prefix (e.g., "openai:gpt-4" or "ollama:llama2")
*/
export interface ModelIdentifier {
provider?: ProviderType;
modelId: string;
fullIdentifier: string; // The complete string representation
}
/**
* Validation result for configuration
*/
export interface ConfigValidationResult {
isValid: boolean;
errors: string[];
warnings: string[];
}
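For reference, a configuration object satisfying these interfaces might look like this (the import path is an assumption and all values are illustrative, not defaults):

```ts
import type { AIConfig } from './interfaces/configuration_interfaces.js';

const exampleConfig: AIConfig = {
    enabled: true,
    providerPrecedence: {
        providers: ['anthropic', 'openai'],
        defaultProvider: 'anthropic'
    },
    embeddingProviderPrecedence: {
        providers: ['ollama', 'local'],
        defaultProvider: 'ollama'
    },
    // Record<ProviderType, string | undefined>: every provider key is present,
    // but a model is only set where the user configured one.
    defaultModels: {
        openai: 'gpt-4',
        anthropic: 'claude-3-5-sonnet',
        ollama: undefined
    },
    providerSettings: {
        anthropic: { apiKey: 'sk-...' },
        ollama: { baseUrl: 'http://localhost:11434' }
    }
};
```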

View File

@ -20,44 +20,44 @@ export class MessagePreparationStage extends BasePipelineStage<MessagePreparatio
*/
protected async process(input: MessagePreparationInput): Promise<{ messages: Message[] }> {
const { messages, context, systemPrompt, options } = input;
// Determine provider from model string if available (format: "provider:model")
let provider = 'default';
if (options?.model && options.model.includes(':')) {
const [providerName] = options.model.split(':');
provider = providerName;
}
// Check if tools are enabled
const toolsEnabled = options?.enableTools === true;
log.info(`Preparing messages for provider: ${provider}, context: ${!!context}, system prompt: ${!!systemPrompt}, tools: ${toolsEnabled}`);
// Get appropriate formatter for this provider
const formatter = MessageFormatterFactory.getFormatter(provider);
// Determine the system prompt to use
let finalSystemPrompt = systemPrompt || SYSTEM_PROMPTS.DEFAULT_SYSTEM_PROMPT;
// If tools are enabled, enhance system prompt with tools guidance
if (toolsEnabled) {
const toolCount = toolRegistry.getAllTools().length;
const toolsPrompt = `You have access to ${toolCount} tools to help you respond. When you need information that might be in the user's notes, use the search_notes tool to find relevant content or the read_note tool to read a specific note by ID. Use tools when specific information is required rather than making assumptions.`;
// Add tools guidance to system prompt
finalSystemPrompt = finalSystemPrompt + '\n\n' + toolsPrompt;
log.info(`Enhanced system prompt with tools guidance: ${toolCount} tools available`);
}
// Format messages using provider-specific approach
const formattedMessages = formatter.formatMessages(
messages,
finalSystemPrompt,
context
);
log.info(`Formatted ${messages.length} messages into ${formattedMessages.length} messages for provider: ${provider}`);
return { messages: formattedMessages };
}
}
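The provider detection at the top of this stage reduces to a one-liner; a sketch (the 'default' fallback mirrors the stage's initial value):

```ts
// Derive the formatter key from a legacy "provider:model" string,
// falling back to the generic formatter otherwise.
function providerFromModel(model?: string): string {
    return model?.includes(':') ? model.split(':')[0] : 'default';
}

providerFromModel('ollama:llama2:7b'); // 'ollama'
providerFromModel('gpt-4');            // 'default'
```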

View File

@ -3,9 +3,22 @@ import type { ModelSelectionInput } from '../interfaces.js';
import type { ChatCompletionOptions } from '../../ai_interface.js';
import type { ModelMetadata } from '../../providers/provider_options.js';
import log from '../../../log.js';
import options from '../../../options.js';
import aiServiceManager from '../../ai_service_manager.js';
import { SEARCH_CONSTANTS, MODEL_CAPABILITIES } from "../../constants/search_constants.js";
// Import types
import type { ServiceProviders } from '../../interfaces/ai_service_interfaces.js';
// Import new configuration system
import {
getProviderPrecedence,
getPreferredProvider,
parseModelIdentifier,
getDefaultModelForProvider,
createModelConfig
} from '../../config/configuration_helpers.js';
import type { ProviderType } from '../../interfaces/configuration_interfaces.js';
/**
* Pipeline stage for selecting the appropriate LLM model
*/
@ -36,15 +49,15 @@ export class ModelSelectionStage extends BasePipelineStage<ModelSelectionInput,
// If model already specified, don't override it
if (updatedOptions.model) {
// Check if the model has a provider prefix, which indicates legacy format
const modelParts = this.parseModelIdentifier(updatedOptions.model);
// Use the new configuration system to parse model identifier
const modelIdentifier = parseModelIdentifier(updatedOptions.model);
if (modelParts.provider) {
if (modelIdentifier.provider) {
// Add provider metadata for backward compatibility
this.addProviderMetadata(updatedOptions, modelParts.provider, modelParts.model);
this.addProviderMetadata(updatedOptions, modelIdentifier.provider as ServiceProviders, modelIdentifier.modelId);
// Update the model to be just the model name without provider prefix
updatedOptions.model = modelParts.model;
log.info(`Using explicitly specified model: ${modelParts.model} from provider: ${modelParts.provider}`);
updatedOptions.model = modelIdentifier.modelId;
log.info(`Using explicitly specified model: ${modelIdentifier.modelId} from provider: ${modelIdentifier.provider}`);
} else {
log.info(`Using explicitly specified model: ${updatedOptions.model}`);
}
@ -86,118 +99,72 @@ export class ModelSelectionStage extends BasePipelineStage<ModelSelectionInput,
}
}
// Get default provider and model based on precedence
let defaultProvider = 'openai';
let defaultModelName = 'gpt-3.5-turbo';
// Get default provider and model using the new configuration system
try {
// Get provider precedence list
const providerPrecedence = await options.getOption('aiProviderPrecedence');
if (providerPrecedence) {
// Parse provider precedence list
let providers: string[] = [];
if (providerPrecedence.includes(',')) {
providers = providerPrecedence.split(',').map(p => p.trim());
} else if (providerPrecedence.startsWith('[') && providerPrecedence.endsWith(']')) {
providers = JSON.parse(providerPrecedence);
} else {
providers = [providerPrecedence];
}
// Use the new configuration helpers - no string parsing!
const preferredProvider = await getPreferredProvider();
// Check for first available provider
if (providers.length > 0) {
const firstProvider = providers[0];
defaultProvider = firstProvider;
if (!preferredProvider) {
throw new Error('No AI providers are configured. Please check your AI settings.');
}
// Get provider-specific default model
if (firstProvider === 'openai') {
const model = await options.getOption('openaiDefaultModel');
if (model) defaultModelName = model;
} else if (firstProvider === 'anthropic') {
const model = await options.getOption('anthropicDefaultModel');
if (model) defaultModelName = model;
} else if (firstProvider === 'ollama') {
const model = await options.getOption('ollamaDefaultModel');
if (model) {
defaultModelName = model;
const modelName = await getDefaultModelForProvider(preferredProvider);
// Enable tools for all Ollama models
// The Ollama API will handle models that don't support tool calling
log.info(`Using Ollama model ${model} with tool calling enabled`);
updatedOptions.enableTools = true;
}
}
if (!modelName) {
throw new Error(`No default model configured for provider ${preferredProvider}. Please set a default model in your AI settings.`);
}
log.info(`Selected provider: ${preferredProvider}, model: ${modelName}`);
// Determine query complexity
let queryComplexity = 'low';
if (query) {
// Simple heuristic: longer queries or those with complex terms indicate higher complexity
const complexityIndicators = [
'explain', 'analyze', 'compare', 'evaluate', 'synthesize',
'summarize', 'elaborate', 'investigate', 'research', 'debate'
];
const hasComplexTerms = complexityIndicators.some(term => query.toLowerCase().includes(term));
const isLongQuery = query.length > 100;
const hasMultipleQuestions = (query.match(/\?/g) || []).length > 1;
if ((hasComplexTerms && isLongQuery) || hasMultipleQuestions) {
queryComplexity = 'high';
} else if (hasComplexTerms || isLongQuery) {
queryComplexity = 'medium';
}
}
// Check content length if provided
if (contentLength && contentLength > SEARCH_CONSTANTS.CONTEXT.CONTENT_LENGTH.MEDIUM_THRESHOLD) {
// For large content, favor more powerful models
queryComplexity = contentLength > SEARCH_CONSTANTS.CONTEXT.CONTENT_LENGTH.HIGH_THRESHOLD ? 'high' : 'medium';
}
// Set the model and add provider metadata
updatedOptions.model = modelName;
this.addProviderMetadata(updatedOptions, preferredProvider as ServiceProviders, modelName);
log.info(`Selected model: ${modelName} from provider: ${preferredProvider} for query complexity: ${queryComplexity}`);
log.info(`[ModelSelectionStage] Final options: ${JSON.stringify({
model: updatedOptions.model,
stream: updatedOptions.stream,
provider: preferredProvider,
enableTools: updatedOptions.enableTools
})}`);
return { options: updatedOptions };
} catch (error) {
// If any error occurs, use the fallback default
log.error(`Error determining default model: ${error}`);
}
// Determine query complexity
let queryComplexity = 'low';
if (query) {
// Simple heuristic: longer queries or those with complex terms indicate higher complexity
const complexityIndicators = [
'explain', 'analyze', 'compare', 'evaluate', 'synthesize',
'summarize', 'elaborate', 'investigate', 'research', 'debate'
];
const hasComplexTerms = complexityIndicators.some(term => query.toLowerCase().includes(term));
const isLongQuery = query.length > 100;
const hasMultipleQuestions = (query.match(/\?/g) || []).length > 1;
if ((hasComplexTerms && isLongQuery) || hasMultipleQuestions) {
queryComplexity = 'high';
} else if (hasComplexTerms || isLongQuery) {
queryComplexity = 'medium';
}
}
// Check content length if provided
if (contentLength && contentLength > SEARCH_CONSTANTS.CONTEXT.CONTENT_LENGTH.MEDIUM_THRESHOLD) {
// For large content, favor more powerful models
queryComplexity = contentLength > SEARCH_CONSTANTS.CONTEXT.CONTENT_LENGTH.HIGH_THRESHOLD ? 'high' : 'medium';
}
// Set the model and add provider metadata
updatedOptions.model = defaultModelName;
this.addProviderMetadata(updatedOptions, defaultProvider, defaultModelName);
log.info(`Selected model: ${defaultModelName} from provider: ${defaultProvider} for query complexity: ${queryComplexity}`);
log.info(`[ModelSelectionStage] Final options: ${JSON.stringify({
model: updatedOptions.model,
stream: updatedOptions.stream,
provider: defaultProvider,
enableTools: updatedOptions.enableTools
})}`);
return { options: updatedOptions };
}
/**
* Helper to parse model identifier with provider prefix
* Handles legacy format "provider:model"
*/
private parseModelIdentifier(modelId: string): { provider?: string, model: string } {
if (!modelId) return { model: '' };
const parts = modelId.split(':');
if (parts.length === 1) {
// No provider prefix
return { model: modelId };
} else {
// Extract provider and model
const provider = parts[0];
const model = parts.slice(1).join(':'); // Handle model names that might include :
return { provider, model };
throw new Error(`Failed to determine AI model configuration: ${error}`);
}
}
/**
* Add provider metadata to the options based on model name
*/
private addProviderMetadata(options: ChatCompletionOptions, provider: string, modelName: string): void {
private addProviderMetadata(options: ChatCompletionOptions, provider: ServiceProviders, modelName: string): void {
// Check if we already have providerMetadata
if (options.providerMetadata) {
// If providerMetadata exists but not modelId, add the model name
@ -216,7 +183,7 @@ export class ModelSelectionStage extends BasePipelineStage<ModelSelectionInput,
// Find the first available provider
for (const p of providerPrecedence) {
if (aiServiceManager.isProviderAvailable(p)) {
selectedProvider = p;
selectedProvider = p as ServiceProviders;
break;
}
}
@ -234,7 +201,8 @@ export class ModelSelectionStage extends BasePipelineStage<ModelSelectionInput,
// For backward compatibility, ensure model name is set without prefix
if (options.model && options.model.includes(':')) {
options.model = modelName || options.model.split(':')[1];
const parsed = parseModelIdentifier(options.model);
options.model = modelName || parsed.modelId;
}
log.info(`Set provider metadata: provider=${selectedProvider}, model=${modelName}`);
@ -242,33 +210,43 @@ export class ModelSelectionStage extends BasePipelineStage<ModelSelectionInput,
}
/**
* Determine model based on provider precedence
* Determine model based on provider precedence using the new configuration system
*/
private determineDefaultModel(input: ModelSelectionInput): string {
const providerPrecedence = ['anthropic', 'openai', 'ollama'];
private async determineDefaultModel(input: ModelSelectionInput): Promise<string> {
try {
// Use the new configuration system
const providers = await getProviderPrecedence();
// Use only providers that are available
const availableProviders = providerPrecedence.filter(provider =>
aiServiceManager.isProviderAvailable(provider));
// Use only providers that are available
const availableProviders = providers.filter(provider =>
aiServiceManager.isProviderAvailable(provider));
if (availableProviders.length === 0) {
throw new Error('No AI providers are available');
if (availableProviders.length === 0) {
throw new Error('No AI providers are available');
}
// Get the first available provider and its default model
const defaultProvider = availableProviders[0];
const defaultModel = await getDefaultModelForProvider(defaultProvider);
if (!defaultModel) {
throw new Error(`No default model configured for provider ${defaultProvider}. Please configure a default model in your AI settings.`);
}
// Set provider metadata
if (!input.options.providerMetadata) {
input.options.providerMetadata = {
provider: defaultProvider as 'openai' | 'anthropic' | 'ollama' | 'local',
modelId: defaultModel
};
}
log.info(`Selected default model ${defaultModel} from provider ${defaultProvider}`);
return defaultModel;
} catch (error) {
log.error(`Error determining default model: ${error}`);
throw error; // Don't provide fallback defaults, let the error propagate
}
// Get the first available provider and its default model
const defaultProvider = availableProviders[0] as 'openai' | 'anthropic' | 'ollama' | 'local';
let defaultModel = 'gpt-3.5-turbo'; // Use model from our constants
// Set provider metadata
if (!input.options.providerMetadata) {
input.options.providerMetadata = {
provider: defaultProvider,
modelId: defaultModel
};
}
log.info(`Selected default model ${defaultModel} from provider ${defaultProvider}`);
return defaultModel;
}
/**

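Read in isolation, the query-complexity heuristic above is a small pure function; a sketch (the content-length branch is omitted, and in the stage itself the thresholds come from SEARCH_CONSTANTS):

```ts
type Complexity = 'low' | 'medium' | 'high';

function estimateQueryComplexity(query: string): Complexity {
    // Longer queries, analytical verbs, or multiple questions indicate higher complexity.
    const complexityIndicators = [
        'explain', 'analyze', 'compare', 'evaluate', 'synthesize',
        'summarize', 'elaborate', 'investigate', 'research', 'debate'
    ];
    const hasComplexTerms = complexityIndicators.some(term => query.toLowerCase().includes(term));
    const isLongQuery = query.length > 100;
    const hasMultipleQuestions = (query.match(/\?/g) || []).length > 1;

    if ((hasComplexTerms && isLongQuery) || hasMultipleQuestions) return 'high';
    if (hasComplexTerms || isLongQuery) return 'medium';
    return 'low';
}
```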
View File

@ -559,11 +559,9 @@ export class ToolCallingStage extends BasePipelineStage<ToolExecutionInput, { re
// Get agent tools manager and initialize it
const agentTools = aiServiceManager.getAgentTools();
if (agentTools && typeof agentTools.initialize === 'function') {
log.info('Initializing agent tools to create vectorSearchTool');
try {
// Force initialization to ensure it runs even if previously marked as initialized
await agentTools.initialize(true);
log.info('Agent tools initialized successfully');
} catch (initError: unknown) {
const errorMessage = initError instanceof Error ? initError.message : String(initError);
log.error(`Failed to initialize agent tools: ${errorMessage}`);
@ -812,14 +810,11 @@ export class ToolCallingStage extends BasePipelineStage<ToolExecutionInput, { re
const agentTools = aiServiceManager.getAgentTools();
if (agentTools && typeof agentTools.initialize === 'function') {
await agentTools.initialize(true);
log.info(`Agent tools initialized during preloading`);
}
// Check if the vector search tool is available
const vectorSearchTool = aiServiceManager.getVectorSearchTool();
if (vectorSearchTool && typeof vectorSearchTool.searchNotes === 'function') {
log.info(`Vector search tool successfully preloaded`);
} else {
if (!(vectorSearchTool && typeof vectorSearchTool.searchNotes === 'function')) {
log.error(`Vector search tool not available after initialization`);
}
} catch (error: unknown) {

View File

@ -300,7 +300,7 @@ export async function initializeDefaultProviders() {
const ollamaBaseUrl = await options.getOption('ollamaBaseUrl');
if (ollamaBaseUrl) {
// Use specific embedding models if available
const embeddingModel = await options.getOption('ollamaEmbeddingModel') || 'nomic-embed-text';
const embeddingModel = await options.getOption('ollamaEmbeddingModel');
try {
// Create provider with initial dimension to be updated during initialization

View File

@ -63,11 +63,9 @@ async function getOrCreateVectorSearchTool(): Promise<any> {
// Get agent tools manager and initialize it
const agentTools = aiServiceManager.getAgentTools();
if (agentTools && typeof agentTools.initialize === 'function') {
log.info('Initializing agent tools to create vectorSearchTool');
try {
// Force initialization to ensure it runs even if previously marked as initialized
await agentTools.initialize(true);
log.info('Agent tools initialized successfully');
} catch (initError: any) {
log.error(`Failed to initialize agent tools: ${initError.message}`);
return null;
@ -143,7 +141,7 @@ export class SearchNotesTool implements ToolHandler {
temperature: 0.3,
maxTokens: 200,
// Type assertion to bypass type checking for special internal parameters
...(({
...(({
bypassFormatter: true,
bypassContextProcessing: true
} as Record<string, boolean>))

View File

@ -13,7 +13,7 @@ import log from '../../log.js';
export class ToolRegistry {
private static instance: ToolRegistry;
private tools: Map<string, ToolHandler> = new Map();
private initializationAttempted: boolean = false;
private initializationAttempted = false;
private constructor() {}
@ -106,7 +106,6 @@ export class ToolRegistry {
}
this.tools.set(name, handler);
log.info(`Registered tool: ${name}`);
}
/**

View File

@ -16,5 +16,5 @@ describe("Migration", () => {
resolve();
});
});
});
}, 60_000);
});

View File

@ -25,10 +25,12 @@ async function migrate() {
}
// backup before attempting migration
await backupService.backupNow(
// creating a special backup for version 0.60.4, the changes in 0.61 are major.
currentDbVersion === 214 ? `before-migration-v060` : "before-migration"
);
if (!process.env.TRILIUM_INTEGRATION_TEST) {
await backupService.backupNow(
// creating a special backup for version 0.60.4, the changes in 0.61 are major.
currentDbVersion === 214 ? `before-migration-v060` : "before-migration"
);
}
const migrations = await prepareMigrations(currentDbVersion);

View File

@ -203,6 +203,13 @@ function fillInAdditionalProperties(entityChange: EntityChange) {
WHERE attachmentId = ?`,
[entityChange.entityId]
);
} else if (entityChange.entityName === "note_embeddings") {
// Note embeddings are backend-only entities for AI/vector search
// Frontend doesn't need the full embedding data (which is large binary data)
// Just ensure entity is marked as handled - actual sync happens at database level
if (!entityChange.isErased) {
entityChange.entity = { embedId: entityChange.entityId };
}
}
if (entityChange.entity instanceof AbstractBeccaEntity) {

View File

@ -14,37 +14,35 @@ import type { Express } from "express";
const MINIMUM_NODE_VERSION = "20.0.0";
// setup basic error handling even before requiring dependencies, since those can produce errors as well
export default async function startTriliumServer() {
// setup basic error handling even before requiring dependencies, since those can produce errors as well
process.on("unhandledRejection", (error: Error) => {
// this makes sure that stacktrace of failed promise is printed out
console.log(error);
process.on("unhandledRejection", (error: Error) => {
// this makes sure that stacktrace of failed promise is printed out
console.log(error);
// but also try to log it into file
log.info(error);
});
// but also try to log it into file
log.info(error);
});
function exit() {
console.log("Caught interrupt/termination signal. Exiting.");
process.exit(0);
}
function exit() {
console.log("Caught interrupt/termination signal. Exiting.");
process.exit(0);
}
process.on("SIGINT", exit);
process.on("SIGTERM", exit);
process.on("SIGINT", exit);
process.on("SIGTERM", exit);
if (utils.compareVersions(process.versions.node, MINIMUM_NODE_VERSION) < 0) {
console.error();
console.error(`The Trilium server requires Node.js ${MINIMUM_NODE_VERSION} or later in order to start.\n`);
console.error(`\tCurrent version:\t${process.versions.node}`);
console.error(`\tExpected version:\t${MINIMUM_NODE_VERSION}`);
console.error();
process.exit(1);
}
if (utils.compareVersions(process.versions.node, MINIMUM_NODE_VERSION) < 0) {
console.error();
console.error(`The Trilium server requires Node.js ${MINIMUM_NODE_VERSION} or later in order to start.\n`);
console.error(`\tCurrent version:\t${process.versions.node}`);
console.error(`\tExpected version:\t${MINIMUM_NODE_VERSION}`);
console.error();
process.exit(1);
}
tmp.setGracefulCleanup();
tmp.setGracefulCleanup();
startTrilium();
async function startTrilium() {
const app = await buildApp();
/**
@ -98,7 +96,7 @@ function startHttpServer(app: Express) {
log.info(`Trusted reverse proxy: ${app.get("trust proxy")}`);
let httpServer;
let httpServer: http.Server | https.Server;
if (config["Network"]["https"]) {
if (!config["Network"]["keyPath"] || !config["Network"]["keyPath"].trim().length) {

View File

@ -11,7 +11,6 @@ export default defineConfig(() => ({
setupFiles: ["./spec/setup.ts"],
environment: "node",
include: ['{src,spec}/**/*.{test,spec}.{js,mjs,cjs,ts,mts,cts,jsx,tsx}'],
reporters: ['default'],
coverage: {
reportsDirectory: './test-output/vitest/coverage',
provider: 'v8' as const,

View File

@ -10734,11 +10734,65 @@
"value": "bx bxs-data",
"isInheritable": false,
"position": 10
},
{
"type": "relation",
"name": "internalLink",
"value": "bOP3TB56fL1V",
"isInheritable": false,
"position": 20
}
],
"format": "markdown",
"dataFileName": "Metrics.md",
"attachments": []
"attachments": [
{
"attachmentId": "6FcnvEg39b88",
"title": "image.png",
"role": "image",
"mime": "image/png",
"position": 10,
"dataFileName": "Metrics_image.png"
},
{
"attachmentId": "amOIi8fzVhSM",
"title": "image.png",
"role": "image",
"mime": "image/png",
"position": 10,
"dataFileName": "1_Metrics_image.png"
},
{
"attachmentId": "Ojj9cAXPbxJO",
"title": "image.png",
"role": "image",
"mime": "image/png",
"position": 10,
"dataFileName": "2_Metrics_image.png"
}
],
"dirFileName": "Metrics",
"children": [
{
"isClone": false,
"noteId": "bOP3TB56fL1V",
"notePath": [
"pOsGYCXsbNQG",
"tC7s2alapj8V",
"uYF7pmepw27K",
"bOP3TB56fL1V"
],
"title": "grafana-dashboard.json",
"notePosition": 10,
"prefix": null,
"isExpanded": false,
"type": "code",
"mime": "application/json",
"attributes": [],
"dataFileName": "grafana-dashboard.json",
"attachments": []
}
]
}
]
},

Binary file not shown.

After

Width: | Height: | Size: 548 KiB

Binary file not shown.

After

Width: | Height: | Size: 52 KiB

View File

@ -105,4 +105,16 @@ scrape_configs:
* `400` - Invalid format parameter
* `401` - Missing or invalid ETAPI token
* `500` - Internal server error
## **Grafana Dashboard**
<figure class="image"><img style="aspect-ratio:2594/1568;" src="1_Metrics_image.png" width="2594" height="1568"></figure>
You can also use the Grafana dashboard created for TriliumNext: take the JSON from <a class="reference-link" href="Metrics/grafana-dashboard.json">grafana-dashboard.json</a> and import it as a new dashboard, following these screenshots:
<figure class="image"><img style="aspect-ratio:1881/282;" src="2_Metrics_image.png" width="1881" height="282"></figure>
Then paste the JSON, and hit load:
<figure class="image"><img style="aspect-ratio:1055/830;" src="Metrics_image.png" width="1055" height="830"></figure>
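If you want to sanity-check the endpoint before wiring up Prometheus or Grafana, a quick TypeScript sketch may help (the `/etapi/metrics` path and raw-token `Authorization` header are assumptions based on ETAPI conventions; substitute your own server URL and token):

```ts
// Hypothetical smoke test for the metrics endpoint.
const BASE_URL = 'http://localhost:8080';   // your Trilium server
const ETAPI_TOKEN = '<your-etapi-token>';   // an ETAPI token you created

const res = await fetch(`${BASE_URL}/etapi/metrics`, {
    headers: { Authorization: ETAPI_TOKEN }
});
if (!res.ok) {
    throw new Error(`Metrics request failed: ${res.status}`); // 401 suggests a bad or missing token
}
console.log(await res.text()); // the metrics payload
```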

File diff suppressed because it is too large

Binary file not shown.

After

Width: | Height: | Size: 73 KiB

pnpm-lock.yaml generated
View File

@ -107,7 +107,7 @@ importers:
version: 2.2.0(eslint@9.28.0(jiti@2.4.2))
happy-dom:
specifier: ~17.6.0
version: 17.6.1
version: 17.6.3
jiti:
specifier: 2.4.2
version: 2.4.2
@ -146,7 +146,7 @@ importers:
version: 4.5.4(@types/node@22.15.29)(rollup@4.40.0)(typescript@5.8.3)(vite@6.3.5(@types/node@22.15.29)(jiti@2.4.2)(less@4.1.3)(sass-embedded@1.87.0)(sass@1.87.0)(stylus@0.64.0)(sugarss@4.0.1(postcss@8.5.3))(terser@5.39.0)(tsx@4.19.4)(yaml@2.8.0))
vitest:
specifier: ^3.0.0
version: 3.2.1(@types/debug@4.1.12)(@types/node@22.15.29)(@vitest/ui@3.2.1)(happy-dom@17.6.1)(jiti@2.4.2)(jsdom@26.1.0(bufferutil@4.0.9)(utf-8-validate@6.0.5))(less@4.1.3)(msw@2.7.5(@types/node@22.15.29)(typescript@5.8.3))(sass-embedded@1.87.0)(sass@1.87.0)(stylus@0.64.0)(sugarss@4.0.1(postcss@8.5.3))(terser@5.39.0)(tsx@4.19.4)(yaml@2.8.0)
version: 3.2.1(@types/debug@4.1.12)(@types/node@22.15.29)(@vitest/ui@3.2.1)(happy-dom@17.6.3)(jiti@2.4.2)(jsdom@26.1.0(bufferutil@4.0.9)(utf-8-validate@6.0.5))(less@4.1.3)(msw@2.7.5(@types/node@22.15.29)(typescript@5.8.3))(sass-embedded@1.87.0)(sass@1.87.0)(stylus@0.64.0)(sugarss@4.0.1(postcss@8.5.3))(terser@5.39.0)(tsx@4.19.4)(yaml@2.8.0)
apps/client:
dependencies:
@ -314,8 +314,8 @@ importers:
specifier: 13.0.0
version: 13.0.0(webpack@5.99.9(@swc/core@1.11.29(@swc/helpers@0.5.17))(esbuild@0.25.5))
happy-dom:
specifier: 17.6.1
version: 17.6.1
specifier: 17.6.3
version: 17.6.3
script-loader:
specifier: 0.7.2
version: 0.7.2
@ -854,7 +854,7 @@ importers:
version: 2.0.0(typescript@5.8.3)(vite@6.3.5(@types/node@22.15.29)(jiti@2.4.2)(less@4.1.3)(sass-embedded@1.87.0)(sass@1.87.0)(stylus@0.64.0)(sugarss@4.0.1(postcss@8.5.3))(terser@5.39.0)(tsx@4.19.4)(yaml@2.8.0))
vitest:
specifier: ^3.0.5
version: 3.2.0(@types/debug@4.1.12)(@types/node@22.15.29)(@vitest/browser@3.2.0)(@vitest/ui@3.2.0)(happy-dom@17.6.1)(jiti@2.4.2)(jsdom@26.1.0(bufferutil@4.0.9)(utf-8-validate@6.0.5))(less@4.1.3)(msw@2.7.5(@types/node@22.15.29)(typescript@5.8.3))(sass-embedded@1.87.0)(sass@1.87.0)(stylus@0.64.0)(sugarss@4.0.1(postcss@8.5.3))(terser@5.39.0)(tsx@4.19.4)(yaml@2.8.0)
version: 3.2.0(@types/debug@4.1.12)(@types/node@22.15.29)(@vitest/browser@3.2.0)(@vitest/ui@3.2.0)(happy-dom@17.6.3)(jiti@2.4.2)(jsdom@26.1.0(bufferutil@4.0.9)(utf-8-validate@6.0.5))(less@4.1.3)(msw@2.7.5(@types/node@22.15.29)(typescript@5.8.3))(sass-embedded@1.87.0)(sass@1.87.0)(stylus@0.64.0)(sugarss@4.0.1(postcss@8.5.3))(terser@5.39.0)(tsx@4.19.4)(yaml@2.8.0)
webdriverio:
specifier: ^9.0.7
version: 9.15.0(bufferutil@4.0.9)(utf-8-validate@6.0.5)
@ -914,7 +914,7 @@ importers:
version: 2.0.0(typescript@5.8.3)(vite@6.3.5(@types/node@22.15.29)(jiti@2.4.2)(less@4.1.3)(sass-embedded@1.87.0)(sass@1.87.0)(stylus@0.64.0)(sugarss@4.0.1(postcss@8.5.3))(terser@5.39.0)(tsx@4.19.4)(yaml@2.8.0))
vitest:
specifier: ^3.0.5
version: 3.2.0(@types/debug@4.1.12)(@types/node@22.15.29)(@vitest/browser@3.2.0)(@vitest/ui@3.2.0)(happy-dom@17.6.1)(jiti@2.4.2)(jsdom@26.1.0(bufferutil@4.0.9)(utf-8-validate@6.0.5))(less@4.1.3)(msw@2.7.5(@types/node@22.15.29)(typescript@5.8.3))(sass-embedded@1.87.0)(sass@1.87.0)(stylus@0.64.0)(sugarss@4.0.1(postcss@8.5.3))(terser@5.39.0)(tsx@4.19.4)(yaml@2.8.0)
version: 3.2.0(@types/debug@4.1.12)(@types/node@22.15.29)(@vitest/browser@3.2.0)(@vitest/ui@3.2.0)(happy-dom@17.6.3)(jiti@2.4.2)(jsdom@26.1.0(bufferutil@4.0.9)(utf-8-validate@6.0.5))(less@4.1.3)(msw@2.7.5(@types/node@22.15.29)(typescript@5.8.3))(sass-embedded@1.87.0)(sass@1.87.0)(stylus@0.64.0)(sugarss@4.0.1(postcss@8.5.3))(terser@5.39.0)(tsx@4.19.4)(yaml@2.8.0)
webdriverio:
specifier: ^9.0.7
version: 9.15.0(bufferutil@4.0.9)(utf-8-validate@6.0.5)
@ -974,7 +974,7 @@ importers:
version: 2.0.0(typescript@5.8.3)(vite@6.3.5(@types/node@22.15.29)(jiti@2.4.2)(less@4.1.3)(sass-embedded@1.87.0)(sass@1.87.0)(stylus@0.64.0)(sugarss@4.0.1(postcss@8.5.3))(terser@5.39.0)(tsx@4.19.4)(yaml@2.8.0))
vitest:
specifier: ^3.0.5
version: 3.2.0(@types/debug@4.1.12)(@types/node@22.15.29)(@vitest/browser@3.2.0)(@vitest/ui@3.2.0)(happy-dom@17.6.1)(jiti@2.4.2)(jsdom@26.1.0(bufferutil@4.0.9)(utf-8-validate@6.0.5))(less@4.1.3)(msw@2.7.5(@types/node@22.15.29)(typescript@5.8.3))(sass-embedded@1.87.0)(sass@1.87.0)(stylus@0.64.0)(sugarss@4.0.1(postcss@8.5.3))(terser@5.39.0)(tsx@4.19.4)(yaml@2.8.0)
version: 3.2.0(@types/debug@4.1.12)(@types/node@22.15.29)(@vitest/browser@3.2.0)(@vitest/ui@3.2.0)(happy-dom@17.6.3)(jiti@2.4.2)(jsdom@26.1.0(bufferutil@4.0.9)(utf-8-validate@6.0.5))(less@4.1.3)(msw@2.7.5(@types/node@22.15.29)(typescript@5.8.3))(sass-embedded@1.87.0)(sass@1.87.0)(stylus@0.64.0)(sugarss@4.0.1(postcss@8.5.3))(terser@5.39.0)(tsx@4.19.4)(yaml@2.8.0)
webdriverio:
specifier: ^9.0.7
version: 9.15.0(bufferutil@4.0.9)(utf-8-validate@6.0.5)
@ -1041,7 +1041,7 @@ importers:
version: 2.0.0(typescript@5.8.3)(vite@6.3.5(@types/node@22.15.29)(jiti@2.4.2)(less@4.1.3)(sass-embedded@1.87.0)(sass@1.87.0)(stylus@0.64.0)(sugarss@4.0.1(postcss@8.5.3))(terser@5.39.0)(tsx@4.19.4)(yaml@2.8.0))
vitest:
specifier: ^3.0.5
version: 3.2.0(@types/debug@4.1.12)(@types/node@22.15.29)(@vitest/browser@3.2.0)(@vitest/ui@3.2.0)(happy-dom@17.6.1)(jiti@2.4.2)(jsdom@26.1.0(bufferutil@4.0.9)(utf-8-validate@6.0.5))(less@4.1.3)(msw@2.7.5(@types/node@22.15.29)(typescript@5.8.3))(sass-embedded@1.87.0)(sass@1.87.0)(stylus@0.64.0)(sugarss@4.0.1(postcss@8.5.3))(terser@5.39.0)(tsx@4.19.4)(yaml@2.8.0)
version: 3.2.0(@types/debug@4.1.12)(@types/node@22.15.29)(@vitest/browser@3.2.0)(@vitest/ui@3.2.0)(happy-dom@17.6.3)(jiti@2.4.2)(jsdom@26.1.0(bufferutil@4.0.9)(utf-8-validate@6.0.5))(less@4.1.3)(msw@2.7.5(@types/node@22.15.29)(typescript@5.8.3))(sass-embedded@1.87.0)(sass@1.87.0)(stylus@0.64.0)(sugarss@4.0.1(postcss@8.5.3))(terser@5.39.0)(tsx@4.19.4)(yaml@2.8.0)
webdriverio:
specifier: ^9.0.7
version: 9.15.0(bufferutil@4.0.9)(utf-8-validate@6.0.5)
@ -1108,7 +1108,7 @@ importers:
version: 2.0.0(typescript@5.8.3)(vite@6.3.5(@types/node@22.15.29)(jiti@2.4.2)(less@4.1.3)(sass-embedded@1.87.0)(sass@1.87.0)(stylus@0.64.0)(sugarss@4.0.1(postcss@8.5.3))(terser@5.39.0)(tsx@4.19.4)(yaml@2.8.0))
vitest:
specifier: ^3.0.5
version: 3.2.0(@types/debug@4.1.12)(@types/node@22.15.29)(@vitest/browser@3.2.0)(@vitest/ui@3.2.0)(happy-dom@17.6.1)(jiti@2.4.2)(jsdom@26.1.0(bufferutil@4.0.9)(utf-8-validate@6.0.5))(less@4.1.3)(msw@2.7.5(@types/node@22.15.29)(typescript@5.8.3))(sass-embedded@1.87.0)(sass@1.87.0)(stylus@0.64.0)(sugarss@4.0.1(postcss@8.5.3))(terser@5.39.0)(tsx@4.19.4)(yaml@2.8.0)
version: 3.2.0(@types/debug@4.1.12)(@types/node@22.15.29)(@vitest/browser@3.2.0)(@vitest/ui@3.2.0)(happy-dom@17.6.3)(jiti@2.4.2)(jsdom@26.1.0(bufferutil@4.0.9)(utf-8-validate@6.0.5))(less@4.1.3)(msw@2.7.5(@types/node@22.15.29)(typescript@5.8.3))(sass-embedded@1.87.0)(sass@1.87.0)(stylus@0.64.0)(sugarss@4.0.1(postcss@8.5.3))(terser@5.39.0)(tsx@4.19.4)(yaml@2.8.0)
webdriverio:
specifier: ^9.0.7
version: 9.15.0(bufferutil@4.0.9)(utf-8-validate@6.0.5)
@ -7723,9 +7723,9 @@ packages:
handle-thing@2.0.1:
resolution: {integrity: sha512-9Qn4yBxelxoh2Ow62nP+Ka/kMnOXRi8BXnRaUwezLNhqelnN49xKz4F/dPP8OYLxLxq6JDtZb2i9XznUQbNPTg==}
happy-dom@17.6.1:
resolution: {integrity: sha512-x2Iie2VWObB283X/KXwJ7g5f+4acfm8Pmf9KWUn6gujFUvFlSWytZYTv74bdFH40fPDedbI6/V6U3dYYMtW4SQ==}
engines: {node: '>=18.0.0'}
happy-dom@17.6.3:
resolution: {integrity: sha512-UVIHeVhxmxedbWPCfgS55Jg2rDfwf2BCKeylcPSqazLz5w3Kri7Q4xdBJubsr/+VUzFLh0VjIvh13RaDA2/Xug==}
engines: {node: '>=20.0.0'}
harmony-reflect@1.6.2:
resolution: {integrity: sha512-HIp/n38R9kQjDEziXyDTuW3vvoxxyxjxFzXLrBr18uB47GnSt+G9D29fqrpM5ZkspMcPICud3XsBJQ4Y2URg8g==}
@ -16460,7 +16460,7 @@ snapshots:
semver: 7.7.2
tsconfig-paths: 4.2.0
vite: 6.3.5(@types/node@22.15.29)(jiti@2.4.2)(less@4.1.3)(sass-embedded@1.87.0)(sass@1.87.0)(stylus@0.64.0)(sugarss@4.0.1(postcss@8.5.3))(terser@5.39.0)(tsx@4.19.4)(yaml@2.8.0)
vitest: 3.2.1(@types/debug@4.1.12)(@types/node@22.15.29)(@vitest/ui@3.2.1)(happy-dom@17.6.1)(jiti@2.4.2)(jsdom@26.1.0(bufferutil@4.0.9)(utf-8-validate@6.0.5))(less@4.1.3)(msw@2.7.5(@types/node@22.15.29)(typescript@5.8.3))(sass-embedded@1.87.0)(sass@1.87.0)(stylus@0.64.0)(sugarss@4.0.1(postcss@8.5.3))(terser@5.39.0)(tsx@4.19.4)(yaml@2.8.0)
vitest: 3.2.1(@types/debug@4.1.12)(@types/node@22.15.29)(@vitest/ui@3.2.1)(happy-dom@17.6.3)(jiti@2.4.2)(jsdom@26.1.0(bufferutil@4.0.9)(utf-8-validate@6.0.5))(less@4.1.3)(msw@2.7.5(@types/node@22.15.29)(typescript@5.8.3))(sass-embedded@1.87.0)(sass@1.87.0)(stylus@0.64.0)(sugarss@4.0.1(postcss@8.5.3))(terser@5.39.0)(tsx@4.19.4)(yaml@2.8.0)
transitivePeerDependencies:
- '@babel/traverse'
- '@swc-node/register'
@ -17965,7 +17965,7 @@ snapshots:
magic-string: 0.30.17
sirv: 3.0.1
tinyrainbow: 2.0.0
vitest: 3.2.0(@types/debug@4.1.12)(@types/node@22.15.29)(@vitest/browser@3.2.0)(@vitest/ui@3.2.0)(happy-dom@17.6.1)(jiti@2.4.2)(jsdom@26.1.0(bufferutil@4.0.9)(utf-8-validate@6.0.5))(less@4.1.3)(msw@2.7.5(@types/node@22.15.29)(typescript@5.8.3))(sass-embedded@1.87.0)(sass@1.87.0)(stylus@0.64.0)(sugarss@4.0.1(postcss@8.5.3))(terser@5.39.0)(tsx@4.19.4)(yaml@2.8.0)
vitest: 3.2.0(@types/debug@4.1.12)(@types/node@22.15.29)(@vitest/browser@3.2.0)(@vitest/ui@3.2.0)(happy-dom@17.6.3)(jiti@2.4.2)(jsdom@26.1.0(bufferutil@4.0.9)(utf-8-validate@6.0.5))(less@4.1.3)(msw@2.7.5(@types/node@22.15.29)(typescript@5.8.3))(sass-embedded@1.87.0)(sass@1.87.0)(stylus@0.64.0)(sugarss@4.0.1(postcss@8.5.3))(terser@5.39.0)(tsx@4.19.4)(yaml@2.8.0)
ws: 8.18.2(bufferutil@4.0.9)(utf-8-validate@6.0.5)
optionalDependencies:
playwright: 1.52.0
@ -17988,7 +17988,7 @@ snapshots:
magicast: 0.3.5
test-exclude: 7.0.1
tinyrainbow: 2.0.0
vitest: 3.2.0(@types/debug@4.1.12)(@types/node@22.15.29)(@vitest/browser@3.2.0)(@vitest/ui@3.2.0)(happy-dom@17.6.1)(jiti@2.4.2)(jsdom@26.1.0(bufferutil@4.0.9)(utf-8-validate@6.0.5))(less@4.1.3)(msw@2.7.5(@types/node@22.15.29)(typescript@5.8.3))(sass-embedded@1.87.0)(sass@1.87.0)(stylus@0.64.0)(sugarss@4.0.1(postcss@8.5.3))(terser@5.39.0)(tsx@4.19.4)(yaml@2.8.0)
vitest: 3.2.0(@types/debug@4.1.12)(@types/node@22.15.29)(@vitest/browser@3.2.0)(@vitest/ui@3.2.0)(happy-dom@17.6.3)(jiti@2.4.2)(jsdom@26.1.0(bufferutil@4.0.9)(utf-8-validate@6.0.5))(less@4.1.3)(msw@2.7.5(@types/node@22.15.29)(typescript@5.8.3))(sass-embedded@1.87.0)(sass@1.87.0)(stylus@0.64.0)(sugarss@4.0.1(postcss@8.5.3))(terser@5.39.0)(tsx@4.19.4)(yaml@2.8.0)
transitivePeerDependencies:
- supports-color
@ -18007,7 +18007,7 @@ snapshots:
std-env: 3.9.0
test-exclude: 7.0.1
tinyrainbow: 2.0.0
vitest: 3.2.1(@types/debug@4.1.12)(@types/node@22.15.29)(@vitest/ui@3.2.1)(happy-dom@17.6.1)(jiti@2.4.2)(jsdom@26.1.0(bufferutil@4.0.9)(utf-8-validate@6.0.5))(less@4.1.3)(msw@2.7.5(@types/node@22.15.29)(typescript@5.8.3))(sass-embedded@1.87.0)(sass@1.87.0)(stylus@0.64.0)(sugarss@4.0.1(postcss@8.5.3))(terser@5.39.0)(tsx@4.19.4)(yaml@2.8.0)
vitest: 3.2.1(@types/debug@4.1.12)(@types/node@22.15.29)(@vitest/ui@3.2.1)(happy-dom@17.6.3)(jiti@2.4.2)(jsdom@26.1.0(bufferutil@4.0.9)(utf-8-validate@6.0.5))(less@4.1.3)(msw@2.7.5(@types/node@22.15.29)(typescript@5.8.3))(sass-embedded@1.87.0)(sass@1.87.0)(stylus@0.64.0)(sugarss@4.0.1(postcss@8.5.3))(terser@5.39.0)(tsx@4.19.4)(yaml@2.8.0)
transitivePeerDependencies:
- supports-color
@ -18092,7 +18092,7 @@ snapshots:
sirv: 3.0.1
tinyglobby: 0.2.14
tinyrainbow: 2.0.0
vitest: 3.2.0(@types/debug@4.1.12)(@types/node@22.15.29)(@vitest/browser@3.2.0)(@vitest/ui@3.2.0)(happy-dom@17.6.1)(jiti@2.4.2)(jsdom@26.1.0(bufferutil@4.0.9)(utf-8-validate@6.0.5))(less@4.1.3)(msw@2.7.5(@types/node@22.15.29)(typescript@5.8.3))(sass-embedded@1.87.0)(sass@1.87.0)(stylus@0.64.0)(sugarss@4.0.1(postcss@8.5.3))(terser@5.39.0)(tsx@4.19.4)(yaml@2.8.0)
vitest: 3.2.0(@types/debug@4.1.12)(@types/node@22.15.29)(@vitest/browser@3.2.0)(@vitest/ui@3.2.0)(happy-dom@17.6.3)(jiti@2.4.2)(jsdom@26.1.0(bufferutil@4.0.9)(utf-8-validate@6.0.5))(less@4.1.3)(msw@2.7.5(@types/node@22.15.29)(typescript@5.8.3))(sass-embedded@1.87.0)(sass@1.87.0)(stylus@0.64.0)(sugarss@4.0.1(postcss@8.5.3))(terser@5.39.0)(tsx@4.19.4)(yaml@2.8.0)
optional: true
'@vitest/ui@3.2.1(vitest@3.2.1)':
@ -18104,7 +18104,7 @@ snapshots:
sirv: 3.0.1
tinyglobby: 0.2.14
tinyrainbow: 2.0.0
vitest: 3.2.1(@types/debug@4.1.12)(@types/node@22.15.29)(@vitest/ui@3.2.1)(happy-dom@17.6.1)(jiti@2.4.2)(jsdom@26.1.0(bufferutil@4.0.9)(utf-8-validate@6.0.5))(less@4.1.3)(msw@2.7.5(@types/node@22.15.29)(typescript@5.8.3))(sass-embedded@1.87.0)(sass@1.87.0)(stylus@0.64.0)(sugarss@4.0.1(postcss@8.5.3))(terser@5.39.0)(tsx@4.19.4)(yaml@2.8.0)
vitest: 3.2.1(@types/debug@4.1.12)(@types/node@22.15.29)(@vitest/ui@3.2.1)(happy-dom@17.6.3)(jiti@2.4.2)(jsdom@26.1.0(bufferutil@4.0.9)(utf-8-validate@6.0.5))(less@4.1.3)(msw@2.7.5(@types/node@22.15.29)(typescript@5.8.3))(sass-embedded@1.87.0)(sass@1.87.0)(stylus@0.64.0)(sugarss@4.0.1(postcss@8.5.3))(terser@5.39.0)(tsx@4.19.4)(yaml@2.8.0)
'@vitest/utils@3.2.0':
dependencies:
@ -21548,7 +21548,7 @@ snapshots:
handle-thing@2.0.1: {}
happy-dom@17.6.1:
happy-dom@17.6.3:
dependencies:
webidl-conversions: 7.0.0
whatwg-mimetype: 3.0.0
@ -27074,7 +27074,7 @@ snapshots:
tsx: 4.19.4
yaml: 2.8.0
vitest@3.2.0(@types/debug@4.1.12)(@types/node@22.15.29)(@vitest/browser@3.2.0)(@vitest/ui@3.2.0)(happy-dom@17.6.1)(jiti@2.4.2)(jsdom@26.1.0(bufferutil@4.0.9)(utf-8-validate@6.0.5))(less@4.1.3)(msw@2.7.5(@types/node@22.15.29)(typescript@5.8.3))(sass-embedded@1.87.0)(sass@1.87.0)(stylus@0.64.0)(sugarss@4.0.1(postcss@8.5.3))(terser@5.39.0)(tsx@4.19.4)(yaml@2.8.0):
vitest@3.2.0(@types/debug@4.1.12)(@types/node@22.15.29)(@vitest/browser@3.2.0)(@vitest/ui@3.2.0)(happy-dom@17.6.3)(jiti@2.4.2)(jsdom@26.1.0(bufferutil@4.0.9)(utf-8-validate@6.0.5))(less@4.1.3)(msw@2.7.5(@types/node@22.15.29)(typescript@5.8.3))(sass-embedded@1.87.0)(sass@1.87.0)(stylus@0.64.0)(sugarss@4.0.1(postcss@8.5.3))(terser@5.39.0)(tsx@4.19.4)(yaml@2.8.0):
dependencies:
'@types/chai': 5.2.2
'@vitest/expect': 3.2.0
@ -27104,7 +27104,7 @@ snapshots:
'@types/node': 22.15.29
'@vitest/browser': 3.2.0(bufferutil@4.0.9)(msw@2.7.5(@types/node@22.15.29)(typescript@5.8.3))(playwright@1.52.0)(utf-8-validate@6.0.5)(vite@6.3.5(@types/node@22.15.29)(jiti@2.4.2)(less@4.1.3)(sass-embedded@1.87.0)(sass@1.87.0)(stylus@0.64.0)(sugarss@4.0.1(postcss@8.5.3))(terser@5.39.0)(tsx@4.19.4)(yaml@2.8.0))(vitest@3.2.0)(webdriverio@9.15.0(bufferutil@4.0.9)(utf-8-validate@6.0.5))
'@vitest/ui': 3.2.0(vitest@3.2.0)
happy-dom: 17.6.1
happy-dom: 17.6.3
jsdom: 26.1.0(bufferutil@4.0.9)(utf-8-validate@6.0.5)
transitivePeerDependencies:
- jiti
@ -27120,7 +27120,7 @@ snapshots:
- tsx
- yaml
vitest@3.2.1(@types/debug@4.1.12)(@types/node@22.15.29)(@vitest/ui@3.2.1)(happy-dom@17.6.1)(jiti@2.4.2)(jsdom@26.1.0(bufferutil@4.0.9)(utf-8-validate@6.0.5))(less@4.1.3)(msw@2.7.5(@types/node@22.15.29)(typescript@5.8.3))(sass-embedded@1.87.0)(sass@1.87.0)(stylus@0.64.0)(sugarss@4.0.1(postcss@8.5.3))(terser@5.39.0)(tsx@4.19.4)(yaml@2.8.0):
vitest@3.2.1(@types/debug@4.1.12)(@types/node@22.15.29)(@vitest/ui@3.2.1)(happy-dom@17.6.3)(jiti@2.4.2)(jsdom@26.1.0(bufferutil@4.0.9)(utf-8-validate@6.0.5))(less@4.1.3)(msw@2.7.5(@types/node@22.15.29)(typescript@5.8.3))(sass-embedded@1.87.0)(sass@1.87.0)(stylus@0.64.0)(sugarss@4.0.1(postcss@8.5.3))(terser@5.39.0)(tsx@4.19.4)(yaml@2.8.0):
dependencies:
'@types/chai': 5.2.2
'@vitest/expect': 3.2.1
@ -27149,7 +27149,7 @@ snapshots:
'@types/debug': 4.1.12
'@types/node': 22.15.29
'@vitest/ui': 3.2.1(vitest@3.2.1)
happy-dom: 17.6.1
happy-dom: 17.6.3
jsdom: 26.1.0(bufferutil@4.0.9)(utf-8-validate@6.0.5)
transitivePeerDependencies:
- jiti