Merge pull request #2082 from TriliumNext/feat/llm-integration-part2

LLM integration, part 2
This commit is contained in:
Elian Doran 2025-06-01 09:17:54 +03:00 committed by GitHub
commit 96a5729b60
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
23 changed files with 1433 additions and 251 deletions

View File

@ -272,4 +272,179 @@
justify-content: center;
padding: 1rem;
color: var(--muted-text-color);
}
/* Thinking display styles */

/* Outermost wrapper for the "AI is thinking" bubble; hidden/shown from JS. */
.llm-thinking-container {
margin: 1rem 0;
animation: fadeInUp 0.3s ease-out;
}

/* The bubble itself; overflow:hidden clips the ::before shimmer overlay. */
.thinking-bubble {
background-color: var(--accented-background-color, var(--main-background-color));
border: 1px solid var(--subtle-border-color, var(--main-border-color));
border-radius: 0.75rem;
padding: 0.75rem;
box-shadow: 0 2px 8px rgba(0, 0, 0, 0.05);
position: relative;
overflow: hidden;
transition: all 0.2s ease;
}
.thinking-bubble:hover {
box-shadow: 0 4px 12px rgba(0, 0, 0, 0.08);
}

/* Full-size gradient overlay swept left-to-right by the shimmer keyframes. */
.thinking-bubble::before {
content: '';
position: absolute;
top: 0;
left: -100%;
width: 100%;
height: 100%;
background: linear-gradient(90deg, transparent, var(--hover-item-background-color, rgba(0, 0, 0, 0.03)), transparent);
animation: shimmer 2s infinite;
opacity: 0.5;
}

/* Header row is clickable (toggles the details pane — see widget JS). */
.thinking-header {
cursor: pointer;
transition: all 0.2s ease;
border-radius: 0.375rem;
}
/* Negative margin cancels the hover padding so the layout does not shift. */
.thinking-header:hover {
background-color: var(--hover-item-background-color, rgba(0, 0, 0, 0.03));
padding: 0.25rem;
margin: -0.25rem;
}

/* Three pulsing dots; each child is phase-shifted via animation-delay. */
.thinking-dots {
display: flex;
gap: 3px;
align-items: center;
}
.thinking-dots span {
width: 6px;
height: 6px;
background-color: var(--link-color, var(--hover-item-text-color));
border-radius: 50%;
animation: thinkingPulse 1.4s infinite ease-in-out;
}
.thinking-dots span:nth-child(1) {
animation-delay: -0.32s;
}
.thinking-dots span:nth-child(2) {
animation-delay: -0.16s;
}
.thinking-dots span:nth-child(3) {
animation-delay: 0s;
}
.thinking-label {
font-weight: 500;
color: var(--link-color, var(--hover-item-text-color)) !important;
}

/* Chevron button; .expanded flips it 180° when the details pane is open. */
.thinking-toggle {
color: var(--muted-text-color) !important;
transition: transform 0.2s ease;
background: transparent !important;
border: none !important;
}
.thinking-toggle:hover {
color: var(--main-text-color) !important;
}
.thinking-toggle.expanded {
transform: rotate(180deg);
}

/* Collapsible details pane under the header (display toggled from JS). */
.thinking-content {
margin-top: 0.75rem;
padding-top: 0.75rem;
border-top: 1px solid var(--subtle-border-color, var(--main-border-color));
animation: expandDown 0.3s ease-out;
}

/* The thinking text itself; pre-wrap preserves the model's line breaks. */
.thinking-text {
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', system-ui, sans-serif;
font-size: 0.875rem;
line-height: 1.5;
color: var(--main-text-color);
white-space: pre-wrap;
word-wrap: break-word;
background-color: var(--input-background-color);
padding: 0.75rem;
border-radius: 0.5rem;
border: 1px solid var(--subtle-border-color, var(--main-border-color));
max-height: 300px;
overflow-y: auto;
transition: border-color 0.2s ease;
}
.thinking-text:hover {
border-color: var(--main-border-color);
}

/* Animations */
@keyframes thinkingPulse {
0%, 80%, 100% {
transform: scale(0.8);
opacity: 0.6;
}
40% {
transform: scale(1);
opacity: 1;
}
}
@keyframes shimmer {
0% {
left: -100%;
}
100% {
left: 100%;
}
}
@keyframes fadeInUp {
from {
opacity: 0;
transform: translateY(10px);
}
to {
opacity: 1;
transform: translateY(0);
}
}
/* max-height animation matches the 300px cap on .thinking-text above. */
@keyframes expandDown {
from {
opacity: 0;
max-height: 0;
}
to {
opacity: 1;
max-height: 300px;
}
}

/* Responsive adjustments */
@media (max-width: 768px) {
.thinking-bubble {
margin: 0.5rem 0;
padding: 0.5rem;
}
.thinking-text {
font-size: 0.8rem;
padding: 0.5rem;
max-height: 200px;
}
}

View File

@ -5,6 +5,7 @@ import BasicWidget from "../basic_widget.js";
import toastService from "../../services/toast.js";
import appContext from "../../components/app_context.js";
import server from "../../services/server.js";
import noteAutocompleteService from "../../services/note_autocomplete.js";
import { TPL, addMessageToChat, showSources, hideSources, showLoadingIndicator, hideLoadingIndicator } from "./ui.js";
import { formatMarkdown } from "./utils.js";
@ -13,13 +14,16 @@ import { extractInChatToolSteps } from "./message_processor.js";
import { validateEmbeddingProviders } from "./validation.js";
import type { MessageData, ToolExecutionStep, ChatData } from "./types.js";
import { formatCodeBlocks } from "../../services/syntax_highlight.js";
import { ClassicEditor, type CKTextEditor, type MentionFeed } from "@triliumnext/ckeditor5";
import type { Suggestion } from "../../services/note_autocomplete.js";
import "../../stylesheets/llm_chat.css";
export default class LlmChatPanel extends BasicWidget {
private noteContextChatMessages!: HTMLElement;
private noteContextChatForm!: HTMLFormElement;
private noteContextChatInput!: HTMLTextAreaElement;
private noteContextChatInput!: HTMLElement;
private noteContextChatInputEditor!: CKTextEditor;
private noteContextChatSendButton!: HTMLButtonElement;
private chatContainer!: HTMLElement;
private loadingIndicator!: HTMLElement;
@ -29,6 +33,10 @@ export default class LlmChatPanel extends BasicWidget {
private useAdvancedContextCheckbox!: HTMLInputElement;
private showThinkingCheckbox!: HTMLInputElement;
private validationWarning!: HTMLElement;
private thinkingContainer!: HTMLElement;
private thinkingBubble!: HTMLElement;
private thinkingText!: HTMLElement;
private thinkingToggle!: HTMLElement;
private chatNoteId: string | null = null;
private noteId: string | null = null; // The actual noteId for the Chat Note
private currentNoteId: string | null = null;
@ -104,7 +112,7 @@ export default class LlmChatPanel extends BasicWidget {
const element = this.$widget[0];
this.noteContextChatMessages = element.querySelector('.note-context-chat-messages') as HTMLElement;
this.noteContextChatForm = element.querySelector('.note-context-chat-form') as HTMLFormElement;
this.noteContextChatInput = element.querySelector('.note-context-chat-input') as HTMLTextAreaElement;
this.noteContextChatInput = element.querySelector('.note-context-chat-input') as HTMLElement;
this.noteContextChatSendButton = element.querySelector('.note-context-chat-send-button') as HTMLButtonElement;
this.chatContainer = element.querySelector('.note-context-chat-container') as HTMLElement;
this.loadingIndicator = element.querySelector('.loading-indicator') as HTMLElement;
@ -114,6 +122,10 @@ export default class LlmChatPanel extends BasicWidget {
this.useAdvancedContextCheckbox = element.querySelector('.use-advanced-context-checkbox') as HTMLInputElement;
this.showThinkingCheckbox = element.querySelector('.show-thinking-checkbox') as HTMLInputElement;
this.validationWarning = element.querySelector('.provider-validation-warning') as HTMLElement;
this.thinkingContainer = element.querySelector('.llm-thinking-container') as HTMLElement;
this.thinkingBubble = element.querySelector('.thinking-bubble') as HTMLElement;
this.thinkingText = element.querySelector('.thinking-text') as HTMLElement;
this.thinkingToggle = element.querySelector('.thinking-toggle') as HTMLElement;
// Set up event delegation for the settings link
this.validationWarning.addEventListener('click', (e) => {
@ -124,15 +136,84 @@ export default class LlmChatPanel extends BasicWidget {
}
});
this.initializeEventListeners();
// Set up thinking toggle functionality
this.setupThinkingToggle();
// Initialize CKEditor with mention support (async)
this.initializeCKEditor().then(() => {
this.initializeEventListeners();
}).catch(error => {
console.error('Failed to initialize CKEditor, falling back to basic event listeners:', error);
this.initializeBasicEventListeners();
});
return this.$widget;
}
// Replaces the chat-input element with a toolbar-less CKEditor instance that
// supports "@" note mentions. Called from doRender(); on failure the caller
// falls back to initializeBasicEventListeners().
private async initializeCKEditor() {
// Single mention feed: typing "@" queries the note-autocomplete service.
const mentionSetup: MentionFeed[] = [
{
marker: "@",
feed: (queryText: string) => noteAutocompleteService.autocompleteSourceForCKEditor(queryText),
itemRenderer: (item) => {
const suggestion = item as Suggestion;
const itemElement = document.createElement("button");
// NOTE(review): highlightedNotePathTitle is injected as HTML — confirm
// it is sanitized/escaped upstream in the autocomplete service.
itemElement.innerHTML = `${suggestion.highlightedNotePathTitle} `;
return itemElement;
},
// Show suggestions immediately after the "@" marker, before any query text.
minimumCharacters: 0
}
];

this.noteContextChatInputEditor = await ClassicEditor.create(this.noteContextChatInput, {
toolbar: {
items: [] // No toolbar for chat input
},
// Placeholder comes from the template's data-placeholder attribute.
placeholder: this.noteContextChatInput.getAttribute('data-placeholder') || 'Enter your message...',
mention: {
feeds: mentionSetup
},
licenseKey: "GPL"
});

// Set minimal height — mirrors the inline style on the template element.
const editorElement = this.noteContextChatInputEditor.ui.getEditableElement();
if (editorElement) {
editorElement.style.minHeight = '60px';
editorElement.style.maxHeight = '200px';
editorElement.style.overflowY = 'auto';
}

// Set up keybindings after editor is ready (Enter submits, Shift+Enter newline).
this.setupEditorKeyBindings();

console.log('CKEditor initialized successfully for LLM chat input');
}
// Minimal fallback wiring used only when CKEditor failed to initialize;
// in that mode the input container is expected to still hold a <textarea>.
private initializeBasicEventListeners() {
    this.noteContextChatForm.addEventListener('submit', (e) => {
        e.preventDefault();
        // In fallback mode, read the message straight from the textarea.
        const text = this.noteContextChatInput.querySelector('textarea')?.value;
        if (text !== undefined) {
            this.sendMessage(text);
        }
    });
}
// Releases resources owned by this panel: WebSocket handler references and
// the CKEditor instance (if one was created).
cleanup() {
    console.log(`LlmChatPanel cleanup called, removing any active WebSocket subscriptions`);

    // Drop message-handler references so they can be garbage-collected.
    this._messageHandler = null;
    this._messageHandlerId = null;

    // destroy() is asynchronous; log (rather than propagate) teardown failures.
    const editor = this.noteContextChatInputEditor;
    if (editor) {
        editor.destroy().catch(error => {
            console.error('Error destroying CKEditor:', error);
        });
    }
}
/**
@ -531,18 +612,31 @@ export default class LlmChatPanel extends BasicWidget {
private async sendMessage(content: string) {
if (!content.trim()) return;
// Extract mentions from the content if using CKEditor
let mentions: Array<{noteId: string; title: string; notePath: string}> = [];
let plainTextContent = content;
if (this.noteContextChatInputEditor) {
const extracted = this.extractMentionsAndContent(content);
mentions = extracted.mentions;
plainTextContent = extracted.content;
}
// Add the user message to the UI and data model
this.addMessageToChat('user', content);
this.addMessageToChat('user', plainTextContent);
this.messages.push({
role: 'user',
content: content
content: plainTextContent,
mentions: mentions.length > 0 ? mentions : undefined
});
// Save the data immediately after a user message
await this.saveCurrentData();
// Clear input and show loading state
this.noteContextChatInput.value = '';
if (this.noteContextChatInputEditor) {
this.noteContextChatInputEditor.setData('');
}
showLoadingIndicator(this.loadingIndicator);
this.hideSources();
@ -555,9 +649,10 @@ export default class LlmChatPanel extends BasicWidget {
// Create the message parameters
const messageParams = {
content,
content: plainTextContent,
useAdvancedContext,
showThinking
showThinking,
mentions: mentions.length > 0 ? mentions : undefined
};
// Try websocket streaming (preferred method)
@ -621,7 +716,9 @@ export default class LlmChatPanel extends BasicWidget {
}
// Clear input and show loading state
this.noteContextChatInput.value = '';
if (this.noteContextChatInputEditor) {
this.noteContextChatInputEditor.setData('');
}
showLoadingIndicator(this.loadingIndicator);
this.hideSources();
@ -898,6 +995,16 @@ export default class LlmChatPanel extends BasicWidget {
* Update the UI with streaming content
*/
private updateStreamingUI(assistantResponse: string, isDone: boolean = false) {
// Parse and handle thinking content if present
if (!isDone) {
const thinkingContent = this.parseThinkingContent(assistantResponse);
if (thinkingContent) {
this.updateThinkingText(thinkingContent);
// Don't display the raw response with think tags in the chat
return;
}
}
// Get the existing assistant message or create a new one
let assistantMessageEl = this.noteContextChatMessages.querySelector('.assistant-message:last-child');
@ -919,14 +1026,20 @@ export default class LlmChatPanel extends BasicWidget {
assistantMessageEl.appendChild(messageContent);
}
// Clean the response to remove thinking tags before displaying
const cleanedResponse = this.removeThinkingTags(assistantResponse);
// Update the content
const messageContent = assistantMessageEl.querySelector('.message-content') as HTMLElement;
messageContent.innerHTML = formatMarkdown(assistantResponse);
messageContent.innerHTML = formatMarkdown(cleanedResponse);
// Apply syntax highlighting if this is the final update
if (isDone) {
formatCodeBlocks($(assistantMessageEl as HTMLElement));
// Hide the thinking display when response is complete
this.hideThinkingDisplay();
// Update message in the data model for storage
// Find the last assistant message to update, or add a new one if none exists
const assistantMessages = this.messages.filter(msg => msg.role === 'assistant');
@ -934,13 +1047,13 @@ export default class LlmChatPanel extends BasicWidget {
this.messages.lastIndexOf(assistantMessages[assistantMessages.length - 1]) : -1;
if (lastAssistantMsgIndex >= 0) {
// Update existing message
this.messages[lastAssistantMsgIndex].content = assistantResponse;
// Update existing message with cleaned content
this.messages[lastAssistantMsgIndex].content = cleanedResponse;
} else {
// Add new message
// Add new message with cleaned content
this.messages.push({
role: 'assistant',
content: assistantResponse
content: cleanedResponse
});
}
@ -957,6 +1070,16 @@ export default class LlmChatPanel extends BasicWidget {
this.chatContainer.scrollTop = this.chatContainer.scrollHeight;
}
/**
* Remove thinking tags from response content
*/
// Strips every complete <think>...</think> block from the response and
// trims the remainder; falsy input is returned unchanged.
private removeThinkingTags(content: string): string {
    if (!content) {
        return content;
    }
    const thinkBlock = /<think>[\s\S]*?<\/think>/gi;
    return content.replace(thinkBlock, '').trim();
}
/**
* Handle general errors in the send message flow
*/
@ -1203,32 +1326,308 @@ export default class LlmChatPanel extends BasicWidget {
* Show thinking state in the UI
*/
// Shows the in-chat thinking bubble for a streamed "thinking" payload.
// Prefers the text extracted from <think> tags; falls back to the raw
// payload when nothing can be parsed out of it.
private showThinkingState(thinkingData: string) {
    this.showThinkingDisplay(this.parseThinkingContent(thinkingData) || thinkingData);

    // Keep the spinner visible while the model is still thinking.
    this.loadingIndicator.style.display = 'flex';
}
/**
* Parse thinking content from LLM response
*/
// Extracts human-readable "thinking" text from an LLM payload, trying three
// strategies in order: complete <think> blocks, an unterminated <think> block
// (stream still in progress), then a keyword heuristic. Returns null when the
// content does not look like thinking output.
private parseThinkingContent(content: string): string | null {
if (!content) return null;

// Strategy 1: collect every complete <think>...</think> block.
const thinkRegex = /<think>([\s\S]*?)<\/think>/gi;
const matches: string[] = [];
let match: RegExpExecArray | null;

while ((match = thinkRegex.exec(content)) !== null) {
matches.push(match[1].trim());
}

if (matches.length > 0) {
// Multiple blocks are joined with a visible separator for the UI.
return matches.join('\n\n--- Next thought ---\n\n');
}

// Strategy 2: an opening <think> with no closing tag yet means the stream
// is mid-thought; surface what we have so far with a progress marker.
const incompleteThinkRegex = /<think>([\s\S]*?)$/i;
const incompleteMatch = content.match(incompleteThinkRegex);

if (incompleteMatch && incompleteMatch[1]) {
return incompleteMatch[1].trim() + '\n\n[Thinking in progress...]';
}

// Strategy 3: no tags at all — heuristically treat the whole payload as
// thinking if it contains typical reasoning phrases. This can misfire on
// ordinary answers that mention these words; callers only use it for the
// thinking display, so a false positive is cosmetic.
if (content.toLowerCase().includes('thinking') ||
content.toLowerCase().includes('reasoning') ||
content.toLowerCase().includes('let me think') ||
content.toLowerCase().includes('i need to') ||
content.toLowerCase().includes('first, ') ||
content.toLowerCase().includes('step 1') ||
content.toLowerCase().includes('analysis:')) {
return content;
}

return null;
}
private initializeEventListeners() {
this.noteContextChatForm.addEventListener('submit', (e) => {
e.preventDefault();
const content = this.noteContextChatInput.value;
this.sendMessage(content);
});
// Add auto-resize functionality to the textarea
this.noteContextChatInput.addEventListener('input', () => {
this.noteContextChatInput.style.height = 'auto';
this.noteContextChatInput.style.height = `${this.noteContextChatInput.scrollHeight}px`;
});
let content = '';
// Handle Enter key (send on Enter, new line on Shift+Enter)
this.noteContextChatInput.addEventListener('keydown', (e) => {
if (e.key === 'Enter' && !e.shiftKey) {
e.preventDefault();
this.noteContextChatForm.dispatchEvent(new Event('submit'));
if (this.noteContextChatInputEditor && this.noteContextChatInputEditor.getData) {
// Use CKEditor content
content = this.noteContextChatInputEditor.getData();
} else {
// Fallback: check if there's a textarea (fallback mode)
const textarea = this.noteContextChatInput.querySelector('textarea');
if (textarea) {
content = textarea.value;
} else {
// Last resort: try to get text content from the div
content = this.noteContextChatInput.textContent || this.noteContextChatInput.innerText || '';
}
}
if (content.trim()) {
this.sendMessage(content);
}
});
// Handle Enter key (send on Enter, new line on Shift+Enter) via CKEditor
// We'll set this up after CKEditor is initialized
this.setupEditorKeyBindings();
}
// Wires Enter-to-send on the CKEditor instance: plain Enter submits the
// chat form, Shift+Enter falls through to CKEditor's default (newline).
// Safe to call before the editor exists — it simply does nothing then.
private setupEditorKeyBindings() {
    const editor = this.noteContextChatInputEditor;
    if (!editor || !editor.keystrokes) {
        return;
    }
    try {
        editor.keystrokes.set('Enter', (key, stop) => {
            if (!key.shiftKey) {
                stop();
                this.noteContextChatForm.dispatchEvent(new Event('submit'));
            }
        });
        console.log('CKEditor keybindings set up successfully');
    } catch (error) {
        console.warn('Failed to set up CKEditor keybindings:', error);
    }
}
/**
* Extract note mentions and content from CKEditor
*/
// Parses the CKEditor HTML output, collecting note mentions (as
// {noteId, title, notePath}) and returning the message as plain text.
// The three extraction methods below mirror the markup variants CKEditor
// may emit for mentions; which variant appears depends on the mention
// plugin configuration — TODO confirm against actual editor output.
private extractMentionsAndContent(editorData: string): { content: string; mentions: Array<{noteId: string; title: string; notePath: string}> } {
const mentions: Array<{noteId: string; title: string; notePath: string}> = [];

// Parse the HTML content to extract mentions
const tempDiv = document.createElement('div');
tempDiv.innerHTML = editorData;

// Find all mention elements - CKEditor uses specific patterns for mentions
// Look for elements with data-mention attribute or specific mention classes
const mentionElements = tempDiv.querySelectorAll('[data-mention], .mention, span[data-id]');

mentionElements.forEach(mentionEl => {
try {
// Try different ways to extract mention data based on CKEditor's format
let mentionData: any = null;

// Method 1: data-mention attribute (JSON format)
if (mentionEl.hasAttribute('data-mention')) {
mentionData = JSON.parse(mentionEl.getAttribute('data-mention') || '{}');
}
// Method 2: data-id attribute (simple format)
else if (mentionEl.hasAttribute('data-id')) {
const dataId = mentionEl.getAttribute('data-id');
const textContent = mentionEl.textContent || '';

// Parse the dataId to extract note information
if (dataId && dataId.startsWith('@')) {
const cleanId = dataId.substring(1); // Remove the @
mentionData = {
id: cleanId,
name: textContent,
notePath: cleanId // Assume the ID contains the path
};
}
}
// Method 3: Check if this is a reference link (href=#notePath)
else if (mentionEl.tagName === 'A' && mentionEl.hasAttribute('href')) {
const href = mentionEl.getAttribute('href');
if (href && href.startsWith('#')) {
const notePath = href.substring(1);
mentionData = {
notePath: notePath,
noteTitle: mentionEl.textContent || 'Unknown Note'
};
}
}

// Normalize whichever variant matched into the mention record. The
// noteId is assumed to be the last segment of the note path — TODO
// confirm this holds for all notePath formats the editor produces.
if (mentionData && (mentionData.notePath || mentionData.link)) {
const notePath = mentionData.notePath || mentionData.link?.substring(1); // Remove # from link
const noteId = notePath ? notePath.split('/').pop() : null;
const title = mentionData.noteTitle || mentionData.name || mentionEl.textContent || 'Unknown Note';

if (noteId) {
mentions.push({
noteId: noteId,
title: title,
notePath: notePath
});
console.log(`Extracted mention: noteId=${noteId}, title=${title}, notePath=${notePath}`);
}
}
} catch (e) {
// A malformed mention element must not break message sending.
console.warn('Failed to parse mention data:', e, mentionEl);
}
});

// Convert to plain text for the LLM, but preserve the structure
const content = tempDiv.textContent || tempDiv.innerText || '';

console.log(`Extracted ${mentions.length} mentions from editor content`);

return { content, mentions };
}
// Wires up expand/collapse of the thinking-details pane: both the chevron
// button and the whole header row toggle it.
private setupThinkingToggle() {
    // Chevron button toggles the details pane directly.
    this.thinkingToggle?.addEventListener('click', (e) => {
        e.stopPropagation();
        this.toggleThinkingDetails();
    });

    // The whole header row is clickable too, except for clicks that land
    // on the toggle button itself (those are already handled above).
    const header = this.thinkingBubble?.querySelector('.thinking-header');
    header?.addEventListener('click', (e) => {
        if (!(e.target as HTMLElement).closest('.thinking-toggle')) {
            this.toggleThinkingDetails();
        }
    });
}
// Flips the thinking-details pane between shown and hidden, keeping the
// chevron icon and the toggle's "expanded" class in sync.
private toggleThinkingDetails() {
    const details = this.thinkingBubble?.querySelector('.thinking-content') as HTMLElement;
    const chevron = this.thinkingToggle?.querySelector('i');
    if (!details || !chevron) {
        return;
    }
    const isExpanded = details.style.display !== 'none';
    details.style.display = isExpanded ? 'none' : 'block';
    chevron.className = isExpanded ? 'bx bx-chevron-down' : 'bx bx-chevron-up';
    this.thinkingToggle.classList.toggle('expanded', !isExpanded);
}
/**
* Show the thinking display with optional initial content
*/
// Makes the thinking bubble visible, optionally seeding it with text,
// and scrolls the chat so the bubble is in view.
private showThinkingDisplay(initialText: string = '') {
    if (!this.thinkingContainer) {
        return;
    }
    this.thinkingContainer.style.display = 'block';
    if (initialText && this.thinkingText) {
        this.updateThinkingText(initialText);
    }
    // Keep the freshly shown bubble in view.
    this.chatContainer.scrollTop = this.chatContainer.scrollHeight;
}
/**
* Update the thinking text content
*/
// Replaces the thinking text with a formatted version of `text`; if the
// details pane is currently expanded, keeps the chat scrolled to the bottom.
private updateThinkingText(text: string) {
    if (!this.thinkingText) {
        return;
    }
    // textContent (not innerHTML) — the model output is rendered as plain text.
    this.thinkingText.textContent = this.formatThinkingText(text);

    const detailsPane = this.thinkingBubble?.querySelector('.thinking-content') as HTMLElement;
    if (detailsPane && detailsPane.style.display !== 'none') {
        this.chatContainer.scrollTop = this.chatContainer.scrollHeight;
    }
}
/**
* Format thinking text for better presentation
*/
// Lightly reformats raw thinking text for display: breaks lines before
// numbered items ("1. ", "2. ", ...) and collapses runs of blank lines
// into a single blank line. Falsy input is returned unchanged.
private formatThinkingText(text: string): string {
    if (!text) {
        return text;
    }
    const withItemBreaks = text.trim().replace(/(\d+\.\s)/g, '\n$1');
    return withItemBreaks.replace(/\n\s*\n\s*\n/g, '\n\n').trim();
}
/**
* Hide the thinking display
*/
// Hides the thinking bubble and resets it to its initial state
// (details collapsed, chevron pointing down, text cleared).
private hideThinkingDisplay() {
    if (!this.thinkingContainer) {
        return;
    }
    this.thinkingContainer.style.display = 'none';

    // Collapse the details pane and reset the chevron so the next
    // thinking session starts from the collapsed state.
    const detailsPane = this.thinkingBubble?.querySelector('.thinking-content') as HTMLElement;
    const chevron = this.thinkingToggle?.querySelector('i');
    if (detailsPane && chevron) {
        detailsPane.style.display = 'none';
        chevron.className = 'bx bx-chevron-down';
        this.thinkingToggle?.classList.remove('expanded');
    }

    // Wipe any leftover thinking text.
    if (this.thinkingText) {
        this.thinkingText.textContent = '';
    }
}
/**
* Append to existing thinking content (for streaming updates)
*/
// Appends streamed text to the current thinking display. No-op when there
// is no target element or nothing to append.
private appendThinkingText(additionalText: string) {
    if (!this.thinkingText || !additionalText) {
        return;
    }
    const existing = this.thinkingText.textContent || '';
    this.updateThinkingText(existing + additionalText);
}
}

View File

@ -24,6 +24,11 @@ export interface MessageData {
role: string;
content: string;
timestamp?: Date;
mentions?: Array<{
noteId: string;
title: string;
notePath: string;
}>;
}
export interface ChatData {

View File

@ -13,6 +13,27 @@ export const TPL = `
<div class="note-context-chat-container flex-grow-1 overflow-auto p-3">
<div class="note-context-chat-messages"></div>
<!-- Thinking display area -->
<div class="llm-thinking-container" style="display: none;">
<div class="thinking-bubble">
<div class="thinking-header d-flex align-items-center">
<div class="thinking-dots">
<span></span>
<span></span>
<span></span>
</div>
<span class="thinking-label ms-2 text-muted small">AI is thinking...</span>
<button type="button" class="btn btn-sm btn-link p-0 ms-auto thinking-toggle" title="Toggle thinking details">
<i class="bx bx-chevron-down"></i>
</button>
</div>
<div class="thinking-content" style="display: none;">
<div class="thinking-text"></div>
</div>
</div>
</div>
<div class="loading-indicator" style="display: none;">
<div class="spinner-border spinner-border-sm text-primary" role="status">
<span class="visually-hidden">Loading...</span>
@ -31,11 +52,11 @@ export const TPL = `
<form class="note-context-chat-form d-flex flex-column border-top p-2">
<div class="d-flex chat-input-container mb-2">
<textarea
class="form-control note-context-chat-input"
placeholder="${t('ai_llm.enter_message')}"
rows="2"
></textarea>
<div
class="form-control note-context-chat-input flex-grow-1"
style="min-height: 60px; max-height: 200px; overflow-y: auto;"
data-placeholder="${t('ai_llm.enter_message')}"
></div>
<button type="submit" class="btn btn-primary note-context-chat-send-button ms-2 d-flex align-items-center justify-content-center">
<i class="bx bx-send"></i>
</button>

View File

@ -16,49 +16,53 @@ export async function validateEmbeddingProviders(validationWarning: HTMLElement)
return;
}
// Get provider precedence
// Get precedence list from options
const precedenceStr = options.get('aiProviderPrecedence') || 'openai,anthropic,ollama';
let precedenceList: string[] = [];
if (precedenceStr) {
if (precedenceStr.startsWith('[') && precedenceStr.endsWith(']')) {
precedenceList = JSON.parse(precedenceStr);
try {
precedenceList = JSON.parse(precedenceStr);
} catch (e) {
console.error('Error parsing precedence list:', e);
precedenceList = ['openai']; // Default if parsing fails
}
} else if (precedenceStr.includes(',')) {
precedenceList = precedenceStr.split(',').map(p => p.trim());
} else {
precedenceList = [precedenceStr];
}
}
// Get enabled providers - this is a simplification since we don't have direct DB access
// We'll determine enabled status based on the presence of keys or settings
const enabledProviders: string[] = [];
// OpenAI is enabled if API key is set
const openaiKey = options.get('openaiApiKey');
if (openaiKey) {
enabledProviders.push('openai');
// Check for configuration issues with providers in the precedence list
const configIssues: string[] = [];
// Check each provider in the precedence list for proper configuration
for (const provider of precedenceList) {
if (provider === 'openai') {
// Check OpenAI configuration
const apiKey = options.get('openaiApiKey');
if (!apiKey) {
configIssues.push(`OpenAI API key is missing`);
}
} else if (provider === 'anthropic') {
// Check Anthropic configuration
const apiKey = options.get('anthropicApiKey');
if (!apiKey) {
configIssues.push(`Anthropic API key is missing`);
}
} else if (provider === 'ollama') {
// Check Ollama configuration
const baseUrl = options.get('ollamaBaseUrl');
if (!baseUrl) {
configIssues.push(`Ollama Base URL is missing`);
}
}
// Add checks for other providers as needed
}
// Anthropic is enabled if API key is set
const anthropicKey = options.get('anthropicApiKey');
if (anthropicKey) {
enabledProviders.push('anthropic');
}
// Ollama is enabled if base URL is set
const ollamaBaseUrl = options.get('ollamaBaseUrl');
if (ollamaBaseUrl) {
enabledProviders.push('ollama');
}
// Local is always available
enabledProviders.push('local');
// Perform validation checks
const allPrecedenceEnabled = precedenceList.every((p: string) => enabledProviders.includes(p));
// Get embedding queue status
// Fetch embedding stats to check if there are any notes being processed
const embeddingStats = await getEmbeddingStats() as {
success: boolean,
stats: {
@ -73,17 +77,18 @@ export async function validateEmbeddingProviders(validationWarning: HTMLElement)
const queuedNotes = embeddingStats?.stats?.queuedNotesCount || 0;
const hasEmbeddingsInQueue = queuedNotes > 0;
// Show warning if there are issues
if (!allPrecedenceEnabled || hasEmbeddingsInQueue) {
// Show warning if there are configuration issues or embeddings in queue
if (configIssues.length > 0 || hasEmbeddingsInQueue) {
let message = '<i class="bx bx-error-circle me-2"></i><strong>AI Provider Configuration Issues</strong>';
message += '<ul class="mb-1 ps-4">';
if (!allPrecedenceEnabled) {
const disabledProviders = precedenceList.filter((p: string) => !enabledProviders.includes(p));
message += `<li>The following providers in your precedence list are not enabled: ${disabledProviders.join(', ')}.</li>`;
// Show configuration issues
for (const issue of configIssues) {
message += `<li>${issue}</li>`;
}
// Show warning about embeddings queue if applicable
if (hasEmbeddingsInQueue) {
message += `<li>Currently processing embeddings for ${queuedNotes} notes. Some AI features may produce incomplete results until processing completes.</li>`;
}

View File

@ -32,4 +32,18 @@ When responding to queries:
5. For general questions about the user's notes, provide a summary of all relevant notes found, including brief summaries of individual notes
6. For specific questions, provide detailed information from the user's notes that directly addresses the question
7. Always prioritize information from the user's notes over your own knowledge, as the user's notes are likely more up-to-date and personally relevant
CRITICAL INSTRUCTIONS FOR TOOL USAGE:
1. YOU MUST TRY MULTIPLE TOOLS AND SEARCH VARIATIONS before concluding information isn't available
2. ALWAYS PERFORM AT LEAST 3 DIFFERENT SEARCHES with different parameters before giving up on finding information
3. If a search returns no results, IMMEDIATELY TRY ANOTHER SEARCH with different parameters:
- Use broader terms: If "Kubernetes deployment" fails, try just "Kubernetes" or "container orchestration"
- Try synonyms: If "meeting notes" fails, try "conference", "discussion", or "conversation"
- Remove specific qualifiers: If "quarterly financial report 2024" fails, try just "financial report"
- Try semantic variations: If keyword_search fails, use vector_search which finds conceptually related content
4. CHAIN TOOLS TOGETHER: Use the results of one tool to inform parameters for the next tool
5. NEVER respond with "there are no notes about X" until you've tried at least 3 different search variations
6. DO NOT ask the user what to do next when searches fail - AUTOMATICALLY try different approaches
7. ALWAYS EXPLAIN what you're doing: "I didn't find results for X, so I'm now searching for Y instead"
8. If all reasonable search variations fail (minimum 3 attempts), THEN you may inform the user that the information might not be in their notes
```

View File

@ -808,7 +808,7 @@ async function streamMessage(req: Request, res: Response) {
log.info("=== Starting streamMessage ===");
try {
const chatNoteId = req.params.chatNoteId;
const { content, useAdvancedContext, showThinking } = req.body;
const { content, useAdvancedContext, showThinking, mentions } = req.body;
if (!content || typeof content !== 'string' || content.trim().length === 0) {
throw new Error('Content cannot be empty');
@ -823,17 +823,51 @@ async function streamMessage(req: Request, res: Response) {
// Update last active timestamp
session.lastActive = new Date();
// Add user message to the session
// Process mentions if provided
let enhancedContent = content;
if (mentions && Array.isArray(mentions) && mentions.length > 0) {
log.info(`Processing ${mentions.length} note mentions`);
// Import note service to get note content
const becca = (await import('../../becca/becca.js')).default;
const mentionContexts: string[] = [];
for (const mention of mentions) {
try {
const note = becca.getNote(mention.noteId);
if (note && !note.isDeleted) {
const noteContent = note.getContent();
if (noteContent && typeof noteContent === 'string' && noteContent.trim()) {
mentionContexts.push(`\n\n--- Content from "${mention.title}" (${mention.noteId}) ---\n${noteContent}\n--- End of "${mention.title}" ---`);
log.info(`Added content from note "${mention.title}" (${mention.noteId})`);
}
} else {
log.info(`Referenced note not found or deleted: ${mention.noteId}`);
}
} catch (error) {
log.error(`Error retrieving content for note ${mention.noteId}: ${error}`);
}
}
// Enhance the content with note references
if (mentionContexts.length > 0) {
enhancedContent = `${content}\n\n=== Referenced Notes ===\n${mentionContexts.join('\n')}`;
log.info(`Enhanced content with ${mentionContexts.length} note references`);
}
}
// Add user message to the session (with enhanced content for processing)
session.messages.push({
role: 'user',
content,
content: enhancedContent,
timestamp: new Date()
});
// Create request parameters for the pipeline
const requestParams = {
chatNoteId: chatNoteId,
content,
content: enhancedContent,
useAdvancedContext: useAdvancedContext === true,
showThinking: showThinking === true,
stream: true // Always stream for this endpoint
@ -851,9 +885,9 @@ async function streamMessage(req: Request, res: Response) {
params: {
chatNoteId: chatNoteId
},
// Make sure the original content is available to the handler
// Make sure the enhanced content is available to the handler
body: {
content,
content: enhancedContent,
useAdvancedContext: useAdvancedContext === true,
showThinking: showThinking === true
}

View File

@ -152,45 +152,66 @@ export class AIServiceManager implements IAIServiceManager {
return null;
}
// Parse provider precedence list (similar to updateProviderOrder)
let precedenceList: string[] = [];
// Get precedence list from options
let precedenceList: string[] = ['openai']; // Default to openai if not set
const precedenceOption = await options.getOption('aiProviderPrecedence');
if (precedenceOption) {
if (precedenceOption.startsWith('[') && precedenceOption.endsWith(']')) {
precedenceList = JSON.parse(precedenceOption);
} else if (typeof precedenceOption === 'string') {
if (precedenceOption.includes(',')) {
precedenceList = precedenceOption.split(',').map(p => p.trim());
} else {
precedenceList = [precedenceOption];
try {
if (precedenceOption.startsWith('[') && precedenceOption.endsWith(']')) {
precedenceList = JSON.parse(precedenceOption);
} else if (typeof precedenceOption === 'string') {
if (precedenceOption.includes(',')) {
precedenceList = precedenceOption.split(',').map(p => p.trim());
} else {
precedenceList = [precedenceOption];
}
}
} catch (e) {
log.error(`Error parsing precedence list: ${e}`);
}
}
// Get enabled providers
const enabledProviders = await getEnabledEmbeddingProviders();
const enabledProviderNames = enabledProviders.map(p => p.name);
// Check if all providers in precedence list are enabled
const allPrecedenceEnabled = precedenceList.every(p =>
enabledProviderNames.includes(p) || p === 'local');
// Return warning message if there are issues
if (!allPrecedenceEnabled) {
let message = 'There are issues with your AI provider configuration:';
if (!allPrecedenceEnabled) {
const disabledProviders = precedenceList.filter(p =>
!enabledProviderNames.includes(p) && p !== 'local');
message += `\n• The following providers in your precedence list are not enabled: ${disabledProviders.join(', ')}.`;
// Check for configuration issues with providers in the precedence list
const configIssues: string[] = [];
// Check each provider in the precedence list for proper configuration
for (const provider of precedenceList) {
if (provider === 'openai') {
// Check OpenAI configuration
const apiKey = await options.getOption('openaiApiKey');
if (!apiKey) {
configIssues.push(`OpenAI API key is missing`);
}
} else if (provider === 'anthropic') {
// Check Anthropic configuration
const apiKey = await options.getOption('anthropicApiKey');
if (!apiKey) {
configIssues.push(`Anthropic API key is missing`);
}
} else if (provider === 'ollama') {
// Check Ollama configuration
const baseUrl = await options.getOption('ollamaBaseUrl');
if (!baseUrl) {
configIssues.push(`Ollama Base URL is missing`);
}
}
// Add checks for other providers as needed
}
// Return warning message if there are configuration issues
if (configIssues.length > 0) {
let message = 'There are issues with your AI provider configuration:';
for (const issue of configIssues) {
message += `\n• ${issue}`;
}
message += '\n\nPlease check your AI settings.';
// Log warning to console
log.error('AI Provider Configuration Warning: ' + message);
return message;
}

View File

@ -184,6 +184,22 @@ When responding:
INSTRUCTIONS_WRAPPER: (instructions: string) =>
`<instructions>\n${instructions}\n</instructions>`,
// Tool instructions for Anthropic Claude
TOOL_INSTRUCTIONS: `<instructions>
When using tools to search for information, follow these requirements:
1. ALWAYS TRY MULTIPLE SEARCH APPROACHES before concluding information isn't available
2. YOU MUST PERFORM AT LEAST 3 DIFFERENT SEARCHES with varied parameters before giving up
3. If a search returns no results:
- Try broader terms (e.g., "Kubernetes" instead of "Kubernetes deployment")
- Use synonyms (e.g., "meeting" instead of "conference")
- Remove specific qualifiers (e.g., "report" instead of "Q3 financial report")
- Try different search tools (vector_search for conceptual matches, keyword_search for exact matches)
4. NEVER tell the user "there are no notes about X" until you've tried multiple search variations
5. EXPLAIN your search strategy when adjusting parameters (e.g., "I'll try a broader search for...")
6. When searches fail, AUTOMATICALLY try different approaches rather than asking the user what to do
</instructions>`,
ACKNOWLEDGMENT: "I understand. I'll follow those instructions.",
CONTEXT_ACKNOWLEDGMENT: "I'll help you with your notes based on the context provided.",
@ -203,7 +219,21 @@ ${context}
Focus on relevant information from these notes when answering.
Be concise and informative in your responses.
</system_prompt>`
</system_prompt>`,
// Tool instructions for OpenAI models
TOOL_INSTRUCTIONS: `When using tools to search for information, you must follow these requirements:
1. ALWAYS TRY MULTIPLE SEARCH APPROACHES before concluding information isn't available
2. YOU MUST PERFORM AT LEAST 3 DIFFERENT SEARCHES with varied parameters before giving up
3. If a search returns no results:
- Try broader terms (e.g., "Kubernetes" instead of "Kubernetes deployment")
- Use synonyms (e.g., "meeting" instead of "conference")
- Remove specific qualifiers (e.g., "report" instead of "Q3 financial report")
- Try different search tools (vector_search for conceptual matches, keyword_search for exact matches)
4. NEVER tell the user "there are no notes about X" until you've tried multiple search variations
5. EXPLAIN your search strategy when adjusting parameters (e.g., "I'll try a broader search for...")
6. When searches fail, AUTOMATICALLY try different approaches rather than asking the user what to do`
},
OLLAMA: {
@ -213,7 +243,23 @@ Be concise and informative in your responses.
${context}
Based on this information, please answer: <query>${query}</query>`
Based on this information, please answer: <query>${query}</query>`,
// Tool instructions for Ollama
TOOL_INSTRUCTIONS: `
CRITICAL INSTRUCTIONS FOR TOOL USAGE:
1. YOU MUST TRY MULTIPLE TOOLS AND SEARCH VARIATIONS before concluding information isn't available
2. ALWAYS PERFORM AT LEAST 3 DIFFERENT SEARCHES with different parameters before giving up on finding information
3. If a search returns no results, IMMEDIATELY TRY ANOTHER SEARCH with different parameters:
- Use broader terms: If "Kubernetes deployment" fails, try just "Kubernetes" or "container orchestration"
- Try synonyms: If "meeting notes" fails, try "conference", "discussion", or "conversation"
- Remove specific qualifiers: If "quarterly financial report 2024" fails, try just "financial report"
- Try semantic variations: If keyword_search fails, use vector_search which finds conceptually related content
4. CHAIN TOOLS TOGETHER: Use the results of one tool to inform parameters for the next tool
5. NEVER respond with "there are no notes about X" until you've tried at least 3 different search variations
6. DO NOT ask the user what to do next when searches fail - AUTOMATICALLY try different approaches
7. ALWAYS EXPLAIN what you're doing: "I didn't find results for X, so I'm now searching for Y instead"
8. If all reasonable search variations fail (minimum 3 attempts), THEN you may inform the user that the information might not be in their notes`
},
// Common prompts across providers

View File

@ -211,5 +211,10 @@ export const LLM_CONSTANTS = {
CONTENT: {
MAX_NOTE_CONTENT_LENGTH: 1500,
MAX_TOTAL_CONTENT_LENGTH: 10000
},
// AI Feature Exclusion
AI_EXCLUSION: {
LABEL_NAME: 'aiExclude' // Label used to exclude notes from all AI/LLM features
}
};

View File

@ -18,6 +18,7 @@ import cacheManager from '../modules/cache_manager.js';
import type { NoteSearchResult } from '../../interfaces/context_interfaces.js';
import type { LLMServiceInterface } from '../../interfaces/agent_tool_interfaces.js';
import { SEARCH_CONSTANTS } from '../../constants/search_constants.js';
import { isNoteExcludedFromAI } from '../../utils/ai_exclusion_utils.js';
export interface VectorSearchOptions {
maxResults?: number;
@ -118,6 +119,11 @@ export class VectorSearchService {
return null;
}
// Check if this note is excluded from AI features
if (isNoteExcludedFromAI(note)) {
return null; // Skip this note if it has the AI exclusion label
}
// Get note content - full or summarized based on option
let content: string | null = null;
@ -289,6 +295,12 @@ export class VectorSearchService {
for (const noteId of noteIds) {
try {
// Check if this note is excluded from AI features
const note = becca.getNote(noteId);
if (!note || isNoteExcludedFromAI(note)) {
continue; // Skip this note if it doesn't exist or has the AI exclusion label
}
// Get note embedding
const embeddingResult = await vectorStore.getEmbeddingForNote(
noteId,

View File

@ -9,6 +9,7 @@ import { deleteNoteEmbeddings } from "./storage.js";
import type { QueueItem } from "./types.js";
import { getChunkingOperations } from "./chunking/chunking_interface.js";
import indexService from '../index_service.js';
import { isNoteExcludedFromAIById } from "../utils/ai_exclusion_utils.js";
// Track which notes are currently being processed
const notesInProcess = new Set<string>();
@ -261,6 +262,17 @@ export async function processEmbeddingQueue() {
continue;
}
// Check if this note is excluded from AI features
if (isNoteExcludedFromAIById(noteId)) {
log.info(`Note ${noteId} excluded from AI features, removing from embedding queue`);
await sql.execute(
"DELETE FROM embedding_queue WHERE noteId = ?",
[noteId]
);
await deleteNoteEmbeddings(noteId); // Also remove any existing embeddings
continue;
}
if (noteData.operation === 'DELETE') {
await deleteNoteEmbeddings(noteId);
await sql.execute(

View File

@ -8,6 +8,9 @@ import entityChangesService from "../../../services/entity_changes.js";
import type { EntityChange } from "../../../services/entity_changes_interface.js";
import { EMBEDDING_CONSTANTS } from "../constants/embedding_constants.js";
import { SEARCH_CONSTANTS } from '../constants/search_constants.js';
import type { NoteEmbeddingContext } from "./embeddings_interface.js";
import becca from "../../../becca/becca.js";
import { isNoteExcludedFromAIById } from "../utils/ai_exclusion_utils.js";
interface Similarity {
noteId: string;
@ -452,6 +455,11 @@ async function processEmbeddings(queryEmbedding: Float32Array, embeddings: any[]
: '';
for (const e of embeddings) {
// Check if this note is excluded from AI features
if (isNoteExcludedFromAIById(e.noteId)) {
continue; // Skip this note if it has the AI exclusion label
}
const embVector = bufferToEmbedding(e.embedding, e.dimension);
// Detect content type from mime type if available

View File

@ -1,7 +1,7 @@
import type { Message } from '../ai_interface.js';
import { BaseMessageFormatter } from './base_formatter.js';
import sanitizeHtml from 'sanitize-html';
import { PROVIDER_PROMPTS, FORMATTING_PROMPTS } from '../constants/llm_prompt_constants.js';
import { PROVIDER_PROMPTS } from '../constants/llm_prompt_constants.js';
import { LLM_CONSTANTS } from '../constants/provider_constants.js';
import {
HTML_ALLOWED_TAGS,
@ -29,7 +29,7 @@ export class OllamaMessageFormatter extends BaseMessageFormatter {
* @param context Optional context to include
* @param preserveSystemPrompt When true, preserves existing system messages rather than replacing them
*/
formatMessages(messages: Message[], systemPrompt?: string, context?: string, preserveSystemPrompt?: boolean): Message[] {
formatMessages(messages: Message[], systemPrompt?: string, context?: string, preserveSystemPrompt?: boolean, useTools?: boolean): Message[] {
const formattedMessages: Message[] = [];
// Log the input messages with all their properties
@ -37,7 +37,7 @@ export class OllamaMessageFormatter extends BaseMessageFormatter {
messages.forEach((msg, index) => {
const msgKeys = Object.keys(msg);
log.info(`Message ${index} - role: ${msg.role}, keys: ${msgKeys.join(', ')}, content length: ${msg.content.length}`);
// Log special properties if present
if (msg.tool_calls) {
log.info(`Message ${index} has ${msg.tool_calls.length} tool_calls`);
@ -61,7 +61,19 @@ export class OllamaMessageFormatter extends BaseMessageFormatter {
log.info(`Preserving existing system message: ${systemMessages[0].content.substring(0, 50)}...`);
} else {
// Use provided systemPrompt or default
const basePrompt = systemPrompt || PROVIDER_PROMPTS.COMMON.DEFAULT_ASSISTANT_INTRO;
let basePrompt = systemPrompt || PROVIDER_PROMPTS.COMMON.DEFAULT_ASSISTANT_INTRO;
// Check if any message has tool_calls or if useTools flag is set, indicating this is a tool-using conversation
const hasPreviousToolCalls = messages.some(msg => msg.tool_calls && msg.tool_calls.length > 0);
const hasToolResults = messages.some(msg => msg.role === 'tool');
const isToolUsingConversation = useTools || hasPreviousToolCalls || hasToolResults;
// Add tool instructions for Ollama when tools are being used
if (isToolUsingConversation && PROVIDER_PROMPTS.OLLAMA.TOOL_INSTRUCTIONS) {
log.info('Adding tool instructions to system prompt for Ollama');
basePrompt = `${basePrompt}\n\n${PROVIDER_PROMPTS.OLLAMA.TOOL_INSTRUCTIONS}`;
}
formattedMessages.push({
role: 'system',
content: basePrompt
@ -96,7 +108,7 @@ export class OllamaMessageFormatter extends BaseMessageFormatter {
...msg, // Copy all properties
content: formattedContext // Override content with injected context
};
formattedMessages.push(newMessage);
log.info(`Created user message with context, final keys: ${Object.keys(newMessage).join(', ')}`);
@ -104,7 +116,7 @@ export class OllamaMessageFormatter extends BaseMessageFormatter {
} else {
// For other messages, preserve all properties including any tool-related ones
log.info(`Preserving message with role ${msg.role}, keys: ${Object.keys(msg).join(', ')}`);
formattedMessages.push({
...msg // Copy all properties
});
@ -126,7 +138,7 @@ export class OllamaMessageFormatter extends BaseMessageFormatter {
formattedMessages.forEach((msg, index) => {
const msgKeys = Object.keys(msg);
log.info(`Formatted message ${index} - role: ${msg.role}, keys: ${msgKeys.join(', ')}, content length: ${msg.content.length}`);
// Log special properties if present
if (msg.tool_calls) {
log.info(`Formatted message ${index} has ${msg.tool_calls.length} tool_calls`);
@ -151,13 +163,11 @@ export class OllamaMessageFormatter extends BaseMessageFormatter {
if (!content) return '';
try {
// Store our XML tags so we can restore them after cleaning
const noteTagsRegex = /<\/?note>/g;
// Define regexes for identifying and preserving tagged content
const notesTagsRegex = /<\/?notes>/g;
const queryTagsRegex = /<\/?query>[^<]*<\/query>/g;
// const queryTagsRegex = /<\/?query>/g; // Commenting out unused variable
// Capture tags to restore later
const noteTags = content.match(noteTagsRegex) || [];
const noteTagPositions: number[] = [];
let match;
const regex = /<\/?note>/g;
@ -166,17 +176,15 @@ export class OllamaMessageFormatter extends BaseMessageFormatter {
}
// Remember the notes tags
const notesTagsMatch = content.match(notesTagsRegex) || [];
const notesTagPositions: number[] = [];
while ((match = notesTagsRegex.exec(content)) !== null) {
notesTagPositions.push(match.index);
}
// Remember the query tags
const queryTagsMatch = content.match(queryTagsRegex) || [];
// Remember the query tag
// Temporarily replace XML tags with markers that won't be affected by sanitization
let modified = content
const modified = content
.replace(/<note>/g, '[NOTE_START]')
.replace(/<\/note>/g, '[NOTE_END]')
.replace(/<notes>/g, '[NOTES_START]')
@ -184,7 +192,7 @@ export class OllamaMessageFormatter extends BaseMessageFormatter {
.replace(/<query>(.*?)<\/query>/g, '[QUERY]$1[/QUERY]');
// First use the parent class to do standard cleaning
let sanitized = super.cleanContextContent(modified);
const sanitized = super.cleanContextContent(modified);
// Then apply Ollama-specific aggressive cleaning
// Remove any remaining HTML using sanitizeHtml while keeping our markers

View File

@ -1,7 +1,7 @@
import sanitizeHtml from 'sanitize-html';
import type { Message } from '../ai_interface.js';
import { BaseMessageFormatter } from './base_formatter.js';
import { PROVIDER_PROMPTS, FORMATTING_PROMPTS } from '../constants/llm_prompt_constants.js';
import { PROVIDER_PROMPTS } from '../constants/llm_prompt_constants.js';
import { LLM_CONSTANTS } from '../constants/provider_constants.js';
import {
HTML_ALLOWED_TAGS,
@ -10,6 +10,7 @@ import {
HTML_ENTITY_REPLACEMENTS,
FORMATTER_LOGS
} from '../constants/formatter_constants.js';
import log from '../../log.js';
/**
* OpenAI-specific message formatter
@ -24,8 +25,13 @@ export class OpenAIMessageFormatter extends BaseMessageFormatter {
/**
* Format messages for the OpenAI API
* @param messages The messages to format
* @param systemPrompt Optional system prompt to use
* @param context Optional context to include
* @param preserveSystemPrompt When true, preserves existing system messages
* @param useTools Flag indicating if tools will be used in this request
*/
formatMessages(messages: Message[], systemPrompt?: string, context?: string): Message[] {
formatMessages(messages: Message[], systemPrompt?: string, context?: string, preserveSystemPrompt?: boolean, useTools?: boolean): Message[] {
const formattedMessages: Message[] = [];
// Check if we already have a system message
@ -47,9 +53,22 @@ export class OpenAIMessageFormatter extends BaseMessageFormatter {
}
// If we don't have explicit context but have a system prompt
else if (!hasSystemMessage && systemPrompt) {
let baseSystemPrompt = systemPrompt || PROVIDER_PROMPTS.COMMON.DEFAULT_ASSISTANT_INTRO;
// Check if this is a tool-using conversation
const hasPreviousToolCalls = messages.some(msg => msg.tool_calls && msg.tool_calls.length > 0);
const hasToolResults = messages.some(msg => msg.role === 'tool');
const isToolUsingConversation = useTools || hasPreviousToolCalls || hasToolResults;
// Add tool instructions for OpenAI when tools are being used
if (isToolUsingConversation && PROVIDER_PROMPTS.OPENAI.TOOL_INSTRUCTIONS) {
log.info('Adding tool instructions to system prompt for OpenAI');
baseSystemPrompt = `${baseSystemPrompt}\n\n${PROVIDER_PROMPTS.OPENAI.TOOL_INSTRUCTIONS}`;
}
formattedMessages.push({
role: 'system',
content: systemPrompt
content: baseSystemPrompt
});
}
// If neither context nor system prompt is provided, use default system prompt

View File

@ -20,6 +20,7 @@ import sql from "../sql.js";
import sqlInit from "../sql_init.js";
import { CONTEXT_PROMPTS } from './constants/llm_prompt_constants.js';
import { SEARCH_CONSTANTS } from './constants/search_constants.js';
import { isNoteExcludedFromAI } from "./utils/ai_exclusion_utils.js";
export class IndexService {
private initialized = false;
@ -803,6 +804,12 @@ export class IndexService {
throw new Error(`Note ${noteId} not found`);
}
// Check if this note is excluded from AI features
if (isNoteExcludedFromAI(note)) {
log.info(`Note ${noteId} (${note.title}) excluded from AI indexing due to exclusion label`);
return true; // Return true to indicate successful handling (exclusion is intentional)
}
// Check where embedding generation should happen
const embeddingLocation = await options.getOption('embeddingGenerationLocation') || 'client';

View File

@ -6,6 +6,26 @@ import toolRegistry from '../../tools/tool_registry.js';
import chatStorageService from '../../chat_storage_service.js';
import aiServiceManager from '../../ai_service_manager.js';
// Type definitions for tools and validation results
interface ToolInterface {
execute: (args: Record<string, unknown>) => Promise<unknown>;
[key: string]: unknown;
}
interface ToolValidationResult {
toolCall: {
id?: string;
function: {
name: string;
arguments: string | Record<string, unknown>;
};
};
valid: boolean;
tool: ToolInterface | null;
error: string | null;
guidance?: string; // Guidance to help the LLM select better tools/parameters
}
/**
* Pipeline stage for handling LLM tool calling
* This stage is responsible for:
@ -50,12 +70,35 @@ export class ToolCallingStage extends BasePipelineStage<ToolExecutionInput, { re
}
// Check if the registry has any tools
const availableTools = toolRegistry.getAllTools();
const registryTools = toolRegistry.getAllTools();
// Convert ToolHandler[] to ToolInterface[] with proper type safety
const availableTools: ToolInterface[] = registryTools.map(tool => {
// Create a proper ToolInterface from the ToolHandler
const toolInterface: ToolInterface = {
// Pass through the execute method
execute: (args: Record<string, unknown>) => tool.execute(args),
// Include other properties from the tool definition
...tool.definition
};
return toolInterface;
});
log.info(`Available tools in registry: ${availableTools.length}`);
// Log available tools for debugging
if (availableTools.length > 0) {
const availableToolNames = availableTools.map(t => t.definition.function.name).join(', ');
const availableToolNames = availableTools.map(t => {
// Safely access the name property using type narrowing
if (t && typeof t === 'object' && 'definition' in t &&
t.definition && typeof t.definition === 'object' &&
'function' in t.definition && t.definition.function &&
typeof t.definition.function === 'object' &&
'name' in t.definition.function &&
typeof t.definition.function.name === 'string') {
return t.definition.function.name;
}
return 'unknown';
}).join(', ');
log.info(`Available tools: ${availableToolNames}`);
}
@ -66,9 +109,11 @@ export class ToolCallingStage extends BasePipelineStage<ToolExecutionInput, { re
log.info('Attempting to initialize tools as recovery step');
// Tools are already initialized in the AIServiceManager constructor
// No need to initialize them again
log.info(`After recovery initialization: ${toolRegistry.getAllTools().length} tools available`);
} catch (error: any) {
log.error(`Failed to initialize tools in recovery step: ${error.message}`);
const toolCount = toolRegistry.getAllTools().length;
log.info(`After recovery initialization: ${toolCount} tools available`);
} catch (error: unknown) {
const errorMessage = error instanceof Error ? error.message : String(error);
log.error(`Failed to initialize tools in recovery step: ${errorMessage}`);
}
}
@ -88,25 +133,29 @@ export class ToolCallingStage extends BasePipelineStage<ToolExecutionInput, { re
const executionStartTime = Date.now();
// First validate all tools before executing them
// First validate all tools before execution
log.info(`Validating ${response.tool_calls?.length || 0} tools before execution`);
const validationResults = await Promise.all((response.tool_calls || []).map(async (toolCall) => {
const validationResults: ToolValidationResult[] = await Promise.all((response.tool_calls || []).map(async (toolCall) => {
try {
// Get the tool from registry
const tool = toolRegistry.getTool(toolCall.function.name);
if (!tool) {
log.error(`Tool not found in registry: ${toolCall.function.name}`);
// Generate guidance for the LLM when a tool is not found
const guidance = this.generateToolGuidance(toolCall.function.name, `Tool not found: ${toolCall.function.name}`);
return {
toolCall,
valid: false,
tool: null,
error: `Tool not found: ${toolCall.function.name}`
error: `Tool not found: ${toolCall.function.name}`,
guidance // Add guidance for the LLM
};
}
// Validate the tool before execution
const isToolValid = await this.validateToolBeforeExecution(tool, toolCall.function.name);
// Use unknown as an intermediate step for type conversion
const isToolValid = await this.validateToolBeforeExecution(tool as unknown as ToolInterface, toolCall.function.name);
if (!isToolValid) {
throw new Error(`Tool '${toolCall.function.name}' failed validation before execution`);
}
@ -114,15 +163,16 @@ export class ToolCallingStage extends BasePipelineStage<ToolExecutionInput, { re
return {
toolCall,
valid: true,
tool,
tool: tool as unknown as ToolInterface,
error: null
};
} catch (error: any) {
} catch (error: unknown) {
const errorMessage = error instanceof Error ? error.message : String(error);
return {
toolCall,
valid: false,
tool: null,
error: error.message || String(error)
error: errorMessage
};
}
}));
@ -141,15 +191,21 @@ export class ToolCallingStage extends BasePipelineStage<ToolExecutionInput, { re
: JSON.stringify(toolCall.function.arguments);
log.info(`Tool parameters: ${argsStr}`);
// If validation failed, throw the error
// If validation failed, generate guidance and throw the error
if (!valid || !tool) {
throw new Error(error || `Unknown validation error for tool '${toolCall.function.name}'`);
// If we already have guidance from validation, use it, otherwise generate it
const toolGuidance = validation.guidance ||
this.generateToolGuidance(toolCall.function.name,
error || `Unknown validation error for tool '${toolCall.function.name}'`);
// Include the guidance in the error message
throw new Error(`${error || `Unknown validation error for tool '${toolCall.function.name}'`}\n${toolGuidance}`);
}
log.info(`Tool validated successfully: ${toolCall.function.name}`);
// Parse arguments (handle both string and object formats)
let args;
let args: Record<string, unknown>;
// At this stage, arguments should already be processed by the provider-specific service
// But we still need to handle different formats just in case
if (typeof toolCall.function.arguments === 'string') {
@ -157,7 +213,7 @@ export class ToolCallingStage extends BasePipelineStage<ToolExecutionInput, { re
try {
// Try to parse as JSON first
args = JSON.parse(toolCall.function.arguments);
args = JSON.parse(toolCall.function.arguments) as Record<string, unknown>;
log.info(`Parsed JSON arguments: ${Object.keys(args).join(', ')}`);
} catch (e: unknown) {
// If it's not valid JSON, try to check if it's a stringified object with quotes
@ -168,25 +224,26 @@ export class ToolCallingStage extends BasePipelineStage<ToolExecutionInput, { re
// Try to clean it up
try {
const cleaned = toolCall.function.arguments
.replace(/^['"]|['"]$/g, '') // Remove surrounding quotes
.replace(/^['"]/g, '') // Remove surrounding quotes
.replace(/['"]$/g, '') // Remove surrounding quotes
.replace(/\\"/g, '"') // Replace escaped quotes
.replace(/([{,])\s*'([^']+)'\s*:/g, '$1"$2":') // Replace single quotes around property names
.replace(/([{,])\s*(\w+)\s*:/g, '$1"$2":'); // Add quotes around unquoted property names
log.info(`Cleaned argument string: ${cleaned}`);
args = JSON.parse(cleaned);
args = JSON.parse(cleaned) as Record<string, unknown>;
log.info(`Successfully parsed cleaned arguments: ${Object.keys(args).join(', ')}`);
} catch (cleanError: unknown) {
// If all parsing fails, treat it as a text argument
const cleanErrorMessage = cleanError instanceof Error ? cleanError.message : String(cleanError);
log.info(`Failed to parse cleaned arguments: ${cleanErrorMessage}`);
args = { text: toolCall.function.arguments };
log.info(`Using text argument: ${args.text.substring(0, 50)}...`);
log.info(`Using text argument: ${(args.text as string).substring(0, 50)}...`);
}
}
} else {
// Arguments are already an object
args = toolCall.function.arguments;
args = toolCall.function.arguments as Record<string, unknown>;
log.info(`Using object arguments with keys: ${Object.keys(args).join(', ')}`);
}
@ -263,9 +320,16 @@ export class ToolCallingStage extends BasePipelineStage<ToolExecutionInput, { re
callbackResult.catch((e: Error) => log.error(`Error sending tool execution complete event: ${e.message}`));
}
}
} catch (execError: any) {
} catch (execError: unknown) {
const executionTime = Date.now() - executionStart;
log.error(`================ TOOL EXECUTION FAILED in ${executionTime}ms: ${execError.message} ================`);
const errorMessage = execError instanceof Error ? execError.message : String(execError);
log.error(`================ TOOL EXECUTION FAILED in ${executionTime}ms: ${errorMessage} ================`);
// Generate guidance for the failed tool execution
const toolGuidance = this.generateToolGuidance(toolCall.function.name, errorMessage);
// Add the guidance to the error message for the LLM
const enhancedErrorMessage = `${errorMessage}\n${toolGuidance}`;
// Record this failed tool execution if there's a sessionId available
if (input.options?.sessionId) {
@ -276,7 +340,7 @@ export class ToolCallingStage extends BasePipelineStage<ToolExecutionInput, { re
toolCall.id || `tool-${Date.now()}-${Math.random().toString(36).substring(2, 9)}`,
args,
"", // No result for failed execution
execError.message || String(execError)
enhancedErrorMessage // Use enhanced error message with guidance
);
} catch (storageError) {
log.error(`Failed to record tool execution error in chat storage: ${storageError}`);
@ -291,7 +355,7 @@ export class ToolCallingStage extends BasePipelineStage<ToolExecutionInput, { re
name: toolCall.function.name,
arguments: {} as Record<string, unknown>
},
error: execError.message || String(execError),
error: enhancedErrorMessage, // Include guidance in the error message
type: 'error' as const
};
@ -306,6 +370,10 @@ export class ToolCallingStage extends BasePipelineStage<ToolExecutionInput, { re
}
}
// Modify the error to include our guidance
if (execError instanceof Error) {
execError.message = enhancedErrorMessage;
}
throw execError;
}
@ -322,19 +390,24 @@ export class ToolCallingStage extends BasePipelineStage<ToolExecutionInput, { re
name: toolCall.function.name,
result
};
} catch (error: any) {
log.error(`Error executing tool ${toolCall.function.name}: ${error.message || String(error)}`);
} catch (error: unknown) {
const errorMessage = error instanceof Error ? error.message : String(error);
log.error(`Error executing tool ${toolCall.function.name}: ${errorMessage}`);
// Emit tool error event if not already handled in the try/catch above
// and if streaming is enabled
if (streamCallback && error.name !== "ExecutionError") {
// Need to check if error is an object with a name property of type string
const isExecutionError = typeof error === 'object' && error !== null &&
'name' in error && (error as { name: unknown }).name === "ExecutionError";
if (streamCallback && !isExecutionError) {
const toolExecutionData = {
action: 'error',
tool: {
name: toolCall.function.name,
arguments: {} as Record<string, unknown>
},
error: error.message || String(error),
error: errorMessage,
type: 'error' as const
};
@ -353,7 +426,7 @@ export class ToolCallingStage extends BasePipelineStage<ToolExecutionInput, { re
return {
toolCallId: toolCall.id,
name: toolCall.function.name,
result: `Error: ${error.message || String(error)}`
result: `Error: ${errorMessage}`
};
}
}));
@ -364,6 +437,7 @@ export class ToolCallingStage extends BasePipelineStage<ToolExecutionInput, { re
// Add each tool result to the messages array
const toolResultMessages: Message[] = [];
let hasEmptyResults = false;
for (const result of toolResults) {
const { toolCallId, name, result: toolResult } = result;
@ -373,10 +447,23 @@ export class ToolCallingStage extends BasePipelineStage<ToolExecutionInput, { re
? toolResult
: JSON.stringify(toolResult, null, 2);
// Check if result is empty or unhelpful
const isEmptyResult = this.isEmptyToolResult(toolResult, name);
if (isEmptyResult && !resultContent.startsWith('Error:')) {
hasEmptyResults = true;
log.info(`Empty result detected for tool ${name}. Will add suggestion to try different parameters.`);
}
// Add enhancement for empty results
let enhancedContent = resultContent;
if (isEmptyResult && !resultContent.startsWith('Error:')) {
enhancedContent = `${resultContent}\n\nNOTE: This tool returned no useful results with the provided parameters. Consider trying again with different parameters such as broader search terms, different filters, or alternative approaches.`;
}
// Add a new message for the tool result
const toolMessage: Message = {
role: 'tool',
content: resultContent,
content: enhancedContent,
name: name,
tool_call_id: toolCallId
};
@ -385,7 +472,7 @@ export class ToolCallingStage extends BasePipelineStage<ToolExecutionInput, { re
log.info(`-------- Tool Result for ${name} (ID: ${toolCallId}) --------`);
log.info(`Result type: ${typeof toolResult}`);
log.info(`Result preview: ${resultContent.substring(0, 150)}${resultContent.length > 150 ? '...' : ''}`);
log.info(`Tool result status: ${resultContent.startsWith('Error:') ? 'ERROR' : 'SUCCESS'}`);
log.info(`Tool result status: ${resultContent.startsWith('Error:') ? 'ERROR' : isEmptyResult ? 'EMPTY' : 'SUCCESS'}`);
updatedMessages.push(toolMessage);
toolResultMessages.push(toolMessage);
@ -398,7 +485,36 @@ export class ToolCallingStage extends BasePipelineStage<ToolExecutionInput, { re
const needsFollowUp = hasToolResults;
log.info(`Follow-up needed: ${needsFollowUp}`);
log.info(`Reasoning: ${hasToolResults ? 'Has tool results to process' : 'No tool results'} ${hasErrors ? ', contains errors' : ''}`);
log.info(`Reasoning: ${hasToolResults ? 'Has tool results to process' : 'No tool results'} ${hasErrors ? ', contains errors' : ''} ${hasEmptyResults ? ', contains empty results' : ''}`);
// Add a system message with hints for empty results
if (hasEmptyResults && needsFollowUp) {
log.info('Adding system message requiring the LLM to run additional tools with different parameters');
// Build a more directive message based on which tools were empty
const emptyToolNames = toolResultMessages
.filter(msg => this.isEmptyToolResult(msg.content, msg.name || ''))
.map(msg => msg.name);
let directiveMessage = `YOU MUST NOT GIVE UP AFTER A SINGLE EMPTY SEARCH RESULT. `;
if (emptyToolNames.includes('search_notes') || emptyToolNames.includes('vector_search')) {
directiveMessage += `IMMEDIATELY RUN ANOTHER SEARCH TOOL with broader search terms, alternative keywords, or related concepts. `;
directiveMessage += `Try synonyms, more general terms, or related topics. `;
}
if (emptyToolNames.includes('keyword_search')) {
directiveMessage += `IMMEDIATELY TRY VECTOR_SEARCH INSTEAD as it might find semantic matches where keyword search failed. `;
}
directiveMessage += `DO NOT ask the user what to do next or if they want general information. CONTINUE SEARCHING with different parameters.`;
updatedMessages.push({
role: 'system',
content: directiveMessage
});
}
log.info(`Total messages to return to pipeline: ${updatedMessages.length}`);
log.info(`Last 3 messages in conversation:`);
const lastMessages = updatedMessages.slice(-3);
@ -421,7 +537,7 @@ export class ToolCallingStage extends BasePipelineStage<ToolExecutionInput, { re
* @param toolName The name of the tool requiring this dependency
* @returns The requested dependency or null if it couldn't be created
*/
private async getOrCreateDependency(dependencyType: string, toolName: string): Promise<any> {
private async getOrCreateDependency(dependencyType: string, toolName: string): Promise<unknown | null> {
const aiServiceManager = (await import('../../ai_service_manager.js')).default;
try {
@ -448,8 +564,9 @@ export class ToolCallingStage extends BasePipelineStage<ToolExecutionInput, { re
// Force initialization to ensure it runs even if previously marked as initialized
await agentTools.initialize(true);
log.info('Agent tools initialized successfully');
} catch (initError: any) {
log.error(`Failed to initialize agent tools: ${initError.message}`);
} catch (initError: unknown) {
const errorMessage = initError instanceof Error ? initError.message : String(initError);
log.error(`Failed to initialize agent tools: ${errorMessage}`);
return null;
}
} else {
@ -474,8 +591,9 @@ export class ToolCallingStage extends BasePipelineStage<ToolExecutionInput, { re
// Unknown dependency type
log.error(`Unknown dependency type: ${dependencyType}`);
return null;
} catch (error: any) {
log.error(`Error getting or creating dependency '${dependencyType}': ${error.message}`);
} catch (error: unknown) {
const errorMessage = error instanceof Error ? error.message : String(error);
log.error(`Error getting or creating dependency '${dependencyType}': ${errorMessage}`);
return null;
}
}
@ -485,7 +603,7 @@ export class ToolCallingStage extends BasePipelineStage<ToolExecutionInput, { re
* @param tool The tool to validate
* @param toolName The name of the tool
*/
private async validateToolBeforeExecution(tool: any, toolName: string): Promise<boolean> {
private async validateToolBeforeExecution(tool: ToolInterface, toolName: string): Promise<boolean> {
try {
if (!tool) {
log.error(`Tool '${toolName}' not found or failed validation`);
@ -525,31 +643,164 @@ export class ToolCallingStage extends BasePipelineStage<ToolExecutionInput, { re
return false;
}
log.info('Successfully initialized vectorSearchTool');
} catch (initError: any) {
log.error(`Failed to initialize agent tools: ${initError.message}`);
} catch (initError: unknown) {
const errorMessage = initError instanceof Error ? initError.message : String(initError);
log.error(`Failed to initialize agent tools: ${errorMessage}`);
return false;
}
}
// Verify the vectorSearchTool has the required methods
if (!vectorSearchTool.searchNotes || typeof vectorSearchTool.searchNotes !== 'function') {
log.error(`Tool '${toolName}' dependency vectorSearchTool is missing searchNotes method`);
return false;
}
} catch (error: any) {
log.error(`Error validating dependencies for tool '${toolName}': ${error.message}`);
} catch (error: unknown) {
const errorMessage = error instanceof Error ? error.message : String(error);
log.error(`Error validating dependencies for tool '${toolName}': ${errorMessage}`);
return false;
}
}
// Add additional tool-specific validations here
return true;
} catch (error: any) {
log.error(`Error validating tool before execution: ${error.message}`);
} catch (error: unknown) {
const errorMessage = error instanceof Error ? error.message : String(error);
log.error(`Error validating tool before execution: ${errorMessage}`);
return false;
}
}
/**
* Generate guidance for the LLM when a tool fails or is not found
* @param toolName The name of the tool that failed
* @param errorMessage The error message from the failed tool
* @returns A guidance message for the LLM with suggestions of what to try next
*/
private generateToolGuidance(toolName: string, errorMessage: string): string {
// Get all available tool names for recommendations
const availableTools = toolRegistry.getAllTools();
const availableToolNames = availableTools
.map(t => {
if (t && typeof t === 'object' && 'definition' in t &&
t.definition && typeof t.definition === 'object' &&
'function' in t.definition && t.definition.function &&
typeof t.definition.function === 'object' &&
'name' in t.definition.function &&
typeof t.definition.function.name === 'string') {
return t.definition.function.name;
}
return '';
})
.filter(name => name !== '');
// Create specific guidance based on the error and tool
let guidance = `TOOL GUIDANCE: The tool '${toolName}' failed with error: ${errorMessage}.\n`;
// Add suggestions based on the specific tool and error
if (toolName === 'attribute_search' && errorMessage.includes('Invalid attribute type')) {
guidance += "CRITICAL REQUIREMENT: The 'attribute_search' tool requires 'attributeType' parameter that must be EXACTLY 'label' or 'relation' (lowercase, no other values).\n";
guidance += "CORRECT EXAMPLE: { \"attributeType\": \"label\", \"attributeName\": \"important\", \"attributeValue\": \"yes\" }\n";
guidance += "INCORRECT EXAMPLE: { \"attributeType\": \"Label\", ... } - Case matters! Must be lowercase.\n";
}
else if (errorMessage.includes('Tool not found')) {
// Provide guidance on available search tools if a tool wasn't found
const searchTools = availableToolNames.filter(name => name.includes('search'));
guidance += `AVAILABLE SEARCH TOOLS: ${searchTools.join(', ')}\n`;
guidance += "TRY VECTOR SEARCH: For conceptual matches, use 'vector_search' with a query parameter.\n";
guidance += "EXAMPLE: { \"query\": \"your search terms here\" }\n";
}
else if (errorMessage.includes('missing required parameter')) {
// Provide parameter guidance based on the tool name
if (toolName === 'vector_search') {
guidance += "REQUIRED PARAMETERS: The 'vector_search' tool requires a 'query' parameter.\n";
guidance += "EXAMPLE: { \"query\": \"your search terms here\" }\n";
} else if (toolName === 'keyword_search') {
guidance += "REQUIRED PARAMETERS: The 'keyword_search' tool requires a 'query' parameter.\n";
guidance += "EXAMPLE: { \"query\": \"your search terms here\" }\n";
}
}
// Add a general suggestion to try vector_search as a fallback
if (!toolName.includes('vector_search')) {
guidance += "RECOMMENDATION: If specific searches fail, try the 'vector_search' tool which performs semantic searches.\n";
}
return guidance;
}
/**
* Determines if a tool result is effectively empty or unhelpful
* @param result The result from the tool execution
* @param toolName The name of the tool that was executed
* @returns true if the result is considered empty or unhelpful
*/
private isEmptyToolResult(result: unknown, toolName: string): boolean {
// Handle string results
if (typeof result === 'string') {
const trimmed = result.trim();
if (trimmed === '' || trimmed === '[]' || trimmed === '{}') {
return true;
}
// Tool-specific empty results (for string responses)
if (toolName === 'search_notes' &&
(trimmed === 'No matching notes found.' ||
trimmed.includes('No results found') ||
trimmed.includes('No matches found') ||
trimmed.includes('No notes found'))) {
// This is a valid result (empty, but valid), don't mark as empty so LLM can see feedback
return false;
}
if (toolName === 'vector_search' &&
(trimmed.includes('No results found') ||
trimmed.includes('No matching documents'))) {
return true;
}
if (toolName === 'keyword_search' &&
(trimmed.includes('No matches found') ||
trimmed.includes('No results for'))) {
return true;
}
}
// Handle object/array results
else if (result !== null && typeof result === 'object') {
// Check if it's an empty array
if (Array.isArray(result) && result.length === 0) {
return true;
}
// Check if it's an object with no meaningful properties
// or with properties indicating empty results
if (!Array.isArray(result)) {
if (Object.keys(result).length === 0) {
return true;
}
// Tool-specific object empty checks
const resultObj = result as Record<string, unknown>;
if (toolName === 'search_notes' &&
'results' in resultObj &&
Array.isArray(resultObj.results) &&
resultObj.results.length === 0) {
return true;
}
if (toolName === 'vector_search' &&
'matches' in resultObj &&
Array.isArray(resultObj.matches) &&
resultObj.matches.length === 0) {
return true;
}
}
}
return false;
}
/**
* Preload the vector search tool to ensure it's available before tool execution
*/
@ -571,8 +822,9 @@ export class ToolCallingStage extends BasePipelineStage<ToolExecutionInput, { re
} else {
log.error(`Vector search tool not available after initialization`);
}
} catch (error: any) {
log.error(`Failed to preload vector search tool: ${error.message}`);
} catch (error: unknown) {
const errorMessage = error instanceof Error ? error.message : String(error);
log.error(`Failed to preload vector search tool: ${errorMessage}`);
}
}
}

View File

@ -6,7 +6,7 @@ import type { ToolCall, Tool } from '../tools/tool_interfaces.js';
import toolRegistry from '../tools/tool_registry.js';
import type { OllamaOptions } from './provider_options.js';
import { getOllamaOptions } from './providers.js';
import { Ollama, type ChatRequest, type ChatResponse as OllamaChatResponse } from 'ollama';
import { Ollama, type ChatRequest } from 'ollama';
import options from '../../options.js';
import {
StreamProcessor,
@ -144,14 +144,19 @@ export class OllamaService extends BaseAIService {
messagesToSend = [...messages];
log.info(`Bypassing formatter for Ollama request with ${messages.length} messages`);
} else {
// Determine if tools will be used in this request
const willUseTools = providerOptions.enableTools !== false;
// Use the formatter to prepare messages
messagesToSend = this.formatter.formatMessages(
messages,
systemPrompt,
undefined, // context
providerOptions.preserveSystemPrompt
providerOptions.preserveSystemPrompt,
willUseTools // Pass flag indicating if tools will be used
);
log.info(`Sending to Ollama with formatted messages: ${messagesToSend.length}`);
log.info(`Sending to Ollama with formatted messages: ${messagesToSend.length}${willUseTools ? ' (with tool instructions)' : ''}`);
}
// Get tools if enabled
@ -361,8 +366,15 @@ export class OllamaService extends BaseAIService {
},
async (callback) => {
let completeText = '';
let responseToolCalls: any[] = [];
let chunkCount = 0;
// Create a response object that will be updated during streaming
const response: ChatResponse = {
text: '',
model: providerOptions.model,
provider: this.getName(),
tool_calls: []
};
try {
// Perform health check
@ -395,8 +407,10 @@ export class OllamaService extends BaseAIService {
// Extract any tool calls
const toolCalls = StreamProcessor.extractToolCalls(chunk);
// Update response tool calls if any are found
if (toolCalls.length > 0) {
responseToolCalls = toolCalls;
// Update the response object's tool_calls for final return
response.tool_calls = toolCalls;
}
// Send to callback - directly pass the content without accumulating
@ -433,35 +447,38 @@ export class OllamaService extends BaseAIService {
/**
* Transform Ollama tool calls to the standard format expected by the pipeline
* @param toolCalls Array of tool calls from Ollama response or undefined
* @returns Standardized ToolCall array for consistent handling in the pipeline
*/
private transformToolCalls(toolCalls: any[] | undefined): ToolCall[] {
private transformToolCalls(toolCalls: unknown[] | undefined): ToolCall[] {
if (!toolCalls || !Array.isArray(toolCalls) || toolCalls.length === 0) {
return [];
}
return toolCalls.map((toolCall, index) => {
// Use type guards to safely access properties
const toolCallObj = toolCall as { id?: string; function?: { name?: string; arguments?: string } };
// Generate a unique ID if none is provided
const id = toolCall.id || `tool-call-${Date.now()}-${index}`;
const id = typeof toolCallObj.id === 'string' ? toolCallObj.id : `tool-call-${Date.now()}-${index}`;
// Safely extract function name and arguments with defaults
const functionName = toolCallObj.function && typeof toolCallObj.function.name === 'string'
? toolCallObj.function.name
: 'unknown_function';
const functionArgs = toolCallObj.function && typeof toolCallObj.function.arguments === 'string'
? toolCallObj.function.arguments
: '{}';
// Handle arguments based on their type
let processedArguments: Record<string, any> | string = toolCall.function?.arguments || {};
if (typeof processedArguments === 'string') {
try {
processedArguments = JSON.parse(processedArguments);
} catch (error) {
// If we can't parse as JSON, create a simple object
log.info(`Could not parse tool arguments as JSON in transformToolCalls: ${error}`);
processedArguments = { raw: processedArguments };
}
}
// Return a properly typed ToolCall object
return {
id,
type: 'function',
function: {
name: toolCall.function?.name || '',
arguments: processedArguments
name: functionName,
arguments: functionArgs
}
};
});

View File

@ -3,6 +3,8 @@ import { BaseAIService } from '../base_ai_service.js';
import type { ChatCompletionOptions, ChatResponse, Message, StreamChunk } from '../ai_interface.js';
import { getOpenAIOptions } from './providers.js';
import OpenAI from 'openai';
import { PROVIDER_PROMPTS } from '../constants/llm_prompt_constants.js';
import log from '../../log.js';
export class OpenAIService extends BaseAIService {
private openai: OpenAI | null = null;
@ -36,7 +38,17 @@ export class OpenAIService extends BaseAIService {
// Initialize the OpenAI client
const client = this.getClient(providerOptions.apiKey, providerOptions.baseUrl);
const systemPrompt = this.getSystemPrompt(providerOptions.systemPrompt || options.getOption('aiSystemPrompt'));
// Get base system prompt
let systemPrompt = this.getSystemPrompt(providerOptions.systemPrompt || options.getOption('aiSystemPrompt'));
// Check if tools are enabled for this request
const willUseTools = providerOptions.enableTools && providerOptions.tools && providerOptions.tools.length > 0;
// Add tool instructions to system prompt if tools are enabled
if (willUseTools && PROVIDER_PROMPTS.OPENAI.TOOL_INSTRUCTIONS) {
log.info('Adding tool instructions to system prompt for OpenAI');
systemPrompt = `${systemPrompt}\n\n${PROVIDER_PROMPTS.OPENAI.TOOL_INSTRUCTIONS}`;
}
// Ensure we have a system message
const systemMessageExists = messages.some(m => m.role === 'system');
@ -67,7 +79,7 @@ export class OpenAIService extends BaseAIService {
}
// Log the request parameters
console.log('OpenAI API Request:', JSON.stringify({
log.info(`OpenAI API Request: ${JSON.stringify({
endpoint: 'chat.completions.create',
model: params.model,
messages: params.messages,
@ -76,7 +88,7 @@ export class OpenAIService extends BaseAIService {
stream: params.stream,
tools: params.tools,
tool_choice: params.tool_choice
}, null, 2));
}, null, 2)}`);
// If streaming is requested
if (providerOptions.stream) {
@ -84,10 +96,10 @@ export class OpenAIService extends BaseAIService {
// Get stream from OpenAI SDK
const stream = await client.chat.completions.create(params);
console.log('OpenAI API Stream Started');
log.info('OpenAI API Stream Started');
// Create a closure to hold accumulated tool calls
let accumulatedToolCalls: any[] = [];
const accumulatedToolCalls: OpenAI.Chat.ChatCompletionMessageToolCall[] = [];
// Return a response with the stream handler
const response: ChatResponse = {
@ -104,7 +116,8 @@ export class OpenAIService extends BaseAIService {
if (Symbol.asyncIterator in stream) {
for await (const chunk of stream as AsyncIterable<OpenAI.Chat.ChatCompletionChunk>) {
// Log each chunk received from OpenAI
console.log('OpenAI API Stream Chunk:', JSON.stringify(chunk, null, 2));
// Use info level as debug is not available
log.info(`OpenAI API Stream Chunk: ${JSON.stringify(chunk, null, 2)}`);
const content = chunk.choices[0]?.delta?.content || '';
const isDone = !!chunk.choices[0]?.finish_reason;

View File

@ -19,18 +19,18 @@ export const attributeSearchToolDefinition: Tool = {
type: 'function',
function: {
name: 'attribute_search',
description: 'Search for notes with specific attributes (labels or relations). Use this when you need to find notes based on their metadata rather than content.',
description: 'Search for notes with specific attributes (labels or relations). Use this when you need to find notes based on their metadata rather than content. IMPORTANT: attributeType must be exactly "label" or "relation" (lowercase).',
parameters: {
type: 'object',
properties: {
attributeType: {
type: 'string',
description: 'Type of attribute to search for: "label" or "relation"',
description: 'MUST be exactly "label" or "relation" (lowercase, no other values are valid)',
enum: ['label', 'relation']
},
attributeName: {
type: 'string',
description: 'Name of the attribute to search for'
description: 'Name of the attribute to search for (e.g., "important", "todo", "related-to")'
},
attributeValue: {
type: 'string',
@ -63,7 +63,7 @@ export class AttributeSearchTool implements ToolHandler {
// Validate attribute type
if (attributeType !== 'label' && attributeType !== 'relation') {
return `Error: Invalid attribute type. Must be either "label" or "relation".`;
return `Error: Invalid attribute type. Must be exactly "label" or "relation" (lowercase). You provided: "${attributeType}".`;
}
// Execute the search
@ -133,7 +133,7 @@ export class AttributeSearchTool implements ToolHandler {
} else {
contentPreview = String(content).substring(0, 150) + (String(content).length > 150 ? '...' : '');
}
} catch (e) {
} catch (_) {
contentPreview = '[Content not available]';
}
@ -148,9 +148,10 @@ export class AttributeSearchTool implements ToolHandler {
};
})
};
} catch (error: any) {
log.error(`Error executing attribute_search tool: ${error.message || String(error)}`);
return `Error: ${error.message || String(error)}`;
} catch (error: unknown) {
const errorMessage = error instanceof Error ? error.message : String(error);
log.error(`Error executing attribute_search tool: ${errorMessage}`);
return `Error: ${errorMessage}`;
}
}
}

View File

@ -17,17 +17,17 @@ export const searchNotesToolDefinition: Tool = {
type: 'function',
function: {
name: 'search_notes',
description: 'Search for notes in the database using semantic search. Returns notes most semantically related to the query.',
description: 'Search for notes in the database using semantic search. Returns notes most semantically related to the query. Use specific, descriptive queries for best results.',
parameters: {
type: 'object',
properties: {
query: {
type: 'string',
description: 'The search query to find semantically related notes'
description: 'The search query to find semantically related notes. Be specific and descriptive for best results.'
},
parentNoteId: {
type: 'string',
description: 'Optional system ID of the parent note to restrict search to a specific branch (not the title). This is a unique identifier like "abc123def456".'
description: 'Optional system ID of the parent note to restrict search to a specific branch (not the title). This is a unique identifier like "abc123def456". Do not use note titles here.'
},
maxResults: {
type: 'number',
@ -142,11 +142,11 @@ export class SearchNotesTool implements ToolHandler {
const result = await llmService.generateChatCompletion(messages, {
temperature: 0.3,
maxTokens: 200,
// Use any to bypass the type checking for special parameters
...(({
// Type assertion to bypass type checking for special internal parameters
...(({
bypassFormatter: true,
bypassContextProcessing: true
} as any))
} as Record<string, boolean>))
});
if (result && result.text) {
@ -159,30 +159,33 @@ export class SearchNotesTool implements ToolHandler {
}
}
// Fall back to smart truncation if summarization fails or isn't requested
const previewLength = Math.min(formattedContent.length, 600);
let preview = formattedContent.substring(0, previewLength);
try {
// Fall back to smart truncation if summarization fails or isn't requested
const previewLength = Math.min(formattedContent.length, 600);
let preview = formattedContent.substring(0, previewLength);
// Only add ellipsis if we've truncated the content
if (previewLength < formattedContent.length) {
// Try to find a natural break point
const breakPoints = ['. ', '.\n', '\n\n', '\n', '. '];
let breakFound = false;
// Only add ellipsis if we've truncated the content
if (previewLength < formattedContent.length) {
// Try to find a natural break point
const breakPoints = ['. ', '.\n', '\n\n', '\n', '. '];
for (const breakPoint of breakPoints) {
const lastBreak = preview.lastIndexOf(breakPoint);
if (lastBreak > previewLength * 0.6) { // At least 60% of the way through
preview = preview.substring(0, lastBreak + breakPoint.length);
breakFound = true;
break;
for (const breakPoint of breakPoints) {
const lastBreak = preview.lastIndexOf(breakPoint);
if (lastBreak > previewLength * 0.6) { // At least 60% of the way through
preview = preview.substring(0, lastBreak + breakPoint.length);
break;
}
}
// Add ellipsis if truncated
preview += '...';
}
// Add ellipsis if truncated
preview += '...';
return preview;
} catch (error) {
log.error(`Error getting rich content preview: ${error}`);
return 'Error retrieving content preview';
}
return preview;
} catch (error) {
log.error(`Error getting rich content preview: ${error}`);
return 'Error retrieving content preview';
@ -226,11 +229,8 @@ export class SearchNotesTool implements ToolHandler {
// Execute the search
log.info(`Performing semantic search for: "${query}"`);
const searchStartTime = Date.now();
const results = await vectorSearchTool.searchNotes(query, {
parentNoteId,
maxResults
// Don't pass summarize - we'll handle it ourselves
});
const response = await vectorSearchTool.searchNotes(query, parentNoteId, maxResults);
const results: Array<Record<string, unknown>> = response?.matches ?? [];
const searchDuration = Date.now() - searchStartTime;
log.info(`Search completed in ${searchDuration}ms, found ${results.length} matching notes`);
@ -247,12 +247,16 @@ export class SearchNotesTool implements ToolHandler {
// Get enhanced previews for each result
const enhancedResults = await Promise.all(
results.map(async (result: any) => {
const preview = await this.getRichContentPreview(result.noteId, summarize);
const noteId = result.noteId;
const preview = await this.getRichContentPreview(noteId, summarize);
return {
noteId: result.noteId,
title: result.title,
noteId: noteId,
title: result?.title as string || '[Unknown title]',
preview: preview,
score: result?.score as number,
dateCreated: result?.dateCreated as string,
dateModified: result?.dateModified as string,
similarity: Math.round(result.similarity * 100) / 100,
parentId: result.parentId
};
@ -260,14 +264,24 @@ export class SearchNotesTool implements ToolHandler {
);
// Format the results
return {
count: enhancedResults.length,
results: enhancedResults,
message: "Note: Use the noteId (not the title) when performing operations on specific notes with other tools."
};
} catch (error: any) {
log.error(`Error executing search_notes tool: ${error.message || String(error)}`);
return `Error: ${error.message || String(error)}`;
if (results.length === 0) {
return {
count: 0,
results: [],
query: query,
message: 'No notes found matching your query. Try using more general terms or try the keyword_search_notes tool with a different query. Note: Use the noteId (not the title) when performing operations on specific notes with other tools.'
};
} else {
return {
count: enhancedResults.length,
results: enhancedResults,
message: "Note: Use the noteId (not the title) when performing operations on specific notes with other tools."
};
}
} catch (error: unknown) {
const errorMessage = error instanceof Error ? error.message : String(error);
log.error(`Error executing search_notes tool: ${errorMessage}`);
return `Error: ${errorMessage}`;
}
}
}

View File

@ -0,0 +1,94 @@
import becca from '../../../becca/becca.js';
import type BNote from '../../../becca/entities/bnote.js';
import { LLM_CONSTANTS } from '../constants/provider_constants.js';
import log from '../../log.js';
/**
* Check if a note should be excluded from all AI/LLM features
*
* @param note - The note to check (BNote object)
* @returns true if the note should be excluded from AI features
*/
export function isNoteExcludedFromAI(note: BNote): boolean {
if (!note) {
return false;
}
try {
// Check if the note has the AI exclusion label
const hasExclusionLabel = note.hasLabel(LLM_CONSTANTS.AI_EXCLUSION.LABEL_NAME);
if (hasExclusionLabel) {
log.info(`Note ${note.noteId} (${note.title}) excluded from AI features due to ${LLM_CONSTANTS.AI_EXCLUSION.LABEL_NAME} label`);
return true;
}
return false;
} catch (error) {
log.error(`Error checking AI exclusion for note ${note.noteId}: ${error}`);
return false; // Default to not excluding on error
}
}
/**
* Check if a note should be excluded from AI features by noteId
*
* @param noteId - The ID of the note to check
* @returns true if the note should be excluded from AI features
*/
export function isNoteExcludedFromAIById(noteId: string): boolean {
if (!noteId) {
return false;
}
try {
const note = becca.getNote(noteId);
if (!note) {
return false;
}
return isNoteExcludedFromAI(note);
} catch (error) {
log.error(`Error checking AI exclusion for note ID ${noteId}: ${error}`);
return false; // Default to not excluding on error
}
}
/**
* Filter out notes that are excluded from AI features
*
* @param notes - Array of notes to filter
* @returns Array of notes with AI-excluded notes removed
*/
export function filterAIExcludedNotes(notes: BNote[]): BNote[] {
return notes.filter(note => !isNoteExcludedFromAI(note));
}
/**
* Filter out note IDs that are excluded from AI features
*
* @param noteIds - Array of note IDs to filter
* @returns Array of note IDs with AI-excluded notes removed
*/
export function filterAIExcludedNoteIds(noteIds: string[]): string[] {
return noteIds.filter(noteId => !isNoteExcludedFromAIById(noteId));
}
/**
* Check if any notes in an array are excluded from AI features
*
* @param notes - Array of notes to check
* @returns true if any note should be excluded from AI features
*/
export function hasAIExcludedNotes(notes: BNote[]): boolean {
return notes.some(note => isNoteExcludedFromAI(note));
}
/**
* Get the AI exclusion label name from constants
* This can be used in UI components or other places that need to reference the label
*
* @returns The label name used for AI exclusion
*/
export function getAIExclusionLabelName(): string {
return LLM_CONSTANTS.AI_EXCLUSION.LABEL_NAME;
}

View File

@ -205,7 +205,7 @@ const defaultOptions: DefaultOption[] = [
{ name: "anthropicBaseUrl", value: "https://api.anthropic.com/v1", isSynced: true },
{ name: "ollamaEnabled", value: "false", isSynced: true },
{ name: "ollamaDefaultModel", value: "llama3", isSynced: true },
{ name: "ollamaBaseUrl", value: "", isSynced: true },
{ name: "ollamaBaseUrl", value: "http://localhost:11434", isSynced: true },
{ name: "ollamaEmbeddingModel", value: "nomic-embed-text", isSynced: true },
{ name: "embeddingAutoUpdateEnabled", value: "true", isSynced: true },