centralize prompts

This commit is contained in:
perf3ct 2025-03-28 23:07:02 +00:00
parent 72c380b6f4
commit 224cb22fe9
No known key found for this signature in database
GPG Key ID: 569C4EEC436F5232
8 changed files with 148 additions and 59 deletions

View File

@ -13,7 +13,7 @@ import contextService from "../../services/llm/context_service.js";
import sql from "../../services/sql.js"; import sql from "../../services/sql.js";
// Import the index service for knowledge base management // Import the index service for knowledge base management
import indexService from "../../services/llm/index_service.js"; import indexService from "../../services/llm/index_service.js";
import { CONTEXT_PROMPTS } from '../../services/llm/constants/llm_prompt_constants.js'; import { CONTEXT_PROMPTS, ERROR_PROMPTS, FORMATTING_PROMPTS } from '../../services/llm/constants/llm_prompt_constants.js';
// LLM service constants // LLM service constants
export const LLM_CONSTANTS = { export const LLM_CONSTANTS = {
@ -951,10 +951,9 @@ async function sendMessage(req: Request, res: Response) {
}; };
// DEBUG: Log context details before sending to LLM // DEBUG: Log context details before sending to LLM
log.info(`CONTEXT BEING SENT TO LLM: ${context.length} chars`); log.info(`${FORMATTING_PROMPTS.DIVIDERS.CONTENT_SECTION} Context begins with: "${context.substring(0, 200)}..."`);
log.info(`Context begins with: "${context.substring(0, 200)}..."`); log.info(`${FORMATTING_PROMPTS.DIVIDERS.CONTENT_SECTION} Context ends with: "...${context.substring(context.length - 200)}"`);
log.info(`Context ends with: "...${context.substring(context.length - 200)}"`); log.info(`${FORMATTING_PROMPTS.DIVIDERS.NOTE_SECTION} Number of notes included: ${sourceNotes.length}`);
log.info(`Number of notes included: ${sourceNotes.length}`);
// Format messages for the LLM using the proper context // Format messages for the LLM using the proper context
const aiMessages = await contextService.buildMessagesWithContext( const aiMessages = await contextService.buildMessagesWithContext(
@ -1228,8 +1227,10 @@ async function sendMessage(req: Request, res: Response) {
}; };
} }
} catch (error: any) { } catch (error: any) {
log.error(`Error sending message to LLM: ${error.message}`); log.error(`Error in LLM query processing: ${error}`);
throw new Error(`Failed to send message: ${error.message}`); return {
error: ERROR_PROMPTS.USER_ERRORS.GENERAL_ERROR
};
} }
} }

View File

@ -2,7 +2,7 @@ import type { Message, ChatCompletionOptions } from './ai_interface.js';
import aiServiceManager from './ai_service_manager.js'; import aiServiceManager from './ai_service_manager.js';
import chatStorageService from './chat_storage_service.js'; import chatStorageService from './chat_storage_service.js';
import log from '../log.js'; import log from '../log.js';
import { CONTEXT_PROMPTS } from './constants/llm_prompt_constants.js'; import { CONTEXT_PROMPTS, ERROR_PROMPTS } from './constants/llm_prompt_constants.js';
export interface ChatSession { export interface ChatSession {
id: string; id: string;
@ -133,10 +133,7 @@ export class ChatService {
// Add error message so user knows something went wrong // Add error message so user knows something went wrong
const errorMessage: Message = { const errorMessage: Message = {
role: 'assistant', role: 'assistant',
content: CONTEXT_PROMPTS.ERROR_MESSAGES.GENERAL_ERROR.replace( content: ERROR_PROMPTS.USER_ERRORS.GENERAL_ERROR
'{errorMessage}',
error.message || 'Please check AI settings and try again.'
)
}; };
session.messages.push(errorMessage); session.messages.push(errorMessage);
@ -311,10 +308,7 @@ export class ChatService {
// Add error message // Add error message
const errorMessage: Message = { const errorMessage: Message = {
role: 'assistant', role: 'assistant',
content: CONTEXT_PROMPTS.ERROR_MESSAGES.CONTEXT_ERROR.replace( content: ERROR_PROMPTS.USER_ERRORS.CONTEXT_ERROR
'{errorMessage}',
error.message || 'Please try again.'
)
}; };
session.messages.push(errorMessage); session.messages.push(errorMessage);

View File

@ -133,12 +133,114 @@ export const AGENT_TOOL_PROMPTS = {
// Provider-specific prompt modifiers // Provider-specific prompt modifiers
export const PROVIDER_PROMPTS = { export const PROVIDER_PROMPTS = {
ANTHROPIC: { ANTHROPIC: {
// Any Anthropic Claude-specific prompt modifications would go here // Anthropic Claude-specific prompt formatting
SYSTEM_WITH_CONTEXT: (context: string) =>
`<instructions>
${DEFAULT_SYSTEM_PROMPT}
Use the following information from the user's notes to answer their questions:
<user_notes>
${context}
</user_notes>
When responding:
- Focus on the most relevant information from the notes
- Be concise and direct in your answers
- If quoting from notes, mention which note it's from
- If the notes don't contain relevant information, say so clearly
</instructions>`,
INSTRUCTIONS_WRAPPER: (instructions: string) =>
`<instructions>\n${instructions}\n</instructions>`,
ACKNOWLEDGMENT: "I understand. I'll follow those instructions.",
CONTEXT_ACKNOWLEDGMENT: "I'll help you with your notes based on the context provided.",
CONTEXT_QUERY_ACKNOWLEDGMENT: "I'll help you with your notes based on the context provided. What would you like to know?"
}, },
OPENAI: { OPENAI: {
// Any OpenAI-specific prompt modifications would go here // OpenAI-specific prompt formatting
SYSTEM_WITH_CONTEXT: (context: string) =>
`You are an AI assistant integrated into TriliumNext Notes.
Use the following information from the user's notes to answer their questions:
${context}
Focus on relevant information from these notes when answering.
Be concise and informative in your responses.`
}, },
OLLAMA: { OLLAMA: {
// Any Ollama-specific prompt modifications would go here // Ollama-specific prompt formatting
CONTEXT_INJECTION: (context: string, query: string) =>
`Here's information from my notes to help answer the question:
${context}
Based on this information, please answer: ${query}`
},
// Common prompts across providers
COMMON: {
DEFAULT_ASSISTANT_INTRO: "You are an AI assistant integrated into TriliumNext Notes. Focus on helping users find information in their notes and answering questions based on their knowledge base. Be concise, informative, and direct when responding to queries."
}
};
// Shared formatting constants used when assembling LLM context windows:
// headers that introduce retrieved notes, closers that signal the end of
// context, section dividers, and the HTML tag whitelist for sanitization.
export const FORMATTING_PROMPTS = {
    // Opening lines that introduce the retrieved-notes context for a query
    CONTEXT_HEADERS: {
        SIMPLE: (query: string) =>
            'I\'m searching for information about: ' + query +
            '\n\nHere are the most relevant notes from my knowledge base:',
        DETAILED: (query: string) =>
            'I\'m searching for information about: "' + query +
            '"\n\nHere are the most relevant notes from my personal knowledge base:'
    },

    // Closing lines appended after the notes to steer the model back to the question
    CONTEXT_CLOSERS: {
        SIMPLE: 'End of notes. Please use this information to answer my question comprehensively.',
        DETAILED: 'End of context information. Please use only the above notes to answer my question as comprehensively as possible.'
    },

    // Visual separators and prefixes used inside the formatted context
    DIVIDERS: {
        NOTE_SECTION: '------ NOTE INFORMATION ------',
        CONTENT_SECTION: '------ CONTEXT INFORMATION ------',
        NOTE_START: '# Note: ',
        CONTENT_START: 'Content: '
    },

    // Tags preserved by sanitize-html when cleaning note content for the LLM
    HTML_ALLOWED_TAGS: [
        'b', 'i', 'em', 'strong', 'a', 'p', 'br',
        'ul', 'ol', 'li',
        'h1', 'h2', 'h3', 'h4', 'h5',
        'code', 'pre'
    ]
};
// Canned user-facing messages for the chat service: greetings shown when a
// conversation starts, and placeholder text for states where no note context
// or user query is available yet.
export const CHAT_PROMPTS = {
    // Greeting messages shown at the start of a new conversation
    INTRODUCTIONS: {
        NEW_CHAT: 'Welcome to TriliumNext AI Assistant. How can I help you with your notes today?',
        SEMANTIC_SEARCH: 'I\'ll search through your notes for relevant information. What would you like to know?'
    },

    // Filler text for chat states that lack context or a pending question
    PLACEHOLDERS: {
        NO_CONTEXT: 'I don\'t have any specific note context yet. Would you like me to search your notes for something specific?',
        WAITING_FOR_QUERY: 'Awaiting your question...'
    }
};
// Error messages and fallbacks
export const ERROR_PROMPTS = {
// User-facing error messages
USER_ERRORS: {
GENERAL_ERROR: "I encountered an error processing your request. Please try again or rephrase your question.",
CONTEXT_ERROR: "I couldn't retrieve context from your notes. Please check your query or try a different question.",
NETWORK_ERROR: "There was a network error connecting to the AI service. Please check your connection and try again.",
RATE_LIMIT: "The AI service is currently experiencing high demand. Please try again in a moment."
},
// Internal error handling
INTERNAL_ERRORS: {
CONTEXT_PROCESSING: "Error processing context data",
MESSAGE_FORMATTING: "Error formatting messages for LLM",
RESPONSE_PARSING: "Error parsing LLM response"
} }
}; };

View File

@ -1,6 +1,6 @@
import sanitizeHtml from 'sanitize-html'; import sanitizeHtml from 'sanitize-html';
import log from '../../../log.js'; import log from '../../../log.js';
import { CONTEXT_PROMPTS } from '../../constants/llm_prompt_constants.js'; import { CONTEXT_PROMPTS, FORMATTING_PROMPTS } from '../../constants/llm_prompt_constants.js';
import type { IContextFormatter, NoteSearchResult } from '../../interfaces/context_interfaces.js'; import type { IContextFormatter, NoteSearchResult } from '../../interfaces/context_interfaces.js';
// Constants for context window sizes, defines in-module to avoid circular dependencies // Constants for context window sizes, defines in-module to avoid circular dependencies
@ -50,7 +50,7 @@ export class ContextFormatter implements IContextFormatter {
if (providerId === 'ollama') { if (providerId === 'ollama') {
// For Ollama, use a much simpler plain text format that's less prone to encoding issues // For Ollama, use a much simpler plain text format that's less prone to encoding issues
formattedContext = `# Context for your query: "${query}"\n\n`; formattedContext = FORMATTING_PROMPTS.CONTEXT_HEADERS.SIMPLE(query);
} else if (providerId === 'anthropic') { } else if (providerId === 'anthropic') {
formattedContext = CONTEXT_PROMPTS.CONTEXT_HEADERS.ANTHROPIC(query); formattedContext = CONTEXT_PROMPTS.CONTEXT_HEADERS.ANTHROPIC(query);
} else { } else {
@ -107,7 +107,7 @@ export class ContextFormatter implements IContextFormatter {
let formattedSource = ''; let formattedSource = '';
if (providerId === 'ollama') { if (providerId === 'ollama') {
// For Ollama, use a simpler format and plain ASCII // For Ollama, use a simpler format and plain ASCII
formattedSource = `## ${title}\n${content}\n\n`; formattedSource = `${FORMATTING_PROMPTS.DIVIDERS.NOTE_START}${title}\n${content}\n\n`;
} else { } else {
formattedSource = `### ${title}\n${content}\n\n`; formattedSource = `### ${title}\n${content}\n\n`;
} }
@ -146,7 +146,7 @@ export class ContextFormatter implements IContextFormatter {
// Add closing to provide instructions to the AI - use simpler version for Ollama // Add closing to provide instructions to the AI - use simpler version for Ollama
let closing = ''; let closing = '';
if (providerId === 'ollama') { if (providerId === 'ollama') {
closing = '\n\nPlease use the information above to answer the query and keep your response concise.'; closing = `\n\n${FORMATTING_PROMPTS.CONTEXT_CLOSERS.SIMPLE}`;
} else if (providerId === 'anthropic') { } else if (providerId === 'anthropic') {
closing = CONTEXT_PROMPTS.CONTEXT_CLOSINGS.ANTHROPIC; closing = CONTEXT_PROMPTS.CONTEXT_CLOSINGS.ANTHROPIC;
} else { } else {

View File

@ -1,6 +1,7 @@
import sanitizeHtml from 'sanitize-html'; import sanitizeHtml from 'sanitize-html';
import type { Message } from '../ai_interface.js'; import type { Message } from '../ai_interface.js';
import { BaseMessageFormatter } from './base_formatter.js'; import { BaseMessageFormatter } from './base_formatter.js';
import { PROVIDER_PROMPTS } from '../constants/llm_prompt_constants.js';
/** /**
* Anthropic-specific message formatter * Anthropic-specific message formatter
@ -27,18 +28,9 @@ export class AnthropicMessageFormatter extends BaseMessageFormatter {
// 1. If explicit context is provided, we format it with XML tags // 1. If explicit context is provided, we format it with XML tags
if (context) { if (context) {
// Build the system message with context // Build the system message with context
const baseInstructions = this.getDefaultSystemPrompt(systemPrompt); const formattedContext = PROVIDER_PROMPTS.ANTHROPIC.SYSTEM_WITH_CONTEXT(
this.cleanContextContent(context)
const formattedContext = );
`<instructions>\n${baseInstructions}\n\n` +
`Use the following information from the user's notes to answer their questions:\n\n` +
`<user_notes>\n${this.cleanContextContent(context)}\n</user_notes>\n\n` +
`When responding:\n` +
`- Focus on the most relevant information from the notes\n` +
`- Be concise and direct in your answers\n` +
`- If quoting from notes, mention which note it's from\n` +
`- If the notes don't contain relevant information, say so clearly\n` +
`</instructions>`;
// If there's at least one user message, add the context to the first one // If there's at least one user message, add the context to the first one
if (userAssistantMessages.length > 0 && userAssistantMessages[0].role === 'user') { if (userAssistantMessages.length > 0 && userAssistantMessages[0].role === 'user') {
@ -51,7 +43,7 @@ export class AnthropicMessageFormatter extends BaseMessageFormatter {
// Add system response acknowledgment // Add system response acknowledgment
formattedMessages.push({ formattedMessages.push({
role: 'assistant', role: 'assistant',
content: "I'll help you with your notes based on the context provided." content: PROVIDER_PROMPTS.ANTHROPIC.CONTEXT_ACKNOWLEDGMENT
}); });
// Add remaining messages // Add remaining messages
@ -68,7 +60,7 @@ export class AnthropicMessageFormatter extends BaseMessageFormatter {
formattedMessages.push({ formattedMessages.push({
role: 'assistant', role: 'assistant',
content: "I'll help you with your notes based on the context provided. What would you like to know?" content: PROVIDER_PROMPTS.ANTHROPIC.CONTEXT_QUERY_ACKNOWLEDGMENT
}); });
// Add any existing assistant messages if they exist // Add any existing assistant messages if they exist
@ -84,8 +76,9 @@ export class AnthropicMessageFormatter extends BaseMessageFormatter {
const systemMessages = messages.filter(msg => msg.role === 'system'); const systemMessages = messages.filter(msg => msg.role === 'system');
// Build system content with XML tags // Build system content with XML tags
const systemContent = const systemContent = PROVIDER_PROMPTS.ANTHROPIC.INSTRUCTIONS_WRAPPER(
`<instructions>\n${systemMessages.map(msg => this.cleanContextContent(msg.content)).join('\n\n')}\n</instructions>`; systemMessages.map(msg => this.cleanContextContent(msg.content)).join('\n\n')
);
// Add as first user message // Add as first user message
formattedMessages.push({ formattedMessages.push({
@ -96,7 +89,7 @@ export class AnthropicMessageFormatter extends BaseMessageFormatter {
// Add assistant acknowledgment // Add assistant acknowledgment
formattedMessages.push({ formattedMessages.push({
role: 'assistant', role: 'assistant',
content: "I understand. I'll follow those instructions." content: PROVIDER_PROMPTS.ANTHROPIC.ACKNOWLEDGMENT
}); });
// Add remaining user/assistant messages // Add remaining user/assistant messages
@ -109,13 +102,13 @@ export class AnthropicMessageFormatter extends BaseMessageFormatter {
// Add as first user message with XML tags // Add as first user message with XML tags
formattedMessages.push({ formattedMessages.push({
role: 'user', role: 'user',
content: `<instructions>\n${systemPrompt}\n</instructions>` content: PROVIDER_PROMPTS.ANTHROPIC.INSTRUCTIONS_WRAPPER(systemPrompt)
}); });
// Add assistant acknowledgment // Add assistant acknowledgment
formattedMessages.push({ formattedMessages.push({
role: 'assistant', role: 'assistant',
content: "I understand. I'll follow those instructions." content: PROVIDER_PROMPTS.ANTHROPIC.ACKNOWLEDGMENT
}); });
// Add all other messages // Add all other messages
@ -128,13 +121,13 @@ export class AnthropicMessageFormatter extends BaseMessageFormatter {
// Add default system prompt with XML tags // Add default system prompt with XML tags
formattedMessages.push({ formattedMessages.push({
role: 'user', role: 'user',
content: `<instructions>\n${this.getDefaultSystemPrompt()}\n</instructions>` content: PROVIDER_PROMPTS.ANTHROPIC.INSTRUCTIONS_WRAPPER(this.getDefaultSystemPrompt())
}); });
// Add assistant acknowledgment // Add assistant acknowledgment
formattedMessages.push({ formattedMessages.push({
role: 'assistant', role: 'assistant',
content: "I understand. I'll follow those instructions." content: PROVIDER_PROMPTS.ANTHROPIC.ACKNOWLEDGMENT
}); });
// Add all user messages // Add all user messages

View File

@ -1,7 +1,7 @@
import sanitizeHtml from 'sanitize-html'; import sanitizeHtml from 'sanitize-html';
import type { Message } from '../ai_interface.js'; import type { Message } from '../ai_interface.js';
import type { MessageFormatter } from '../interfaces/message_formatter.js'; import type { MessageFormatter } from '../interfaces/message_formatter.js';
import { DEFAULT_SYSTEM_PROMPT } from '../constants/llm_prompt_constants.js'; import { DEFAULT_SYSTEM_PROMPT, PROVIDER_PROMPTS } from '../constants/llm_prompt_constants.js';
/** /**
* Base formatter with common functionality for all providers * Base formatter with common functionality for all providers
@ -25,7 +25,7 @@ export abstract class BaseMessageFormatter implements MessageFormatter {
* Uses the default prompt from constants * Uses the default prompt from constants
*/ */
protected getDefaultSystemPrompt(systemPrompt?: string): string { protected getDefaultSystemPrompt(systemPrompt?: string): string {
return systemPrompt || DEFAULT_SYSTEM_PROMPT; return systemPrompt || DEFAULT_SYSTEM_PROMPT || PROVIDER_PROMPTS.COMMON.DEFAULT_ASSISTANT_INTRO;
} }
/** /**

View File

@ -1,6 +1,7 @@
import type { Message } from '../ai_interface.js'; import type { Message } from '../ai_interface.js';
import { BaseMessageFormatter } from './base_formatter.js'; import { BaseMessageFormatter } from './base_formatter.js';
import sanitizeHtml from 'sanitize-html'; import sanitizeHtml from 'sanitize-html';
import { PROVIDER_PROMPTS, FORMATTING_PROMPTS } from '../constants/llm_prompt_constants.js';
/** /**
* Ollama-specific message formatter * Ollama-specific message formatter
@ -23,8 +24,8 @@ export class OllamaMessageFormatter extends BaseMessageFormatter {
const systemMessages = messages.filter(msg => msg.role === 'system'); const systemMessages = messages.filter(msg => msg.role === 'system');
const userMessages = messages.filter(msg => msg.role === 'user' || msg.role === 'assistant'); const userMessages = messages.filter(msg => msg.role === 'user' || msg.role === 'assistant');
// Create base system message with instructions // Create base system message with instructions or use default
const basePrompt = this.getDefaultSystemPrompt(systemPrompt); const basePrompt = systemPrompt || PROVIDER_PROMPTS.COMMON.DEFAULT_ASSISTANT_INTRO;
// Always add a system message with the base prompt // Always add a system message with the base prompt
formattedMessages.push({ formattedMessages.push({
@ -42,10 +43,10 @@ export class OllamaMessageFormatter extends BaseMessageFormatter {
if (msg.role === 'user' && !injectedContext) { if (msg.role === 'user' && !injectedContext) {
// Simple context injection directly in the user's message // Simple context injection directly in the user's message
const cleanedContext = this.cleanContextContent(context); const cleanedContext = this.cleanContextContent(context);
const formattedContext = const formattedContext = PROVIDER_PROMPTS.OLLAMA.CONTEXT_INJECTION(
"Here's information from my notes to help answer the question:\n\n" + cleanedContext,
cleanedContext + msg.content
"\n\nBased on this information, please answer: " + msg.content; );
formattedMessages.push({ formattedMessages.push({
role: 'user', role: 'user',

View File

@ -1,6 +1,7 @@
import sanitizeHtml from 'sanitize-html'; import sanitizeHtml from 'sanitize-html';
import type { Message } from '../ai_interface.js'; import type { Message } from '../ai_interface.js';
import { BaseMessageFormatter } from './base_formatter.js'; import { BaseMessageFormatter } from './base_formatter.js';
import { PROVIDER_PROMPTS, FORMATTING_PROMPTS } from '../constants/llm_prompt_constants.js';
/** /**
* OpenAI-specific message formatter * OpenAI-specific message formatter
@ -26,12 +27,9 @@ export class OpenAIMessageFormatter extends BaseMessageFormatter {
// If we have explicit context, format it properly // If we have explicit context, format it properly
if (context) { if (context) {
// For OpenAI, it's best to put context in the system message // For OpenAI, it's best to put context in the system message
const formattedContext = const formattedContext = PROVIDER_PROMPTS.OPENAI.SYSTEM_WITH_CONTEXT(
"You are an AI assistant integrated into TriliumNext Notes. " + this.cleanContextContent(context)
"Use the following information from the user's notes to answer their questions:\n\n" + );
this.cleanContextContent(context) +
"\n\nFocus on relevant information from these notes when answering. " +
"Be concise and informative in your responses.";
// Add as system message // Add as system message
formattedMessages.push({ formattedMessages.push({
@ -87,7 +85,7 @@ export class OpenAIMessageFormatter extends BaseMessageFormatter {
try { try {
// Convert HTML to Markdown for better readability // Convert HTML to Markdown for better readability
const cleaned = sanitizeHtml(content, { const cleaned = sanitizeHtml(content, {
allowedTags: ['b', 'i', 'em', 'strong', 'a', 'p', 'br', 'ul', 'ol', 'li', 'h1', 'h2', 'h3', 'h4', 'h5', 'code', 'pre'], allowedTags: FORMATTING_PROMPTS.HTML_ALLOWED_TAGS,
allowedAttributes: { allowedAttributes: {
'a': ['href'] 'a': ['href']
}, },