Mirror of https://github.com/TriliumNext/Notes.git (synced 2025-10-16 19:31:35 +08:00)
centralize prompts
parent 72c380b6f4
commit 224cb22fe9
@@ -13,7 +13,7 @@ import contextService from "../../services/llm/context_service.js";
 import sql from "../../services/sql.js";
 // Import the index service for knowledge base management
 import indexService from "../../services/llm/index_service.js";
-import { CONTEXT_PROMPTS } from '../../services/llm/constants/llm_prompt_constants.js';
+import { CONTEXT_PROMPTS, ERROR_PROMPTS, FORMATTING_PROMPTS } from '../../services/llm/constants/llm_prompt_constants.js';
 
 // LLM service constants
 export const LLM_CONSTANTS = {
@@ -951,10 +951,9 @@ async function sendMessage(req: Request, res: Response)
     };
 
     // DEBUG: Log context details before sending to LLM
-    log.info(`CONTEXT BEING SENT TO LLM: ${context.length} chars`);
-    log.info(`Context begins with: "${context.substring(0, 200)}..."`);
-    log.info(`Context ends with: "...${context.substring(context.length - 200)}"`);
-    log.info(`Number of notes included: ${sourceNotes.length}`);
+    log.info(`${FORMATTING_PROMPTS.DIVIDERS.CONTENT_SECTION} Context begins with: "${context.substring(0, 200)}..."`);
+    log.info(`${FORMATTING_PROMPTS.DIVIDERS.CONTENT_SECTION} Context ends with: "...${context.substring(context.length - 200)}"`);
+    log.info(`${FORMATTING_PROMPTS.DIVIDERS.NOTE_SECTION} Number of notes included: ${sourceNotes.length}`);
 
     // Format messages for the LLM using the proper context
     const aiMessages = await contextService.buildMessagesWithContext(
@@ -1228,8 +1227,10 @@ async function sendMessage(req: Request, res: Response)
             };
         }
     } catch (error: any) {
-        log.error(`Error sending message to LLM: ${error.message}`);
-        throw new Error(`Failed to send message: ${error.message}`);
+        log.error(`Error in LLM query processing: ${error}`);
+        return {
+            error: ERROR_PROMPTS.USER_ERRORS.GENERAL_ERROR
+        };
     }
 }
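For reference, the pattern the hunk above adopts (log the internal error, hand the user only the centralized message) can be sketched as a small wrapper. The wrapper name and import paths are illustrative, not part of the commit:

    import log from '../../services/log.js';
    import { ERROR_PROMPTS } from '../../services/llm/constants/llm_prompt_constants.js';

    // Hypothetical helper: run an LLM operation, log the details,
    // and surface only the canonical user-facing error text.
    async function safeLlmCall<T>(operation: () => Promise<T>): Promise<T | { error: string }> {
        try {
            return await operation();
        } catch (error: any) {
            log.error(`Error in LLM query processing: ${error}`);
            return { error: ERROR_PROMPTS.USER_ERRORS.GENERAL_ERROR };
        }
    }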
@@ -2,7 +2,7 @@ import type { Message, ChatCompletionOptions } from './ai_interface.js';
 import aiServiceManager from './ai_service_manager.js';
 import chatStorageService from './chat_storage_service.js';
 import log from '../log.js';
-import { CONTEXT_PROMPTS } from './constants/llm_prompt_constants.js';
+import { CONTEXT_PROMPTS, ERROR_PROMPTS } from './constants/llm_prompt_constants.js';
 
 export interface ChatSession {
     id: string;
@@ -133,10 +133,7 @@ export class ChatService {
             // Add error message so user knows something went wrong
             const errorMessage: Message = {
                 role: 'assistant',
-                content: CONTEXT_PROMPTS.ERROR_MESSAGES.GENERAL_ERROR.replace(
-                    '{errorMessage}',
-                    error.message || 'Please check AI settings and try again.'
-                )
+                content: ERROR_PROMPTS.USER_ERRORS.GENERAL_ERROR
             };
 
             session.messages.push(errorMessage);
@@ -311,10 +308,7 @@ export class ChatService {
             // Add error message
             const errorMessage: Message = {
                 role: 'assistant',
-                content: CONTEXT_PROMPTS.ERROR_MESSAGES.CONTEXT_ERROR.replace(
-                    '{errorMessage}',
-                    error.message || 'Please try again.'
-                )
+                content: ERROR_PROMPTS.USER_ERRORS.CONTEXT_ERROR
             };
 
             session.messages.push(errorMessage);
@@ -133,12 +133,114 @@ export const AGENT_TOOL_PROMPTS = {
 // Provider-specific prompt modifiers
 export const PROVIDER_PROMPTS = {
     ANTHROPIC: {
-        // Any Anthropic Claude-specific prompt modifications would go here
+        // Anthropic Claude-specific prompt formatting
+        SYSTEM_WITH_CONTEXT: (context: string) =>
+            `<instructions>
+${DEFAULT_SYSTEM_PROMPT}
+
+Use the following information from the user's notes to answer their questions:
+
+<user_notes>
+${context}
+</user_notes>
+
+When responding:
+- Focus on the most relevant information from the notes
+- Be concise and direct in your answers
+- If quoting from notes, mention which note it's from
+- If the notes don't contain relevant information, say so clearly
+</instructions>`,
+
+        INSTRUCTIONS_WRAPPER: (instructions: string) =>
+            `<instructions>\n${instructions}\n</instructions>`,
+
+        ACKNOWLEDGMENT: "I understand. I'll follow those instructions.",
+        CONTEXT_ACKNOWLEDGMENT: "I'll help you with your notes based on the context provided.",
+        CONTEXT_QUERY_ACKNOWLEDGMENT: "I'll help you with your notes based on the context provided. What would you like to know?"
     },
+
     OPENAI: {
-        // Any OpenAI-specific prompt modifications would go here
+        // OpenAI-specific prompt formatting
+        SYSTEM_WITH_CONTEXT: (context: string) =>
+            `You are an AI assistant integrated into TriliumNext Notes.
+Use the following information from the user's notes to answer their questions:
+
+${context}
+
+Focus on relevant information from these notes when answering.
+Be concise and informative in your responses.`
     },
+
     OLLAMA: {
-        // Any Ollama-specific prompt modifications would go here
+        // Ollama-specific prompt formatting
+        CONTEXT_INJECTION: (context: string, query: string) =>
+            `Here's information from my notes to help answer the question:
+
+${context}
+
+Based on this information, please answer: ${query}`
+    },
+
+    // Common prompts across providers
+    COMMON: {
+        DEFAULT_ASSISTANT_INTRO: "You are an AI assistant integrated into TriliumNext Notes. Focus on helping users find information in their notes and answering questions based on their knowledge base. Be concise, informative, and direct when responding to queries."
     }
 };
+
+// Constants for formatting context and messages
+export const FORMATTING_PROMPTS = {
+    // Headers for context formatting
+    CONTEXT_HEADERS: {
+        SIMPLE: (query: string) => `I'm searching for information about: ${query}\n\nHere are the most relevant notes from my knowledge base:`,
+        DETAILED: (query: string) => `I'm searching for information about: "${query}"\n\nHere are the most relevant notes from my personal knowledge base:`
+    },
+
+    // Closing text for context formatting
+    CONTEXT_CLOSERS: {
+        SIMPLE: `End of notes. Please use this information to answer my question comprehensively.`,
+        DETAILED: `End of context information. Please use only the above notes to answer my question as comprehensively as possible.`
+    },
+
+    // Dividers used in context formatting
+    DIVIDERS: {
+        NOTE_SECTION: `------ NOTE INFORMATION ------`,
+        CONTENT_SECTION: `------ CONTEXT INFORMATION ------`,
+        NOTE_START: `# Note: `,
+        CONTENT_START: `Content: `
+    },
+
+    HTML_ALLOWED_TAGS: ['b', 'i', 'em', 'strong', 'a', 'p', 'br', 'ul', 'ol', 'li', 'h1', 'h2', 'h3', 'h4', 'h5', 'code', 'pre']
+};
+
+// Prompt templates for chat service
+export const CHAT_PROMPTS = {
+    // Introduction messages for new chats
+    INTRODUCTIONS: {
+        NEW_CHAT: "Welcome to TriliumNext AI Assistant. How can I help you with your notes today?",
+        SEMANTIC_SEARCH: "I'll search through your notes for relevant information. What would you like to know?"
+    },
+
+    // Placeholders for various chat scenarios
+    PLACEHOLDERS: {
+        NO_CONTEXT: "I don't have any specific note context yet. Would you like me to search your notes for something specific?",
+        WAITING_FOR_QUERY: "Awaiting your question..."
+    }
+};
+
+// Error messages and fallbacks
+export const ERROR_PROMPTS = {
+    // User-facing error messages
+    USER_ERRORS: {
+        GENERAL_ERROR: "I encountered an error processing your request. Please try again or rephrase your question.",
+        CONTEXT_ERROR: "I couldn't retrieve context from your notes. Please check your query or try a different question.",
+        NETWORK_ERROR: "There was a network error connecting to the AI service. Please check your connection and try again.",
+        RATE_LIMIT: "The AI service is currently experiencing high demand. Please try again in a moment."
+    },
+
+    // Internal error handling
+    INTERNAL_ERRORS: {
+        CONTEXT_PROCESSING: "Error processing context data",
+        MESSAGE_FORMATTING: "Error formatting messages for LLM",
+        RESPONSE_PARSING: "Error parsing LLM response"
+    }
+};
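To see how the new constants fit together, here is a minimal consumption sketch. The values are placeholders; only the import path and constant names come from the diff:

    import { PROVIDER_PROMPTS, FORMATTING_PROMPTS, ERROR_PROMPTS } from './constants/llm_prompt_constants.js';

    // Provider-specific system prompt built from retrieved note context.
    const anthropicSystem = PROVIDER_PROMPTS.ANTHROPIC.SYSTEM_WITH_CONTEXT('...note context...');

    // Ollama-style context block assembled from the shared formatting pieces.
    const ollamaContext =
        FORMATTING_PROMPTS.CONTEXT_HEADERS.SIMPLE('my query') +
        `\n${FORMATTING_PROMPTS.DIVIDERS.NOTE_START}Some Note\n...note content...\n\n` +
        FORMATTING_PROMPTS.CONTEXT_CLOSERS.SIMPLE;

    // One canonical user-facing error string, reused everywhere.
    const userError = ERROR_PROMPTS.USER_ERRORS.GENERAL_ERROR;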
@@ -1,6 +1,6 @@
 import sanitizeHtml from 'sanitize-html';
 import log from '../../../log.js';
-import { CONTEXT_PROMPTS } from '../../constants/llm_prompt_constants.js';
+import { CONTEXT_PROMPTS, FORMATTING_PROMPTS } from '../../constants/llm_prompt_constants.js';
 import type { IContextFormatter, NoteSearchResult } from '../../interfaces/context_interfaces.js';
 
 // Constants for context window sizes, defines in-module to avoid circular dependencies
@@ -50,7 +50,7 @@ export class ContextFormatter implements IContextFormatter {
 
         if (providerId === 'ollama') {
             // For Ollama, use a much simpler plain text format that's less prone to encoding issues
-            formattedContext = `# Context for your query: "${query}"\n\n`;
+            formattedContext = FORMATTING_PROMPTS.CONTEXT_HEADERS.SIMPLE(query);
         } else if (providerId === 'anthropic') {
             formattedContext = CONTEXT_PROMPTS.CONTEXT_HEADERS.ANTHROPIC(query);
         } else {
@@ -107,7 +107,7 @@ export class ContextFormatter implements IContextFormatter {
             let formattedSource = '';
             if (providerId === 'ollama') {
                 // For Ollama, use a simpler format and plain ASCII
-                formattedSource = `## ${title}\n${content}\n\n`;
+                formattedSource = `${FORMATTING_PROMPTS.DIVIDERS.NOTE_START}${title}\n${content}\n\n`;
             } else {
                 formattedSource = `### ${title}\n${content}\n\n`;
             }
@@ -146,7 +146,7 @@ export class ContextFormatter implements IContextFormatter {
         // Add closing to provide instructions to the AI - use simpler version for Ollama
         let closing = '';
         if (providerId === 'ollama') {
-            closing = '\n\nPlease use the information above to answer the query and keep your response concise.';
+            closing = `\n\n${FORMATTING_PROMPTS.CONTEXT_CLOSERS.SIMPLE}`;
         } else if (providerId === 'anthropic') {
             closing = CONTEXT_PROMPTS.CONTEXT_CLOSINGS.ANTHROPIC;
         } else {
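With these substitutions, the Ollama-format context the formatter emits reads roughly like this (illustrative assembly for a single note; exact whitespace depends on how the formatter joins the pieces):

    I'm searching for information about: my query

    Here are the most relevant notes from my knowledge base:
    # Note: Some Note
    ...note content...

    End of notes. Please use this information to answer my question comprehensively.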
@@ -1,6 +1,7 @@
 import sanitizeHtml from 'sanitize-html';
 import type { Message } from '../ai_interface.js';
 import { BaseMessageFormatter } from './base_formatter.js';
+import { PROVIDER_PROMPTS } from '../constants/llm_prompt_constants.js';
 
 /**
  * Anthropic-specific message formatter
@@ -27,18 +28,9 @@ export class AnthropicMessageFormatter extends BaseMessageFormatter {
         // 1. If explicit context is provided, we format it with XML tags
         if (context) {
             // Build the system message with context
-            const baseInstructions = this.getDefaultSystemPrompt(systemPrompt);
-
-            const formattedContext =
-                `<instructions>\n${baseInstructions}\n\n` +
-                `Use the following information from the user's notes to answer their questions:\n\n` +
-                `<user_notes>\n${this.cleanContextContent(context)}\n</user_notes>\n\n` +
-                `When responding:\n` +
-                `- Focus on the most relevant information from the notes\n` +
-                `- Be concise and direct in your answers\n` +
-                `- If quoting from notes, mention which note it's from\n` +
-                `- If the notes don't contain relevant information, say so clearly\n` +
-                `</instructions>`;
+            const formattedContext = PROVIDER_PROMPTS.ANTHROPIC.SYSTEM_WITH_CONTEXT(
+                this.cleanContextContent(context)
+            );
 
             // If there's at least one user message, add the context to the first one
             if (userAssistantMessages.length > 0 && userAssistantMessages[0].role === 'user') {
@@ -51,7 +43,7 @@ export class AnthropicMessageFormatter extends BaseMessageFormatter {
                 // Add system response acknowledgment
                 formattedMessages.push({
                     role: 'assistant',
-                    content: "I'll help you with your notes based on the context provided."
+                    content: PROVIDER_PROMPTS.ANTHROPIC.CONTEXT_ACKNOWLEDGMENT
                 });
 
                 // Add remaining messages
@@ -68,7 +60,7 @@ export class AnthropicMessageFormatter extends BaseMessageFormatter {
 
                 formattedMessages.push({
                     role: 'assistant',
-                    content: "I'll help you with your notes based on the context provided. What would you like to know?"
+                    content: PROVIDER_PROMPTS.ANTHROPIC.CONTEXT_QUERY_ACKNOWLEDGMENT
                 });
 
                 // Add any existing assistant messages if they exist
@@ -84,8 +76,9 @@ export class AnthropicMessageFormatter extends BaseMessageFormatter {
             const systemMessages = messages.filter(msg => msg.role === 'system');
 
             // Build system content with XML tags
-            const systemContent =
-                `<instructions>\n${systemMessages.map(msg => this.cleanContextContent(msg.content)).join('\n\n')}\n</instructions>`;
+            const systemContent = PROVIDER_PROMPTS.ANTHROPIC.INSTRUCTIONS_WRAPPER(
+                systemMessages.map(msg => this.cleanContextContent(msg.content)).join('\n\n')
+            );
 
             // Add as first user message
             formattedMessages.push({
@@ -96,7 +89,7 @@ export class AnthropicMessageFormatter extends BaseMessageFormatter {
             // Add assistant acknowledgment
             formattedMessages.push({
                 role: 'assistant',
-                content: "I understand. I'll follow those instructions."
+                content: PROVIDER_PROMPTS.ANTHROPIC.ACKNOWLEDGMENT
             });
 
             // Add remaining user/assistant messages
@@ -109,13 +102,13 @@ export class AnthropicMessageFormatter extends BaseMessageFormatter {
             // Add as first user message with XML tags
             formattedMessages.push({
                 role: 'user',
-                content: `<instructions>\n${systemPrompt}\n</instructions>`
+                content: PROVIDER_PROMPTS.ANTHROPIC.INSTRUCTIONS_WRAPPER(systemPrompt)
             });
 
             // Add assistant acknowledgment
             formattedMessages.push({
                 role: 'assistant',
-                content: "I understand. I'll follow those instructions."
+                content: PROVIDER_PROMPTS.ANTHROPIC.ACKNOWLEDGMENT
             });
 
             // Add all other messages
@@ -128,13 +121,13 @@ export class AnthropicMessageFormatter extends BaseMessageFormatter {
             // Add default system prompt with XML tags
             formattedMessages.push({
                 role: 'user',
-                content: `<instructions>\n${this.getDefaultSystemPrompt()}\n</instructions>`
+                content: PROVIDER_PROMPTS.ANTHROPIC.INSTRUCTIONS_WRAPPER(this.getDefaultSystemPrompt())
             });
 
             // Add assistant acknowledgment
             formattedMessages.push({
                 role: 'assistant',
-                content: "I understand. I'll follow those instructions."
+                content: PROVIDER_PROMPTS.ANTHROPIC.ACKNOWLEDGMENT
             });
 
             // Add all user messages
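Condensed, the formatter's system-prompt handling now reduces to constant lookups. A simplified sketch of the recurring pattern (the helper below is illustrative, not the actual class method):

    import type { Message } from '../ai_interface.js';
    import { PROVIDER_PROMPTS } from '../constants/llm_prompt_constants.js';

    // Wrap instructions in Anthropic's XML tags and pair them with the stock acknowledgment.
    function buildInstructionPair(instructions: string): Message[] {
        return [
            { role: 'user', content: PROVIDER_PROMPTS.ANTHROPIC.INSTRUCTIONS_WRAPPER(instructions) },
            { role: 'assistant', content: PROVIDER_PROMPTS.ANTHROPIC.ACKNOWLEDGMENT }
        ];
    }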
@@ -1,7 +1,7 @@
 import sanitizeHtml from 'sanitize-html';
 import type { Message } from '../ai_interface.js';
 import type { MessageFormatter } from '../interfaces/message_formatter.js';
-import { DEFAULT_SYSTEM_PROMPT } from '../constants/llm_prompt_constants.js';
+import { DEFAULT_SYSTEM_PROMPT, PROVIDER_PROMPTS } from '../constants/llm_prompt_constants.js';
 
 /**
  * Base formatter with common functionality for all providers
@@ -25,7 +25,7 @@ export abstract class BaseMessageFormatter implements MessageFormatter {
      * Uses the default prompt from constants
      */
     protected getDefaultSystemPrompt(systemPrompt?: string): string {
-        return systemPrompt || DEFAULT_SYSTEM_PROMPT;
+        return systemPrompt || DEFAULT_SYSTEM_PROMPT || PROVIDER_PROMPTS.COMMON.DEFAULT_ASSISTANT_INTRO;
     }
 
     /**
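The added fallback only matters when DEFAULT_SYSTEM_PROMPT is itself empty; the chain resolves left to right (illustrative values):

    // getDefaultSystemPrompt('custom')  -> 'custom'
    // getDefaultSystemPrompt(undefined) -> DEFAULT_SYSTEM_PROMPT, if non-empty
    //                                   -> PROVIDER_PROMPTS.COMMON.DEFAULT_ASSISTANT_INTRO otherwise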
@@ -1,6 +1,7 @@
 import type { Message } from '../ai_interface.js';
 import { BaseMessageFormatter } from './base_formatter.js';
 import sanitizeHtml from 'sanitize-html';
+import { PROVIDER_PROMPTS, FORMATTING_PROMPTS } from '../constants/llm_prompt_constants.js';
 
 /**
  * Ollama-specific message formatter
@@ -23,8 +24,8 @@ export class OllamaMessageFormatter extends BaseMessageFormatter {
         const systemMessages = messages.filter(msg => msg.role === 'system');
         const userMessages = messages.filter(msg => msg.role === 'user' || msg.role === 'assistant');
 
-        // Create base system message with instructions
-        const basePrompt = this.getDefaultSystemPrompt(systemPrompt);
+        // Create base system message with instructions or use default
+        const basePrompt = systemPrompt || PROVIDER_PROMPTS.COMMON.DEFAULT_ASSISTANT_INTRO;
 
         // Always add a system message with the base prompt
         formattedMessages.push({
@@ -42,10 +43,10 @@ export class OllamaMessageFormatter extends BaseMessageFormatter {
                 if (msg.role === 'user' && !injectedContext) {
                     // Simple context injection directly in the user's message
                     const cleanedContext = this.cleanContextContent(context);
-                    const formattedContext =
-                        "Here's information from my notes to help answer the question:\n\n" +
-                        cleanedContext +
-                        "\n\nBased on this information, please answer: " + msg.content;
+                    const formattedContext = PROVIDER_PROMPTS.OLLAMA.CONTEXT_INJECTION(
+                        cleanedContext,
+                        msg.content
+                    );
 
                     formattedMessages.push({
                         role: 'user',
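In isolation, the injection call now looks like this (inputs are placeholders; the signature comes from the constants file above):

    import { PROVIDER_PROMPTS } from '../constants/llm_prompt_constants.js';

    // Inline the cleaned note context directly into the user's question, Ollama-style.
    const userContent = PROVIDER_PROMPTS.OLLAMA.CONTEXT_INJECTION(
        '...cleaned note context...',
        'What did I write about backups?'
    );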
@@ -1,6 +1,7 @@
 import sanitizeHtml from 'sanitize-html';
 import type { Message } from '../ai_interface.js';
 import { BaseMessageFormatter } from './base_formatter.js';
+import { PROVIDER_PROMPTS, FORMATTING_PROMPTS } from '../constants/llm_prompt_constants.js';
 
 /**
  * OpenAI-specific message formatter
@@ -26,12 +27,9 @@ export class OpenAIMessageFormatter extends BaseMessageFormatter {
         // If we have explicit context, format it properly
         if (context) {
             // For OpenAI, it's best to put context in the system message
-            const formattedContext =
-                "You are an AI assistant integrated into TriliumNext Notes. " +
-                "Use the following information from the user's notes to answer their questions:\n\n" +
-                this.cleanContextContent(context) +
-                "\n\nFocus on relevant information from these notes when answering. " +
-                "Be concise and informative in your responses.";
+            const formattedContext = PROVIDER_PROMPTS.OPENAI.SYSTEM_WITH_CONTEXT(
+                this.cleanContextContent(context)
+            );
 
             // Add as system message
             formattedMessages.push({
@@ -87,7 +85,7 @@ export class OpenAIMessageFormatter extends BaseMessageFormatter {
         try {
             // Convert HTML to Markdown for better readability
             const cleaned = sanitizeHtml(content, {
-                allowedTags: ['b', 'i', 'em', 'strong', 'a', 'p', 'br', 'ul', 'ol', 'li', 'h1', 'h2', 'h3', 'h4', 'h5', 'code', 'pre'],
+                allowedTags: FORMATTING_PROMPTS.HTML_ALLOWED_TAGS,
                 allowedAttributes: {
                     'a': ['href']
                 },
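Since the tag whitelist now lives in FORMATTING_PROMPTS, the same sanitize-html call can be exercised standalone. The input string is a placeholder; sanitize-html drops script elements and their contents by default:

    import sanitizeHtml from 'sanitize-html';
    import { FORMATTING_PROMPTS } from '../constants/llm_prompt_constants.js';

    // Strip note HTML down to the shared whitelist before sending it to the model.
    const cleaned = sanitizeHtml('<p>Hello <script>evil()</script></p>', {
        allowedTags: FORMATTING_PROMPTS.HTML_ALLOWED_TAGS,
        allowedAttributes: { a: ['href'] }
    });
    // cleaned === '<p>Hello </p>'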