// ai_interface.ts — shared type definitions for the LLM service layer.
// Declares the message, request-option, response, and service contracts
// used by all AI providers in this module.
import type { ToolCall } from './tools/tool_interfaces.js';
2025-03-02 19:39:10 -08:00
export interface Message {
role: 'user' | 'assistant' | 'system' | 'tool';
2025-03-02 19:39:10 -08:00
content: string;
name?: string;
tool_call_id?: string;
tool_calls?: ToolCall[] | any[];
2025-03-02 19:39:10 -08:00
}
/**
 * A single chunk of a streaming LLM response.
 *
 * Delivered incrementally to the callback passed to {@link ChatResponse.stream};
 * `done` is true on the final chunk.
 */
export interface StreamChunk {
/** Text delta for this chunk. */
text: string;
/** True when this is the last chunk of the stream. */
done: boolean;
/** Token usage, when the provider reports it (often only on the final chunk — confirm per provider). */
usage?: {
promptTokens?: number;
completionTokens?: number;
totalTokens?: number;
};
}
2025-03-02 19:39:10 -08:00
export interface ChatCompletionOptions {
model?: string;
temperature?: number;
maxTokens?: number;
2025-03-19 18:49:14 +00:00
topP?: number;
frequencyPenalty?: number;
presencePenalty?: number;
showThinking?: boolean;
2025-03-02 19:39:10 -08:00
systemPrompt?: string;
preserveSystemPrompt?: boolean; // Whether to preserve existing system message
bypassFormatter?: boolean; // Whether to bypass the message formatter entirely
expectsJsonResponse?: boolean; // Whether this request expects a JSON response
stream?: boolean; // Whether to stream the response
enableTools?: boolean; // Whether to enable tool calling
tools?: any[]; // Tools to provide to the LLM
useAdvancedContext?: boolean; // Whether to use advanced context enrichment
2025-04-09 01:24:32 +00:00
toolExecutionStatus?: any[]; // Status information about executed tools for feedback
2025-03-02 19:39:10 -08:00
}
export interface ChatResponse {
text: string;
model: string;
provider: string;
usage?: {
promptTokens?: number;
completionTokens?: number;
totalTokens?: number;
};
// Stream handler - only present when streaming is enabled
stream?: (callback: (chunk: StreamChunk) => Promise<void> | void) => Promise<string>;
// Tool calls from the LLM
tool_calls?: ToolCall[] | any[];
2025-03-02 19:39:10 -08:00
}
/**
 * Contract implemented by every AI provider backend (e.g. per-vendor services).
 */
export interface AIService {
/**
* Generate a chat completion response for the given conversation.
* When `options.stream` is set, the result carries a `stream` handler
* instead of (or in addition to) the final text.
*/
generateChatCompletion(messages: Message[], options?: ChatCompletionOptions): Promise<ChatResponse>;
/**
* Check if the service can be used (API key is set, etc.)
*/
isAvailable(): boolean;
/**
* Get the human-readable name of the service.
*/
getName(): string;
}
2025-03-19 18:49:14 +00:00
/**
* Interface for the semantic context service, which provides enhanced context retrieval
* for AI conversations based on semantic similarity.
*/
export interface SemanticContextService {
/**
* Initialize the semantic context service
*/
initialize(): Promise<void>;
/**
* Retrieve semantic context based on relevance to user query
*/
getSemanticContext(noteId: string, userQuery: string, maxResults?: number, messages?: Message[]): Promise<string>;
2025-03-19 18:49:14 +00:00
/**
* Get progressive context based on depth
*/
getProgressiveContext?(noteId: string, depth?: number): Promise<string>;
/**
* Get smart context selection that adapts to query complexity
*/
getSmartContext?(noteId: string, userQuery: string): Promise<string>;
/**
* Enhance LLM context with agent tools
*/
getAgentToolsContext(noteId: string, query: string, showThinking?: boolean): Promise<string>;
}