import type { ToolCall } from './tools/tool_interfaces.js';
import type { ModelMetadata } from './providers/provider_options.js';

/**
 * A single message in a chat conversation.
 */
export interface Message {
    role: 'user' | 'assistant' | 'system' | 'tool';
    content: string;
    name?: string;
    tool_call_id?: string; // For 'tool' messages: the ID of the tool call being answered
    tool_calls?: ToolCall[] | any[]; // For 'assistant' messages: tool calls requested by the model
}
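
// Illustrative example (not part of this module): a conversation including a
// tool round-trip. The tool name, call ID, and ToolCall shape shown follow the
// OpenAI convention and are assumptions, not guaranteed by tool_interfaces.js.
//
//   const messages: Message[] = [
//       { role: 'system', content: 'You are a helpful assistant.' },
//       { role: 'user', content: 'What is the weather in Oslo?' },
//       {
//           role: 'assistant',
//           content: '',
//           tool_calls: [{ id: 'call_1', function: { name: 'get_weather', arguments: '{"city":"Oslo"}' } }]
//       },
//       { role: 'tool', content: '{"tempC": 7}', name: 'get_weather', tool_call_id: 'call_1' }
//   ];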

/**
 * A single chunk of a streamed chat response.
 */
export interface StreamChunk {
    text: string; // Text content of this chunk
    done: boolean; // True once the stream has finished
    usage?: { // Token usage, if reported by the provider
        promptTokens?: number;
        completionTokens?: number;
        totalTokens?: number;
    };
}
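
// Illustrative chunk sequence for the reply "Hi there" (an assumption that
// chunks carry incremental rather than cumulative text; usage arrives with the
// final chunk in this sketch, but the interface does not require that):
//
//   { text: 'Hi', done: false }
//   { text: ' there', done: false }
//   { text: '', done: true, usage: { promptTokens: 12, completionTokens: 3, totalTokens: 15 } }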

/**
 * Options for chat completion requests.
 *
 * Key properties:
 * - stream: If true, the response will be streamed
 * - model: Model name to use
 * - providerMetadata: Metadata about the provider (openai, anthropic, ollama, etc.) and model capabilities
 * - enableTools: If true, enables tool support
 *
 * The stream option is particularly important and must be handled consistently
 * throughout the pipeline: it should always be explicitly set to true or false,
 * never left undefined.
 */
export interface ChatCompletionOptions {
    model?: string;
    temperature?: number;
    maxTokens?: number;
    topP?: number;
    frequencyPenalty?: number;
    presencePenalty?: number;
    showThinking?: boolean; // Whether to show the model's thinking process
    systemPrompt?: string;
    preserveSystemPrompt?: boolean; // Whether to preserve an existing system message
    bypassFormatter?: boolean; // Whether to bypass the message formatter entirely
    expectsJsonResponse?: boolean; // Whether this request expects a JSON response
    stream?: boolean; // Whether to stream the response
    enableTools?: boolean; // Whether to enable tool calling
    tools?: any[]; // Tool definitions to provide to the LLM
    useAdvancedContext?: boolean; // Whether to use advanced context enrichment
    toolExecutionStatus?: any[]; // Status information about executed tools for feedback
    providerMetadata?: ModelMetadata; // Metadata about the provider and model capabilities
}
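
// Illustrative usage: set `stream` explicitly rather than leaving it undefined,
// as the doc comment above requires. The model name and tool definitions here
// are hypothetical.
//
//   const options: ChatCompletionOptions = {
//       model: 'gpt-4o-mini',        // hypothetical model name
//       temperature: 0.2,
//       stream: false,               // explicit, even when not streaming
//       enableTools: true,
//       tools: myToolDefinitions     // hypothetical: provider-formatted tool definitions
//   };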

export interface ChatResponse {
    text: string;
    model: string;
    provider: string;
    usage?: {
        promptTokens?: number;
        completionTokens?: number;
        totalTokens?: number;
    };
    // Stream handler - only present when streaming is enabled
    stream?: (callback: (chunk: StreamChunk) => Promise<void> | void) => Promise<string>;
    // Tool calls from the LLM
    tool_calls?: ToolCall[] | any[];
}
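
// Illustrative streaming consumption (sketch): `service` is any AIService
// implementation; assuming the Promise<string> returned by `stream` resolves to
// the accumulated response text.
//
//   const response = await service.generateChatCompletion(messages, { stream: true });
//   if (response.stream) {
//       const fullText = await response.stream((chunk) => {
//           process.stdout.write(chunk.text);   // render incrementally
//           if (chunk.done && chunk.usage) {
//               console.log('\ntokens:', chunk.usage.totalTokens);
//           }
//       });
//   }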

export interface AIService {
    /**
     * Generate a chat completion response
     */
    generateChatCompletion(messages: Message[], options?: ChatCompletionOptions): Promise<ChatResponse>;

    /**
     * Check if the service can be used (API key is set, etc.)
     */
    isAvailable(): boolean;

    /**
     * Get the name of the service
     */
    getName(): string;
}
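
// Illustrative provider-agnostic fallback (sketch; the ordering strategy is an
// assumption, not part of this module):
//
//   async function completeWithFallback(services: AIService[], messages: Message[]): Promise<ChatResponse> {
//       for (const service of services) {
//           if (service.isAvailable()) {
//               return service.generateChatCompletion(messages, { stream: false });
//           }
//       }
//       throw new Error('No AI service is available');
//   }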

/**
 * Interface for the semantic context service, which provides enhanced context retrieval
 * for AI conversations based on semantic similarity.
 */
export interface SemanticContextService {
    /**
     * Initialize the semantic context service
     */
    initialize(): Promise<void>;

    /**
     * Retrieve semantic context based on relevance to the user's query
     */
    getSemanticContext(noteId: string, userQuery: string, maxResults?: number, messages?: Message[]): Promise<string>;

    /**
     * Get progressive context based on depth
     */
    getProgressiveContext?(noteId: string, depth?: number): Promise<string>;

    /**
     * Get smart context selection that adapts to query complexity
     */
    getSmartContext?(noteId: string, userQuery: string): Promise<string>;

    /**
     * Enhance the LLM context using agent tools
     */
    getAgentToolsContext(noteId: string, query: string, showThinking?: boolean): Promise<string>;
}
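
// Illustrative pipeline step (sketch): injecting retrieved context into the
// system prompt. The wiring shown is an assumption about how callers combine
// SemanticContextService with an AIService, not prescribed by either interface.
//
//   const context = await semanticContext.getSemanticContext(noteId, userQuery, 5);
//   const messages: Message[] = [
//       { role: 'system', content: `Relevant notes:\n${context}` },
//       { role: 'user', content: userQuery }
//   ];
//   const response = await service.generateChatCompletion(messages, { stream: false });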