import type { ToolCall } from './tools/tool_interfaces.js';
import type { ModelMetadata } from './providers/provider_options.js';

/**
 * Interface for chat messages exchanged between the client and LLM models
 */
export interface Message {
    role: 'user' | 'assistant' | 'system' | 'tool';
    content: string;
    name?: string;
    tool_call_id?: string;
    tool_calls?: ToolCall[];
    sessionId?: string; // Optional session ID for WebSocket communication
}

// Define additional interfaces for tool-related types

export interface ToolChoice {
    type: 'none' | 'auto' | 'function';
    function?: {
        name: string;
    };
}

export interface ToolData {
    type: 'function';
    function: {
        name: string;
        description: string;
        parameters: Record<string, unknown>;
    };
}

export interface ToolExecutionInfo {
    type: 'start' | 'update' | 'complete' | 'error';
    tool: {
        name: string;
        arguments: Record<string, unknown>;
    };
    result?: string | Record<string, unknown>;
}
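
// --- Illustrative sketch (not part of the original API surface) ---
// What a concrete ToolData definition and a matching ToolChoice might look
// like. The "search_notes" tool and its parameters are hypothetical; the
// parameters object follows the JSON Schema shape used by OpenAI-style
// function calling.
const exampleTool: ToolData = {
    type: 'function',
    function: {
        name: 'search_notes', // hypothetical tool name
        description: 'Search notes by a free-text query',
        parameters: {
            type: 'object',
            properties: {
                query: { type: 'string', description: 'Search terms' }
            },
            required: ['query']
        }
    }
};

// Force the model to call that specific tool instead of choosing freely.
const exampleToolChoice: ToolChoice = {
    type: 'function',
    function: { name: 'search_notes' }
};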

/**
 * Interface for streaming response chunks
 *
 * This is the standardized format for all streaming chunks across
 * different providers (OpenAI, Anthropic, Ollama, etc.).
 * The original provider-specific chunks are available through
 * the extended interface in the stream_manager.
 *
 * See STREAMING.md for complete documentation on streaming usage.
 */
export interface StreamChunk {
    /** The text content in this chunk (may be empty for status updates) */
    text: string;

    /** Whether this is the final chunk in the stream */
    done: boolean;

    /** Optional token usage statistics (rarely available in streaming mode) */
    usage?: {
        promptTokens?: number;
        completionTokens?: number;
        totalTokens?: number;
    };

    /**
     * Raw provider-specific data from the original response chunk
     * This can include thinking state, tool execution info, etc.
     */
    raw?: Record<string, unknown>;

    /**
     * Tool calls from the LLM (if any)
     * These may be accumulated over multiple chunks during streaming
     */
    tool_calls?: ToolCall[];

    /**
     * Tool execution information during streaming
     * Includes tool name, args, and execution status
     */
    toolExecution?: ToolExecutionInfo;
}
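
// --- Illustrative sketch (not part of the original API surface) ---
// How a consumer might fold a sequence of StreamChunks into a final text and
// tool-call list. The assumption that later chunks carry the accumulated
// tool_calls snapshot (so the latest one wins) is an interpretation of the
// comment above, not a guarantee from every provider.
function foldChunk(
    acc: { text: string; toolCalls: ToolCall[] },
    chunk: StreamChunk
): { text: string; toolCalls: ToolCall[] } {
    return {
        // Text arrives as incremental deltas and concatenates in order.
        text: acc.text + chunk.text,
        // Keep the most recent tool_calls snapshot if this chunk carries one.
        toolCalls: chunk.tool_calls ?? acc.toolCalls
    };
}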

/**
 * Tool execution status for feedback to models
 */
export interface ToolExecutionStatus {
    toolCallId: string;
    name: string;
    success: boolean;
    result: string;
    error?: string;
}

/**
 * Options for chat completion requests
 *
 * Key properties:
 * - stream: If true, the response will be streamed
 * - model: Model name to use
 * - provider: Provider to use (openai, anthropic, ollama, etc.)
 * - enableTools: If true, enables tool support
 *
 * The stream option is particularly important and must be handled
 * consistently throughout the pipeline; always set it explicitly to
 * true or false.
 *
 * Streaming supports two approaches:
 * 1. Callback-based: Provide a streamCallback to receive chunks directly
 * 2. API-based: Use the stream property in the response to process chunks
 *
 * See STREAMING.md for complete documentation on streaming usage.
 */
export interface ChatCompletionOptions {
    model?: string;
    temperature?: number;
    maxTokens?: number;
    topP?: number;
    frequencyPenalty?: number;
    presencePenalty?: number;
    showThinking?: boolean;
    systemPrompt?: string;
    preserveSystemPrompt?: boolean; // Whether to preserve an existing system message
    bypassFormatter?: boolean; // Whether to bypass the message formatter entirely
    expectsJsonResponse?: boolean; // Whether this request expects a JSON response

    /**
     * Whether to stream the response.
     * When true, the response is delivered incrementally via either:
     * - the streamCallback, if provided, or
     * - the stream property in the response object.
     */
    stream?: boolean;

    /**
     * Optional callback function for streaming responses.
     * When provided along with stream: true, this function is called
     * for each chunk of the response.
     *
     * @param text The text content in this chunk
     * @param isDone Whether this is the final chunk
     * @param originalChunk Optional original provider-specific chunk for advanced usage
     */
    streamCallback?: (text: string, isDone: boolean, originalChunk?: Record<string, unknown>) => Promise<void> | void;

    enableTools?: boolean; // Whether to enable tool calling
    tools?: ToolData[]; // Tools to provide to the LLM
    tool_choice?: ToolChoice; // Tool choice parameter for the LLM
    useAdvancedContext?: boolean; // Whether to use advanced context enrichment
    toolExecutionStatus?: ToolExecutionStatus[]; // Status information about executed tools, for feedback to the model
    providerMetadata?: ModelMetadata; // Metadata about the provider and model capabilities
    sessionId?: string; // Session ID for storing tool execution results

    /**
     * Maximum number of tool execution iterations.
     * Used to prevent infinite loops in tool execution.
     */
    maxToolIterations?: number;

    /**
     * Current tool execution iteration counter.
     * For internal use, tracking nested tool executions.
     */
    currentToolIteration?: number;
}
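
// --- Illustrative sketch (`service` stands for any AIService implementation;
// process.stdout assumes a Node.js environment) ---
// Callback-based streaming: stream is set explicitly to true and the
// streamCallback receives each chunk as it arrives. The resolved response
// then carries the complete collected text.
async function streamWithCallback(service: AIService, messages: Message[]): Promise<string> {
    const response = await service.generateChatCompletion(messages, {
        stream: true, // always set explicitly, per the note above
        streamCallback: (text, isDone) => {
            if (text) {
                process.stdout.write(text); // render partial output as it arrives
            }
            if (isDone) {
                process.stdout.write('\n');
            }
        }
    });
    return response.text; // complete response collected from all chunks
}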

/**
 * Response from a chat completion request
 *
 * When streaming is used, the behavior depends on how streaming was requested:
 *
 * 1. With streamCallback: The text field contains the complete response
 *    collected from all chunks, and the stream property is not present.
 *
 * 2. Without streamCallback: The text field is initially empty, and the
 *    stream property provides a function to process chunks and collect
 *    the complete response.
 *
 * See STREAMING.md for complete documentation on streaming usage.
 */
export interface ChatResponse {
    /**
     * The complete text response.
     * If streaming was used with a streamCallback, this contains the collected response.
     * If streaming was used without a streamCallback, this is initially empty.
     */
    text: string;

    /** The model that generated the response */
    model: string;

    /** The provider that served the request (openai, anthropic, ollama, etc.) */
    provider: string;

    /** Token usage statistics (may not be available when streaming) */
    usage?: {
        promptTokens?: number;
        completionTokens?: number;
        totalTokens?: number;
    };

    /**
     * Stream processor function - only present when streaming is enabled
     * without a streamCallback. When called with a chunk processor function,
     * it returns a Promise that resolves to the complete response text.
     *
     * @param callback Function to process each chunk of the stream
     * @returns Promise resolving to the complete text after stream processing
     */
    stream?: (callback: (chunk: StreamChunk) => Promise<void> | void) => Promise<string>;

    /** Tool calls from the LLM (if tools were used and the model supports them) */
    tool_calls?: ToolCall[];
}
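
// --- Illustrative sketch (not part of the original API surface) ---
// API-based streaming: when no streamCallback was provided, `text` starts
// empty and the optional `stream` processor is used to drain the chunks,
// resolving to the complete text once the stream finishes.
async function collectStreamedText(response: ChatResponse): Promise<string> {
    if (response.stream) {
        return response.stream((chunk: StreamChunk) => {
            if (chunk.text) {
                process.stdout.write(chunk.text); // handle each incremental chunk
            }
        });
    }
    // Non-streaming (or callback-based) responses already carry the full text.
    return response.text;
}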

export interface AIService {
    /**
     * Generate a chat completion response
     */
    generateChatCompletion(messages: Message[], options?: ChatCompletionOptions): Promise<ChatResponse>;

    /**
     * Check if the service can be used (API key is set, etc.)
     */
    isAvailable(): boolean;

    /**
     * Get the name of the service
     */
    getName(): string;
}
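
// --- Illustrative sketch (a hypothetical stub, not a real provider) ---
// The smallest shape an AIService implementation can take; a real provider
// would call its backing API inside generateChatCompletion.
class EchoService implements AIService {
    getName(): string {
        return 'echo'; // hypothetical provider name
    }

    isAvailable(): boolean {
        return true; // a real service would check API keys, configuration, etc.
    }

    async generateChatCompletion(messages: Message[]): Promise<ChatResponse> {
        const last = messages[messages.length - 1];
        return {
            text: last?.content ?? '',
            model: 'echo-1', // hypothetical model name
            provider: 'echo'
        };
    }
}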

/**
 * Interface for the semantic context service, which provides enhanced context
 * retrieval for AI conversations based on semantic similarity.
 */
export interface SemanticContextService {
    /**
     * Initialize the semantic context service
     */
    initialize(): Promise<void>;

    /**
     * Retrieve semantic context based on relevance to user query
     */
    getSemanticContext(noteId: string, userQuery: string, maxResults?: number, messages?: Message[]): Promise<string>;

    /**
     * Get progressive context based on depth
     */
    getProgressiveContext?(noteId: string, depth?: number): Promise<string>;

    /**
     * Get smart context selection that adapts to query complexity
     */
    getSmartContext?(noteId: string, userQuery: string): Promise<string>;

    /**
     * Enhance LLM context with agent tools
     */
    getAgentToolsContext(noteId: string, query: string, showThinking?: boolean): Promise<string>;
}
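
// --- Illustrative sketch (`contextService` is hypothetical) ---
// A typical flow for SemanticContextService: initialize once, then fetch
// context relevant to the user's query before assembling the LLM messages.
// The maxResults value of 5 is an arbitrary example.
async function buildContextualMessages(
    contextService: SemanticContextService,
    noteId: string,
    userQuery: string
): Promise<Message[]> {
    const context = await contextService.getSemanticContext(noteId, userQuery, 5);
    return [
        { role: 'system', content: `Relevant context:\n${context}` },
        { role: 'user', content: userQuery }
    ];
}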