mirror of https://github.com/TriliumNext/Notes.git
synced 2025-08-10 18:39:22 +08:00

Commit 64f2a93ac0 (parent bbb382ef65)

reduce the use of any, part 1
@@ -9,10 +9,36 @@ export interface Message {
     content: string;
     name?: string;
     tool_call_id?: string;
-    tool_calls?: ToolCall[] | any[];
+    tool_calls?: ToolCall[];
     sessionId?: string; // Optional session ID for WebSocket communication
 }
 
+// Define additional interfaces for tool-related types
+export interface ToolChoice {
+    type: 'none' | 'auto' | 'function';
+    function?: {
+        name: string;
+    };
+}
+
+export interface ToolData {
+    type: 'function';
+    function: {
+        name: string;
+        description: string;
+        parameters: Record<string, unknown>;
+    };
+}
+
+export interface ToolExecutionInfo {
+    type: 'start' | 'update' | 'complete' | 'error';
+    tool: {
+        name: string;
+        arguments: Record<string, unknown>;
+    };
+    result?: string | Record<string, unknown>;
+}
+
 /**
  * Interface for streaming response chunks
  *
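For orientation, here is a minimal usage sketch of the new ToolData and ToolChoice types. The tool name and parameter schema are invented for the example and are not part of this commit:

const searchTool: ToolData = {
    type: 'function',
    function: {
        name: 'search_notes', // hypothetical tool name
        description: 'Search notes by keyword',
        parameters: { type: 'object', properties: { query: { type: 'string' } } }
    }
};

// Force the model to call that specific tool
const forcedChoice: ToolChoice = { type: 'function', function: { name: 'search_notes' } };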
@@ -41,23 +67,30 @@ export interface StreamChunk {
      * Raw provider-specific data from the original response chunk
      * This can include thinking state, tool execution info, etc.
      */
-    raw?: any;
+    raw?: Record<string, unknown>;
 
     /**
      * Tool calls from the LLM (if any)
      * These may be accumulated over multiple chunks during streaming
      */
-    tool_calls?: ToolCall[] | any[];
+    tool_calls?: ToolCall[];
 
     /**
      * Tool execution information during streaming
      * Includes tool name, args, and execution status
      */
-    toolExecution?: {
-        type: 'start' | 'update' | 'complete' | 'error';
-        tool: any;
-        result?: any;
-    };
+    toolExecution?: ToolExecutionInfo;
 }
 
+/**
+ * Tool execution status for feedback to models
+ */
+export interface ToolExecutionStatus {
+    toolCallId: string;
+    name: string;
+    success: boolean;
+    result: string;
+    error?: string;
+}
+
 /**
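A sketch of a chunk consumer that benefits from the typed StreamChunk fields; the handler is hypothetical, and it assumes ToolCall exposes function.name the way the Ollama conversion code later in this diff does:

function onChunk(chunk: StreamChunk): void {
    if (chunk.toolExecution?.type === 'error') {
        // toolExecution is now ToolExecutionInfo, so .tool.name is type-checked
        console.error(`tool ${chunk.toolExecution.tool.name} failed`);
    }
    for (const call of chunk.tool_calls ?? []) {
        console.log(`tool call requested: ${call.function.name}`);
    }
}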
@@ -108,13 +141,13 @@ export interface ChatCompletionOptions {
      * @param isDone Whether this is the final chunk
      * @param originalChunk Optional original provider-specific chunk for advanced usage
      */
-    streamCallback?: (text: string, isDone: boolean, originalChunk?: any) => Promise<void> | void;
+    streamCallback?: (text: string, isDone: boolean, originalChunk?: Record<string, unknown>) => Promise<void> | void;
 
     enableTools?: boolean; // Whether to enable tool calling
-    tools?: any[]; // Tools to provide to the LLM
-    tool_choice?: any; // Tool choice parameter for the LLM
+    tools?: ToolData[]; // Tools to provide to the LLM
+    tool_choice?: ToolChoice; // Tool choice parameter for the LLM
     useAdvancedContext?: boolean; // Whether to use advanced context enrichment
-    toolExecutionStatus?: any[]; // Status information about executed tools for feedback
+    toolExecutionStatus?: ToolExecutionStatus[]; // Status information about executed tools for feedback
     providerMetadata?: ModelMetadata; // Metadata about the provider and model capabilities
     sessionId?: string; // Session ID for storing tool execution results
 
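And a hypothetical call-site sketch showing how the tightened ChatCompletionOptions fields compose (searchTool as in the earlier sketch):

const options: ChatCompletionOptions = {
    enableTools: true,
    tools: [searchTool],           // ToolData[] instead of any[]
    tool_choice: { type: 'auto' }, // ToolChoice instead of any
    streamCallback: async (text, isDone, originalChunk) => {
        // originalChunk is now Record<string, unknown> | undefined instead of any
        if (isDone) {
            console.log('stream finished');
        }
    }
};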
@@ -177,7 +210,7 @@ export interface ChatResponse {
     stream?: (callback: (chunk: StreamChunk) => Promise<void> | void) => Promise<string>;
 
     /** Tool calls from the LLM (if tools were used and the model supports them) */
-    tool_calls?: ToolCall[] | any[];
+    tool_calls?: ToolCall[];
 }
 
 export interface AIService {
@@ -1,6 +1,66 @@
 import sanitizeHtml from 'sanitize-html';
 import becca from '../../../becca/becca.js';
 
+// Define interfaces for JSON structures
+interface CanvasElement {
+    type: string;
+    text?: string;
+
+}
+
+interface CanvasContent {
+    elements?: CanvasElement[];
+
+}
+
+interface MindMapNode {
+    text?: string;
+    children?: MindMapNode[];
+
+}
+
+interface MindMapContent {
+    root?: MindMapNode;
+
+}
+
+interface RelationMapNote {
+    noteId: string;
+    title?: string;
+    name?: string;
+
+}
+
+interface RelationMapRelation {
+    sourceNoteId: string;
+    targetNoteId: string;
+    name?: string;
+
+}
+
+interface RelationMapContent {
+    notes?: RelationMapNote[];
+    relations?: RelationMapRelation[];
+
+}
+
+interface GeoMapMarker {
+    title?: string;
+    lat: number;
+    lng: number;
+    description?: string;
+
+}
+
+interface GeoMapContent {
+    markers?: GeoMapMarker[];
+
+}
+
+interface ErrorWithMessage {
+    message: string;
+}
+
 /**
  * Get the content of a note
  */
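One caveat on the pattern used below: `e as ErrorWithMessage` is a compile-time assertion only, so a thrown non-object would still yield undefined for error.message at runtime. A stricter alternative (a hypothetical helper, not part of this commit) would narrow before reading:

function toErrorMessage(e: unknown): string {
    if (e instanceof Error) {
        return e.message;
    }
    if (typeof e === 'object' && e !== null && 'message' in e) {
        return String((e as ErrorWithMessage).message);
    }
    return String(e);
}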
@@ -51,21 +111,22 @@ export function formatNoteContent(content: string, type: string, mime: string, t
         if (mime === 'application/json') {
             try {
                 // Parse JSON content
-                const jsonContent = JSON.parse(content);
+                const jsonContent = JSON.parse(content) as CanvasContent;
 
                 // Extract text elements from canvas
                 if (jsonContent.elements && Array.isArray(jsonContent.elements)) {
                     const texts = jsonContent.elements
-                        .filter((element: any) => element.type === 'text' && element.text)
-                        .map((element: any) => element.text);
+                        .filter((element) => element.type === 'text' && element.text)
+                        .map((element) => element.text as string);
 
                     formattedContent += 'Canvas content:\n' + texts.join('\n');
                 } else {
                     formattedContent += '[Empty canvas]';
                 }
             }
-            catch (e: any) {
-                formattedContent += `[Error parsing canvas content: ${e.message}]`;
+            catch (e) {
+                const error = e as ErrorWithMessage;
+                formattedContent += `[Error parsing canvas content: ${error.message}]`;
             }
         } else {
             formattedContent += '[Canvas content]';
@@ -76,10 +137,10 @@ export function formatNoteContent(content: string, type: string, mime: string, t
         if (mime === 'application/json') {
             try {
                 // Parse JSON content
-                const jsonContent = JSON.parse(content);
+                const jsonContent = JSON.parse(content) as MindMapContent;
 
                 // Extract node text from mind map
-                const extractMindMapNodes = (node: any): string[] => {
+                const extractMindMapNodes = (node: MindMapNode): string[] => {
                     let texts: string[] = [];
                     if (node.text) {
                         texts.push(node.text);
@@ -98,8 +159,9 @@ export function formatNoteContent(content: string, type: string, mime: string, t
                     formattedContent += '[Empty mind map]';
                 }
             }
-            catch (e: any) {
-                formattedContent += `[Error parsing mind map content: ${e.message}]`;
+            catch (e) {
+                const error = e as ErrorWithMessage;
+                formattedContent += `[Error parsing mind map content: ${error.message}]`;
             }
         } else {
             formattedContent += '[Mind map content]';
@@ -110,23 +172,23 @@ export function formatNoteContent(content: string, type: string, mime: string, t
         if (mime === 'application/json') {
             try {
                 // Parse JSON content
-                const jsonContent = JSON.parse(content);
+                const jsonContent = JSON.parse(content) as RelationMapContent;
 
                 // Extract relation map entities and connections
                 let result = 'Relation map content:\n';
 
                 if (jsonContent.notes && Array.isArray(jsonContent.notes)) {
                     result += 'Notes: ' + jsonContent.notes
-                        .map((note: any) => note.title || note.name)
+                        .map((note) => note.title || note.name)
                         .filter(Boolean)
                         .join(', ') + '\n';
                 }
 
                 if (jsonContent.relations && Array.isArray(jsonContent.relations)) {
                     result += 'Relations: ' + jsonContent.relations
-                        .map((rel: any) => {
-                            const sourceNote = jsonContent.notes.find((n: any) => n.noteId === rel.sourceNoteId);
-                            const targetNote = jsonContent.notes.find((n: any) => n.noteId === rel.targetNoteId);
+                        .map((rel) => {
+                            const sourceNote = jsonContent.notes?.find((n) => n.noteId === rel.sourceNoteId);
+                            const targetNote = jsonContent.notes?.find((n) => n.noteId === rel.targetNoteId);
                             const source = sourceNote ? (sourceNote.title || sourceNote.name) : 'unknown';
                             const target = targetNote ? (targetNote.title || targetNote.name) : 'unknown';
                             return `${source} → ${rel.name || ''} → ${target}`;
@@ -136,8 +198,9 @@ export function formatNoteContent(content: string, type: string, mime: string, t
 
                 formattedContent += result;
             }
-            catch (e: any) {
-                formattedContent += `[Error parsing relation map content: ${e.message}]`;
+            catch (e) {
+                const error = e as ErrorWithMessage;
+                formattedContent += `[Error parsing relation map content: ${error.message}]`;
             }
         } else {
             formattedContent += '[Relation map content]';
@@ -148,14 +211,14 @@ export function formatNoteContent(content: string, type: string, mime: string, t
         if (mime === 'application/json') {
             try {
                 // Parse JSON content
-                const jsonContent = JSON.parse(content);
+                const jsonContent = JSON.parse(content) as GeoMapContent;
 
                 let result = 'Geographic map content:\n';
 
                 if (jsonContent.markers && Array.isArray(jsonContent.markers)) {
                     if (jsonContent.markers.length > 0) {
                         result += jsonContent.markers
-                            .map((marker: any) => {
+                            .map((marker) => {
                                 return `Location: ${marker.title || ''} (${marker.lat}, ${marker.lng})${marker.description ? ' - ' + marker.description : ''}`;
                             })
                             .join('\n');
@@ -168,8 +231,9 @@ export function formatNoteContent(content: string, type: string, mime: string, t
 
                 formattedContent += result;
             }
-            catch (e: any) {
-                formattedContent += `[Error parsing geographic map content: ${e.message}]`;
+            catch (e) {
+                const error = e as ErrorWithMessage;
+                formattedContent += `[Error parsing geographic map content: ${error.message}]`;
             }
         } else {
             formattedContent += '[Geographic map content]';
@@ -55,11 +55,20 @@ export interface IContextFormatter {
     ): Promise<string>;
 }
 
+/**
+ * Interface for LLM Service
+ */
+export interface ILLMService {
+    sendMessage(message: string, options?: Record<string, unknown>): Promise<string>;
+    generateEmbedding?(text: string): Promise<number[]>;
+    streamMessage?(message: string, callback: (text: string) => void, options?: Record<string, unknown>): Promise<string>;
+}
+
 /**
  * Interface for query enhancer
  */
 export interface IQueryEnhancer {
-    generateSearchQueries(question: string, llmService: any): Promise<string[]>;
+    generateSearchQueries(question: string, llmService: ILLMService): Promise<string[]>;
     estimateQueryComplexity(query: string): number;
 }
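For reference, a minimal hypothetical implementation of the new ILLMService interface; only sendMessage is mandatory because the other two members are optional:

class EchoLLMService implements ILLMService {
    async sendMessage(message: string, _options?: Record<string, unknown>): Promise<string> {
        return `echo: ${message}`;
    }
}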
@@ -90,6 +99,15 @@ export interface IContentChunker {
     chunkNoteContent(noteId: string, content: string, title: string): Promise<NoteChunk[]>;
 }
 
+/**
+ * Options for context service
+ */
+export interface ContextServiceOptions {
+    maxResults?: number;
+    summarize?: boolean;
+    llmService?: ILLMService;
+}
+
 /**
  * Interface for context service
  */
@@ -97,17 +115,13 @@ export interface IContextService {
     initialize(): Promise<void>;
     processQuery(
         userQuestion: string,
-        llmService: any,
+        llmService: ILLMService,
         contextNoteId?: string | null,
         showThinking?: boolean
     ): Promise<{ context: string; sources: NoteSearchResult[]; thinking?: string }>;
     findRelevantNotes(
         query: string,
         contextNoteId?: string | null,
-        options?: {
-            maxResults?: number;
-            summarize?: boolean;
-            llmService?: any;
-        }
+        options?: ContextServiceOptions
     ): Promise<NoteSearchResult[]>;
 }
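A hypothetical call-site sketch: the inline options literal that findRelevantNotes previously accepted is now the named ContextServiceOptions type, reusing the EchoLLMService sketch above:

declare const contextService: IContextService;

const results = await contextService.findRelevantNotes('tax documents', null, {
    maxResults: 5,
    summarize: true,
    llmService: new EchoLLMService() // any ILLMService implementation fits here
});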
@@ -2,7 +2,7 @@ import { BaseAIService } from '../base_ai_service.js';
 import type { Message, ChatCompletionOptions, ChatResponse, StreamChunk } from '../ai_interface.js';
 import { OllamaMessageFormatter } from '../formatters/ollama_formatter.js';
 import log from '../../log.js';
-import type { ToolCall } from '../tools/tool_interfaces.js';
+import type { ToolCall, Tool } from '../tools/tool_interfaces.js';
 import toolRegistry from '../tools/tool_registry.js';
 import type { OllamaOptions } from './provider_options.js';
 import { getOllamaOptions } from './providers.js';
@@ -25,6 +25,35 @@ interface ToolExecutionStatus {
     error?: string;
 }
 
+// Interface for Ollama-specific messages
+interface OllamaMessage {
+    role: string;
+    content: string;
+    tool_call_id?: string;
+    tool_calls?: OllamaToolCall[];
+    name?: string;
+}
+
+// Interface for Ollama tool calls
+interface OllamaToolCall {
+    id: string;
+    function: {
+        name: string;
+        arguments: Record<string, unknown>;
+    };
+}
+
+// Interface for Ollama request options
+interface OllamaRequestOptions {
+    model: string;
+    messages: OllamaMessage[];
+    stream?: boolean;
+    options?: Record<string, unknown>;
+    format?: string;
+    tools?: Tool[];
+    [key: string]: unknown;
+}
+
 export class OllamaService extends BaseAIService {
     private formatter: OllamaMessageFormatter;
     private client: Ollama | null = null;
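As an illustration of the Ollama-specific shapes above, a hypothetical tool-result message; the role string, id, and tool name are invented for the example:

const toolResultMessage: OllamaMessage = {
    role: 'tool',
    content: '{"unit": "celsius", "temperature": 21}',
    tool_call_id: 'call_0', // hypothetical id
    name: 'get_weather'     // hypothetical tool name
};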
@@ -104,7 +133,7 @@ export class OllamaService extends BaseAIService {
         // Check if we should add tool execution feedback
         if (providerOptions.toolExecutionStatus && Array.isArray(providerOptions.toolExecutionStatus) && providerOptions.toolExecutionStatus.length > 0) {
             log.info(`Adding tool execution feedback to messages`);
-            messages = this.addToolExecutionFeedback(messages, providerOptions.toolExecutionStatus);
+            messages = this.addToolExecutionFeedback(messages, providerOptions.toolExecutionStatus as ToolExecutionStatus[]);
         }
 
         // Determine whether to use the formatter or send messages directly
@@ -126,11 +155,11 @@ export class OllamaService extends BaseAIService {
         }
 
         // Get tools if enabled
-        let tools = [];
+        let tools: Tool[] = [];
         if (providerOptions.enableTools !== false) {
             try {
                 tools = providerOptions.tools && providerOptions.tools.length > 0
-                    ? providerOptions.tools
+                    ? providerOptions.tools as Tool[]
                     : toolRegistry.getAllToolDefinitions();
 
                 // Handle empty tools array
@@ -145,15 +174,16 @@ export class OllamaService extends BaseAIService {
                 if (tools.length > 0) {
                     log.info(`Sending ${tools.length} tool definitions to Ollama`);
                 }
-            } catch (error: any) {
-                log.error(`Error preparing tools: ${error.message || String(error)}`);
+            } catch (error) {
+                const errorMsg = error instanceof Error ? error.message : String(error);
+                log.error(`Error preparing tools: ${errorMsg}`);
                 tools = []; // Empty fallback
             }
         }
 
         // Convert our message format to Ollama's format
         const convertedMessages = messagesToSend.map(msg => {
-            const converted: any = {
+            const converted: OllamaMessage = {
                 role: msg.role,
                 content: msg.content
             };
@@ -161,21 +191,23 @@ export class OllamaService extends BaseAIService {
             if (msg.tool_calls) {
                 converted.tool_calls = msg.tool_calls.map(tc => {
                     // For Ollama, arguments must be an object, not a string
-                    let processedArgs = tc.function.arguments;
+                    let processedArgs: Record<string, unknown> = {};
 
                     // If arguments is a string, try to parse it as JSON
-                    if (typeof processedArgs === 'string') {
+                    if (typeof tc.function.arguments === 'string') {
                         try {
-                            processedArgs = JSON.parse(processedArgs);
+                            processedArgs = JSON.parse(tc.function.arguments);
                         } catch (e) {
                             // If parsing fails, create an object with a single property
                             log.info(`Could not parse tool arguments as JSON: ${e}`);
-                            processedArgs = { raw: processedArgs };
+                            processedArgs = { raw: tc.function.arguments };
                         }
-                    }
+                    } else if (typeof tc.function.arguments === 'object') {
+                        processedArgs = tc.function.arguments as Record<string, unknown>;
+                    }
 
                     return {
-                        id: tc.id,
+                        id: tc.id ?? '',
                         function: {
                             name: tc.function.name,
                             arguments: processedArgs
@@ -196,65 +228,67 @@ export class OllamaService extends BaseAIService {
         });
 
         // Prepare base request options
-        const baseRequestOptions = {
+        const baseRequestOptions: OllamaRequestOptions = {
             model: providerOptions.model,
             messages: convertedMessages,
-            options: providerOptions.options,
-            // Add tools if available
-            tools: tools.length > 0 ? tools : undefined
+            stream: opts.stream === true
         };
 
-        // Get client instance
+        // Add tool definitions if available
+        if (tools && tools.length > 0 && providerOptions.enableTools !== false) {
+            baseRequestOptions.tools = tools;
+        }
+
+        // Add any model-specific parameters
+        if (providerOptions.options) {
+            baseRequestOptions.options = providerOptions.options;
+        }
+
+        // If JSON response is expected, set format
+        if (providerOptions.expectsJsonResponse) {
+            baseRequestOptions.format = 'json';
+        }
+
+        log.info(`Sending request to Ollama with model: ${providerOptions.model}`);
+
+        // Handle streaming vs non-streaming responses
         const client = this.getClient();
 
-        // Handle streaming
-        if (opts.stream || opts.streamCallback) {
+        if (opts.stream === true) {
             // Use streaming API
             return this.handleStreamingResponse(client, baseRequestOptions, opts, providerOptions);
         } else {
-            // Non-streaming request
-            log.info(`Using non-streaming mode with Ollama client`);
-
-            // Create a properly typed request with stream: false
-            const chatRequest: ChatRequest & { stream?: false } = {
-                ...baseRequestOptions,
-                stream: false
-            };
-
-            const response = await client.chat(chatRequest);
-
-            // Log response details
-            log.info(`========== OLLAMA API RESPONSE ==========`);
-            log.info(`Model: ${response.model}, Content length: ${response.message?.content?.length || 0} chars`);
-            log.info(`Tokens: ${response.prompt_eval_count || 0} prompt, ${response.eval_count || 0} completion, ${(response.prompt_eval_count || 0) + (response.eval_count || 0)} total`);
-
-            // Handle the response and extract tool calls if present
-            const chatResponse: ChatResponse = {
-                text: response.message?.content || '',
-                model: response.model || providerOptions.model,
-                provider: this.getName(),
-                usage: {
-                    promptTokens: response.prompt_eval_count || 0,
-                    completionTokens: response.eval_count || 0,
-                    totalTokens: (response.prompt_eval_count || 0) + (response.eval_count || 0)
-                }
-            };
-
-            // Add tool calls if present
-            if (response.message?.tool_calls && response.message.tool_calls.length > 0) {
-                log.info(`Ollama response includes ${response.message.tool_calls.length} tool calls`);
-                chatResponse.tool_calls = this.transformToolCalls(response.message.tool_calls);
-            }
-
-            return chatResponse;
+            // Use non-streaming API
+            try {
+                log.info(`Sending non-streaming request to Ollama`);
+
+                // Create non-streaming request
+                const nonStreamingRequest = {
+                    ...baseRequestOptions,
+                    stream: false as const // Use const assertion for type safety
+                };
+
+                const response = await client.chat(nonStreamingRequest);
+                log.info(`Received response from Ollama`);
+
+                // Transform tool calls if present
+                const toolCalls = this.transformToolCalls(response.message.tool_calls);
+
+                return {
+                    text: response.message.content,
+                    model: providerOptions.model,
+                    provider: 'ollama',
+                    tool_calls: toolCalls.length > 0 ? toolCalls : undefined
+                };
+            } catch (error) {
+                const errorMsg = error instanceof Error ? error.message : String(error);
+                log.error(`Error in Ollama request: ${errorMsg}`);
+                throw error;
+            }
         }
-    } catch (error: any) {
-        // Enhanced error handling with detailed diagnostics
-        log.error(`Ollama service error: ${error.message || String(error)}`);
-        if (error.stack) {
-            log.error(`Error stack trace: ${error.stack}`);
-        }
-
-        // Propagate the original error
+    } catch (error) {
+        const errorMsg = error instanceof Error ? error.message : String(error);
+        log.error(`Error in Ollama service: ${errorMsg}`);
         throw error;
     }
 }
@@ -266,7 +300,7 @@ export class OllamaService extends BaseAIService {
      */
     private async handleStreamingResponse(
         client: Ollama,
-        requestOptions: any,
+        requestOptions: OllamaRequestOptions,
         opts: ChatCompletionOptions,
         providerOptions: OllamaOptions
     ): Promise<ChatResponse> {
@@ -369,7 +403,7 @@ export class OllamaService extends BaseAIService {
                     await callback({
                         text: chunk.message?.content || '',
                         done: false, // Add done property to satisfy StreamChunk
-                        raw: chunk
+                        raw: chunk as unknown as Record<string, unknown>
                     });
 
                     // Log completion
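On the `chunk as unknown as Record<string, unknown>` double cast: TypeScript rejects a direct assertion between two object types when neither is assignable to the other, and the Ollama chunk type has no string index signature, so the value must be widened through unknown first. A standalone illustration with a made-up Chunk type:

interface Chunk {
    message: { content: string };
    done: boolean;
}

declare const chunk: Chunk;

// `chunk as Record<string, unknown>` fails to compile (TS2352);
// widening through `unknown` first is the standard escape hatch.
const raw = chunk as unknown as Record<string, unknown>;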