import { BaseAIService } from '../base_ai_service.js';
import type { Message, ChatCompletionOptions, ChatResponse, StreamChunk } from '../ai_interface.js';
import { OllamaMessageFormatter } from '../formatters/ollama_formatter.js';
import log from '../../log.js';
import type { ToolCall } from '../tools/tool_interfaces.js';
import toolRegistry from '../tools/tool_registry.js';
import type { OllamaOptions } from './provider_options.js';
import { getOllamaOptions } from './providers.js';
import { Ollama, type ChatRequest, type ChatResponse as OllamaChatResponse } from 'ollama';
import options from '../../options.js';
import {
    StreamProcessor,
    createStreamHandler,
    performProviderHealthCheck,
    processProviderStream,
    extractStreamStats
} from './stream_handler.js';

// Interface for tool execution feedback status
interface ToolExecutionStatus {
    toolCallId: string;
    name: string;
    success: boolean;
    result: string;
    error?: string;
}
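
/**
 * Chat completion provider backed by an Ollama server.
 *
 * Illustrative usage (assumes `ollamaBaseUrl` has been set in options and the
 * surrounding AI service plumbing is initialized):
 *
 *   const service = new OllamaService();
 *   if (service.isAvailable()) {
 *       const response = await service.generateChatCompletion([
 *           { role: 'user', content: 'Hello!' }
 *       ]);
 *       console.log(response.text);
 *   }
 */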
export class OllamaService extends BaseAIService {
    private formatter: OllamaMessageFormatter;
    private client: Ollama | null = null;

    constructor() {
        super('Ollama');
        this.formatter = new OllamaMessageFormatter();
    }
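
    /**
     * The service is available only when the base service checks pass and an
     * Ollama base URL is configured.
     */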
    isAvailable(): boolean {
        return super.isAvailable() && !!options.getOption('ollamaBaseUrl');
    }
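
    /**
     * Lazily create and cache the Ollama client. The custom `fetch` wrapper
     * only adds request/response logging around the standard fetch call.
     */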
    private getClient(): Ollama {
        if (!this.client) {
            const baseUrl = options.getOption('ollamaBaseUrl');
            if (!baseUrl) {
                throw new Error('Ollama base URL is not configured');
            }

            log.info(`Creating new Ollama client with base URL: ${baseUrl}`);

            // Create the client with a fetch wrapper that logs request/response details
            try {
                this.client = new Ollama({
                    host: baseUrl,
                    fetch: (url, init) => {
                        log.info(`Ollama API request to: ${url}`);
                        log.info(`Ollama API request method: ${init?.method || 'GET'}`);
                        log.info(`Ollama API request headers: ${JSON.stringify(init?.headers || {})}`);

                        // Call the actual fetch and log the outcome
                        return fetch(url, init).then(response => {
                            log.info(`Ollama API response status: ${response.status}`);
                            if (!response.ok) {
                                log.error(`Ollama API error response: ${response.statusText}`);
                            }
                            return response;
                        }).catch(error => {
                            log.error(`Ollama API fetch error: ${error.message}`);
                            throw error;
                        });
                    }
                });

                log.info(`Ollama client successfully created`);
            } catch (error) {
                log.error(`Error creating Ollama client: ${error}`);
                throw error;
            }
        }

        return this.client;
    }
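
    /**
     * Generate a chat completion through the Ollama API. Formats messages
     * (unless bypassed), attaches tool definitions when tools are enabled, and
     * dispatches either a streaming or a non-streaming request.
     */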
    async generateChatCompletion(messages: Message[], opts: ChatCompletionOptions = {}): Promise<ChatResponse> {
        if (!this.isAvailable()) {
            throw new Error('Ollama service is not available. Check API URL in settings.');
        }

        // Get provider-specific options from the central provider manager
        const providerOptions = await getOllamaOptions(opts);

        // Log provider metadata if available
        if (providerOptions.providerMetadata) {
            log.info(`Using model ${providerOptions.model} from provider ${providerOptions.providerMetadata.provider}`);

            // Log capabilities if available
            const capabilities = providerOptions.providerMetadata.capabilities;
            if (capabilities) {
                log.info(`Model capabilities: ${JSON.stringify(capabilities)}`);
            }
        }

        const systemPrompt = this.getSystemPrompt(providerOptions.systemPrompt || options.getOption('aiSystemPrompt'));

        try {
            // Check if we should add tool execution feedback
            if (providerOptions.toolExecutionStatus && Array.isArray(providerOptions.toolExecutionStatus) && providerOptions.toolExecutionStatus.length > 0) {
                log.info(`Adding tool execution feedback to messages`);
                messages = this.addToolExecutionFeedback(messages, providerOptions.toolExecutionStatus);
            }

            // Determine whether to use the formatter or send messages directly
            let messagesToSend: Message[];

            if (providerOptions.bypassFormatter) {
                // Bypass the formatter entirely and use the messages as-is
                messagesToSend = [...messages];
                log.info(`Bypassing formatter for Ollama request with ${messages.length} messages`);
            } else {
                // Use the formatter to prepare messages
                messagesToSend = this.formatter.formatMessages(
                    messages,
                    systemPrompt,
                    undefined, // context
                    providerOptions.preserveSystemPrompt
                );
                log.info(`Sending to Ollama with formatted messages: ${messagesToSend.length}`);
            }

            // Get tools if enabled
            let tools: any[] = [];
            if (providerOptions.enableTools !== false) {
                try {
                    tools = providerOptions.tools && providerOptions.tools.length > 0
                        ? providerOptions.tools
                        : toolRegistry.getAllToolDefinitions();

                    // Handle an empty tools array: tools are already initialized
                    // in the AIServiceManager constructor, so re-read the registry
                    // rather than initializing again
                    if (tools.length === 0) {
                        log.info('No tools found, re-reading tool registry...');
                        tools = toolRegistry.getAllToolDefinitions();
                        log.info(`After re-reading registry: ${tools.length} tools available`);
                    }

                    if (tools.length > 0) {
                        log.info(`Sending ${tools.length} tool definitions to Ollama`);
                    }
                } catch (error: any) {
                    log.error(`Error preparing tools: ${error.message || String(error)}`);
                    tools = []; // Empty fallback
                }
            }
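
            // Illustrative example of the conversion below: a tool call whose
            // arguments arrive as the string '{"city":"Paris"}' is sent to
            // Ollama as the object { city: 'Paris' }; an unparseable string
            // 'foo' is wrapped as { raw: 'foo' }.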

            // Convert our message format to Ollama's format
            const convertedMessages = messagesToSend.map(msg => {
                const converted: any = {
                    role: msg.role,
                    content: msg.content
                };

                if (msg.tool_calls) {
                    converted.tool_calls = msg.tool_calls.map(tc => {
                        // For Ollama, arguments must be an object, not a string
                        let processedArgs = tc.function.arguments;

                        // If arguments is a string, try to parse it as JSON
                        if (typeof processedArgs === 'string') {
                            try {
                                processedArgs = JSON.parse(processedArgs);
                            } catch (e) {
                                // If parsing fails, create an object with a single property
                                log.info(`Could not parse tool arguments as JSON: ${e}`);
                                processedArgs = { raw: processedArgs };
                            }
                        }

                        return {
                            id: tc.id,
                            function: {
                                name: tc.function.name,
                                arguments: processedArgs
                            }
                        };
                    });
                }

                if (msg.tool_call_id) {
                    converted.tool_call_id = msg.tool_call_id;
                }

                if (msg.name) {
                    converted.name = msg.name;
                }

                return converted;
            });

            // Prepare base request options
            const baseRequestOptions = {
                model: providerOptions.model,
                messages: convertedMessages,
                options: providerOptions.options,
                // Add tools if available
                tools: tools.length > 0 ? tools : undefined
            };

            // Get client instance
            const client = this.getClient();

            // Handle streaming
            if (opts.stream || opts.streamCallback) {
                return this.handleStreamingResponse(client, baseRequestOptions, opts, providerOptions);
            } else {
                // Non-streaming request
                log.info(`Using non-streaming mode with Ollama client`);

                // Create non-streaming request
                const nonStreamingRequest = {
                    ...baseRequestOptions,
                    stream: false as const // Use const assertion for type safety
                };

                const response = await client.chat(nonStreamingRequest);

                // Log response details
                log.info(`========== OLLAMA API RESPONSE ==========`);
                log.info(`Model: ${response.model}, Content length: ${response.message?.content?.length || 0} chars`);
                log.info(`Tokens: ${response.prompt_eval_count || 0} prompt, ${response.eval_count || 0} completion, ${(response.prompt_eval_count || 0) + (response.eval_count || 0)} total`);

                // Handle the response and extract tool calls if present
                const chatResponse: ChatResponse = {
                    text: response.message?.content || '',
                    model: response.model || providerOptions.model,
                    provider: this.getName(),
                    usage: {
                        promptTokens: response.prompt_eval_count || 0,
                        completionTokens: response.eval_count || 0,
                        totalTokens: (response.prompt_eval_count || 0) + (response.eval_count || 0)
                    }
                };

                // Add tool calls if present
                if (response.message?.tool_calls && response.message.tool_calls.length > 0) {
                    log.info(`Ollama response includes ${response.message.tool_calls.length} tool calls`);
                    chatResponse.tool_calls = this.transformToolCalls(response.message.tool_calls);
                }

                return chatResponse;
            }
        } catch (error: any) {
            // Enhanced error handling with detailed diagnostics
            log.error(`Ollama service error: ${error.message || String(error)}`);
            if (error.stack) {
                log.error(`Error stack trace: ${error.stack}`);
            }

            // Propagate the original error
            throw error;
        }
    }

    /**
     * Handle a streaming response from Ollama
     *
     * Uses reusable stream handling utilities for processing
     */
    private async handleStreamingResponse(
        client: Ollama,
        requestOptions: any,
        opts: ChatCompletionOptions,
        providerOptions: OllamaOptions
    ): Promise<ChatResponse> {
        log.info(`Using streaming mode with Ollama client`);

        // Log detailed information about the streaming setup
        log.info(`Ollama streaming details: model=${providerOptions.model}, streamCallback=${opts.streamCallback ? 'provided' : 'not provided'}`);

        // Create streaming request
        const streamingRequest = {
            ...requestOptions,
            stream: true as const
        };

        // Handle a direct streamCallback if provided
        if (opts.streamCallback) {
            try {
                // Perform health check before streaming
                await performProviderHealthCheck(
                    async () => await client.list(),
                    this.getName()
                );

                log.info(`Making Ollama streaming request after successful health check`);

                // Get the stream iterator
                const streamIterator = await client.chat(streamingRequest);

                // Process the stream with the reusable utility
                const streamResult = await processProviderStream(
                    streamIterator,
                    {
                        providerName: this.getName(),
                        modelName: providerOptions.model
                    },
                    opts.streamCallback
                );

                // Create the final response after streaming is complete
                return {
                    text: streamResult.completeText,
                    model: providerOptions.model,
                    provider: this.getName(),
                    tool_calls: this.transformToolCalls(streamResult.toolCalls),
                    usage: extractStreamStats(streamResult.finalChunk, this.getName())
                };
            } catch (error) {
                log.error(`Error in Ollama streaming with callback: ${error}`);
                log.error(`Error details: ${error instanceof Error ? error.stack : 'No stack trace available'}`);
                throw error;
            }
        } else {
            // Create a stream handler using the reusable StreamProcessor
            const streamHandler = createStreamHandler(
                {
                    providerName: this.getName(),
                    modelName: providerOptions.model,
                    streamCallback: opts.streamCallback
                },
                async (callback) => {
                    let completeText = '';
                    let responseToolCalls: any[] = [];
                    let chunkCount = 0;

                    try {
                        // Perform health check
                        await performProviderHealthCheck(
                            async () => await client.list(),
                            this.getName()
                        );

                        // Get the stream iterator
                        log.info(`Getting stream iterator from Ollama`);
                        const streamIterator = await client.chat(streamingRequest);

                        if (!streamIterator || typeof streamIterator[Symbol.asyncIterator] !== 'function') {
                            throw new Error('Invalid stream iterator returned');
                        }

                        // Process each chunk using the stream processor
                        for await (const chunk of streamIterator) {
                            chunkCount++;

                            // Process the chunk and update the accumulated text
                            const result = await StreamProcessor.processChunk(
                                chunk,
                                completeText,
                                chunkCount,
                                { providerName: this.getName(), modelName: providerOptions.model }
                            );

                            completeText = result.completeText;

                            // Extract any tool calls
                            const toolCalls = StreamProcessor.extractToolCalls(chunk);
                            if (toolCalls.length > 0) {
                                responseToolCalls = toolCalls;
                            }

                            // Send to callback - pass the content directly without accumulating
                            await callback({
                                text: chunk.message?.content || '',
                                done: false, // Add done property to satisfy StreamChunk
                                raw: chunk
                            });

                            // Log completion
                            if (chunk.done && !result.logged) {
                                log.info(`Reached final chunk after ${chunkCount} chunks, content length: ${completeText.length} chars`);
                            }
                        }

                        return completeText;
                    } catch (error) {
                        log.error(`Error in Ollama streaming: ${error}`);
                        log.error(`Error details: ${error instanceof Error ? error.stack : 'No stack trace available'}`);
                        throw error;
                    }
                }
            );

            // Return a response object with the stream handler
            return {
                text: '', // Initial text is empty, will be populated during streaming
                model: providerOptions.model,
                provider: this.getName(),
                stream: streamHandler as (callback: (chunk: StreamChunk) => Promise<void> | void) => Promise<string>
            };
        }
    }
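
    // Illustrative consumption of the deferred stream handler returned above
    // (hypothetical caller code):
    //
    //   const response = await service.generateChatCompletion(messages, { stream: true });
    //   const fullText = await response.stream(async (chunk) => {
    //       process.stdout.write(chunk.text);
    //   });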

    /**
     * Transform Ollama tool calls to the standard format expected by the pipeline
     */
    private transformToolCalls(toolCalls: any[] | undefined): ToolCall[] {
        if (!toolCalls || !Array.isArray(toolCalls) || toolCalls.length === 0) {
            return [];
        }

        return toolCalls.map((toolCall, index) => {
            // Generate a unique ID if none is provided
            const id = toolCall.id || `tool-call-${Date.now()}-${index}`;

            // Handle arguments based on their type
            let processedArguments: Record<string, any> | string = toolCall.function?.arguments || {};

            if (typeof processedArguments === 'string') {
                try {
                    processedArguments = JSON.parse(processedArguments);
                } catch (error) {
                    // If we can't parse as JSON, create a simple object
                    log.info(`Could not parse tool arguments as JSON in transformToolCalls: ${error}`);
                    processedArguments = { raw: processedArguments };
                }
            }

            return {
                id,
                type: 'function',
                function: {
                    name: toolCall.function?.name || '',
                    arguments: processedArguments
                }
            };
        });
    }
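
    // For example (illustrative), an Ollama tool call of the form
    //   { function: { name: 'search_notes', arguments: '{"query":"todo"}' } }
    // is transformed above into
    //   { id: 'tool-call-<timestamp>-0', type: 'function',
    //     function: { name: 'search_notes', arguments: { query: 'todo' } } }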

    /**
     * Adds a system message with feedback about tool execution status
     * @param messages The current message array
     * @param toolExecutionStatus Array of tool execution status objects
     * @returns Updated message array with feedback
     */
    private addToolExecutionFeedback(messages: Message[], toolExecutionStatus: ToolExecutionStatus[]): Message[] {
        if (!toolExecutionStatus || toolExecutionStatus.length === 0) {
            return messages;
        }

        // Create a copy of the messages
        const updatedMessages = [...messages];

        // Create a feedback message that explains what happened with each tool call
        let feedbackContent = `Tool execution feedback:\n\n`;

        toolExecutionStatus.forEach((status, index) => {
            // Add status for each tool
            const statusText = status.success ? 'successfully executed' : 'failed to execute';
            const toolName = status.name || 'unknown tool';

            feedbackContent += `Tool call ${index + 1} (${toolName}): ${statusText}\n`;

            // Add error information if available and the tool failed
            if (!status.success && status.error) {
                feedbackContent += `Error: ${status.error}\n`;
                feedbackContent += `Please fix this issue in your next response or try a different approach.\n`;
            }

            feedbackContent += `\n`;
        });
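
        // Example of the resulting feedback text for one failed call (illustrative):
        //
        //   Tool execution feedback:
        //
        //   Tool call 1 (search_notes): failed to execute
        //   Error: request timed out
        //   Please fix this issue in your next response or try a different approach.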

        // Add feedback message to the conversation
        updatedMessages.push({
            role: 'system',
            content: feedbackContent
        });

        log.info(`Added tool execution feedback: ${toolExecutionStatus.length} statuses`);
        return updatedMessages;
    }
}