well, we ripped out our custom ollama implementation in favor of the SDK

perf3ct 2025-04-09 20:33:55 +00:00
parent 7f92dfc3f1
commit 53223b5750
6 changed files with 286 additions and 440 deletions
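
For orientation, the SDK surface this commit adopts comes down to four calls. A minimal sketch (ollama 0.5.x; the host and model names here are placeholder assumptions, not values from the commit):

import { Ollama } from 'ollama';

const ollama = new Ollama({ host: 'http://localhost:11434' });

const { models } = await ollama.list();              // replaces GET /api/tags
const info = await ollama.show({ model: 'llama3' }); // replaces POST /api/show
const { embedding } = await ollama.embeddings({      // replaces POST /api/embeddings
    model: 'nomic-embed-text',
    prompt: 'Test'
});
const reply = await ollama.chat({                    // replaces POST /api/chat
    model: 'llama3',
    messages: [{ role: 'user', content: 'Hello' }]
});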

package-lock.json (generated) · 16 additions · View File

@@ -67,6 +67,7 @@
         "multer": "1.4.5-lts.2",
         "normalize-strings": "1.1.1",
         "normalize.css": "8.0.1",
+        "ollama": "0.5.14",
         "rand-token": "1.0.1",
         "safe-compare": "1.1.4",
         "sanitize-filename": "1.6.3",
@@ -15894,6 +15895,15 @@
         "node": "^10.13.0 || >=12.0.0"
       }
     },
+    "node_modules/ollama": {
+      "version": "0.5.14",
+      "resolved": "https://registry.npmjs.org/ollama/-/ollama-0.5.14.tgz",
+      "integrity": "sha512-pvOuEYa2WkkAumxzJP0RdEYHkbZ64AYyyUszXVX7ruLvk5L+EiO2G71da2GqEQ4IAk4j6eLoUbGk5arzFT1wJA==",
+      "license": "MIT",
+      "dependencies": {
+        "whatwg-fetch": "^3.6.20"
+      }
+    },
     "node_modules/omggif": {
       "version": "1.0.10",
       "resolved": "https://registry.npmjs.org/omggif/-/omggif-1.0.10.tgz",
@@ -21335,6 +21345,12 @@
         "node": ">=18"
       }
     },
+    "node_modules/whatwg-fetch": {
+      "version": "3.6.20",
+      "resolved": "https://registry.npmjs.org/whatwg-fetch/-/whatwg-fetch-3.6.20.tgz",
+      "integrity": "sha512-EqhiFU6daOA8kpjOWTL0olhVOF3i7OrFzSYiGsEMB8GcXS+RrzauAERX65xMeNWVqxA6HXH2m69Z9LaKKdisfg==",
+      "license": "MIT"
+    },
     "node_modules/whatwg-mimetype": {
       "version": "4.0.0",
       "resolved": "https://registry.npmjs.org/whatwg-mimetype/-/whatwg-mimetype-4.0.0.tgz",

View File

@@ -129,6 +129,7 @@
         "multer": "1.4.5-lts.2",
         "normalize-strings": "1.1.1",
         "normalize.css": "8.0.1",
+        "ollama": "0.5.14",
         "rand-token": "1.0.1",
         "safe-compare": "1.1.4",
         "sanitize-filename": "1.6.3",

View File

@@ -1,7 +1,7 @@
-import axios from 'axios';
 import options from "../../services/options.js";
 import log from "../../services/log.js";
 import type { Request, Response } from "express";
+import { Ollama } from "ollama";

 /**
  * @swagger
@@ -40,19 +40,16 @@ async function listModels(req: Request, res: Response) {
     try {
         const baseUrl = req.query.baseUrl as string || await options.getOption('ollamaBaseUrl') || 'http://localhost:11434';

-        // Call Ollama API to get models
-        const response = await axios.get(`${baseUrl}/api/tags?format=json`, {
-            headers: { 'Content-Type': 'application/json' },
-            timeout: 10000
-        });
+        // Create Ollama client
+        const ollama = new Ollama({ host: baseUrl });

-        // Return the models list
-        const models = response.data.models || [];
-
-        // Important: don't use "return res.send()" - just return the data
+        // Call Ollama API to get models using the official client
+        const response = await ollama.list();

+        // Return the models list
         return {
             success: true,
-            models: models
+            models: response.models || []
         };
     } catch (error: any) {
         log.error(`Error listing Ollama models: ${error.message || 'Unknown error'}`);
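
In isolation, the new route logic reduces to this sketch (the host value is an assumption matching the route's default):

import { Ollama } from 'ollama';

const ollama = new Ollama({ host: 'http://localhost:11434' });
const response = await ollama.list();
// response.models is the installed-model list the route now returns directly
console.log(response.models.map(m => m.name));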

View File

@@ -4,17 +4,29 @@ import type { EmbeddingConfig } from "../embeddings_interface.js";
 import { NormalizationStatus } from "../embeddings_interface.js";
 import { LLM_CONSTANTS } from "../../constants/provider_constants.js";
 import type { EmbeddingModelInfo } from "../../interfaces/embedding_interfaces.js";
+import { Ollama } from "ollama";

 /**
- * Ollama embedding provider implementation
+ * Ollama embedding provider implementation using the official Ollama client
  */
 export class OllamaEmbeddingProvider extends BaseEmbeddingProvider {
     name = "ollama";
+    private client: Ollama | null = null;

     constructor(config: EmbeddingConfig) {
         super(config);
     }

+    /**
+     * Get the Ollama client instance
+     */
+    private getClient(): Ollama {
+        if (!this.client) {
+            this.client = new Ollama({ host: this.baseUrl });
+        }
+        return this.client;
+    }
+
     /**
      * Initialize the provider by detecting model capabilities
      */
@@ -39,24 +51,13 @@ export class OllamaEmbeddingProvider extends BaseEmbeddingProvider {
      */
     private async fetchModelCapabilities(modelName: string): Promise<EmbeddingModelInfo | null> {
         try {
-            // First try the /api/show endpoint which has detailed model information
-            const url = new URL(`${this.baseUrl}/api/show`);
-            url.searchParams.append('name', modelName);
-
-            const showResponse = await fetch(url, {
-                method: 'GET',
-                headers: { "Content-Type": "application/json" },
-                signal: AbortSignal.timeout(10000)
-            });
-
-            if (!showResponse.ok) {
-                throw new Error(`HTTP error! status: ${showResponse.status}`);
-            }
-
-            const data = await showResponse.json();
-
-            if (data && data.parameters) {
-                const params = data.parameters;
+            const client = this.getClient();
+
+            // Get model info using the client's show method
+            const modelData = await client.show({ model: modelName });
+
+            if (modelData && modelData.parameters) {
+                const params = modelData.parameters as any;

                 // Extract context length from parameters (different models might use different parameter names)
                 const contextWindow = params.context_length ||
                     params.num_ctx ||
@@ -66,7 +67,7 @@ export class OllamaEmbeddingProvider extends BaseEmbeddingProvider {
                 // Some models might provide embedding dimensions
                 const embeddingDimension = params.embedding_length || params.dim || null;

-                log.info(`Fetched Ollama model info from API for ${modelName}: context window ${contextWindow}`);
+                log.info(`Fetched Ollama model info for ${modelName}: context window ${contextWindow}`);

                 return {
                     name: modelName,
@@ -76,7 +77,7 @@ export class OllamaEmbeddingProvider extends BaseEmbeddingProvider {
                 };
             }
         } catch (error: any) {
-            log.info(`Could not fetch model info from Ollama show API: ${error.message}. Will try embedding test.`);
+            log.info(`Could not fetch model info from Ollama API: ${error.message}. Will try embedding test.`);
             // We'll fall back to embedding test if this fails
         }
@@ -162,26 +163,20 @@ export class OllamaEmbeddingProvider extends BaseEmbeddingProvider {
     /**
      * Detect embedding dimension by making a test API call
      */
     private async detectEmbeddingDimension(modelName: string): Promise<number> {
-        const testResponse = await fetch(`${this.baseUrl}/api/embeddings`, {
-            method: 'POST',
-            headers: { "Content-Type": "application/json" },
-            body: JSON.stringify({
+        try {
+            const client = this.getClient();
+            const embedResponse = await client.embeddings({
                 model: modelName,
                 prompt: "Test"
-            }),
-            signal: AbortSignal.timeout(10000)
-        });
-
-        if (!testResponse.ok) {
-            throw new Error(`HTTP error! status: ${testResponse.status}`);
-        }
-
-        const data = await testResponse.json();
-        if (data && Array.isArray(data.embedding)) {
-            return data.embedding.length;
-        } else {
-            throw new Error("Could not detect embedding dimensions");
+            });
+
+            if (embedResponse && Array.isArray(embedResponse.embedding)) {
+                return embedResponse.embedding.length;
+            } else {
+                throw new Error("Could not detect embedding dimensions");
+            }
+        } catch (error) {
+            throw new Error(`Failed to detect embedding dimensions: ${error}`);
         }
     }
@@ -218,26 +213,15 @@ export class OllamaEmbeddingProvider extends BaseEmbeddingProvider {
         const charLimit = (modelInfo.contextWidth || 8192) * 4; // Rough estimate: avg 4 chars per token
         const trimmedText = text.length > charLimit ? text.substring(0, charLimit) : text;

-        const response = await fetch(`${this.baseUrl}/api/embeddings`, {
-            method: 'POST',
-            headers: { "Content-Type": "application/json" },
-            body: JSON.stringify({
-                model: modelName,
-                prompt: trimmedText,
-                format: "json"
-            }),
-            signal: AbortSignal.timeout(60000) // Increased timeout for larger texts (60 seconds)
-        });
-
-        if (!response.ok) {
-            throw new Error(`HTTP error! status: ${response.status}`);
-        }
-
-        const data = await response.json();
-
-        if (data && Array.isArray(data.embedding)) {
+        const client = this.getClient();
+        const response = await client.embeddings({
+            model: modelName,
+            prompt: trimmedText
+        });
+
+        if (response && Array.isArray(response.embedding)) {
             // Success! Return the embedding
-            return new Float32Array(data.embedding);
+            return new Float32Array(response.embedding);
         } else {
             throw new Error("Unexpected response structure from Ollama API");
         }
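
The dimension probe above amounts to a single SDK call. A standalone sketch (the model name is an assumption; any embedding model works):

import { Ollama } from 'ollama';

const client = new Ollama({ host: 'http://localhost:11434' });
const res = await client.embeddings({ model: 'nomic-embed-text', prompt: 'Test' });
// The vector length is the model's embedding dimension (e.g. 768)
console.log(res.embedding.length);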

View File

@@ -1,45 +1,13 @@
 import options from '../../options.js';
 import { BaseAIService } from '../base_ai_service.js';
 import type { Message, ChatCompletionOptions, ChatResponse } from '../ai_interface.js';
-import sanitizeHtml from 'sanitize-html';
 import { OllamaMessageFormatter } from '../formatters/ollama_formatter.js';
 import log from '../../log.js';
 import type { ToolCall } from '../tools/tool_interfaces.js';
 import toolRegistry from '../tools/tool_registry.js';
 import type { OllamaOptions } from './provider_options.js';
 import { getOllamaOptions } from './providers.js';
+import { Ollama, type ChatRequest, type ChatResponse as OllamaChatResponse } from 'ollama';

-interface OllamaFunctionArguments {
-    [key: string]: any;
-}
-
-interface OllamaFunctionCall {
-    function: {
-        name: string;
-        arguments: OllamaFunctionArguments | string;
-    };
-    id?: string;
-}
-
-interface OllamaMessage {
-    role: string;
-    content: string;
-    tool_calls?: OllamaFunctionCall[];
-}
-
-interface OllamaResponse {
-    model: string;
-    created_at: string;
-    message: OllamaMessage;
-    done: boolean;
-    done_reason?: string;
-    total_duration: number;
-    load_duration: number;
-    prompt_eval_count: number;
-    prompt_eval_duration: number;
-    eval_count: number;
-    eval_duration: number;
-}

 // Add an interface for tool execution feedback status
 interface ToolExecutionStatus {
@@ -52,6 +20,7 @@ interface ToolExecutionStatus {
 export class OllamaService extends BaseAIService {
     private formatter: OllamaMessageFormatter;
+    private client: Ollama | null = null;

     constructor() {
         super('Ollama');
@@ -62,6 +31,17 @@ export class OllamaService extends BaseAIService {
         return super.isAvailable() && !!options.getOption('ollamaBaseUrl');
     }

+    private getClient(): Ollama {
+        if (!this.client) {
+            const baseUrl = options.getOption('ollamaBaseUrl');
+            if (!baseUrl) {
+                throw new Error('Ollama base URL is not configured');
+            }
+            this.client = new Ollama({ host: baseUrl });
+        }
+        return this.client;
+    }
+
     async generateChatCompletion(messages: Message[], opts: ChatCompletionOptions = {}): Promise<ChatResponse> {
         if (!this.isAvailable()) {
             throw new Error('Ollama service is not available. Check API URL in settings.');
@@ -108,79 +88,39 @@ export class OllamaService extends BaseAIService {
             log.info(`Sending to Ollama with formatted messages: ${messagesToSend.length}`);
         }

-        // Build request body base
-        const requestBody: any = {
-            model: providerOptions.model,
-            messages: messagesToSend
-        };
-
-        // Debug logging for stream option
-        log.info(`Stream option in providerOptions: ${providerOptions.stream}`);
-        log.info(`Stream option type: ${typeof providerOptions.stream}`);
-
-        // Handle streaming in a way that respects the provided option but ensures consistency:
-        // - If explicitly true, set to true
-        // - If explicitly false, set to false
-        // - If undefined, default to false unless we have a streamCallback
-        if (providerOptions.stream !== undefined) {
-            // Explicit value provided - respect it
-            requestBody.stream = providerOptions.stream === true;
-            log.info(`Stream explicitly provided in options, set to: ${requestBody.stream}`);
-        } else if (opts.streamCallback) {
-            // No explicit value but we have a stream callback - enable streaming
-            requestBody.stream = true;
-            log.info(`Stream not explicitly set but streamCallback provided, enabling streaming`);
-        } else {
-            // Default to false
-            requestBody.stream = false;
-            log.info(`Stream not explicitly set and no streamCallback, defaulting to false`);
-        }
-
-        // Log additional information about the streaming context
-        log.info(`Streaming context: Will stream to client: ${typeof opts.streamCallback === 'function'}`);
-
-        // If we have a streaming callback but the stream flag isn't set for some reason, warn about it
-        if (typeof opts.streamCallback === 'function' && !requestBody.stream) {
-            log.info(`WARNING: Stream callback provided but stream=false in request. This may cause streaming issues.`);
-        }
-
-        // Add options object if provided
-        if (providerOptions.options) {
-            requestBody.options = { ...providerOptions.options };
-        }
-
-        // Add tools if enabled
+        // Log request details
+        log.info(`========== OLLAMA API REQUEST ==========`);
+        log.info(`Model: ${providerOptions.model}, Messages: ${messagesToSend.length}`);
+        log.info(`Stream: ${opts.streamCallback ? true : false}`);
+
+        // Get tools if enabled
+        let tools = [];
         if (providerOptions.enableTools !== false) {
-            // Use provided tools or get from registry
             try {
-                requestBody.tools = providerOptions.tools && providerOptions.tools.length > 0
+                tools = providerOptions.tools && providerOptions.tools.length > 0
                     ? providerOptions.tools
                     : toolRegistry.getAllToolDefinitions();

                 // Handle empty tools array
-                if (requestBody.tools.length === 0) {
+                if (tools.length === 0) {
                     log.info('No tools found, attempting to initialize tools...');
                     const toolInitializer = await import('../tools/tool_initializer.js');
                     await toolInitializer.default.initializeTools();
-                    requestBody.tools = toolRegistry.getAllToolDefinitions();
-                    log.info(`After initialization: ${requestBody.tools.length} tools available`);
+                    tools = toolRegistry.getAllToolDefinitions();
+                    log.info(`After initialization: ${tools.length} tools available`);
+                }
+
+                if (tools.length > 0) {
+                    log.info(`Sending ${tools.length} tool definitions to Ollama`);
                 }
             } catch (error: any) {
                 log.error(`Error preparing tools: ${error.message || String(error)}`);
-                requestBody.tools = []; // Empty fallback
+                tools = []; // Empty fallback
             }
         }

-        // Log request details
-        log.info(`========== OLLAMA API REQUEST ==========`);
-        log.info(`Model: ${requestBody.model}, Messages: ${requestBody.messages.length}, Tools: ${requestBody.tools ? requestBody.tools.length : 0}`);
-        log.info(`Stream: ${requestBody.stream || false}, JSON response expected: ${providerOptions.expectsJsonResponse}`);
-
-        if (requestBody.options) {
-            log.info(`Options: ${JSON.stringify(requestBody.options)}`);
-        }
-
         // Check message structure and log detailed information about each message
-        requestBody.messages.forEach((msg: any, index: number) => {
+        messagesToSend.forEach((msg: any, index: number) => {
             const keys = Object.keys(msg);
             log.info(`Message ${index}, Role: ${msg.role}, Keys: ${keys.join(', ')}`);
@@ -194,16 +134,7 @@ export class OllamaService extends BaseAIService {
             // Log tool-related details
             if (keys.includes('tool_calls')) {
-                log.info(`Message ${index} has ${msg.tool_calls.length} tool calls:`);
-                msg.tool_calls.forEach((call: any, callIdx: number) => {
-                    log.info(`  Tool call ${callIdx}: ${call.function?.name || 'unknown'}, ID: ${call.id || 'unspecified'}`);
-                    if (call.function?.arguments) {
-                        const argsPreview = typeof call.function.arguments === 'string'
-                            ? call.function.arguments.substring(0, 100)
-                            : JSON.stringify(call.function.arguments).substring(0, 100);
-                        log.info(`  Arguments: ${argsPreview}...`);
-                    }
-                });
+                log.info(`Message ${index} has ${msg.tool_calls.length} tool calls`);
             }

             if (keys.includes('tool_call_id')) {
@@ -215,231 +146,163 @@ export class OllamaService extends BaseAIService {
             }
         });

-        // Log tool definitions
-        if (requestBody.tools && requestBody.tools.length > 0) {
-            log.info(`Sending ${requestBody.tools.length} tool definitions:`);
-            requestBody.tools.forEach((tool: any, toolIdx: number) => {
-                log.info(`  Tool ${toolIdx}: ${tool.function?.name || 'unnamed'}`);
-                if (tool.function?.description) {
-                    log.info(`  Description: ${tool.function.description.substring(0, 100)}...`);
-                }
-                if (tool.function?.parameters) {
-                    const paramNames = tool.function.parameters.properties
-                        ? Object.keys(tool.function.parameters.properties)
-                        : [];
-                    log.info(`  Parameters: ${paramNames.join(', ')}`);
-                }
-            });
-        }
-
-        // Log full request body (with improved logging for debug purposes)
-        const requestStr = JSON.stringify(requestBody);
-        log.info(`========== FULL OLLAMA REQUEST ==========`);
-
-        // Log request in manageable chunks
-        log.info(`Full request: ${requestStr}`);
-        log.info(`========== END FULL OLLAMA REQUEST ==========`);
-
-        // Send the request
-        const response = await fetch(`${providerOptions.baseUrl}/api/chat`, {
-            method: 'POST',
-            headers: { 'Content-Type': 'application/json' },
-            body: JSON.stringify(requestBody)
-        });
-
-        if (!response.ok) {
-            const errorBody = await response.text();
-            log.error(`Ollama API error: ${response.status} ${response.statusText} - ${errorBody}`);
-            throw new Error(`Ollama API error: ${response.status} ${response.statusText}`);
-        }
-
-        const data: OllamaResponse = await response.json();
-
-        // Log response details
-        log.info(`========== OLLAMA API RESPONSE ==========`);
-        log.info(`Model: ${data.model}, Content length: ${data.message.content.length} chars`);
-        log.info(`Tokens: ${data.prompt_eval_count} prompt, ${data.eval_count} completion, ${data.prompt_eval_count + data.eval_count} total`);
-        log.info(`Duration: ${data.total_duration}ns total, ${data.prompt_eval_duration}ns prompt, ${data.eval_duration}ns completion`);
-        log.info(`Done: ${data.done}, Reason: ${data.done_reason || 'not specified'}`);
-
-        // Log content preview
-        const contentPreview = data.message.content && data.message.content.length > 300
-            ? `${data.message.content.substring(0, 300)}...`
-            : data.message.content;
-        log.info(`Response content: ${contentPreview}`);
-
-        // Log the full raw response for debugging
-        log.info(`========== FULL OLLAMA RESPONSE ==========`);
-        log.info(`Raw response object: ${JSON.stringify(data)}`);
-
-        // Handle the response and extract tool calls if present
-        const chatResponse: ChatResponse = {
-            text: data.message.content,
-            model: data.model,
-            provider: this.getName(),
-            usage: {
-                promptTokens: data.prompt_eval_count,
-                completionTokens: data.eval_count,
-                totalTokens: data.prompt_eval_count + data.eval_count
-            }
-        };
-
-        // Add tool calls if present
-        if (data.message.tool_calls && data.message.tool_calls.length > 0) {
-            log.info(`========== OLLAMA TOOL CALLS DETECTED ==========`);
-            log.info(`Ollama response includes ${data.message.tool_calls.length} tool calls`);
-
-            // Log detailed information about each tool call
-            const transformedToolCalls: ToolCall[] = [];
-
-            // Log detailed information about the tool calls in the response
-            log.info(`========== OLLAMA TOOL CALLS IN RESPONSE ==========`);
-            data.message.tool_calls.forEach((toolCall, index) => {
-                log.info(`Tool call ${index + 1}:`);
-                log.info(`  Name: ${toolCall.function?.name || 'unknown'}`);
-                log.info(`  ID: ${toolCall.id || `auto-${index + 1}`}`);
-
-                // Generate a unique ID if none is provided
-                const id = toolCall.id || `tool-call-${Date.now()}-${index}`;
-
-                // Handle arguments based on their type
-                let processedArguments: Record<string, any> | string;
-
-                if (typeof toolCall.function.arguments === 'string') {
-                    // Log raw string arguments in full for debugging
-                    log.info(`  Raw string arguments: ${toolCall.function.arguments}`);
-
-                    // Try to parse JSON string arguments
-                    try {
-                        processedArguments = JSON.parse(toolCall.function.arguments);
-                        log.info(`  Successfully parsed arguments to object with keys: ${Object.keys(processedArguments).join(', ')}`);
-                        log.info(`  Parsed argument values:`);
-                        Object.entries(processedArguments).forEach(([key, value]) => {
-                            const valuePreview = typeof value === 'string'
-                                ? (value.length > 100 ? `${value.substring(0, 100)}...` : value)
-                                : JSON.stringify(value);
-                            log.info(`    ${key}: ${valuePreview}`);
-                        });
-                    } catch (e: unknown) {
-                        // If parsing fails, keep as string and log the error
-                        processedArguments = toolCall.function.arguments;
-                        const errorMessage = e instanceof Error ? e.message : String(e);
-                        log.info(`  Could not parse arguments as JSON: ${errorMessage}`);
-                        log.info(`  Keeping as string: ${processedArguments.substring(0, 200)}${processedArguments.length > 200 ? '...' : ''}`);
-
-                        // Try to clean and parse again with more aggressive methods
-                        try {
-                            const cleaned = toolCall.function.arguments
-                                .replace(/^['"]|['"]$/g, '') // Remove surrounding quotes
-                                .replace(/\\"/g, '"') // Replace escaped quotes
-                                .replace(/([{,])\s*'([^']+)'\s*:/g, '$1"$2":') // Replace single quotes around property names
-                                .replace(/([{,])\s*(\w+)\s*:/g, '$1"$2":'); // Add quotes around unquoted property names
-
-                            log.info(`  Attempting to parse cleaned argument: ${cleaned}`);
-                            const reparseArg = JSON.parse(cleaned);
-                            log.info(`  Successfully parsed cleaned argument with keys: ${Object.keys(reparseArg).join(', ')}`);
-
-                            // Use reparsed arguments if successful
-                            processedArguments = reparseArg;
-                        } catch (cleanErr: unknown) {
-                            const cleanErrMessage = cleanErr instanceof Error ? cleanErr.message : String(cleanErr);
-                            log.info(`  Failed to parse cleaned arguments: ${cleanErrMessage}`);
-                        }
-                    }
-                } else {
-                    // If it's already an object, use it directly and log details
-                    processedArguments = toolCall.function.arguments;
-                    log.info(`  Object arguments with keys: ${Object.keys(processedArguments).join(', ')}`);
-                    log.info(`  Argument values:`);
-                    Object.entries(processedArguments).forEach(([key, value]) => {
-                        const valuePreview = typeof value === 'string'
-                            ? (value.length > 100 ? `${value.substring(0, 100)}...` : value)
-                            : JSON.stringify(value);
-                        log.info(`    ${key}: ${valuePreview}`);
-                    });
-                }
-
-                // If arguments are still empty or invalid, create a default argument
-                if (!processedArguments ||
-                    (typeof processedArguments === 'object' && Object.keys(processedArguments).length === 0)) {
-                    log.info(`  Empty or invalid arguments for tool ${toolCall.function.name}, creating default`);
-
-                    // Get tool definition to determine required parameters
-                    const allToolDefs = toolRegistry.getAllToolDefinitions();
-                    const toolDef = allToolDefs.find(t => t.function?.name === toolCall.function.name);
-
-                    if (toolDef && toolDef.function && toolDef.function.parameters) {
-                        const params = toolDef.function.parameters;
-                        processedArguments = {};
-
-                        // Create default values for required parameters
-                        if (params.required && Array.isArray(params.required)) {
-                            params.required.forEach((param: string) => {
-                                // Extract text from the response to use as default value
-                                const defaultValue = data.message.content?.includes(param)
-                                    ? extractValueFromText(data.message.content, param)
-                                    : "default";
-                                (processedArguments as Record<string, any>)[param] = defaultValue;
-                                log.info(`  Added default value for required param ${param}: ${defaultValue}`);
-                            });
-                        }
-                    }
-                }
-
-                // Convert to our standard ToolCall format
-                transformedToolCalls.push({
-                    id,
-                    type: 'function',
-                    function: {
-                        name: toolCall.function.name,
-                        arguments: processedArguments
-                    }
-                });
-            });
-
-            // Add transformed tool calls to response
-            chatResponse.tool_calls = transformedToolCalls;
-            log.info(`Transformed ${transformedToolCalls.length} tool calls for execution`);
-            log.info(`Tool calls after transformation: ${JSON.stringify(chatResponse.tool_calls)}`);
-
-            // Ensure tool_calls is properly exposed and formatted
-            // This is to make sure the pipeline can detect and execute the tools
-            if (transformedToolCalls.length > 0) {
-                // Make sure the tool_calls are exposed in the exact format expected by pipeline
-                chatResponse.tool_calls = transformedToolCalls.map(tc => ({
-                    id: tc.id,
-                    type: 'function',
-                    function: {
-                        name: tc.function.name,
-                        arguments: tc.function.arguments
-                    }
-                }));
-
-                // If the content is empty, use a placeholder to avoid issues
-                if (!chatResponse.text) {
-                    chatResponse.text = "Processing your request...";
-                }
-
-                log.info(`Final tool_calls format for pipeline: ${JSON.stringify(chatResponse.tool_calls)}`);
-            }
-            log.info(`========== END OLLAMA TOOL CALLS ==========`);
-        } else {
-            log.info(`========== NO OLLAMA TOOL CALLS DETECTED ==========`);
-            log.info(`Checking raw message response format: ${JSON.stringify(data.message)}`);
-
-            // Attempt to analyze the response to see if it contains tool call intent
-            const responseText = data.message.content || '';
-            if (responseText.includes('search_notes') ||
-                responseText.includes('create_note') ||
-                responseText.includes('function') ||
-                responseText.includes('tool')) {
-                log.info(`Response may contain tool call intent but isn't formatted properly`);
-                log.info(`Content that might indicate tool call intent: ${responseText.substring(0, 500)}`);
-            }
-        }
-
-        log.info(`========== END OLLAMA RESPONSE ==========`);
-        return chatResponse;
+        // Get client instance
+        const client = this.getClient();
+
+        // Convert our message format to Ollama's format
+        const convertedMessages = messagesToSend.map(msg => {
+            const converted: any = {
+                role: msg.role,
+                content: msg.content
+            };
+
+            if (msg.tool_calls) {
+                converted.tool_calls = msg.tool_calls.map(tc => {
+                    // For Ollama, arguments must be an object, not a string
+                    let processedArgs = tc.function.arguments;
+
+                    // If arguments is a string, try to parse it as JSON
+                    if (typeof processedArgs === 'string') {
+                        try {
+                            processedArgs = JSON.parse(processedArgs);
+                        } catch (e) {
+                            // If parsing fails, create an object with a single property
+                            log.info(`Could not parse tool arguments as JSON: ${e}`);
+                            processedArgs = { raw: processedArgs };
+                        }
+                    }
+
+                    return {
+                        id: tc.id,
+                        function: {
+                            name: tc.function.name,
+                            arguments: processedArgs
+                        }
+                    };
+                });
+            }
+
+            if (msg.tool_call_id) {
+                converted.tool_call_id = msg.tool_call_id;
+            }
+
+            if (msg.name) {
+                converted.name = msg.name;
+            }
+
+            return converted;
+        });
+
+        // Prepare base request options
+        const baseRequestOptions = {
+            model: providerOptions.model,
+            messages: convertedMessages,
+            options: providerOptions.options,
+            // Add tools if available
+            tools: tools.length > 0 ? tools : undefined
+        };
+
+        // Handle streaming
+        if (opts.streamCallback) {
+            let responseText = '';
+            let responseToolCalls: any[] = [];
+
+            log.info(`Using streaming mode with Ollama client`);
+            let streamResponse: OllamaChatResponse | null = null;
+
+            // Create streaming request
+            const streamingRequest = {
+                ...baseRequestOptions,
+                stream: true as const // Use const assertion to fix the type
+            };
+
+            // Get the async iterator
+            const streamIterator = await client.chat(streamingRequest);
+
+            // Process each chunk
+            for await (const chunk of streamIterator) {
+                // Save the last chunk for final stats
+                streamResponse = chunk;
+
+                // Accumulate text
+                if (chunk.message?.content) {
+                    responseText += chunk.message.content;
+                }
+
+                // Check for tool calls
+                if (chunk.message?.tool_calls && chunk.message.tool_calls.length > 0) {
+                    responseToolCalls = [...chunk.message.tool_calls];
+                }
+
+                // Call the callback with the current chunk content
+                if (opts.streamCallback) {
+                    // Original callback expects text content, isDone flag, and optional original chunk
+                    opts.streamCallback(
+                        chunk.message?.content || '',
+                        !!chunk.done,
+                        chunk
+                    );
+                }
+            }
+
+            // Create the final response after streaming is complete
+            return {
+                text: responseText,
+                model: providerOptions.model,
+                provider: this.getName(),
+                tool_calls: this.transformToolCalls(responseToolCalls),
+                usage: {
+                    promptTokens: streamResponse?.prompt_eval_count || 0,
+                    completionTokens: streamResponse?.eval_count || 0,
+                    totalTokens: (streamResponse?.prompt_eval_count || 0) + (streamResponse?.eval_count || 0)
+                }
+            };
+        } else {
+            // Non-streaming request
+            log.info(`Using non-streaming mode with Ollama client`);
+
+            // Create non-streaming request
+            const nonStreamingRequest = {
+                ...baseRequestOptions,
+                stream: false as const // Use const assertion for type safety
+            };
+
+            const response = await client.chat(nonStreamingRequest);
+
+            // Log response details
+            log.info(`========== OLLAMA API RESPONSE ==========`);
+            log.info(`Model: ${response.model}, Content length: ${response.message?.content?.length || 0} chars`);
+            log.info(`Tokens: ${response.prompt_eval_count || 0} prompt, ${response.eval_count || 0} completion, ${(response.prompt_eval_count || 0) + (response.eval_count || 0)} total`);
+
+            // Log content preview
+            const contentPreview = response.message?.content && response.message.content.length > 300
+                ? `${response.message.content.substring(0, 300)}...`
+                : response.message?.content || '';
+            log.info(`Response content: ${contentPreview}`);
+
+            // Handle the response and extract tool calls if present
+            const chatResponse: ChatResponse = {
+                text: response.message?.content || '',
+                model: response.model || providerOptions.model,
+                provider: this.getName(),
+                usage: {
+                    promptTokens: response.prompt_eval_count || 0,
+                    completionTokens: response.eval_count || 0,
+                    totalTokens: (response.prompt_eval_count || 0) + (response.eval_count || 0)
+                }
+            };
+
+            // Add tool calls if present
+            if (response.message?.tool_calls && response.message.tool_calls.length > 0) {
+                log.info(`Ollama response includes ${response.message.tool_calls.length} tool calls`);
+                chatResponse.tool_calls = this.transformToolCalls(response.message.tool_calls);
+                log.info(`Transformed tool calls: ${JSON.stringify(chatResponse.tool_calls)}`);
+            }
+
+            log.info(`========== END OLLAMA RESPONSE ==========`);
+            return chatResponse;
+        }
     } catch (error: any) {
         // Enhanced error handling with detailed diagnostics
         log.error(`Ollama service error: ${error.message || String(error)}`);
@@ -447,40 +310,45 @@ export class OllamaService extends BaseAIService {
             log.error(`Error stack trace: ${error.stack}`);
         }

-        if (error.message && error.message.includes('Cannot read properties of null')) {
-            log.error('Tool registry connection issue detected. Tool may not be properly registered or available.');
-            log.error('Check tool registry initialization and tool availability before execution.');
-        }
-
         // Propagate the original error
         throw error;
     }
 }

 /**
- * Gets the context window size in tokens for a given model
- * @param modelName The name of the model
- * @returns The context window size in tokens
+ * Transform Ollama tool calls to the standard format expected by the pipeline
  */
-private async getModelContextWindowTokens(modelName: string): Promise<number> {
-    try {
-        // Import model capabilities service
-        const modelCapabilitiesService = (await import('../model_capabilities_service.js')).default;
-
-        // Get model capabilities
-        const modelCapabilities = await modelCapabilitiesService.getModelCapabilities(modelName);
-
-        // Get context window tokens with a default fallback
-        const contextWindowTokens = modelCapabilities.contextWindowTokens || 8192;
-
-        log.info(`Using context window size for ${modelName}: ${contextWindowTokens} tokens`);
-        return contextWindowTokens;
-    } catch (error: any) {
-        // Log error but provide a reasonable default
-        log.error(`Error getting model context window: ${error.message}`);
-        return 8192; // Default to 8192 tokens if there's an error
+private transformToolCalls(toolCalls: any[] | undefined): ToolCall[] {
+    if (!toolCalls || !Array.isArray(toolCalls) || toolCalls.length === 0) {
+        return [];
     }
+
+    return toolCalls.map((toolCall, index) => {
+        // Generate a unique ID if none is provided
+        const id = toolCall.id || `tool-call-${Date.now()}-${index}`;
+
+        // Handle arguments based on their type
+        let processedArguments: Record<string, any> | string = toolCall.function?.arguments || {};
+
+        if (typeof processedArguments === 'string') {
+            try {
+                processedArguments = JSON.parse(processedArguments);
+            } catch (error) {
+                // If we can't parse as JSON, create a simple object
+                log.info(`Could not parse tool arguments as JSON in transformToolCalls: ${error}`);
+                processedArguments = { raw: processedArguments };
+            }
+        }
+
+        return {
+            id,
+            type: 'function',
+            function: {
+                name: toolCall.function?.name || '',
+                arguments: processedArguments
+            }
+        };
+    });
 }

 /**
@@ -526,27 +394,3 @@ export class OllamaService extends BaseAIService {
         return updatedMessages;
     }
 }
-
-/**
- * Simple utility to extract a value from text based on a parameter name
- * @param text The text to search in
- * @param param The parameter name to look for
- * @returns Extracted value or default
- */
-function extractValueFromText(text: string, param: string): string {
-    // Simple regex to find "param: value" or "param = value" or "param value" patterns
-    const patterns = [
-        new RegExp(`${param}[\\s]*:[\\s]*["']?([^"',\\s]+)["']?`, 'i'),
-        new RegExp(`${param}[\\s]*=[\\s]*["']?([^"',\\s]+)["']?`, 'i'),
-        new RegExp(`${param}[\\s]+["']?([^"',\\s]+)["']?`, 'i')
-    ];
-
-    for (const pattern of patterns) {
-        const match = text.match(pattern);
-        if (match && match[1]) {
-            return match[1];
-        }
-    }
-
-    return "default_value";
-}
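
The streaming path above hinges on the SDK returning an async iterator when stream: true is passed. A minimal sketch of that pattern (host and model are assumptions):

import { Ollama } from 'ollama';

const client = new Ollama({ host: 'http://localhost:11434' });

// With stream: true, client.chat() yields chunks instead of a single response
const stream = await client.chat({
    model: 'llama3',
    messages: [{ role: 'user', content: 'Hello' }],
    stream: true
});

let text = '';
for await (const chunk of stream) {
    text += chunk.message?.content ?? '';
    if (chunk.done) {
        // Token counts arrive on the final chunk, as the usage block above assumes
        console.log(`prompt=${chunk.prompt_eval_count}, completion=${chunk.eval_count}`);
    }
}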

View File

@@ -566,24 +566,28 @@ export async function getOllamaOptions(
 }

 /**
- * Get context window size for Ollama model
+ * Get context window size for Ollama model using the official client
  */
 async function getOllamaModelContextWindow(modelName: string): Promise<number> {
     try {
         const baseUrl = options.getOption('ollamaBaseUrl');
+        if (!baseUrl) {
+            throw new Error('Ollama base URL is not configured');
+        }
+
+        // Use the official Ollama client
+        const { Ollama } = await import('ollama');
+        const client = new Ollama({ host: baseUrl });

         // Try to get model information from Ollama API
-        const response = await fetch(`${baseUrl}/api/show`, {
-            method: 'POST',
-            headers: { 'Content-Type': 'application/json' },
-            body: JSON.stringify({ name: modelName })
-        });
+        const modelData = await client.show({ model: modelName });

-        if (response.ok) {
-            const data = await response.json();
-            // Get context window from model parameters
-            if (data && data.parameters && data.parameters.num_ctx) {
-                return data.parameters.num_ctx;
+        // Get context window from model parameters
+        if (modelData && modelData.parameters) {
+            const params = modelData.parameters as any;
+            if (params.num_ctx) {
+                return params.num_ctx;
             }
         }
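
One caveat worth hedging: in the 0.5.x SDK typings, show() appears to return parameters as a newline-delimited string rather than an object, which may be why the code above casts it to any. A defensive variant, assuming string-shaped parameters (host and model are placeholders):

import { Ollama } from 'ollama';

const client = new Ollama({ host: 'http://localhost:11434' });
const modelData = await client.show({ model: 'llama3' });

// Parse "num_ctx 8192" style lines; fall back to 8192 as the provider code does elsewhere
const match = String(modelData.parameters ?? '').match(/num_ctx\s+(\d+)/);
const contextWindow = match ? Number(match[1]) : 8192;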