format decomp files

perf3ct 2025-04-17 17:50:16 +00:00
parent 19c36b18a6
commit d83cce88cb
No known key found for this signature in database
GPG Key ID: 569C4EEC436F5232
4 changed files with 400 additions and 321 deletions

View File

@@ -100,7 +100,8 @@ Format your answer as a valid JSON array without markdown code blocks, like this
maxTokens: SEARCH_CONSTANTS.LIMITS.QUERY_PROCESSOR_MAX_TOKENS,
bypassFormatter: true,
expectsJsonResponse: true,
_bypassContextProcessing: true // Prevent recursive calls
_bypassContextProcessing: true, // Prevent recursive calls
enableTools: false // Explicitly disable tools for this request
};
// Get the response from the LLM
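The added `enableTools: false` flag keeps this internal decomposition request from triggering tool calls, complementing `_bypassContextProcessing`, which prevents recursion. A minimal sketch of how such an options object is typically consumed, assuming `service` implements the `LLMServiceInterface` shown later in this diff and `messages` holds the decomposition prompt:

// Sketch only: the actual call site is outside this hunk.
const response = await service.generateChatCompletion(messages, {
    maxTokens: SEARCH_CONSTANTS.LIMITS.QUERY_PROCESSOR_MAX_TOKENS,
    bypassFormatter: true,
    expectsJsonResponse: true,
    _bypassContextProcessing: true, // prevent recursive calls
    enableTools: false // explicitly disable tools for this request
} as any); // the extra internal flags are not part of the interface's option type
const responseText = response.text; // assumed shape of ChatResponse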
@@ -167,69 +168,33 @@ Format your answer as a valid JSON array without markdown code blocks, like this
// Try to get LLM service if not provided
const service = llmService || await this.getLLMService();
// For when no LLM service is available, use the basic approach
// If no LLM service is available, use basic decomposition
if (!service) {
if (!service) {
log.info(`No LLM service available for query decomposition, using original query`);
}
log.info(`Using basic decomposition approach (complexity: ${complexity})`);
const mainSubQuery = {
id: this.generateSubQueryId(),
text: query,
reason: "Direct question that can be answered without decomposition",
isAnswered: false
};
// Add a generic exploration query for context
const genericQuery = {
id: this.generateSubQueryId(),
text: `What information is related to ${query}?`,
reason: "General exploration to find related content",
isAnswered: false
};
return {
originalQuery: query,
subQueries: [mainSubQuery, genericQuery],
status: 'pending',
complexity
};
log.info(`No LLM service available for query decomposition, using original query`);
return this.createBasicDecomposition(query, complexity);
}
// When an LLM service is available, we can use more advanced decomposition
if (service) {
try {
// Try to use LLM for advanced decomposition
log.info(`Using advanced LLM-based decomposition for complex query (complexity: ${complexity})`);
const enhancedSubQueries = await this.createLLMSubQueries(query, context, service);
// With LLM service available, always use advanced decomposition regardless of complexity
try {
log.info(`Using advanced LLM-based decomposition for query (complexity: ${complexity})`);
const enhancedSubQueries = await this.createLLMSubQueries(query, context, service);
if (enhancedSubQueries && enhancedSubQueries.length > 0) {
log.info(`LLM decomposed query into ${enhancedSubQueries.length} sub-queries: ${JSON.stringify(enhancedSubQueries)}`);
return {
originalQuery: query,
subQueries: enhancedSubQueries,
status: 'pending',
complexity
};
}
} catch (error: any) {
log.error(`Error during LLM-based decomposition: ${error.message}, falling back to basic decomposition`);
// Continue to fallback with basic decomposition
if (enhancedSubQueries && enhancedSubQueries.length > 0) {
log.info(`LLM decomposed query into ${enhancedSubQueries.length} sub-queries`);
return {
originalQuery: query,
subQueries: enhancedSubQueries,
status: 'pending',
complexity
};
}
} catch (error: any) {
log.error(`Error during LLM-based decomposition: ${error.message}, falling back to basic decomposition`);
// Fall through to basic decomposition
}
// Fallback to basic decomposition
const subQueries = this.createSubQueries(query, context);
log.info(`Decomposed query into ${subQueries.length} sub-queries`);
return {
originalQuery: query,
subQueries,
status: 'pending',
complexity
};
return this.createBasicDecomposition(query, complexity);
} catch (error: any) {
log.error(`Error decomposing query: ${error.message}`);
@@ -248,6 +213,39 @@ Format your answer as a valid JSON array without markdown code blocks, like this
}
}
/**
* Create a basic decomposition of a query without using LLM
*
* @param query The original query
* @param complexity The assessed complexity
* @returns A basic decomposed query
*/
private createBasicDecomposition(query: string, complexity: number): DecomposedQuery {
log.info(`Using basic decomposition approach (complexity: ${complexity})`);
const mainSubQuery = {
id: this.generateSubQueryId(),
text: query,
reason: "Direct question that can be answered without decomposition",
isAnswered: false
};
// Add a generic exploration query for context
const genericQuery = {
id: this.generateSubQueryId(),
text: `What information is related to ${query}?`,
reason: "General exploration to find related content",
isAnswered: false
};
return {
originalQuery: query,
subQueries: [mainSubQuery, genericQuery],
status: 'pending',
complexity
};
}
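For a query such as "Docker and Kubernetes differences", createBasicDecomposition returns a structure along these lines (sub-query ids are illustrative, real values come from generateSubQueryId(), and complexity is whatever score was passed in):

const example: DecomposedQuery = {
    originalQuery: "Docker and Kubernetes differences",
    subQueries: [
        {
            id: "sq-1", // illustrative id
            text: "Docker and Kubernetes differences",
            reason: "Direct question that can be answered without decomposition",
            isAnswered: false
        },
        {
            id: "sq-2", // illustrative id
            text: "What information is related to Docker and Kubernetes differences?",
            reason: "General exploration to find related content",
            isAnswered: false
        }
    ],
    status: 'pending',
    complexity: 3 // example value
};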
/**
* Use LLM to create advanced sub-queries from a complex query
*
@@ -267,25 +265,42 @@ Format your answer as a valid JSON array without markdown code blocks, like this
}
try {
// Build a prompt from existing templates in the constants
const contextPart = context ? `\nContext: ${context}` : '';
// Create a much better prompt for more effective query decomposition
const prompt = `Decompose the following query into 3-5 specific search queries that would help find comprehensive information.
// Use existing templates from QUERY_DECOMPOSITION_STRINGS to build the prompt
const prompt = `I need to break down a complex query into sub-queries.
Query: ${query}${contextPart}
Your task is to identify the main concepts and break them down into specific, targeted search queries.
Please analyze this query and identify the key aspects that need to be addressed.`;
DO NOT simply rephrase the original query or create a generic "what's related to X" pattern.
DO create specific queries that explore different aspects of the topic.
For example:
If the query is "How does Docker compare to Kubernetes?", good sub-queries would be:
- "Docker container architecture and features"
- "Kubernetes container orchestration capabilities"
- "Docker vs Kubernetes performance comparison"
- "When to use Docker versus Kubernetes"
Format your response as a JSON array of objects with 'text' and 'reason' properties.
Example: [
{"text": "Docker container architecture", "reason": "Understanding Docker's core technology"},
{"text": "Kubernetes orchestration features", "reason": "Exploring Kubernetes' main capabilities"}
]
${context ? `\nContext: ${context}` : ''}
Query: ${query}`;
const messages = [
{ role: "system" as const, content: prompt }
];
const options = {
temperature: SEARCH_CONSTANTS.TEMPERATURE.QUERY_PROCESSOR,
temperature: 0.7, // Higher temperature for more creative decomposition
maxTokens: SEARCH_CONSTANTS.LIMITS.QUERY_PROCESSOR_MAX_TOKENS,
bypassFormatter: true,
expectsJsonResponse: true,
_bypassContextProcessing: true // Prevent recursive calls
_bypassContextProcessing: true, // Prevent recursive calls
enableTools: false // Explicitly disable tools for this request
};
// Get the response from the LLM
@@ -300,6 +315,11 @@ Please analyze this query and identify the key aspects that need to be addressed
reason?: string;
}
// Log the response for debugging
log.info(`Received response from LLM for query decomposition, extracting JSON...`);
log.info(`Response: ${responseText}`);
// Extract JSON from the response
const extractedData = JsonExtractor.extract<RawSubQuery[]>(responseText, {
extractArrays: true,
@@ -307,15 +327,74 @@ Please analyze this query and identify the key aspects that need to be addressed
useFallbacks: true
});
if (Array.isArray(extractedData) && extractedData.length > 0) {
// Convert the raw data to SubQuery objects
return extractedData.map(item => ({
id: this.generateSubQueryId(),
text: item.text,
reason: item.reason || "Sub-aspect of the main question",
isAnswered: false
}));
// Validate the extracted data
if (!Array.isArray(extractedData)) {
log.error(`Failed to extract array from LLM response, got: ${typeof extractedData}`);
return this.createSubQueries(query, context);
}
if (extractedData.length === 0) {
log.error(`Extracted array is empty, falling back to basic decomposition`);
return this.createSubQueries(query, context);
}
log.info(`Successfully extracted ${extractedData.length} items using regex pattern`);
// Validate each sub-query to ensure it has a text property
const validSubQueries = extractedData.filter(item => {
if (!item || typeof item !== 'object') {
log.error(`Invalid sub-query item: ${JSON.stringify(item)}`);
return false;
}
if (!item.text || typeof item.text !== 'string') {
log.error(`Sub-query missing text property: ${JSON.stringify(item)}`);
return false;
}
return true;
});
if (validSubQueries.length === 0) {
log.error(`No valid sub-queries found after validation, falling back to basic decomposition`);
return this.createSubQueries(query, context);
}
if (validSubQueries.length < extractedData.length) {
log.info(`Some invalid sub-queries were filtered out: ${extractedData.length} -> ${validSubQueries.length}`);
}
// Convert the raw data to SubQuery objects
let subQueries = validSubQueries.map(item => ({
id: this.generateSubQueryId(),
text: item.text,
reason: item.reason || "Sub-aspect of the main question",
isAnswered: false
}));
// Make sure we have at least the original query
const hasOriginalQuery = subQueries.some(sq => {
// Check if either sq.text or query is null/undefined before using toLowerCase
if (!sq.text) return false;
const sqText = sq.text.toLowerCase();
const originalQuery = query.toLowerCase();
return sqText.includes(originalQuery) || originalQuery.includes(sqText);
});
if (!hasOriginalQuery) {
subQueries.unshift({
id: this.generateSubQueryId(),
text: query,
reason: "Original query",
isAnswered: false
});
}
// Log the extracted sub-queries for debugging
log.info(`Successfully extracted ${subQueries.length} sub-queries from LLM response`);
return subQueries;
} catch (error: any) {
log.error(`Error extracting sub-queries from LLM response: ${error.message}`);
// Fall through to traditional decomposition
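To make the new validation and fallback path concrete: if the LLM returns the JSON array from the prompt's own example, extraction, filtering, and mapping produce sub-queries like the ones sketched below, with the original query prepended because no item contains it verbatim (ids illustrative):

// Hypothetical LLM response text:
// [
//   {"text": "Docker container architecture", "reason": "Understanding Docker's core technology"},
//   {"text": "Kubernetes orchestration features", "reason": "Exploring Kubernetes' main capabilities"}
// ]
// Resulting sub-queries after validation and the hasOriginalQuery check:
// [
//   { id: "sq-1", text: "How does Docker compare to Kubernetes?", reason: "Original query", isAnswered: false },
//   { id: "sq-2", text: "Docker container architecture", reason: "Understanding Docker's core technology", isAnswered: false },
//   { id: "sq-3", text: "Kubernetes orchestration features", reason: "Exploring Kubernetes' main capabilities", isAnswered: false }
// ]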

View File

@@ -9,139 +9,139 @@ import type BAttribute from '../../../becca/entities/battribute.js';
* Interface for the AI service used by agent tools
*/
export interface LLMServiceInterface {
generateChatCompletion(messages: Array<{
role: 'user' | 'assistant' | 'system';
content: string;
}>, options?: {
temperature?: number;
maxTokens?: number;
model?: string;
stream?: boolean;
systemPrompt?: string;
}): Promise<ChatResponse>;
generateChatCompletion(messages: Array<{
role: 'user' | 'assistant' | 'system';
content: string;
}>, options?: {
temperature?: number;
maxTokens?: number;
model?: string;
stream?: boolean;
systemPrompt?: string;
}): Promise<ChatResponse>;
}
/**
* Interface for tool initialization
*/
export interface AgentToolInitializationParams {
aiServiceManager: LLMServiceInterface;
aiServiceManager: LLMServiceInterface;
}
/**
* Interface for agent tool manager
*/
export interface IAgentToolsManager {
initialize(aiServiceManager: LLMServiceInterface): Promise<void>;
isInitialized(): boolean;
getAllTools(): {
vectorSearch: IVectorSearchTool;
noteNavigator: INoteNavigatorTool;
queryDecomposition: IQueryDecompositionTool;
contextualThinking: IContextualThinkingTool;
};
getVectorSearchTool(): IVectorSearchTool;
getNoteNavigatorTool(): INoteNavigatorTool;
getQueryDecompositionTool(): IQueryDecompositionTool;
getContextualThinkingTool(): IContextualThinkingTool;
initialize(aiServiceManager: LLMServiceInterface): Promise<void>;
isInitialized(): boolean;
getAllTools(): {
vectorSearch: IVectorSearchTool;
noteNavigator: INoteNavigatorTool;
queryDecomposition: IQueryDecompositionTool;
contextualThinking: IContextualThinkingTool;
};
getVectorSearchTool(): IVectorSearchTool;
getNoteNavigatorTool(): INoteNavigatorTool;
getQueryDecompositionTool(): IQueryDecompositionTool;
getContextualThinkingTool(): IContextualThinkingTool;
}
/**
* Interface for context service used by vector search
*/
export interface IContextService {
findRelevantNotesMultiQuery(queries: string[], contextNoteId: string | null, limit: number): Promise<VectorSearchResult[]>;
processQuery(userQuestion: string, llmService: LLMServiceInterface, contextNoteId: string | null, showThinking: boolean): Promise<{
context: string;
sources: Array<{
noteId: string;
title: string;
similarity: number;
findRelevantNotesMultiQuery(queries: string[], contextNoteId: string | null, limit: number): Promise<VectorSearchResult[]>;
processQuery(userQuestion: string, llmService: LLMServiceInterface, contextNoteId: string | null, showThinking: boolean): Promise<{
context: string;
sources: Array<{
noteId: string;
title: string;
similarity: number;
}>;
thinking?: string;
}>;
thinking?: string;
}>;
}
/**
* Interface for vector search tool
*/
export interface IVectorSearchTool {
setContextService(contextService: IContextService): void;
search(
query: string,
contextNoteId?: string,
searchOptions?: {
limit?: number;
threshold?: number;
includeContent?: boolean;
}
): Promise<VectorSearchResult[]>;
searchNotes(query: string, options?: {
parentNoteId?: string;
maxResults?: number;
similarityThreshold?: number;
}): Promise<VectorSearchResult[]>;
searchContentChunks(query: string, options?: {
noteId?: string;
maxResults?: number;
similarityThreshold?: number;
}): Promise<VectorSearchResult[]>;
explainResults(query: string, results: VectorSearchResult[]): string;
setContextService(contextService: IContextService): void;
search(
query: string,
contextNoteId?: string,
searchOptions?: {
limit?: number;
threshold?: number;
includeContent?: boolean;
}
): Promise<VectorSearchResult[]>;
searchNotes(query: string, options?: {
parentNoteId?: string;
maxResults?: number;
similarityThreshold?: number;
}): Promise<VectorSearchResult[]>;
searchContentChunks(query: string, options?: {
noteId?: string;
maxResults?: number;
similarityThreshold?: number;
}): Promise<VectorSearchResult[]>;
explainResults(query: string, results: VectorSearchResult[]): string;
}
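A usage sketch for the vector search interface above; the query, result limit, and threshold are placeholder values, and no result fields beyond what explainResults consumes are assumed:

// Sketch, assuming `vectorSearch` implements IVectorSearchTool.
const query = "kubernetes deployment strategies";
const results = await vectorSearch.searchNotes(query, {
    maxResults: 5,
    similarityThreshold: 0.65
});
console.log(vectorSearch.explainResults(query, results));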
/**
* Interface for note navigator tool
*/
export interface INoteNavigatorTool {
getNoteInfo(noteId: string): NoteInfo | null;
getNotePathsFromRoot(noteId: string): NotePathInfo[];
getNoteHierarchy(noteId: string, depth?: number): NoteHierarchyLevel | null;
getNoteAttributes(noteId: string): BAttribute[];
findPathBetweenNotes(fromNoteId: string, toNoteId: string): NotePathInfo | null;
searchNotesByTitle(searchTerm: string, limit?: number): NoteInfo[];
getNoteClones(noteId: string): Promise<NoteInfo[]>;
getNoteContextDescription(noteId: string): Promise<string>;
getNoteStructure(noteId: string): Promise<{
noteId: string;
title: string;
type: string;
childCount: number;
attributes: Array<{name: string, value: string}>;
parentPath: Array<{title: string, noteId: string}>;
}>;
getChildNotes(noteId: string, limit?: number): Promise<Array<{noteId: string, title: string}>>;
getParentNotes(noteId: string): Promise<Array<{noteId: string, title: string}>>;
getLinkedNotes(noteId: string, limit?: number): Promise<Array<{noteId: string, title: string, direction: 'from'|'to'}>>;
getNotePath(noteId: string): Promise<string>;
getNoteInfo(noteId: string): NoteInfo | null;
getNotePathsFromRoot(noteId: string): NotePathInfo[];
getNoteHierarchy(noteId: string, depth?: number): NoteHierarchyLevel | null;
getNoteAttributes(noteId: string): BAttribute[];
findPathBetweenNotes(fromNoteId: string, toNoteId: string): NotePathInfo | null;
searchNotesByTitle(searchTerm: string, limit?: number): NoteInfo[];
getNoteClones(noteId: string): Promise<NoteInfo[]>;
getNoteContextDescription(noteId: string): Promise<string>;
getNoteStructure(noteId: string): Promise<{
noteId: string;
title: string;
type: string;
childCount: number;
attributes: Array<{ name: string, value: string }>;
parentPath: Array<{ title: string, noteId: string }>;
}>;
getChildNotes(noteId: string, limit?: number): Promise<Array<{ noteId: string, title: string }>>;
getParentNotes(noteId: string): Promise<Array<{ noteId: string, title: string }>>;
getLinkedNotes(noteId: string, limit?: number): Promise<Array<{ noteId: string, title: string, direction: 'from' | 'to' }>>;
getNotePath(noteId: string): Promise<string>;
}
/**
* Interface for query decomposition tool
*/
export interface IQueryDecompositionTool {
decomposeQuery(query: string, context?: string): DecomposedQuery;
updateSubQueryAnswer(decomposedQuery: DecomposedQuery, subQueryId: string, answer: string): DecomposedQuery;
synthesizeAnswer(decomposedQuery: DecomposedQuery): string;
getQueryStatus(decomposedQuery: DecomposedQuery): string;
assessQueryComplexity(query: string): number;
generateSubQueryId(): string;
createSubQueries(query: string, context?: string): SubQuery[];
decomposeQuery(query: string, context?: string): DecomposedQuery;
updateSubQueryAnswer(decomposedQuery: DecomposedQuery, subQueryId: string, answer: string): DecomposedQuery;
synthesizeAnswer(decomposedQuery: DecomposedQuery): string;
getQueryStatus(decomposedQuery: DecomposedQuery): string;
assessQueryComplexity(query: string): number;
generateSubQueryId(): string;
createSubQueries(query: string, context?: string): SubQuery[];
}
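A usage sketch for the decomposition interface above, assuming `toolManager` implements IAgentToolsManager and has been initialized; the answers recorded here are placeholders:

const decomposer = toolManager.getQueryDecompositionTool();
// Note: this interface declares a synchronous return; if the concrete tool returns a Promise, add `await`.
let decomposed = decomposer.decomposeQuery("How does Docker compare to Kubernetes?");
for (const sq of decomposed.subQueries) {
    // ...answer each sub-query (e.g. via vector search), then record it...
    decomposed = decomposer.updateSubQueryAnswer(decomposed, sq.id, `placeholder answer for "${sq.text}"`);
}
console.log(decomposer.getQueryStatus(decomposed));
console.log(decomposer.synthesizeAnswer(decomposed));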
/**
* Interface for contextual thinking tool
*/
export interface IContextualThinkingTool {
startThinking(query: string): string;
addThinkingStep(
processId: string,
step: Omit<ThinkingStep, 'id'>,
parentId?: string
): string;
completeThinking(processId?: string): ThinkingProcess | null;
getThinkingProcess(processId: string): ThinkingProcess | null;
getActiveThinkingProcess(): ThinkingProcess | null;
visualizeThinking(thinkingId: string): string;
getThinkingSummary(thinkingId: string): string;
resetActiveThinking(): void;
startThinking(query: string): string;
addThinkingStep(
processId: string,
step: Omit<ThinkingStep, 'id'>,
parentId?: string
): string;
completeThinking(processId?: string): ThinkingProcess | null;
getThinkingProcess(processId: string): ThinkingProcess | null;
getActiveThinkingProcess(): ThinkingProcess | null;
visualizeThinking(thinkingId: string): string;
getThinkingSummary(thinkingId: string): string;
resetActiveThinking(): void;
}

View File

@@ -5,19 +5,19 @@ import type { ToolCall } from '../tools/tool_interfaces.js';
* Model metadata interface to track provider information
*/
export interface ModelMetadata {
// The provider that supports this model
provider: 'openai' | 'anthropic' | 'ollama' | 'local';
// The actual model identifier used by the provider's API
modelId: string;
// Display name for UI (optional)
displayName?: string;
// Model capabilities
capabilities?: {
contextWindow?: number;
supportsTools?: boolean;
supportsVision?: boolean;
supportsStreaming?: boolean;
};
// The provider that supports this model
provider: 'openai' | 'anthropic' | 'ollama' | 'local';
// The actual model identifier used by the provider's API
modelId: string;
// Display name for UI (optional)
displayName?: string;
// Model capabilities
capabilities?: {
contextWindow?: number;
supportsTools?: boolean;
supportsVision?: boolean;
supportsStreaming?: boolean;
};
}
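An illustrative ModelMetadata value; the model id matches the Anthropic naming used elsewhere in this diff, and the capability flags are example values only:

const metadata: ModelMetadata = {
    provider: 'anthropic',
    modelId: 'claude-3-opus-20240229', // example identifier
    displayName: 'Claude 3 Opus',
    capabilities: {
        contextWindow: 200000, // matches the claude-3-opus value used in getAnthropicOptions below
        supportsTools: true,   // example flag
        supportsVision: true,  // example flag
        supportsStreaming: true
    }
};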
/**
@@ -25,196 +25,196 @@ export interface ModelMetadata {
* but not necessarily sent directly to APIs
*/
export interface ProviderConfig {
// Internal configuration
systemPrompt?: string;
// Provider metadata for model routing
providerMetadata?: ModelMetadata;
// Internal configuration
systemPrompt?: string;
// Provider metadata for model routing
providerMetadata?: ModelMetadata;
}
/**
* OpenAI-specific options, structured to match the OpenAI API
*/
export interface OpenAIOptions extends ProviderConfig {
// Connection settings (not sent to API)
apiKey: string;
baseUrl: string;
// Connection settings (not sent to API)
apiKey: string;
baseUrl: string;
// Direct API parameters as they appear in requests
model: string;
messages?: Message[];
temperature?: number;
max_tokens?: number;
stream?: boolean;
top_p?: number;
frequency_penalty?: number;
presence_penalty?: number;
tools?: any[];
tool_choice?: string | object;
// Direct API parameters as they appear in requests
model: string;
messages?: Message[];
temperature?: number;
max_tokens?: number;
stream?: boolean;
top_p?: number;
frequency_penalty?: number;
presence_penalty?: number;
tools?: any[];
tool_choice?: string | object;
// Internal control flags (not sent directly to API)
enableTools?: boolean;
// Streaming callback handler
streamCallback?: (text: string, isDone: boolean, originalChunk?: any) => Promise<void> | void;
// Internal control flags (not sent directly to API)
enableTools?: boolean;
// Streaming callback handler
streamCallback?: (text: string, isDone: boolean, originalChunk?: any) => Promise<void> | void;
}
/**
* Anthropic-specific options, structured to match the Anthropic API
*/
export interface AnthropicOptions extends ProviderConfig {
// Connection settings (not sent to API)
apiKey: string;
baseUrl: string;
apiVersion?: string;
betaVersion?: string;
// Connection settings (not sent to API)
apiKey: string;
baseUrl: string;
apiVersion?: string;
betaVersion?: string;
// Direct API parameters as they appear in requests
model: string;
messages?: any[];
system?: string;
temperature?: number;
max_tokens?: number;
stream?: boolean;
top_p?: number;
// Direct API parameters as they appear in requests
model: string;
messages?: any[];
system?: string;
temperature?: number;
max_tokens?: number;
stream?: boolean;
top_p?: number;
// Internal parameters (not sent directly to API)
formattedMessages?: { messages: any[], system: string };
// Streaming callback handler
streamCallback?: (text: string, isDone: boolean, originalChunk?: any) => Promise<void> | void;
// Internal parameters (not sent directly to API)
formattedMessages?: { messages: any[], system: string };
// Streaming callback handler
streamCallback?: (text: string, isDone: boolean, originalChunk?: any) => Promise<void> | void;
}
/**
* Ollama-specific options, structured to match the Ollama API
*/
export interface OllamaOptions extends ProviderConfig {
// Connection settings (not sent to API)
baseUrl: string;
// Connection settings (not sent to API)
baseUrl: string;
// Direct API parameters as they appear in requests
model: string;
messages?: Message[];
stream?: boolean;
options?: {
temperature?: number;
num_ctx?: number;
top_p?: number;
top_k?: number;
num_predict?: number; // equivalent to max_tokens
response_format?: { type: string };
};
tools?: any[];
// Direct API parameters as they appear in requests
model: string;
messages?: Message[];
stream?: boolean;
options?: {
temperature?: number;
num_ctx?: number;
top_p?: number;
top_k?: number;
num_predict?: number; // equivalent to max_tokens
response_format?: { type: string };
};
tools?: any[];
// Internal control flags (not sent directly to API)
enableTools?: boolean;
bypassFormatter?: boolean;
preserveSystemPrompt?: boolean;
expectsJsonResponse?: boolean;
toolExecutionStatus?: any[];
// Streaming callback handler
streamCallback?: (text: string, isDone: boolean, originalChunk?: any) => Promise<void> | void;
// Internal control flags (not sent directly to API)
enableTools?: boolean;
bypassFormatter?: boolean;
preserveSystemPrompt?: boolean;
expectsJsonResponse?: boolean;
toolExecutionStatus?: any[];
// Streaming callback handler
streamCallback?: (text: string, isDone: boolean, originalChunk?: any) => Promise<void> | void;
}
/**
* Create OpenAI options from generic options and config
*/
export function createOpenAIOptions(
opts: ChatCompletionOptions = {},
apiKey: string,
baseUrl: string,
defaultModel: string
opts: ChatCompletionOptions = {},
apiKey: string,
baseUrl: string,
defaultModel: string
): OpenAIOptions {
return {
// Connection settings
apiKey,
baseUrl,
return {
// Connection settings
apiKey,
baseUrl,
// API parameters
model: opts.model || defaultModel,
temperature: opts.temperature,
max_tokens: opts.maxTokens,
stream: opts.stream,
top_p: opts.topP,
frequency_penalty: opts.frequencyPenalty,
presence_penalty: opts.presencePenalty,
tools: opts.tools,
// API parameters
model: opts.model || defaultModel,
temperature: opts.temperature,
max_tokens: opts.maxTokens,
stream: opts.stream,
top_p: opts.topP,
frequency_penalty: opts.frequencyPenalty,
presence_penalty: opts.presencePenalty,
tools: opts.tools,
// Internal configuration
systemPrompt: opts.systemPrompt,
enableTools: opts.enableTools,
// Pass through streaming callback
streamCallback: opts.streamCallback,
// Include provider metadata
providerMetadata: opts.providerMetadata,
};
// Internal configuration
systemPrompt: opts.systemPrompt,
enableTools: opts.enableTools,
// Pass through streaming callback
streamCallback: opts.streamCallback,
// Include provider metadata
providerMetadata: opts.providerMetadata,
};
}
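A usage sketch for the factory above; the API key source, base URL, and model name are placeholders, and the generic ChatCompletionOptions fields shown are the ones the mapping reads:

const openAiOptions = createOpenAIOptions(
    {
        temperature: 0.2,
        maxTokens: 1024,
        stream: true,
        systemPrompt: "You are a helpful assistant.",
        enableTools: false
    },
    process.env.OPENAI_API_KEY ?? "", // placeholder key source
    "https://api.openai.com/v1",      // placeholder base URL
    "gpt-4o"                          // placeholder default model
);
// camelCase inputs become snake_case API parameters (e.g. maxTokens -> max_tokens),
// while apiKey, baseUrl, systemPrompt, and enableTools stay as internal configuration.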
/**
* Create Anthropic options from generic options and config
*/
export function createAnthropicOptions(
opts: ChatCompletionOptions = {},
apiKey: string,
baseUrl: string,
defaultModel: string,
apiVersion: string,
betaVersion: string
opts: ChatCompletionOptions = {},
apiKey: string,
baseUrl: string,
defaultModel: string,
apiVersion: string,
betaVersion: string
): AnthropicOptions {
return {
// Connection settings
apiKey,
baseUrl,
apiVersion,
betaVersion,
return {
// Connection settings
apiKey,
baseUrl,
apiVersion,
betaVersion,
// API parameters
model: opts.model || defaultModel,
temperature: opts.temperature,
max_tokens: opts.maxTokens,
stream: opts.stream,
top_p: opts.topP,
// API parameters
model: opts.model || defaultModel,
temperature: opts.temperature,
max_tokens: opts.maxTokens,
stream: opts.stream,
top_p: opts.topP,
// Internal configuration
systemPrompt: opts.systemPrompt,
// Pass through streaming callback
streamCallback: opts.streamCallback,
// Include provider metadata
providerMetadata: opts.providerMetadata,
};
// Internal configuration
systemPrompt: opts.systemPrompt,
// Pass through streaming callback
streamCallback: opts.streamCallback,
// Include provider metadata
providerMetadata: opts.providerMetadata,
};
}
/**
* Create Ollama options from generic options and config
*/
export function createOllamaOptions(
opts: ChatCompletionOptions = {},
baseUrl: string,
defaultModel: string,
contextWindow: number
opts: ChatCompletionOptions = {},
baseUrl: string,
defaultModel: string,
contextWindow: number
): OllamaOptions {
return {
// Connection settings
baseUrl,
return {
// Connection settings
baseUrl,
// API parameters
model: opts.model || defaultModel,
stream: opts.stream,
options: {
temperature: opts.temperature,
num_ctx: contextWindow,
num_predict: opts.maxTokens,
response_format: opts.expectsJsonResponse ? { type: "json_object" } : undefined
},
tools: opts.tools,
// API parameters
model: opts.model || defaultModel,
stream: opts.stream,
options: {
temperature: opts.temperature,
num_ctx: contextWindow,
num_predict: opts.maxTokens,
response_format: opts.expectsJsonResponse ? { type: "json_object" } : undefined
},
tools: opts.tools,
// Internal configuration
systemPrompt: opts.systemPrompt,
enableTools: opts.enableTools,
bypassFormatter: opts.bypassFormatter,
preserveSystemPrompt: opts.preserveSystemPrompt,
expectsJsonResponse: opts.expectsJsonResponse,
toolExecutionStatus: opts.toolExecutionStatus,
// Pass through streaming callback
streamCallback: opts.streamCallback,
// Include provider metadata
providerMetadata: opts.providerMetadata,
};
// Internal configuration
systemPrompt: opts.systemPrompt,
enableTools: opts.enableTools,
bypassFormatter: opts.bypassFormatter,
preserveSystemPrompt: opts.preserveSystemPrompt,
expectsJsonResponse: opts.expectsJsonResponse,
toolExecutionStatus: opts.toolExecutionStatus,
// Pass through streaming callback
streamCallback: opts.streamCallback,
// Include provider metadata
providerMetadata: opts.providerMetadata,
};
}
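A sketch of the Ollama mapping above, showing how generic options translate into Ollama's nested `options` object; the base URL, model name, and context size are placeholders:

const ollamaOptions = createOllamaOptions(
    { temperature: 0.7, maxTokens: 512, expectsJsonResponse: true },
    "http://localhost:11434", // placeholder base URL
    "llama3",                 // placeholder default model
    8192                      // context window forwarded to options.num_ctx
);
// Resulting shape (abridged):
// {
//   baseUrl: "http://localhost:11434",
//   model: "llama3",
//   options: { temperature: 0.7, num_ctx: 8192, num_predict: 512,
//              response_format: { type: "json_object" } },
//   expectsJsonResponse: true,
//   ...
// }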

View File

@@ -153,7 +153,7 @@ export async function createEmbeddingProviderConfig(
dateCreated, utcDateCreated, dateModified, utcDateModified)
VALUES (?, ?, ?, ?, ?, ?, ?, ?)`,
[providerId, name, priority, JSON.stringify(config),
now, utcNow, now, utcNow]
now, utcNow, now, utcNow]
);
return providerId;
@@ -460,8 +460,8 @@ export function getAnthropicOptions(
supportsStreaming: true,
// Anthropic models typically have large context windows
contextWindow: modelName.includes('claude-3-opus') ? 200000 :
modelName.includes('claude-3-sonnet') ? 180000 :
modelName.includes('claude-3.5-sonnet') ? 200000 : 100000
modelName.includes('claude-3-sonnet') ? 180000 :
modelName.includes('claude-3.5-sonnet') ? 200000 : 100000
}
};
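The context-window ternary above resolves by substring match on the model name; for example (model names illustrative, window sizes copied from the expression):

// 'claude-3-opus-20240229'   -> 200000 (matches 'claude-3-opus')
// 'claude-3-sonnet-20240229' -> 180000 (matches 'claude-3-sonnet')
// 'claude-3.5-sonnet-latest' -> 200000 (matches 'claude-3.5-sonnet' but not 'claude-3-sonnet')
// any other model name       -> 100000 (fallback)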