mirror of https://github.com/TriliumNext/Notes.git
synced 2025-08-18 08:13:40 +08:00
fix hardcoded values part 2
This commit is contained in:
parent 8591705290
commit 0d7cfe8061
@@ -13,6 +13,7 @@ import toolRegistry from '../tools/tool_registry.js';
 import toolInitializer from '../tools/tool_initializer.js';
 import log from '../../log.js';
 import type { LLMServiceInterface } from '../interfaces/agent_tool_interfaces.js';
+import { SEARCH_CONSTANTS } from '../constants/search_constants.js';
 
 /**
  * Pipeline for managing the entire chat flow
@@ -56,7 +57,7 @@ export class ChatPipeline {
         this.config = {
             enableStreaming: true,
             enableMetrics: true,
-            maxToolCallIterations: 5,
+            maxToolCallIterations: SEARCH_CONSTANTS.TOOL_EXECUTION.MAX_TOOL_CALL_ITERATIONS,
             ...config
         };
 
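The constant swapped in above is only referenced, not defined, in this diff. A minimal sketch of the relevant slice of search_constants.ts, assuming the constant simply carries the literal it replaces:

    // Hypothetical sketch of '../constants/search_constants.js' (not part of this diff).
    // The value is assumed to equal the old hardcoded literal.
    export const SEARCH_CONSTANTS = {
        TOOL_EXECUTION: {
            MAX_TOOL_CALL_ITERATIONS: 5 // was: maxToolCallIterations: 5
        }
    } as const;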
@@ -194,9 +195,9 @@ export class ChatPipeline {
             query: userQuery,
             noteId: input.noteId || 'global',
             options: {
-                maxResults: 5, // Can be adjusted
+                maxResults: SEARCH_CONSTANTS.CONTEXT.MAX_SIMILAR_NOTES,
                 useEnhancedQueries: true,
-                threshold: 0.6,
+                threshold: SEARCH_CONSTANTS.VECTOR_SEARCH.DEFAULT_THRESHOLD,
                 llmService: llmService || undefined
             }
         });
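Again, the two constants used here are assumed to mirror the literals they replace; a sketch under that assumption:

    // Hypothetical sketch; values inferred from the replaced literals.
    export const SEARCH_CONSTANTS = {
        CONTEXT: {
            MAX_SIMILAR_NOTES: 5 // was: maxResults: 5
        },
        VECTOR_SEARCH: {
            DEFAULT_THRESHOLD: 0.6 // was: threshold: 0.6
        }
    } as const;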
@@ -5,6 +5,7 @@ import type { ModelMetadata } from '../../providers/provider_options.js';
 import log from '../../../log.js';
 import options from '../../../options.js';
 import aiServiceManager from '../../ai_service_manager.js';
+import { SEARCH_CONSTANTS, MODEL_CAPABILITIES } from "../../constants/search_constants.js";
 /**
  * Pipeline stage for selecting the appropriate LLM model
  */
@@ -154,9 +155,9 @@ export class ModelSelectionStage extends BasePipelineStage<ModelSelectionInput,
         }
 
         // Check content length if provided
-        if (contentLength && contentLength > 5000) {
+        if (contentLength && contentLength > SEARCH_CONSTANTS.CONTEXT.CONTENT_LENGTH.MEDIUM_THRESHOLD) {
             // For large content, favor more powerful models
-            queryComplexity = contentLength > 10000 ? 'high' : 'medium';
+            queryComplexity = contentLength > SEARCH_CONSTANTS.CONTEXT.CONTENT_LENGTH.HIGH_THRESHOLD ? 'high' : 'medium';
         }
 
         // Set the model and add provider metadata
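The two thresholds gate query complexity by content size: above the medium threshold the stage favors stronger models, and above the high threshold it treats the query as high complexity. A sketch of the assumed CONTENT_LENGTH section, with a hypothetical classifyComplexity helper mirroring the hunk's ternary:

    // Hypothetical sketch; thresholds inferred from the replaced literals 5000 and 10000.
    export const SEARCH_CONSTANTS = {
        CONTEXT: {
            CONTENT_LENGTH: {
                MEDIUM_THRESHOLD: 5000,
                HIGH_THRESHOLD: 10000
            }
        }
    } as const;

    // Mirrors the hunk: > 10000 => 'high', 5001..10000 => 'medium', else unchanged.
    function classifyComplexity(contentLength: number): 'high' | 'medium' | undefined {
        const { MEDIUM_THRESHOLD, HIGH_THRESHOLD } = SEARCH_CONSTANTS.CONTEXT.CONTENT_LENGTH;
        if (contentLength > MEDIUM_THRESHOLD) {
            return contentLength > HIGH_THRESHOLD ? 'high' : 'medium';
        }
        return undefined;
    }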
@@ -256,7 +257,7 @@ export class ModelSelectionStage extends BasePipelineStage<ModelSelectionInput,
 
         // Get the first available provider and its default model
         const defaultProvider = availableProviders[0] as 'openai' | 'anthropic' | 'ollama' | 'local';
-        let defaultModel = 'gpt-3.5-turbo'; // Default fallback
+        let defaultModel = 'gpt-3.5-turbo'; // Use model from our constants
 
         // Set provider metadata
         if (!input.options.providerMetadata) {
@@ -274,17 +275,22 @@ export class ModelSelectionStage extends BasePipelineStage<ModelSelectionInput,
      * Get estimated context window for Ollama models
      */
     private getOllamaContextWindow(model: string): number {
+        // Try to find exact matches in MODEL_CAPABILITIES
+        if (model in MODEL_CAPABILITIES) {
+            return MODEL_CAPABILITIES[model as keyof typeof MODEL_CAPABILITIES].contextWindowTokens;
+        }
+
         // Estimate based on model family
         if (model.includes('llama3')) {
-            return 8192;
+            return MODEL_CAPABILITIES['gpt-4'].contextWindowTokens;
         } else if (model.includes('llama2')) {
-            return 4096;
+            return MODEL_CAPABILITIES['default'].contextWindowTokens;
         } else if (model.includes('mistral') || model.includes('mixtral')) {
-            return 8192;
+            return MODEL_CAPABILITIES['gpt-4'].contextWindowTokens;
        } else if (model.includes('gemma')) {
-            return 8192;
+            return MODEL_CAPABILITIES['gpt-4'].contextWindowTokens;
         } else {
-            return 4096; // Default fallback
+            return MODEL_CAPABILITIES['default'].contextWindowTokens;
         }
     }
 }
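MODEL_CAPABILITIES is likewise only referenced here. From the substitutions, the 'gpt-4' entry presumably carries 8192 context tokens and 'default' 4096, matching the old literals; a sketch:

    // Hypothetical sketch of MODEL_CAPABILITIES; entries inferred from the literals
    // they replace (8192 for llama3/mistral/gemma, 4096 for llama2 and the fallback).
    export const MODEL_CAPABILITIES = {
        'gpt-4': { contextWindowTokens: 8192 },
        'default': { contextWindowTokens: 4096 }
        // ...presumably entries for other known models, which the exact-match branch relies on
    } as const;

Worth noting as a design choice: the llama3, mistral, and gemma estimates are now tied to an OpenAI-named key, so changing the 'gpt-4' entry silently changes Ollama context-window estimates, and the exact-match branch (model in MODEL_CAPABILITIES) only pays off if the table also lists Ollama model names verbatim.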
@@ -7,6 +7,7 @@ import contextFormatter from '../../context/modules/context_formatter.js';
 import providerManager from '../../context/modules/provider_manager.js';
 import type { NoteSearchResult } from '../../interfaces/context_interfaces.js';
 import type { Message } from '../../ai_interface.js';
+import { SEARCH_CONSTANTS } from "../../constants/search_constants.js";
 
 /**
  * Pipeline stage for extracting semantic context from notes
@@ -35,7 +36,7 @@ export class SemanticContextExtractionStage extends BasePipelineStage<SemanticCo
             options: {
                 maxResults,
                 useEnhancedQueries: true,
-                threshold: 0.6,
+                threshold: SEARCH_CONSTANTS.VECTOR_SEARCH.DEFAULT_THRESHOLD,
                 llmService: undefined // Let the vectorSearchStage use the default service
             }
         });
@@ -9,6 +9,7 @@ import log from '../../../log.js';
 import vectorSearchService from '../../context/services/vector_search_service.js';
 import type { NoteSearchResult } from '../../interfaces/context_interfaces.js';
 import type { LLMServiceInterface } from '../../interfaces/agent_tool_interfaces.js';
+import { SEARCH_CONSTANTS } from '../../constants/search_constants.js';
 
 export interface VectorSearchInput {
     query: string;
@@ -46,8 +47,8 @@ export class VectorSearchStage {
         } = input;
 
         const {
-            maxResults = 10,
-            threshold = 0.6,
+            maxResults = SEARCH_CONSTANTS.VECTOR_SEARCH.DEFAULT_MAX_RESULTS,
+            threshold = SEARCH_CONSTANTS.VECTOR_SEARCH.DEFAULT_THRESHOLD,
             useEnhancedQueries = false,
             llmService = undefined
         } = options;
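The destructuring defaults mean callers that omit an option still get the centralized value. A small self-contained sketch of the pattern, assuming DEFAULT_MAX_RESULTS is 10 to match the old literal:

    // Hypothetical values, inferred from the replaced literals.
    const SEARCH_CONSTANTS = {
        VECTOR_SEARCH: {
            DEFAULT_MAX_RESULTS: 10, // was: maxResults = 10
            DEFAULT_THRESHOLD: 0.6   // was: threshold = 0.6
        }
    } as const;

    // Destructuring with constant-backed defaults, as in the hunk above.
    const options: { maxResults?: number; threshold?: number } = {};
    const {
        maxResults = SEARCH_CONSTANTS.VECTOR_SEARCH.DEFAULT_MAX_RESULTS,
        threshold = SEARCH_CONSTANTS.VECTOR_SEARCH.DEFAULT_THRESHOLD
    } = options;
    console.log(maxResults, threshold); // -> 10 0.6 when options omits both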