feat(llm): remove everything to do with embeddings, part 3

Mirror of https://github.com/TriliumNext/Notes.git, synced 2025-07-27 18:12:29 +08:00.
Parent commit: 44a2e7df21
This commit: 4550c12c6e
@@ -44,18 +44,7 @@ interface OptionRow {}
 
 interface NoteReorderingRow {}
 
-interface NoteEmbeddingRow {
-    embedId: string;
-    noteId: string;
-    providerId: string;
-    modelId: string;
-    dimension: number;
-    version: number;
-    dateCreated: string;
-    utcDateCreated: string;
-    dateModified: string;
-    utcDateModified: string;
-}
-
 type EntityRowMappings = {
     notes: NoteRow;
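With NoteEmbeddingRow gone, the entity row map covers only the remaining tables. A minimal sketch of how the trimmed mapping type-checks — the NoteRow stub and the note_reordering key are assumptions for illustration, not the project's exact definitions:

    // Illustrative only: NoteRow is stubbed and the key names are assumed.
    interface NoteRow { noteId: string; title: string; }
    interface NoteReorderingRow {}

    type EntityRowMappings = {
        notes: NoteRow;
        note_reordering: NoteReorderingRow;
    };

    // Lookups keyed by table name no longer need a "note_embeddings" entry.
    const row: EntityRowMappings["notes"] = { noteId: "n1", title: "Example" };
    console.log(row.title);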
@@ -1195,7 +1195,7 @@
     "restore_provider": "Restore provider to search",
     "similarity_threshold": "Similarity Threshold",
     "similarity_threshold_description": "Minimum similarity score (0-1) for notes to be included in context for LLM queries",
-    "reprocess_started": "Embedding reprocessing started in the background",
     "reprocess_index": "Rebuild Search Index",
     "reprocessing_index": "Rebuilding...",
     "reprocess_index_started": "Search index optimization started in the background",
@@ -6,7 +6,7 @@ import type { OpenAIModelResponse, AnthropicModelResponse, OllamaModelResponse }
 
 export class ProviderService {
     constructor(private $widget: JQuery<HTMLElement>) {
-        // Embedding functionality removed
+        // AI provider settings
     }
 
     /**
@@ -16,7 +16,7 @@ export const TPL = `
         </div>
     </div>
 
-    <!-- Embedding statistics section removed -->
+    <!-- AI settings template -->
 
     <div class="ai-providers-section options-section">
         <h4>${t("ai_llm.provider_configuration")}</h4>
@@ -48,17 +48,6 @@ interface AnthropicModel {
 *                       type: string
 *                     type:
 *                       type: string
-*                 embeddingModels:
-*                   type: array
-*                   items:
-*                     type: object
-*                     properties:
-*                       id:
-*                         type: string
-*                       name:
-*                         type: string
-*                       type:
-*                         type: string
 *       '500':
 *         description: Error listing models
 *       security:
@@ -90,14 +79,10 @@ async function listModels(req: Request, res: Response) {
             type: 'chat'
         }));
 
-        // Anthropic doesn't currently have embedding models
-        const embeddingModels: AnthropicModel[] = [];
-
         // Return the models list
         return {
             success: true,
-            chatModels,
-            embeddingModels
+            chatModels
         };
     } catch (error: any) {
         log.error(`Error listing Anthropic models: ${error.message || 'Unknown error'}`);
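With the embeddingModels field dropped, the Anthropic listModels route now reports chat models only. A hedged sketch of the simplified payload — the AnthropicModel shape is inferred from the hunk above and should be treated as an assumption:

    // Assumed shape of the simplified response.
    interface AnthropicModel { id: string; name: string; type: string; }

    interface ModelListResponse {
        success: boolean;
        chatModels: AnthropicModel[]; // embeddingModels is no longer present
    }

    const example: ModelListResponse = {
        success: true,
        chatModels: [{ id: 'claude-3-5-sonnet-20241022', name: 'claude-3-5-sonnet-20241022', type: 'chat' }]
    };
    console.log(example.chatModels.map((m) => m.id));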
@@ -40,17 +40,6 @@ import OpenAI from "openai";
 *                       type: string
 *                     type:
 *                       type: string
-*                 embeddingModels:
-*                   type: array
-*                   items:
-*                     type: object
-*                     properties:
-*                       id:
-*                         type: string
-*                       name:
-*                         type: string
-*                       type:
-*                         type: string
 *       '500':
 *         description: Error listing models
 *       security:
@@ -82,8 +71,7 @@ async function listModels(req: Request, res: Response) {
         // Filter and categorize models
         const allModels = response.data || [];
 
-        // Include all models as chat models, without filtering by specific model names
-        // This allows models from providers like OpenRouter to be displayed
+        // Include all models as chat models, excluding embedding models
         const chatModels = allModels
             .filter((model) =>
                 // Exclude models that are explicitly for embeddings
@@ -96,23 +84,10 @@ async function listModels(req: Request, res: Response) {
             type: 'chat'
         }));
 
-        const embeddingModels = allModels
-            .filter((model) =>
-                // Only include embedding-specific models
-                model.id.includes('embedding') ||
-                model.id.includes('embed')
-            )
-            .map((model) => ({
-                id: model.id,
-                name: model.id,
-                type: 'embedding'
-            }));
-
         // Return the models list
         return {
             success: true,
-            chatModels,
-            embeddingModels
+            chatModels
         };
     } catch (error: any) {
         log.error(`Error listing OpenAI models: ${error.message || 'Unknown error'}`);
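The OpenAI route keeps its embedding-model exclusion even though the separate embedding list is no longer built. A minimal sketch of the surviving behavior with a mocked model list — the exact filter predicate is truncated in the hunk above, so the id checks below are assumptions:

    // Mocked model list; real data comes from the OpenAI models endpoint.
    const allModels = [
        { id: 'gpt-4o' },
        { id: 'text-embedding-3-small' },
        { id: 'text-embedding-ada-002' }
    ];

    // Embedding-style ids are still excluded from the chat list.
    const chatModels = allModels
        .filter((model) => !model.id.includes('embedding') && !model.id.includes('embed'))
        .map((model) => ({ id: model.id, name: model.id, type: 'chat' as const }));

    console.log(chatModels); // [{ id: 'gpt-4o', name: 'gpt-4o', type: 'chat' }]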
@@ -92,7 +92,7 @@ const ALLOWED_OPTIONS = new Set<OptionNames>([
     "showLoginInShareTheme",
     "splitEditorOrientation",
 
-    // AI/LLM integration options (embedding options removed)
+    // AI/LLM integration options
     "aiEnabled",
     "aiTemperature",
     "aiSystemPrompt",
@@ -1,24 +1,17 @@
+/**
+ * Configuration constants for LLM providers
+ */
 export const PROVIDER_CONSTANTS = {
     ANTHROPIC: {
-        API_VERSION: '2023-06-01',
-        BETA_VERSION: 'messages-2023-12-15',
         BASE_URL: 'https://api.anthropic.com',
-        DEFAULT_MODEL: 'claude-3-haiku-20240307',
-        // Model mapping for simplified model names to their full versions
-        MODEL_MAPPING: {
-            'claude-3.7-sonnet': 'claude-3-7-sonnet-20250219',
-            'claude-3.5-sonnet': 'claude-3-5-sonnet-20241022',
-            'claude-3.5-haiku': 'claude-3-5-haiku-20241022',
-            'claude-3-opus': 'claude-3-opus-20240229',
-            'claude-3-sonnet': 'claude-3-sonnet-20240229',
-            'claude-3-haiku': 'claude-3-haiku-20240307',
-            'claude-2': 'claude-2.1'
-        },
-        // These are the currently available models from Anthropic
+        DEFAULT_MODEL: 'claude-3-5-sonnet-20241022',
+        API_VERSION: '2023-06-01',
+        BETA_VERSION: undefined,
+        CONTEXT_WINDOW: 200000,
         AVAILABLE_MODELS: [
             {
-                id: 'claude-3-7-sonnet-20250219',
-                name: 'Claude 3.7 Sonnet',
+                id: 'claude-3-5-sonnet-20250106',
+                name: 'Claude 3.5 Sonnet (New)',
                 description: 'Most intelligent model with hybrid reasoning capabilities',
                 maxTokens: 8192
             },
@@ -64,12 +57,7 @@ export const PROVIDER_CONSTANTS = {
     OPENAI: {
         BASE_URL: 'https://api.openai.com/v1',
         DEFAULT_MODEL: 'gpt-3.5-turbo',
-        DEFAULT_EMBEDDING_MODEL: 'text-embedding-ada-002',
         CONTEXT_WINDOW: 16000,
-        EMBEDDING_DIMENSIONS: {
-            ADA: 1536,
-            DEFAULT: 1536
-        },
         AVAILABLE_MODELS: [
             {
                 id: 'gpt-4o',
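After the reshuffle, PROVIDER_CONSTANTS carries only chat-related fields (BASE_URL, DEFAULT_MODEL, API_VERSION, CONTEXT_WINDOW and the like). A sketch of a consumer under that assumption; the values are copied from the hunks above, but the helper function is illustrative:

    // Values copied from the diff; the consumer function is an assumption.
    const PROVIDER_CONSTANTS = {
        ANTHROPIC: {
            BASE_URL: 'https://api.anthropic.com',
            DEFAULT_MODEL: 'claude-3-5-sonnet-20241022',
            API_VERSION: '2023-06-01',
            CONTEXT_WINDOW: 200000
        },
        OPENAI: {
            BASE_URL: 'https://api.openai.com/v1',
            DEFAULT_MODEL: 'gpt-3.5-turbo',
            CONTEXT_WINDOW: 16000
        }
    } as const;

    // No embedding-specific fields (DEFAULT_EMBEDDING_MODEL, EMBEDDING_DIMENSIONS) left to branch on.
    function defaultModelFor(provider: keyof typeof PROVIDER_CONSTANTS): string {
        return PROVIDER_CONSTANTS[provider].DEFAULT_MODEL;
    }

    console.log(defaultModelFor('ANTHROPIC')); // claude-3-5-sonnet-20241022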
@@ -132,51 +120,6 @@ export const LLM_CONSTANTS = {
         DEFAULT: 6000
     },
 
-    // Embedding dimensions (verify these with your actual models)
-    EMBEDDING_DIMENSIONS: {
-        OLLAMA: {
-            DEFAULT: 384,
-            NOMIC: 768,
-            MISTRAL: 1024
-        },
-        OPENAI: {
-            ADA: 1536,
-            DEFAULT: 1536
-        },
-        ANTHROPIC: {
-            CLAUDE: 1024,
-            DEFAULT: 1024
-        },
-        VOYAGE: {
-            DEFAULT: 1024
-        }
-    },
-
-    // Model-specific embedding dimensions for Ollama models
-    OLLAMA_MODEL_DIMENSIONS: {
-        "llama3": 8192,
-        "llama3.1": 8192,
-        "mistral": 8192,
-        "nomic": 768,
-        "mxbai": 1024,
-        "nomic-embed-text": 768,
-        "mxbai-embed-large": 1024,
-        "default": 384
-    },
-
-    // Model-specific context windows for Ollama models
-    OLLAMA_MODEL_CONTEXT_WINDOWS: {
-        "llama3": 8192,
-        "llama3.1": 8192,
-        "llama3.2": 8192,
-        "mistral": 8192,
-        "nomic": 32768,
-        "mxbai": 32768,
-        "nomic-embed-text": 32768,
-        "mxbai-embed-large": 32768,
-        "default": 8192
-    },
-
     // Batch size configuration
     BATCH_SIZE: {
         OPENAI: 10, // OpenAI can handle larger batches efficiently
@@ -189,8 +132,7 @@ export const LLM_CONSTANTS = {
     CHUNKING: {
         DEFAULT_SIZE: 1500,
         OLLAMA_SIZE: 1000,
-        DEFAULT_OVERLAP: 100,
-        MAX_SIZE_FOR_SINGLE_EMBEDDING: 5000
+        DEFAULT_OVERLAP: 100
     },
 
     // Search/similarity thresholds
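CHUNKING now ends at DEFAULT_OVERLAP; the single-embedding size cap is gone. For readers wondering how these two constants interact, here is an illustrative overlap-based chunker — not the project's actual implementation:

    // Illustrative chunker; constants copied from the diff.
    const CHUNKING = { DEFAULT_SIZE: 1500, OLLAMA_SIZE: 1000, DEFAULT_OVERLAP: 100 };

    function chunkText(text: string, size = CHUNKING.DEFAULT_SIZE, overlap = CHUNKING.DEFAULT_OVERLAP): string[] {
        const chunks: string[] = [];
        // Advance by (size - overlap) so adjacent chunks share `overlap` characters.
        for (let start = 0; start < text.length; start += size - overlap) {
            chunks.push(text.slice(start, start + size));
        }
        return chunks;
    }

    console.log(chunkText('a'.repeat(3200)).length); // 3 chunks, each at most 1500 chars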
@@ -6,7 +6,7 @@ import type { ICacheManager, CachedNoteData, CachedQueryResults } from '../../in
  * Provides a centralized caching system to avoid redundant operations
  */
 export class CacheManager implements ICacheManager {
-    // Cache for recently used context to avoid repeated embedding lookups
+    // Cache for recently used context to avoid repeated lookups
     private noteDataCache = new Map<string, CachedNoteData<unknown>>();
 
     // Cache for recently used queries
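The comment change here is cosmetic, but the cache is the piece doing the work. A minimal sketch of a note-data cache of this shape — CachedNoteData's fields are assumed; only the Map-based pattern comes from the diff:

    // Assumed shape for CachedNoteData; the diff only shows the Map declaration.
    interface CachedNoteData<T> { timestamp: number; data: T; }

    const noteDataCache = new Map<string, CachedNoteData<unknown>>();

    function getCachedNoteData(noteId: string, maxAgeMs = 60_000): unknown | undefined {
        const entry = noteDataCache.get(noteId);
        // Serve only entries younger than maxAgeMs.
        return entry && Date.now() - entry.timestamp < maxAgeMs ? entry.data : undefined;
    }

    noteDataCache.set('n1', { timestamp: Date.now(), data: { title: 'Example' } });
    console.log(getCachedNoteData('n1'));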
@@ -1,37 +1 @@
-import log from '../../../log.js';
-
-/**
- * Manages embedding providers for context services
- * Simplified since embedding functionality has been removed
- */
-export class ProviderManager {
-    /**
-     * Get the selected embedding provider based on user settings
-     * Returns null since embeddings have been removed
-     */
-    async getSelectedEmbeddingProvider(): Promise<null> {
-        log.info('Embedding providers have been removed - returning null');
-        return null;
-    }
-
-    /**
-     * Get all enabled embedding providers
-     * Returns empty array since embeddings have been removed
-     */
-    async getEnabledEmbeddingProviders(): Promise<never[]> {
-        log.info('Embedding providers have been removed - returning empty array');
-        return [];
-    }
-
-    /**
-     * Check if embedding providers are available
-     * Returns false since embeddings have been removed
-     */
-    isEmbeddingAvailable(): boolean {
-        return false;
-    }
-}
-
-// Export singleton instance
-export const providerManager = new ProviderManager();
-export default providerManager;
+// This file has been removed as embedding functionality has been completely removed from the codebase
@@ -11,7 +11,6 @@
  */
 
 import log from '../../../log.js';
-import providerManager from '../modules/provider_manager.js';
 import cacheManager from '../modules/cache_manager.js';
 import queryProcessor from './query_processor.js';
 import contextFormatter from '../modules/context_formatter.js';
@@ -56,17 +55,11 @@ export class ContextService {
 
         this.initPromise = (async () => {
             try {
-                // Initialize provider
-                const provider = await providerManager.getSelectedEmbeddingProvider();
-                if (!provider) {
-                    throw new Error(`No embedding provider available. Could not initialize context service.`);
-                }
-
                 // Agent tools are already initialized in the AIServiceManager constructor
                 // No need to initialize them again
 
                 this.initialized = true;
-                log.info(`Context service initialized - embeddings disabled`);
+                log.info(`Context service initialized`);
             } catch (error: unknown) {
                 const errorMessage = error instanceof Error ? error.message : String(error);
                 log.error(`Failed to initialize context service: ${errorMessage}`);
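Initialization no longer awaits (and so can no longer fail on) a provider lookup. A condensed sketch of the resulting flow, with the class context and the project's logger replaced by local stand-ins:

    // Stand-ins: `console` replaces the project's log module; class context omitted.
    let initialized = false;

    const initPromise = (async () => {
        try {
            // Agent tools are initialized elsewhere, so there is nothing to await here.
            initialized = true;
            console.info('Context service initialized');
        } catch (error: unknown) {
            const errorMessage = error instanceof Error ? error.message : String(error);
            console.error(`Failed to initialize context service: ${errorMessage}`);
        }
    })();

    initPromise.then(() => console.log('ready:', initialized));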
@@ -177,9 +170,8 @@ export class ContextService {
             }
         }
 
-        // Step 3: Find relevant notes using basic text search (since embeddings are removed)
-        // This will use traditional note search instead of vector similarity
-        log.info("Using traditional search instead of embedding-based search");
+        // Step 3: Find relevant notes using traditional search
+        log.info("Using traditional search for note discovery");
 
         // Use fallback context based on the context note if provided
        if (contextNoteId) {
@@ -215,13 +207,10 @@ export class ContextService {
         log.info(`Final combined results: ${relevantNotes.length} relevant notes`);
 
         // Step 4: Build context from the notes
-        const provider = await providerManager.getSelectedEmbeddingProvider();
-        const providerId = 'default'; // Provider is always null since embeddings removed
-
         const context = await contextFormatter.buildContextFromNotes(
             relevantNotes,
             userQuestion,
-            providerId
+            'default'
         );
 
         // Step 5: Add agent tools context if requested
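Since the provider lookup always returned null, the intermediate providerId variable was dead weight; the formatter now receives the literal 'default'. An illustrative call shape — buildContextFromNotes' signature is inferred from the arguments visible in the hunk, and the body below is a toy:

    // Inferred signature; the real formatter lives in context_formatter.js.
    type NoteSearchResult = { noteId: string; title: string };

    async function buildContextFromNotes(notes: NoteSearchResult[], query: string, providerId: string): Promise<string> {
        return `[context for "${query}" from ${notes.length} notes via ${providerId}]`;
    }

    // No provider resolution step - the id is now always the literal 'default'.
    buildContextFromNotes([], 'What changed in this commit?', 'default').then(console.log);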
@@ -60,7 +60,6 @@ export interface IContextFormatter {
  */
 export interface ILLMService {
     sendMessage(message: string, options?: Record<string, unknown>): Promise<string>;
-    generateEmbedding?(text: string): Promise<number[]>;
     streamMessage?(message: string, callback: (text: string) => void, options?: Record<string, unknown>): Promise<string>;
 }
 
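Dropping the optional generateEmbedding method means an LLM service only has to speak chat. A minimal conforming stub under that interface — the EchoService is purely illustrative:

    // Interface copied from the hunk; the implementation is a toy.
    interface ILLMService {
        sendMessage(message: string, options?: Record<string, unknown>): Promise<string>;
        streamMessage?(message: string, callback: (text: string) => void, options?: Record<string, unknown>): Promise<string>;
    }

    class EchoService implements ILLMService {
        async sendMessage(message: string): Promise<string> {
            // No generateEmbedding to implement anymore.
            return `echo: ${message}`;
        }
    }

    new EchoService().sendMessage('hi').then(console.log);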
@@ -36,16 +36,6 @@ export interface OllamaError extends LLMServiceError {
     code?: string;
 }
 
-/**
- * Embedding-specific error interface
- */
-export interface EmbeddingError extends LLMServiceError {
-    provider: string;
-    model?: string;
-    batchSize?: number;
-    isRetryable: boolean;
-}
-
 /**
  * Guard function to check if an error is a specific type of error
  */
@@ -6,7 +6,7 @@ import aiServiceManager from './ai_service_manager.js';
 
 /**
  * Service for fetching and caching model capabilities
- * Simplified to only handle chat models since embeddings have been removed
+ * Handles chat model capabilities
  */
 export class ModelCapabilitiesService {
     // Cache model capabilities
@@ -8,7 +8,7 @@ import { ModelSelectionStage } from './stages/model_selection_stage.js';
 import { LLMCompletionStage } from './stages/llm_completion_stage.js';
 import { ResponseProcessingStage } from './stages/response_processing_stage.js';
 import { ToolCallingStage } from './stages/tool_calling_stage.js';
-// VectorSearchStage removed along with embedding functionality
+// Traditional search is used instead of vector search
 import toolRegistry from '../tools/tool_registry.js';
 import toolInitializer from '../tools/tool_initializer.js';
 import log from '../../log.js';
@@ -29,7 +29,7 @@ export class ChatPipeline {
         llmCompletion: LLMCompletionStage;
         responseProcessing: ResponseProcessingStage;
         toolCalling: ToolCallingStage;
-        // vectorSearch removed with embedding functionality
+        // traditional search is used instead of vector search
     };
 
     config: ChatPipelineConfig;
@@ -50,7 +50,7 @@ export class ChatPipeline {
             llmCompletion: new LLMCompletionStage(),
             responseProcessing: new ResponseProcessingStage(),
             toolCalling: new ToolCallingStage(),
-            // vectorSearch removed with embedding functionality
+            // traditional search is used instead of vector search
         };
 
         // Set default configuration values
@@ -83,16 +83,6 @@ async function checkChatProviderConfigs(result: ProviderValidationResult): Promi
     }
 }
 
-/**
- * Check if we have at least one valid embedding provider available
- * Returns false since embeddings have been removed
- */
-export async function getEmbeddingProviderAvailability(): Promise<boolean> {
-    log.info("Embedding providers have been removed, returning false");
-    return false;
-}
-
 export default {
-    validateProviders,
-    getEmbeddingProviderAvailability
+    validateProviders
 };
@@ -11,7 +11,7 @@ import attributes from '../../attributes.js';
 import aiServiceManager from '../ai_service_manager.js';
 import { SEARCH_CONSTANTS } from '../constants/search_constants.js';
 import searchService from '../../search/services/search.js';
-// Define types locally since embeddings are no longer available
+// Define types locally for relationship tool
 interface Backlink {
     noteId: string;
     title: string;