update LLM API endpoints

This commit is contained in:
perf3ct 2025-04-01 10:55:20 -07:00
parent b7f2a9663e
commit 1690217797
No known key found for this signature in database
GPG Key ID: 569C4EEC436F5232
4 changed files with 386 additions and 469 deletions

View File

@ -2,14 +2,14 @@ import server from "../../../../services/server.js";
import toastService from "../../../../services/toast.js";
import { t } from "../../../../services/i18n.js";
import options from "../../../../services/options.js";
import { OpenAIModelResponse, AnthropicModelResponse, OllamaModelResponse } from "./interfaces.js";
import type { OpenAIModelResponse, AnthropicModelResponse, OllamaModelResponse } from "./interfaces.js";
export class ProviderService {
constructor(private $widget: JQuery<HTMLElement>) {
// Initialize Voyage models (since they don't have a dynamic refresh yet)
this.initializeVoyageModels();
}
/**
* Initialize Voyage models with default values and ensure proper selection
*/
@ -22,7 +22,7 @@ export class ProviderService {
}
}, 100); // Small delay to ensure the widget is fully initialized
}
/**
* Ensures the dropdown has the correct value set, prioritizing:
* 1. Current UI value if present
@ -57,14 +57,14 @@ export class ProviderService {
*/
async refreshOpenAIModels(showLoading: boolean, openaiModelsRefreshed: boolean): Promise<boolean> {
if (!this.$widget) return false;
const $refreshOpenAIModels = this.$widget.find('.refresh-openai-models');
// If we've already refreshed and we're not forcing a refresh, don't do it again
if (openaiModelsRefreshed && !showLoading) {
return openaiModelsRefreshed;
}
if (showLoading) {
$refreshOpenAIModels.prop('disabled', true);
$refreshOpenAIModels.html(`<i class="spinner-border spinner-border-sm"></i>`);
@ -72,7 +72,7 @@ export class ProviderService {
try {
const openaiBaseUrl = this.$widget.find('.openai-base-url').val() as string;
const response = await server.post<OpenAIModelResponse>('openai/list-models', { baseUrl: openaiBaseUrl });
const response = await server.get<OpenAIModelResponse>(`llm/providers/openai/models?baseUrl=${encodeURIComponent(openaiBaseUrl)}`);
if (response && response.success) {
// Update the chat models dropdown
@ -120,12 +120,12 @@ export class ProviderService {
const totalModels = (response.chatModels?.length || 0) + (response.embeddingModels?.length || 0);
toastService.showMessage(`${totalModels} OpenAI models found.`);
}
return true;
} else if (showLoading) {
toastService.showError(`No OpenAI models found. Please check your API key and settings.`);
}
return openaiModelsRefreshed;
} catch (e) {
console.error(`Error fetching OpenAI models:`, e);
@ -140,7 +140,7 @@ export class ProviderService {
}
}
}
/**
* Refreshes the list of Anthropic models
* @param showLoading Whether to show loading indicators and toasts
@ -149,14 +149,14 @@ export class ProviderService {
*/
async refreshAnthropicModels(showLoading: boolean, anthropicModelsRefreshed: boolean): Promise<boolean> {
if (!this.$widget) return false;
const $refreshAnthropicModels = this.$widget.find('.refresh-anthropic-models');
// If we've already refreshed and we're not forcing a refresh, don't do it again
if (anthropicModelsRefreshed && !showLoading) {
return anthropicModelsRefreshed;
}
if (showLoading) {
$refreshAnthropicModels.prop('disabled', true);
$refreshAnthropicModels.html(`<i class="spinner-border spinner-border-sm"></i>`);
@ -164,7 +164,7 @@ export class ProviderService {
try {
const anthropicBaseUrl = this.$widget.find('.anthropic-base-url').val() as string;
const response = await server.post<AnthropicModelResponse>('anthropic/list-models', { baseUrl: anthropicBaseUrl });
const response = await server.get<AnthropicModelResponse>(`llm/providers/anthropic/models?baseUrl=${encodeURIComponent(anthropicBaseUrl)}`);
if (response && response.success) {
// Update the chat models dropdown
@ -197,12 +197,12 @@ export class ProviderService {
const totalModels = (response.chatModels?.length || 0) + (response.embeddingModels?.length || 0);
toastService.showMessage(`${totalModels} Anthropic models found.`);
}
return true;
} else if (showLoading) {
toastService.showError(`No Anthropic models found. Please check your API key and settings.`);
}
return anthropicModelsRefreshed;
} catch (e) {
console.error(`Error fetching Anthropic models:`, e);
@ -217,7 +217,7 @@ export class ProviderService {
}
}
}
/**
* Refreshes the list of Ollama models
* @param showLoading Whether to show loading indicators and toasts
@ -226,14 +226,14 @@ export class ProviderService {
*/
async refreshOllamaModels(showLoading: boolean, ollamaModelsRefreshed: boolean): Promise<boolean> {
if (!this.$widget) return false;
const $refreshModels = this.$widget.find('.refresh-models');
// If we've already refreshed and we're not forcing a refresh, don't do it again
if (ollamaModelsRefreshed && !showLoading) {
return ollamaModelsRefreshed;
}
if (showLoading) {
$refreshModels.prop('disabled', true);
$refreshModels.text(t("ai_llm.refreshing_models"));
@ -241,7 +241,7 @@ export class ProviderService {
try {
const ollamaBaseUrl = this.$widget.find('.ollama-base-url').val() as string;
const response = await server.post<OllamaModelResponse>('ollama/list-models', { baseUrl: ollamaBaseUrl });
const response = await server.get<OllamaModelResponse>(`llm/providers/ollama/models?baseUrl=${encodeURIComponent(ollamaBaseUrl)}`);
if (response && response.success && response.models && response.models.length > 0) {
const $embedModelSelect = this.$widget.find('.ollama-embedding-model');
@ -295,12 +295,12 @@ export class ProviderService {
if (showLoading) {
toastService.showMessage(`${response.models.length} Ollama models found.`);
}
return true;
} else if (showLoading) {
toastService.showError(`No Ollama models found. Please check if Ollama is running.`);
}
return ollamaModelsRefreshed;
} catch (e) {
console.error(`Error fetching Ollama models:`, e);
@ -315,4 +315,4 @@ export class ProviderService {
}
}
}
}
}

View File

@ -1144,39 +1144,34 @@ async function sendMessage(req: Request, res: Response) {
/**
* @swagger
* /api/llm/index/stats:
* /api/llm/indexes/stats:
* get:
* summary: Get statistics about the vector index
* summary: Get stats about the LLM knowledge base indexing status
* operationId: llm-index-stats
* responses:
* '200':
* description: Vector index statistics
* content:
* application/json:
* schema:
* type: object
* properties:
* totalEmbeddings:
* type: integer
* totalIndexedNotes:
* type: integer
* lastIndexed:
* type: string
* format: date-time
* embeddingProvider:
* type: string
* description: Index stats successfully retrieved
* security:
* - session: []
* tags: ["llm"]
*/
async function getIndexStats(req: Request, res: Response) {
try {
if (!isDatabaseInitialized()) {
throw new Error('Database is not initialized yet');
// Check if AI is enabled
const aiEnabled = await options.getOptionBool('aiEnabled');
if (!aiEnabled) {
return {
success: false,
message: "AI features are disabled"
};
}
// Return indexing stats
const stats = await indexService.getIndexingStats();
return stats;
return {
success: true,
...stats
};
} catch (error: any) {
log.error(`Error getting index stats: ${error.message || 'Unknown error'}`);
throw new Error(`Failed to get index stats: ${error.message || 'Unknown error'}`);
@ -1185,9 +1180,9 @@ async function getIndexStats(req: Request, res: Response) {
/**
* @swagger
* /api/llm/index/start:
* /api/llm/indexes:
* post:
* summary: Start or restart the indexing process
* summary: Start or continue indexing the knowledge base
* operationId: llm-start-indexing
* requestBody:
* required: false
@ -1196,413 +1191,345 @@ async function getIndexStats(req: Request, res: Response) {
* schema:
* type: object
* properties:
* forceReindex:
* type: boolean
* description: Whether to force reindexing of all notes
* branchId:
* type: string
* description: Optional branch ID to limit indexing scope
* responses:
* '200':
* description: Indexing process started
* content:
* application/json:
* schema:
* type: object
* properties:
* message:
* type: string
* notesToIndex:
* type: integer
* security:
* - session: []
* tags: ["llm"]
*/
async function startIndexing(req: Request, res: Response) {
    try {
        if (!isDatabaseInitialized()) {
            throw new Error('Database is not initialized yet');
        }

        const { force, batchSize } = req.body || {};

        // A batch size bounds the run to a limited chunk of notes;
        // without one we kick off a full (re)index.
        if (batchSize) {
            const started = await indexService.runBatchIndexing(batchSize);
            return {
                success: started,
                message: started ? `Batch indexing started with size ${batchSize}` : 'Indexing already in progress'
            };
        }

        const started = await indexService.startFullIndexing(force);
        return {
            success: started,
            message: started ? 'Full indexing started' : 'Indexing already in progress or not needed'
        };
    } catch (error: any) {
        log.error(`Error starting indexing: ${error.message || 'Unknown error'}`);
        throw new Error(`Failed to start indexing: ${error.message || 'Unknown error'}`);
    }
}
/**
* @swagger
* /api/llm/index/failed:
* get:
* summary: Get list of notes that failed to be indexed
* operationId: llm-failed-indexes
* responses:
* '200':
* description: List of failed note indexes
* content:
* application/json:
* schema:
* type: array
* items:
* type: object
* properties:
* noteId:
* type: string
* title:
* type: string
* error:
* type: string
* timestamp:
* type: string
* format: date-time
* security:
* - session: []
* tags: ["llm"]
*/
async function getFailedIndexes(req: Request, res: Response) {
    try {
        if (!isDatabaseInitialized()) {
            throw new Error('Database is not initialized yet');
        }

        // Parse the optional ?limit= query parameter. Fall back to 100 both
        // when it is absent and when it is not a valid integer — previously a
        // non-numeric value produced NaN, which was passed straight through to
        // the index service.
        const parsedLimit = req.query.limit ? parseInt(req.query.limit as string, 10) : 100;
        const limit = Number.isNaN(parsedLimit) ? 100 : parsedLimit;

        const failedNotes = await indexService.getFailedIndexes(limit);

        return {
            count: failedNotes.length,
            failedNotes
        };
    } catch (error: any) {
        log.error(`Error getting failed indexes: ${error.message || 'Unknown error'}`);
        throw new Error(`Failed to get failed indexes: ${error.message || 'Unknown error'}`);
    }
}
/**
* @swagger
* /api/llm/index/failed/{noteId}/retry:
* post:
* summary: Retry indexing a specific failed note
* operationId: llm-retry-failed-index
* parameters:
* - name: noteId
* in: path
* required: true
* schema:
* type: string
* responses:
* '200':
* description: Retry process started
* content:
* application/json:
* schema:
* type: object
* properties:
* success:
* type: boolean
* message:
* type: string
* '404':
* description: Failed note not found
* security:
* - session: []
* tags: ["llm"]
*/
async function retryFailedIndex(req: Request, res: Response) {
    try {
        if (!isDatabaseInitialized()) {
            throw new Error('Database is not initialized yet');
        }

        const noteId = req.params.noteId;
        if (!noteId) {
            throw new Error('Note ID is required');
        }

        // Ask the index service to put the note back on the queue; it reports
        // whether the note was actually found among the failed entries.
        const queued = await indexService.retryFailedNote(noteId);
        const message = queued
            ? `Note ${noteId} queued for retry`
            : `Note ${noteId} not found in failed queue`;

        return { success: queued, message };
    } catch (error: any) {
        log.error(`Error retrying failed index: ${error.message || 'Unknown error'}`);
        throw new Error(`Failed to retry index: ${error.message || 'Unknown error'}`);
    }
}
/**
* @swagger
* /api/llm/index/failed/retry-all:
* post:
* summary: Retry indexing all failed notes
* operationId: llm-retry-all-failed
* responses:
* '200':
* description: Retry process started for all failed notes
* content:
* application/json:
* schema:
* type: object
* properties:
* success:
* type: boolean
* message:
* type: string
* count:
* type: integer
* security:
* - session: []
* tags: ["llm"]
*/
async function retryAllFailedIndexes(req: Request, res: Response) {
    try {
        if (!isDatabaseInitialized()) {
            throw new Error('Database is not initialized yet');
        }

        // Re-queue every failed note; the service reports how many it queued.
        const retriedCount = await indexService.retryAllFailedNotes();

        return {
            success: true,
            count: retriedCount,
            message: `${retriedCount} notes queued for retry`
        };
    } catch (error: any) {
        log.error(`Error retrying all failed indexes: ${error.message || 'Unknown error'}`);
        throw new Error(`Failed to retry indexes: ${error.message || 'Unknown error'}`);
    }
}
/**
* @swagger
* /api/llm/similar:
* post:
* summary: Find notes similar to the provided content
* operationId: llm-find-similar
* requestBody:
* required: true
* content:
* application/json:
* schema:
* type: object
* properties:
* content:
* type: string
* description: Content to find similar notes for
* limit:
* type: integer
* description: Maximum number of results to return
* threshold:
* type: number
* description: Similarity threshold (0.0-1.0)
* responses:
* '200':
* description: List of similar notes
* content:
* application/json:
* schema:
* type: array
* items:
* type: object
* properties:
* noteId:
* type: string
* title:
* type: string
* similarity:
* type: number
* branchId:
* type: string
* security:
* - session: []
* tags: ["llm"]
*/
async function findSimilarNotes(req: Request, res: Response) {
    try {
        if (!isDatabaseInitialized()) {
            throw new Error('Database is not initialized yet');
        }

        const { query, contextNoteId, limit } = req.body || {};

        // Reject anything that is not a non-blank string.
        const hasQuery = typeof query === 'string' && query.trim().length > 0;
        if (!hasQuery) {
            throw new Error('Query is required');
        }

        const similarNotes = await indexService.findSimilarNotes(
            query,
            contextNoteId,
            limit || 10
        );

        return {
            count: similarNotes.length,
            similarNotes
        };
    } catch (error: any) {
        log.error(`Error finding similar notes: ${error.message || 'Unknown error'}`);
        throw new Error(`Failed to find similar notes: ${error.message || 'Unknown error'}`);
    }
}
/**
* @swagger
* /api/llm/generate-context:
* post:
* summary: Generate context from similar notes for a query
* operationId: llm-generate-context
* requestBody:
* required: true
* content:
* application/json:
* schema:
* type: object
* properties:
* query:
* type: string
* description: Query to generate context for
* limit:
* type: integer
* description: Maximum number of notes to include
* contextNoteId:
* type: string
* description: Optional note ID to provide additional context
* responses:
* '200':
* description: Generated context and sources
* content:
* application/json:
* schema:
* type: object
* properties:
* context:
* type: string
* sources:
* type: array
* items:
* type: object
* properties:
* noteId:
* type: string
* title:
* type: string
* similarity:
* type: number
* security:
* - session: []
* tags: ["llm"]
*/
async function generateQueryContext(req: Request, res: Response) {
    try {
        if (!isDatabaseInitialized()) {
            throw new Error('Database is not initialized yet');
        }

        const { query, contextNoteId, depth } = req.body || {};

        // Reject anything that is not a non-blank string.
        const hasQuery = typeof query === 'string' && query.trim().length > 0;
        if (!hasQuery) {
            throw new Error('Query is required');
        }

        const context = await indexService.generateQueryContext(
            query,
            contextNoteId,
            depth || 2
        );

        return {
            context,
            length: context.length
        };
    } catch (error: any) {
        log.error(`Error generating query context: ${error.message || 'Unknown error'}`);
        throw new Error(`Failed to generate query context: ${error.message || 'Unknown error'}`);
    }
}
/**
* @swagger
* /api/llm/index/note/{noteId}:
* post:
* summary: Index or reindex a specific note
* operationId: llm-index-note
* parameters:
* - name: noteId
* in: path
* required: true
* schema:
* type: string
* requestBody:
* required: false
* content:
* application/json:
* schema:
* type: object
* properties:
* force:
* type: boolean
* description: Whether to force reindexing even if already indexed
* description: Whether to force reindexing of all notes
* responses:
* '200':
* description: Note indexing result
* content:
* application/json:
* schema:
* type: object
* properties:
* success:
* type: boolean
* message:
* type: string
* '404':
* description: Note not found
* description: Indexing started successfully
* security:
* - session: []
* tags: ["llm"]
*/
async function startIndexing(req: Request, res: Response) {
try {
// Check if AI is enabled
const aiEnabled = await options.getOptionBool('aiEnabled');
if (!aiEnabled) {
return {
success: false,
message: "AI features are disabled"
};
}
const { force = false } = req.body;
// Start indexing
await indexService.startFullIndexing(force);
return {
success: true,
message: "Indexing started"
};
} catch (error: any) {
log.error(`Error starting indexing: ${error.message || 'Unknown error'}`);
throw new Error(`Failed to start indexing: ${error.message || 'Unknown error'}`);
}
}
/**
* @swagger
* /api/llm/indexes/failed:
* get:
* summary: Get list of notes that failed to index
* operationId: llm-failed-indexes
* parameters:
* - name: limit
* in: query
* required: false
* schema:
* type: integer
* default: 100
* responses:
* '200':
* description: Failed indexes successfully retrieved
* security:
* - session: []
* tags: ["llm"]
*/
async function getFailedIndexes(req: Request, res: Response) {
    try {
        // Check if AI is enabled
        const aiEnabled = await options.getOptionBool('aiEnabled');
        if (!aiEnabled) {
            return {
                success: false,
                message: "AI features are disabled"
            };
        }

        // Parse ?limit= with a default of 100. Also fall back to 100 when the
        // value is non-numeric — previously parseInt produced NaN for bad
        // input and NaN was handed to the index service.
        const parsedLimit = parseInt(req.query.limit as string || "100", 10);
        const limit = Number.isNaN(parsedLimit) ? 100 : parsedLimit;

        // Get failed indexes
        const failed = await indexService.getFailedIndexes(limit);

        return {
            success: true,
            failed
        };
    } catch (error: any) {
        log.error(`Error getting failed indexes: ${error.message || 'Unknown error'}`);
        throw new Error(`Failed to get failed indexes: ${error.message || 'Unknown error'}`);
    }
}
/**
* @swagger
* /api/llm/indexes/notes/{noteId}:
* put:
* summary: Retry indexing a specific note that previously failed
* operationId: llm-retry-index
* parameters:
* - name: noteId
* in: path
* required: true
* schema:
* type: string
* responses:
* '200':
* description: Index retry successfully initiated
* security:
* - session: []
* tags: ["llm"]
*/
async function retryFailedIndex(req: Request, res: Response) {
    try {
        // Check if AI is enabled
        const aiEnabled = await options.getOptionBool('aiEnabled');
        if (!aiEnabled) {
            return {
                success: false,
                message: "AI features are disabled"
            };
        }

        const { noteId } = req.params;
        if (!noteId) {
            return {
                success: false,
                message: "Note ID is required"
            };
        }

        // Retry indexing the note
        const result = await indexService.retryFailedNote(noteId);

        // Report the actual outcome. Previously `success` was hard-coded to
        // true even when the message said the note could not be queued.
        return {
            success: !!result,
            message: result ? "Note queued for indexing" : "Failed to queue note for indexing"
        };
    } catch (error: any) {
        log.error(`Error retrying failed index: ${error.message || 'Unknown error'}`);
        throw new Error(`Failed to retry index: ${error.message || 'Unknown error'}`);
    }
}
/**
* @swagger
* /api/llm/indexes/failed:
* put:
* summary: Retry indexing all failed notes
* operationId: llm-retry-all-indexes
* responses:
* '200':
* description: Retry of all failed indexes successfully initiated
* security:
* - session: []
* tags: ["llm"]
*/
async function retryAllFailedIndexes(req: Request, res: Response) {
    try {
        // AI features must be switched on for any index operation.
        if (!(await options.getOptionBool('aiEnabled'))) {
            return {
                success: false,
                message: "AI features are disabled"
            };
        }

        // Re-queue every failed note; the service reports how many it queued.
        const queuedCount = await indexService.retryAllFailedNotes();

        return {
            success: true,
            message: `${queuedCount} notes queued for reprocessing`
        };
    } catch (error: any) {
        log.error(`Error retrying all failed indexes: ${error.message || 'Unknown error'}`);
        throw new Error(`Failed to retry all indexes: ${error.message || 'Unknown error'}`);
    }
}
/**
* @swagger
* /api/llm/indexes/notes/similar:
* get:
* summary: Find notes similar to a query string
* operationId: llm-find-similar-notes
* parameters:
* - name: query
* in: query
* required: true
* schema:
* type: string
* - name: contextNoteId
* in: query
* required: false
* schema:
* type: string
* - name: limit
* in: query
* required: false
* schema:
* type: integer
* default: 5
* responses:
* '200':
* description: Similar notes found successfully
* security:
* - session: []
* tags: ["llm"]
*/
async function findSimilarNotes(req: Request, res: Response) {
    try {
        // Check if AI is enabled
        const aiEnabled = await options.getOptionBool('aiEnabled');
        if (!aiEnabled) {
            return {
                success: false,
                message: "AI features are disabled"
            };
        }

        const query = req.query.query as string;
        const contextNoteId = req.query.contextNoteId as string | undefined;

        // Parse ?limit= with a default of 5; also fall back when the value is
        // non-numeric (parseInt would otherwise yield NaN).
        const parsedLimit = parseInt(req.query.limit as string || "5", 10);
        const limit = Number.isNaN(parsedLimit) ? 5 : parsedLimit;

        if (!query) {
            return {
                success: false,
                message: "Query is required"
            };
        }

        // Find similar notes
        const similar = await indexService.findSimilarNotes(query, contextNoteId, limit);

        return {
            success: true,
            similar
        };
    } catch (error: any) {
        log.error(`Error finding similar notes: ${error.message || 'Unknown error'}`);
        throw new Error(`Failed to find similar notes: ${error.message || 'Unknown error'}`);
    }
}
/**
* @swagger
* /api/llm/indexes/context:
* get:
* summary: Generate context for an LLM query based on the knowledge base
* operationId: llm-generate-context
* parameters:
* - name: query
* in: query
* required: true
* schema:
* type: string
* - name: contextNoteId
* in: query
* required: false
* schema:
* type: string
* - name: depth
* in: query
* required: false
* schema:
* type: integer
* default: 2
* responses:
* '200':
* description: Context generated successfully
* security:
* - session: []
* tags: ["llm"]
*/
async function generateQueryContext(req: Request, res: Response) {
    try {
        // Check if AI is enabled
        const aiEnabled = await options.getOptionBool('aiEnabled');
        if (!aiEnabled) {
            return {
                success: false,
                message: "AI features are disabled"
            };
        }

        const query = req.query.query as string;
        const contextNoteId = req.query.contextNoteId as string | undefined;

        // Parse ?depth= with a default of 2; also fall back when the value is
        // non-numeric (parseInt would otherwise yield NaN).
        const parsedDepth = parseInt(req.query.depth as string || "2", 10);
        const depth = Number.isNaN(parsedDepth) ? 2 : parsedDepth;

        if (!query) {
            return {
                success: false,
                message: "Query is required"
            };
        }

        // Generate context
        const context = await indexService.generateQueryContext(query, contextNoteId, depth);

        return {
            success: true,
            context
        };
    } catch (error: any) {
        log.error(`Error generating query context: ${error.message || 'Unknown error'}`);
        throw new Error(`Failed to generate query context: ${error.message || 'Unknown error'}`);
    }
}
/**
* @swagger
* /api/llm/indexes/notes/{noteId}:
* post:
* summary: Index a specific note for LLM knowledge base
* operationId: llm-index-note
* parameters:
* - name: noteId
* in: path
* required: true
* schema:
* type: string
* responses:
* '200':
* description: Note indexed successfully
* security:
* - session: []
* tags: ["llm"]
*/
async function indexNote(req: Request, res: Response) {
try {
if (!isDatabaseInitialized()) {
throw new Error('Database is not initialized yet');
// Check if AI is enabled
const aiEnabled = await options.getOptionBool('aiEnabled');
if (!aiEnabled) {
return {
success: false,
message: "AI features are disabled"
};
}
const { noteId } = req.params;
if (!noteId) {
throw new Error('Note ID is required');
return {
success: false,
message: "Note ID is required"
};
}
// Check if note exists
const note = becca.getNote(noteId);
if (!note) {
throw new Error(`Note ${noteId} not found`);
}
const success = await indexService.generateNoteIndex(noteId);
// Index the note
const result = await indexService.generateNoteIndex(noteId);
return {
success,
noteId,
noteTitle: note.title,
message: success ? `Note "${note.title}" indexed successfully` : `Failed to index note "${note.title}"`
success: true,
message: result ? "Note indexed successfully" : "Failed to index note"
};
} catch (error: any) {
log.error(`Error indexing note: ${error.message || 'Unknown error'}`);

View File

@ -5,20 +5,17 @@ import type { Request, Response } from "express";
/**
* @swagger
* /api/ollama/models:
* post:
* /api/llm/providers/ollama/models:
* get:
* summary: List available models from Ollama
* operationId: ollama-list-models
* requestBody:
* required: false
* content:
* application/json:
* schema:
* type: object
* properties:
* baseUrl:
* type: string
* description: Optional custom Ollama API base URL
* parameters:
* - name: baseUrl
* in: query
* required: false
* schema:
* type: string
* description: Optional custom Ollama API base URL
* responses:
* '200':
* description: List of available Ollama models
@ -41,13 +38,10 @@ import type { Request, Response } from "express";
*/
async function listModels(req: Request, res: Response) {
try {
const { baseUrl } = req.body;
// Use provided base URL or default from options
const ollamaBaseUrl = baseUrl || await options.getOption('ollamaBaseUrl') || 'http://localhost:11434';
const baseUrl = req.query.baseUrl as string || await options.getOption('ollamaBaseUrl') || 'http://localhost:11434';
// Call Ollama API to get models
const response = await axios.get(`${ollamaBaseUrl}/api/tags?format=json`, {
const response = await axios.get(`${baseUrl}/api/tags?format=json`, {
headers: { 'Content-Type': 'application/json' },
timeout: 10000
});

View File

@ -414,24 +414,20 @@ function register(app: express.Application) {
apiRoute(PST, "/api/llm/sessions/:sessionId/messages", llmRoute.sendMessage);
apiRoute(GET, "/api/llm/sessions/:sessionId/messages", llmRoute.sendMessage);
// LLM index management endpoints
apiRoute(GET, "/api/llm/index/stats", llmRoute.getIndexStats);
apiRoute(PST, "/api/llm/index/start", llmRoute.startIndexing);
apiRoute(GET, "/api/llm/index/failed", llmRoute.getFailedIndexes);
apiRoute(PST, "/api/llm/index/retry/:noteId", llmRoute.retryFailedIndex);
apiRoute(PST, "/api/llm/index/retry-all", llmRoute.retryAllFailedIndexes);
apiRoute(PST, "/api/llm/index/similar", llmRoute.findSimilarNotes);
apiRoute(PST, "/api/llm/index/context", llmRoute.generateQueryContext);
apiRoute(PST, "/api/llm/index/notes/:noteId", llmRoute.indexNote);
// LLM index management endpoints - reorganized for REST principles
apiRoute(GET, "/api/llm/indexes/stats", llmRoute.getIndexStats);
apiRoute(PST, "/api/llm/indexes", llmRoute.startIndexing); // Create index process
apiRoute(GET, "/api/llm/indexes/failed", llmRoute.getFailedIndexes);
apiRoute(PUT, "/api/llm/indexes/notes/:noteId", llmRoute.retryFailedIndex); // Update index for note
apiRoute(PUT, "/api/llm/indexes/failed", llmRoute.retryAllFailedIndexes); // Update all failed indexes
apiRoute(GET, "/api/llm/indexes/notes/similar", llmRoute.findSimilarNotes); // Get similar notes
apiRoute(GET, "/api/llm/indexes/context", llmRoute.generateQueryContext); // Get context
apiRoute(PST, "/api/llm/indexes/notes/:noteId", llmRoute.indexNote); // Create index for specific note
// Ollama API endpoints
apiRoute(PST, "/api/ollama/list-models", ollamaRoute.listModels);
// OpenAI API endpoints
apiRoute(PST, "/api/openai/list-models", openaiRoute.listModels);
// Anthropic API endpoints
apiRoute(PST, "/api/anthropic/list-models", anthropicRoute.listModels);
// LLM provider endpoints - moved under /api/llm/providers hierarchy
apiRoute(GET, "/api/llm/providers/ollama/models", ollamaRoute.listModels);
apiRoute(GET, "/api/llm/providers/openai/models", openaiRoute.listModels);
apiRoute(GET, "/api/llm/providers/anthropic/models", anthropicRoute.listModels);
// API Documentation
apiDocsRoute.register(app);