Mirror of https://github.com/TriliumNext/Notes.git (synced 2025-08-11 11:02:27 +08:00)
add embedding recreation button back

Commit 9dd76873ac, parent d8d41a14cf
@@ -264,6 +264,9 @@ export default class LlmChatPanel extends BasicWidget {
         }
     }

+    /**
+     * Handle sending a user message to the LLM service
+     */
     private async sendMessage(content: string) {
         if (!content.trim() || !this.sessionId) {
             return;
@@ -272,21 +275,10 @@ export default class LlmChatPanel extends BasicWidget {
         // Check for provider validation issues before sending
         await this.validateEmbeddingProviders();

-        // Add user message to the chat
-        this.addMessageToChat('user', content);
+        // Process the user message
+        await this.processUserMessage(content);

-        // Add to our local message array too
-        this.messages.push({
-            role: 'user',
-            content,
-            timestamp: new Date()
-        });
-
-        // Save to note
-        this.saveCurrentData().catch(err => {
-            console.error("Failed to save user message to note:", err);
-        });
-
+        // Clear input and show loading state
         this.noteContextChatInput.value = '';
         this.showLoadingIndicator();
         this.hideSources();
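Taken together with the next hunk, sendMessage shrinks to a guard, provider validation, user-message bookkeeping, and a direct-response-or-stream decision. A minimal standalone sketch of that decision, assuming a postMessage helper standing in for server.post and an openStream callback standing in for setupStreamingResponse (both names are illustrative, not from the codebase):

    // Hedged sketch of the refactored send flow; `postMessage` and `openStream`
    // are stand-ins, not real APIs from this repository.
    type DirectResponse = { content?: string; sources?: unknown[] } | null;

    async function send(
        content: string,
        postMessage: () => Promise<DirectResponse>,
        openStream: () => Promise<void>
    ): Promise<void> {
        if (!content.trim()) return;            // mirrors the empty-input guard above
        const response = await postMessage();   // first try a direct (non-streaming) answer
        if (response?.content) return;          // direct response handled; nothing more to do
        await openStream();                     // otherwise fall back to SSE streaming
    }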
@@ -306,183 +298,260 @@ export default class LlmChatPanel extends BasicWidget {
                 showThinking
             };

-            // First, send the message via POST request
-            const postResponse = await server.post<any>(`llm/sessions/${this.sessionId}/messages`, messageParams);
+            // First try to get a direct response
+            const handled = await this.handleDirectResponse(messageParams);
+            if (handled) return;

-            // If the POST request returned content directly, display it
-            if (postResponse && postResponse.content) {
-                this.addMessageToChat('assistant', postResponse.content);
+            // If no direct response, set up streaming
+            await this.setupStreamingResponse(messageParams);
+        } catch (error) {
+            this.handleError(error as Error);
+        }
+    }

-                // Add to our local message array too
-                this.messages.push({
-                    role: 'assistant',
-                    content: postResponse.content,
-                    timestamp: new Date()
-                });
+    /**
+     * Process a new user message - add to UI and save
+     */
+    private async processUserMessage(content: string) {
+        // Add user message to the chat UI
+        this.addMessageToChat('user', content);

-                // Save to note
-                this.saveCurrentData().catch(err => {
-                    console.error("Failed to save assistant response to note:", err);
-                });
+        // Add to our local message array too
+        this.messages.push({
+            role: 'user',
+            content,
+            timestamp: new Date()
+        });

-                // If there are sources, show them
-                if (postResponse.sources && postResponse.sources.length > 0) {
-                    this.showSources(postResponse.sources);
-                }
+        // Save to note
+        this.saveCurrentData().catch(err => {
+            console.error("Failed to save user message to note:", err);
+        });
+    }

-                this.hideLoadingIndicator();
-                return;
+    /**
+     * Try to get a direct response from the server
+     * @returns true if response was handled, false if streaming should be used
+     */
+    private async handleDirectResponse(messageParams: any): Promise<boolean> {
+        // Send the message via POST request
+        const postResponse = await server.post<any>(`llm/sessions/${this.sessionId}/messages`, messageParams);

+        // If the POST request returned content directly, display it
+        if (postResponse && postResponse.content) {
+            this.processAssistantResponse(postResponse.content);

+            // If there are sources, show them
+            if (postResponse.sources && postResponse.sources.length > 0) {
+                this.showSources(postResponse.sources);
             }

-            // Then set up streaming via EventSource
-            const streamUrl = `./api/llm/sessions/${this.sessionId}/messages?format=stream&useAdvancedContext=${useAdvancedContext}&showThinking=${showThinking}`;
-            const source = new EventSource(streamUrl);
+            this.hideLoadingIndicator();
+            return true;
+        }

-            let assistantResponse = '';
-            let receivedAnyContent = false;
-            let timeoutId: number | null = null;
+        return false;
+    }

-            // Set a timeout to handle case where streaming doesn't work properly
-            timeoutId = window.setTimeout(() => {
-                if (!receivedAnyContent) {
-                    // If we haven't received any content after a reasonable timeout (10 seconds),
-                    // add a fallback message and close the stream
-                    this.hideLoadingIndicator();
-                    const errorMessage = 'I\'m having trouble generating a response right now. Please try again later.';
-                    this.addMessageToChat('assistant', errorMessage);
+    /**
+     * Process an assistant response - add to UI and save
+     */
+    private async processAssistantResponse(content: string) {
+        // Add the response to the chat UI
+        this.addMessageToChat('assistant', content);

         // Add to our local message array too
         this.messages.push({
             role: 'assistant',
-            content: errorMessage,
+            content,
             timestamp: new Date()
         });

         // Save to note
         this.saveCurrentData().catch(err => {
-            console.error("Failed to save assistant error response to note:", err);
+            console.error("Failed to save assistant response to note:", err);
         });
+    }

-                    source.close();
-                }
-            }, 10000);
+    /**
+     * Set up streaming response from the server
+     */
+    private async setupStreamingResponse(messageParams: any) {
+        const useAdvancedContext = messageParams.useAdvancedContext;
+        const showThinking = messageParams.showThinking;

-            // Handle streaming response
-            source.onmessage = (event) => {
-                if (event.data === '[DONE]') {
-                    // Stream completed
-                    source.close();
-                    this.hideLoadingIndicator();
+        // Set up streaming via EventSource
+        const streamUrl = `./api/llm/sessions/${this.sessionId}/messages?format=stream&useAdvancedContext=${useAdvancedContext}&showThinking=${showThinking}`;
+        const source = new EventSource(streamUrl);

-                    // Clear the timeout since we're done
-                    if (timeoutId !== null) {
-                        window.clearTimeout(timeoutId);
-                    }
+        let assistantResponse = '';
+        let receivedAnyContent = false;
+        let timeoutId: number | null = null;

-                    // If we didn't receive any content but the stream completed normally,
-                    // display a message to the user
-                    if (!receivedAnyContent) {
-                        const defaultMessage = 'I processed your request, but I don\'t have any specific information to share at the moment.';
-                        this.addMessageToChat('assistant', defaultMessage);
+        // Set up timeout for streaming response
+        timeoutId = this.setupStreamingTimeout(source);

-                        // Add to our local message array too
-                        this.messages.push({
-                            role: 'assistant',
-                            content: defaultMessage,
-                            timestamp: new Date()
-                        });
+        // Handle streaming response
+        source.onmessage = (event) => this.handleStreamingMessage(
+            event,
+            source,
+            timeoutId,
+            assistantResponse,
+            receivedAnyContent
+        );

-                        // Save to note
-                        this.saveCurrentData().catch(err => {
-                            console.error("Failed to save assistant response to note:", err);
-                        });
-                    } else if (assistantResponse) {
-                        // Save the completed streaming response to the message array
-                        this.messages.push({
-                            role: 'assistant',
-                            content: assistantResponse,
-                            timestamp: new Date()
-                        });
+        // Handle streaming errors
+        source.onerror = () => this.handleStreamingError(
+            source,
+            timeoutId,
+            receivedAnyContent
+        );
+    }

-                        // Save to note
-                        this.saveCurrentData().catch(err => {
-                            console.error("Failed to save assistant response to note:", err);
-                        });
-                    }
-                    return;
-                }
+    /**
+     * Set up timeout for streaming response
+     * @returns Timeout ID for the created timeout
+     */
+    private setupStreamingTimeout(source: EventSource): number {
+        // Set a timeout to handle case where streaming doesn't work properly
+        return window.setTimeout(() => {
+            // If we haven't received any content after a reasonable timeout (10 seconds),
+            // add a fallback message and close the stream
+            this.hideLoadingIndicator();
+            const errorMessage = 'I\'m having trouble generating a response right now. Please try again later.';
+            this.processAssistantResponse(errorMessage);
+            source.close();
+        }, 10000);
+    }

-                try {
-                    const data = JSON.parse(event.data);
-                    console.log("Received streaming data:", data); // Debug log
+    /**
+     * Handle messages from the streaming response
+     */
+    private handleStreamingMessage(
+        event: MessageEvent,
+        source: EventSource,
+        timeoutId: number | null,
+        assistantResponse: string,
+        receivedAnyContent: boolean
+    ) {
+        if (event.data === '[DONE]') {
+            this.handleStreamingComplete(source, timeoutId, receivedAnyContent, assistantResponse);
+            return;
+        }

-                    // Handle both content and error cases
-                    if (data.content) {
-                        receivedAnyContent = true;
-                        assistantResponse += data.content;
+        try {
+            const data = JSON.parse(event.data);
+            console.log("Received streaming data:", data); // Debug log

-                        // Update the UI with the accumulated response
-                        const assistantElement = this.noteContextChatMessages.querySelector('.assistant-message:last-child .message-content');
-                        if (assistantElement) {
-                            assistantElement.innerHTML = this.formatMarkdown(assistantResponse);
-                            // Apply syntax highlighting to any code blocks in the updated content
-                            applySyntaxHighlight($(assistantElement as HTMLElement));
-                        } else {
-                            this.addMessageToChat('assistant', assistantResponse);
-                        }
-                    } else if (data.error) {
-                        // Handle error message
-                        this.hideLoadingIndicator();
-                        this.addMessageToChat('assistant', `Error: ${data.error}`);
-                        receivedAnyContent = true;
-                        source.close();
+            // Handle both content and error cases
+            if (data.content) {
+                receivedAnyContent = true;
+                assistantResponse += data.content;

-                        if (timeoutId !== null) {
-                            window.clearTimeout(timeoutId);
-                        }
-                    }
+                // Update the UI with the accumulated response
+                this.updateStreamingUI(assistantResponse);
+            } else if (data.error) {
+                // Handle error message

-                    // Scroll to the bottom
-                    this.chatContainer.scrollTop = this.chatContainer.scrollHeight;
-                } catch (e) {
-                    console.error('Error parsing SSE message:', e, 'Raw data:', event.data);
-                }
-            };

-            source.onerror = () => {
-                source.close();
                 this.hideLoadingIndicator();
+                this.addMessageToChat('assistant', `Error: ${data.error}`);
+                receivedAnyContent = true;
+                source.close();

-                // Clear the timeout if there was an error
                 if (timeoutId !== null) {
                     window.clearTimeout(timeoutId);
                 }
+            }

-                // Only show error message if we haven't received any content yet
-                if (!receivedAnyContent) {
-                    const connectionError = 'Error connecting to the LLM service. Please try again.';
-                    this.addMessageToChat('assistant', connectionError);
+            // Scroll to the bottom
+            this.chatContainer.scrollTop = this.chatContainer.scrollHeight;
+        } catch (e) {
+            console.error('Error parsing SSE message:', e, 'Raw data:', event.data);

-                    // Add to our local message array too
-                    this.messages.push({
-                        role: 'assistant',
-                        content: connectionError,
-                        timestamp: new Date()
-                    });

-                    // Save to note
-                    this.saveCurrentData().catch(err => {
-                        console.error("Failed to save connection error to note:", err);
-                    });
-                }
-            };

-        } catch (error) {
-            this.hideLoadingIndicator();
-            toastService.showError('Error sending message: ' + (error as Error).message);
         }
     }

+    /**
+     * Update the UI with streaming content as it arrives
+     */
+    private updateStreamingUI(assistantResponse: string) {
+        const assistantElement = this.noteContextChatMessages.querySelector('.assistant-message:last-child .message-content');
+        if (assistantElement) {
+            assistantElement.innerHTML = this.formatMarkdown(assistantResponse);
+            // Apply syntax highlighting to any code blocks in the updated content
+            applySyntaxHighlight($(assistantElement as HTMLElement));
+        } else {
+            this.addMessageToChat('assistant', assistantResponse);
+        }
+    }

+    /**
+     * Handle completion of streaming response
+     */
+    private handleStreamingComplete(
+        source: EventSource,
+        timeoutId: number | null,
+        receivedAnyContent: boolean,
+        assistantResponse: string
+    ) {
+        // Stream completed
+        source.close();
+        this.hideLoadingIndicator();

+        // Clear the timeout since we're done
+        if (timeoutId !== null) {
+            window.clearTimeout(timeoutId);
+        }

+        // If we didn't receive any content but the stream completed normally,
+        // display a message to the user
+        if (!receivedAnyContent) {
+            const defaultMessage = 'I processed your request, but I don\'t have any specific information to share at the moment.';
+            this.processAssistantResponse(defaultMessage);
+        } else if (assistantResponse) {
+            // Save the completed streaming response to the message array
+            this.messages.push({
+                role: 'assistant',
+                content: assistantResponse,
+                timestamp: new Date()
+            });

+            // Save to note
+            this.saveCurrentData().catch(err => {
+                console.error("Failed to save assistant response to note:", err);
+            });
+        }
+    }

+    /**
+     * Handle errors during streaming response
+     */
+    private handleStreamingError(
+        source: EventSource,
+        timeoutId: number | null,
+        receivedAnyContent: boolean
+    ) {
+        source.close();
+        this.hideLoadingIndicator();

+        // Clear the timeout if there was an error
+        if (timeoutId !== null) {
+            window.clearTimeout(timeoutId);
+        }

+        // Only show error message if we haven't received any content yet
+        if (!receivedAnyContent) {
+            const connectionError = 'Error connecting to the LLM service. Please try again.';
+            this.processAssistantResponse(connectionError);
+        }
+    }

+    /**
+     * Handle general errors in the send message flow
+     */
+    private handleError(error: Error) {
+        this.hideLoadingIndicator();
+        toastService.showError('Error sending message: ' + error.message);
+    }

     private addMessageToChat(role: 'user' | 'assistant', content: string) {
         const messageElement = document.createElement('div');
         messageElement.className = `chat-message ${role}-message mb-3 d-flex`;
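The wire protocol the panel consumes is visible in the hunk above: each SSE event carries a JSON object with either a content chunk or an error field, and the server ends the stream with a literal [DONE] sentinel. A minimal standalone model of that dispatch, assuming the same message shape (the callback names are illustrative, not from the codebase):

    // Standalone model of the SSE dispatch above; callbacks are illustrative.
    function handleChunk(
        raw: string,
        onContent: (chunk: string) => void,
        onError: (message: string) => void,
        onDone: () => void
    ): void {
        if (raw === '[DONE]') {   // server's end-of-stream sentinel
            onDone();
            return;
        }
        try {
            const data = JSON.parse(raw) as { content?: string; error?: string };
            if (data.content) {
                onContent(data.content);   // accumulate partial assistant text
            } else if (data.error) {
                onError(data.error);       // server-side failure mid-stream
            }
        } catch (e) {
            console.error('Error parsing SSE message:', e, 'Raw data:', raw);
        }
    }

    // Usage: handleChunk('{"content":"Hello"}', c => console.log(c), console.error, () => console.log('done'));

Worth noting about the refactor itself: assistantResponse and receivedAnyContent are a string and a boolean, which TypeScript passes by value, so reassignments inside handleStreamingMessage do not write back to the variables captured in setupStreamingResponse.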
@@ -145,11 +145,28 @@ export default class AiSettingsWidget extends OptionsWidget {
             await this.fetchFailedEmbeddingNotes();
         });

+        // Recreate embeddings button
+        const $recreateEmbeddings = this.$widget.find('.recreate-embeddings');
+        $recreateEmbeddings.on('click', async () => {
+            if (confirm(t("ai_llm.recreate_embeddings_confirm") || "Are you sure you want to recreate all embeddings? This may take a long time.")) {
+                try {
+                    await server.post('embeddings/reprocess');
+                    toastService.showMessage(t("ai_llm.recreate_embeddings_started"));
+
+                    // Start progress polling
+                    this.pollIndexRebuildProgress();
+                } catch (e) {
+                    console.error('Error starting embeddings regeneration:', e);
+                    toastService.showError(t("ai_llm.recreate_embeddings_error"));
+                }
+            }
+        });
+
         // Rebuild index button
         const $rebuildIndex = this.$widget.find('.rebuild-embeddings-index');
         $rebuildIndex.on('click', async () => {
             try {
-                await server.post('embeddings/rebuild');
+                await server.post('embeddings/rebuild-index');
                 toastService.showMessage(t("ai_llm.rebuild_index_started"));

                 // Start progress polling
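The new handler follows a confirm, POST, poll pattern: ask the user, start the server-side job on the embeddings/reprocess endpoint, then reuse the existing index-rebuild progress polling. A minimal sketch of that pattern, with api standing in for Trilium's server helper and poll for pollIndexRebuildProgress (both stand-ins, not real signatures):

    // Hedged sketch of the confirm → POST → poll pattern; `api` and `poll`
    // are stand-ins, not Trilium's actual interfaces.
    async function recreateEmbeddings(
        api: { post(url: string): Promise<unknown> },
        poll: () => void
    ): Promise<void> {
        if (!window.confirm('Recreate all embeddings? This may take a long time.')) {
            return;                                   // user backed out of the long-running action
        }
        try {
            await api.post('embeddings/reprocess');   // endpoint used by the handler above
            poll();                                   // surface progress while the job runs
        } catch (e) {
            console.error('Error starting embeddings regeneration:', e);
        }
    }

The commit also separates two operations that the old UI conflated: reprocess regenerates the embedding vectors themselves, while the renamed rebuild-index endpoint only rebuilds the vector search index, which the new description strings call much faster.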
@@ -340,7 +357,7 @@ export default class AiSettingsWidget extends OptionsWidget {
         if (!this.$widget) return;

         try {
-            const response = await server.get<FailedEmbeddingNotes>('embeddings/failed-notes');
+            const response = await server.get<FailedEmbeddingNotes>('embeddings/failed');

             if (response && response.success) {
                 const failedNotes = response.failedNotes || [];
@@ -269,7 +269,15 @@ export const TPL = `
             <div class="form-text">${t("ai_llm.embedding_auto_update_enabled_description")}</div>
         </div>

-        <!-- Rebuild index button with counter -->
+        <!-- Recreate embeddings button -->
+        <div class="form-group mt-3">
+            <button class="btn btn-outline-primary recreate-embeddings">
+                ${t("ai_llm.recreate_embeddings")}
+            </button>
+            <div class="form-text">${t("ai_llm.recreate_embeddings_description")}</div>
+        </div>
+
+        <!-- Rebuild index button -->
         <div class="form-group mt-3">
             <button class="btn btn-outline-primary rebuild-embeddings-index">
                 ${t("ai_llm.rebuild_index")}
@@ -1204,8 +1204,22 @@
     "enable_automatic_indexing_description": "Automatically generate embeddings for new and updated notes",
     "embedding_auto_update_enabled": "Auto-update Embeddings",
     "embedding_auto_update_enabled_description": "Automatically update embeddings when notes are modified",
+    "recreate_embeddings": "Recreate All Embeddings",
+    "recreate_embeddings_description": "Regenerate all note embeddings from scratch (may take a long time for large note collections)",
+    "recreate_embeddings_started": "Embeddings regeneration started. This may take a long time for large note collections.",
+    "recreate_embeddings_error": "Error starting embeddings regeneration. Check logs for details.",
+    "recreate_embeddings_confirm": "Are you sure you want to recreate all embeddings? This may take a long time for large note collections.",
     "rebuild_index": "Rebuild Index",
-    "rebuild_index_description": "Regenerate all note embeddings (may take some time for large note collections)",
+    "rebuild_index_description": "Rebuild the vector search index for better performance (much faster than recreating embeddings)",
+    "rebuild_index_started": "Embedding index rebuild started. This may take several minutes.",
+    "rebuild_index_error": "Error starting index rebuild. Check logs for details.",
+    "note_title": "Note Title",
+    "error": "Error",
+    "last_attempt": "Last Attempt",
+    "actions": "Actions",
+    "retry": "Retry",
+    "retry_queued": "Note queued for retry",
+    "retry_failed": "Failed to queue note for retry",
     "embedding_provider_precedence_description": "Comma-separated list of providers in order of precedence for embeddings search (e.g., 'openai,ollama,anthropic')",
     "embedding_dimension_strategy": "Embedding Dimension Strategy",
     "embedding_dimension_auto": "Auto (Recommended)",
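The settings widget reads these strings through t() with an inline default, as in t("ai_llm.recreate_embeddings_confirm") || "Are you sure…" in the handler above. A toy lookup showing why that fallback fires when a key is absent (this bundle is a stand-in for the app's real i18n resources, not its actual behavior):

    // Toy lookup modelling the t(key) || fallback pattern; the bundle is a
    // stand-in for the real i18n resources.
    const bundle: Record<string, string> = {
        "ai_llm.recreate_embeddings": "Recreate All Embeddings",
        "ai_llm.rebuild_index": "Rebuild Index",
    };

    const t = (key: string): string | undefined => bundle[key];

    const confirmText = t("ai_llm.recreate_embeddings_confirm")
        || "Are you sure you want to recreate all embeddings? This may take a long time.";
    console.log(confirmText); // falls back: the key is missing from this toy bundle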