From c4429436726386b207a3a858eecd3e19787e10c9 Mon Sep 17 00:00:00 2001
From: perf3ct
Date: Sat, 8 Mar 2025 22:03:58 +0000
Subject: [PATCH] add additional AI / LLM options and translations

---
 .../type_widgets/options/ai_settings.ts      | 86 +++++++++++++++++++
 src/public/translations/en/translation.json  | 16 +++-
 2 files changed, 101 insertions(+), 1 deletion(-)

diff --git a/src/public/app/widgets/type_widgets/options/ai_settings.ts b/src/public/app/widgets/type_widgets/options/ai_settings.ts
index f7c9d4900..e0bf6259a 100644
--- a/src/public/app/widgets/type_widgets/options/ai_settings.ts
+++ b/src/public/app/widgets/type_widgets/options/ai_settings.ts
@@ -1,6 +1,8 @@
 import OptionsWidget from "./options_widget.js";
 import { t } from "../../../services/i18n.js";
 import type { FilterOptionsByType, OptionMap } from "../../../../../services/options_interface.js";
+import server from "../../../services/server.js";
+import toastService from "../../../services/toast.js";
 
 export default class AiSettingsWidget extends OptionsWidget {
     doRender() {
@@ -111,6 +113,45 @@ export default class AiSettingsWidget extends OptionsWidget {
${t("ai_llm.ollama_model_description")}
+ +
+ +
+
${t("ai_llm.embedding_configuration")}
+ +
+ +
${t("ai_llm.enable_auto_update_embeddings_description")}
+
+ +
+ + +
${t("ai_llm.embedding_batch_size_description")}
+
+ +
+ + +
${t("ai_llm.embedding_update_interval_description")}
+
+ +
+ + +
${t("ai_llm.embedding_default_dimension_description")}
+
+ +
+ +
${t("ai_llm.reprocess_all_embeddings_description")}
+
+
         `);

         const $aiEnabled = this.$widget.find('.ai-enabled');
@@ -179,6 +220,44 @@ export default class AiSettingsWidget extends OptionsWidget {
             await this.updateOption('ollamaDefaultModel', $ollamaDefaultModel.val() as string);
         });
 
+        // Embedding options event handlers
+        const $embeddingAutoUpdateEnabled = this.$widget.find('.embedding-auto-update-enabled');
+        $embeddingAutoUpdateEnabled.on('change', async () => {
+            await this.updateOption('embeddingAutoUpdateEnabled', $embeddingAutoUpdateEnabled.prop('checked') ? "true" : "false");
+        });
+
+        const $embeddingBatchSize = this.$widget.find('.embedding-batch-size');
+        $embeddingBatchSize.on('change', async () => {
+            await this.updateOption('embeddingBatchSize', $embeddingBatchSize.val() as string);
+        });
+
+        const $embeddingUpdateInterval = this.$widget.find('.embedding-update-interval');
+        $embeddingUpdateInterval.on('change', async () => {
+            await this.updateOption('embeddingUpdateInterval', $embeddingUpdateInterval.val() as string);
+        });
+
+        const $embeddingDefaultDimension = this.$widget.find('.embedding-default-dimension');
+        $embeddingDefaultDimension.on('change', async () => {
+            await this.updateOption('embeddingDefaultDimension', $embeddingDefaultDimension.val() as string);
+        });
+
+        const $embeddingReprocessAll = this.$widget.find('.embedding-reprocess-all');
+        $embeddingReprocessAll.on('click', async () => {
+            $embeddingReprocessAll.prop('disabled', true);
+            $embeddingReprocessAll.text(t("ai_llm.reprocessing_embeddings"));
+
+            try {
+                await server.post('embeddings/reprocess');
+                toastService.showMessage(t("ai_llm.reprocess_started"));
+            } catch (error) {
+                console.error("Error reprocessing embeddings:", error);
+                toastService.showError(t("ai_llm.reprocess_error"));
+            } finally {
+                $embeddingReprocessAll.prop('disabled', false);
+                $embeddingReprocessAll.text(t("ai_llm.reprocess_all_embeddings"));
+            }
+        });
+
         return this.$widget;
     }
 
@@ -188,6 +267,7 @@ export default class AiSettingsWidget extends OptionsWidget {
         const aiEnabled = this.$widget.find('.ai-enabled').prop('checked');
         this.$widget.find('.ai-providers-section').toggle(aiEnabled);
         this.$widget.find('.ai-provider').toggle(aiEnabled);
+        this.$widget.find('.embedding-section').toggle(aiEnabled);
     }
 
     optionsLoaded(options: OptionMap) {
@@ -211,6 +291,12 @@ export default class AiSettingsWidget extends OptionsWidget {
         this.$widget.find('.ollama-base-url').val(options.ollamaBaseUrl);
         this.$widget.find('.ollama-default-model').val(options.ollamaDefaultModel);
 
+        // Load embedding options
+        this.setCheckboxState(this.$widget.find('.embedding-auto-update-enabled'), options.embeddingAutoUpdateEnabled);
+        this.$widget.find('.embedding-batch-size').val(options.embeddingBatchSize);
+        this.$widget.find('.embedding-update-interval').val(options.embeddingUpdateInterval);
+        this.$widget.find('.embedding-default-dimension').val(options.embeddingDefaultDimension);
+
         this.updateAiSectionVisibility();
     }
 }
diff --git a/src/public/translations/en/translation.json b/src/public/translations/en/translation.json
index cc2ccffc8..03512d06b 100644
--- a/src/public/translations/en/translation.json
+++ b/src/public/translations/en/translation.json
@@ -1145,7 +1145,21 @@
         "enable_ollama": "Enable Ollama",
         "enable_ollama_description": "Enable Ollama for local AI model usage",
         "ollama_url_description": "Default: http://localhost:11434",
-        "ollama_model_description": "Examples: llama3, mistral, phi3"
+        "ollama_model_description": "Examples: llama3, mistral, phi3",
+        "embedding_configuration": "Embeddings Configuration",
+        "enable_auto_update_embeddings": "Auto-update Embeddings",
+        "enable_auto_update_embeddings_description": "Automatically update embeddings when notes are modified",
+        "embedding_batch_size": "Batch Size",
+        "embedding_batch_size_description": "Number of notes to process in a single batch (1-50)",
+        "embedding_update_interval": "Update Interval (ms)",
+        "embedding_update_interval_description": "Time between processing batches of embeddings (in milliseconds)",
+        "embedding_default_dimension": "Default Dimension",
+        "embedding_default_dimension_description": "Default embedding vector dimension when creating new embeddings",
+        "reprocess_all_embeddings": "Reprocess All Notes",
+        "reprocess_all_embeddings_description": "Queue all notes for embedding generation or update",
+        "reprocessing_embeddings": "Processing...",
+        "reprocess_started": "All notes have been queued for embedding processing",
+        "reprocess_error": "Error starting embedding reprocessing"
     },
     "zoom_factor": {
         "title": "Zoom Factor (desktop build only)",