From 53c756be4f64bc91bada0b6a6760c89344f8214b Mon Sep 17 00:00:00 2001
From: perf3ct
Date: Sun, 30 Mar 2025 19:35:13 +0000
Subject: [PATCH] fix duplicate translations

---
 src/public/translations/en/translation.json | 29 ++-------------------
 1 file changed, 2 insertions(+), 27 deletions(-)

diff --git a/src/public/translations/en/translation.json b/src/public/translations/en/translation.json
index e44f4de20..6c08a11f0 100644
--- a/src/public/translations/en/translation.json
+++ b/src/public/translations/en/translation.json
@@ -1142,8 +1142,6 @@
     "ollama_tab": "Ollama",
     "enable_ai": "Enable AI/LLM features",
     "enable_ai_desc": "Enable AI features like note summarization, content generation, and other LLM capabilities",
-    "enable_ai_features": "Enable AI/LLM features",
-    "enable_ai_description": "Enable AI features like note summarization, content generation, and other LLM capabilities",
     "provider_configuration": "AI Provider Configuration",
     "provider_precedence": "Provider Precedence",
     "provider_precedence_description": "Comma-separated list of providers in order of precedence (e.g., 'openai,anthropic,ollama')",
@@ -1156,39 +1154,31 @@
     "api_key": "API Key",
     "url": "Base URL",
     "model": "Model",
-    "refresh_models": "Refresh Models",
     "openai_api_key_description": "Your OpenAI API key for accessing their AI services",
     "anthropic_api_key_description": "Your Anthropic API key for accessing Claude models",
     "default_model": "Default Model",
-    "model": "Model",
     "openai_model_description": "Examples: gpt-4o, gpt-4-turbo, gpt-3.5-turbo",
     "embedding_model": "Embedding Model",
     "openai_embedding_model_description": "Model used for generating embeddings (text-embedding-3-small recommended)",
     "base_url": "Base URL",
-    "url": "URL",
     "openai_url_description": "Default: https://api.openai.com/v1",
     "anthropic_settings": "Anthropic Settings",
     "anthropic_url_description": "Base URL for the Anthropic API (default: https://api.anthropic.com)",
     "anthropic_model_description": "Anthropic Claude models for chat completion",
-    "voyage_settings": "Voyage AI Settings",
+    "voyage_settings": "Voyage AI Settings",
     "voyage_api_key_description": "Your Voyage AI API key for accessing embeddings services",
     "ollama_settings": "Ollama Settings",
     "ollama_url_description": "URL for the Ollama API (default: http://localhost:11434)",
     "ollama_model_description": "Ollama model to use for chat completion",
-    "ollama_embedding_model_description": "Specialized model for generating embeddings (vector representations)",
     "anthropic_configuration": "Anthropic Configuration",
-    "anthropic_model_description": "Examples: claude-3-opus-20240229, claude-3-sonnet-20240229",
     "voyage_embedding_model_description": "Voyage AI embedding models for text embeddings (voyage-2 recommended)",
     "voyage_configuration": "Voyage AI Configuration",
-    "voyage_api_key_description": "Your Voyage AI API key for generating embeddings",
     "voyage_url_description": "Default: https://api.voyageai.com/v1",
     "ollama_configuration": "Ollama Configuration",
     "enable_ollama": "Enable Ollama",
     "enable_ollama_description": "Enable Ollama for local AI model usage",
     "ollama_url": "Ollama URL",
-    "ollama_url_description": "Default: http://localhost:11434",
     "ollama_model": "Ollama Model",
-    "ollama_model_description": "Examples: llama3, mistral, phi3",
     "ollama_embedding_model": "Embedding Model",
     "ollama_embedding_model_description": "Specialized model for generating embeddings (vector representations)",
     "refresh_models": "Refresh Models",
@@ -1199,7 +1189,6 @@
"embedding_provider_precedence": "Embedding Provider Precedence", "embedding_providers_order": "Embedding Provider Order", "embedding_providers_order_description": "Set the order of embedding providers in comma-separated format (e.g., \"openai,voyage,ollama,local\")", - "embeddings_configuration": "Embeddings Configuration", "enable_automatic_indexing": "Enable Automatic Indexing", "enable_automatic_indexing_description": "Automatically generate embeddings for new and updated notes", "embedding_auto_update_enabled": "Auto-update Embeddings", @@ -1215,7 +1204,7 @@ "rebuild_index_error": "Error starting index rebuild. Check logs for details.", "note_title": "Note Title", "error": "Error", - "last_attempt": "Last Attempt", + "last_attempt": "Last Attempt", "actions": "Actions", "retry": "Retry", "retry_queued": "Note queued for retry", @@ -1242,13 +1231,8 @@ "enable_auto_update_embeddings_description": "Automatically update embeddings when notes are modified", "auto_update_embeddings": "Auto-update Embeddings", "auto_update_embeddings_desc": "Automatically update embeddings when notes are modified", - "no_failed_embeddings": "No failed embeddings", - "enable_automatic_indexing": "Automatic Indexing", - "enable_automatic_indexing_description": "Periodically run indexing jobs in the background to maintain the knowledge base", "similarity_threshold": "Similarity Threshold", "similarity_threshold_description": "Minimum similarity score (0-1) for notes to be included in context for LLM queries", - "max_notes_per_llm_query": "Max Notes Per Query", - "max_notes_per_llm_query_description": "Maximum number of notes to include as context for each LLM query", "embedding_batch_size": "Batch Size", "embedding_batch_size_description": "Number of notes to process in a single batch (1-50)", "embedding_update_interval": "Update Interval (ms)", @@ -1271,19 +1255,10 @@ "index_rebuilding": "Optimizing index ({{percentage}}%)", "index_rebuild_complete": "Index optimization complete", "index_rebuild_status_error": "Error checking index rebuild status", - - "embedding_statistics": "Embedding Statistics", - "total_notes": "Total Notes", - "processed_notes": "Processed Notes", - "queued_notes": "Queued Notes", - "failed_notes": "Failed Notes", - "last_processed": "Last Processed", "never": "Never", - "progress": "Progress", "processing": "Processing ({{percentage}}%)", "incomplete": "Incomplete ({{percentage}}%)", "complete": "Complete (100%)", - "refresh_stats": "Refresh Stats", "refreshing": "Refreshing...", "stats_error": "Error fetching embedding statistics", "auto_refresh_notice": "Auto-refreshes every {{seconds}} seconds",