refactor(server): add missing override

This commit is contained in:
Elian Doran 2025-05-29 13:34:42 +03:00
parent 2309863d64
commit b88f07c202
No known key found for this signature in database
17 changed files with 54 additions and 54 deletions

View File

@ -103,7 +103,7 @@ class BAttachment extends AbstractBeccaEntity<BAttachment> {
}
/** @returns true if the note has string content (not binary) */
hasStringContent(): boolean {
override hasStringContent(): boolean {
return utils.isStringNote(this.type, this.mime); // here was !== undefined && utils.isStringNote(this.type, this.mime); I don't know why we need != undefined, but it filters out canvas library items
}
@ -203,7 +203,7 @@ class BAttachment extends AbstractBeccaEntity<BAttachment> {
return utils.formatDownloadTitle(this.title, type, this.mime);
}
beforeSaving() {
override beforeSaving() {
super.beforeSaving();
if (this.position === undefined || this.position === null) {
@ -239,7 +239,7 @@ class BAttachment extends AbstractBeccaEntity<BAttachment> {
};
}
getPojoToSave() {
override getPojoToSave() {
const pojo = this.getPojo();
delete pojo.contentLength;

View File

@ -62,7 +62,7 @@ class BAttribute extends AbstractBeccaEntity<BAttribute> {
return this;
}
init() {
override init() {
if (this.attributeId) {
this.becca.attributes[this.attributeId] = this;
}
@ -162,11 +162,11 @@ class BAttribute extends AbstractBeccaEntity<BAttribute> {
}
}
get isDeleted() {
override get isDeleted() {
return !(this.attributeId in this.becca.attributes);
}
beforeSaving(opts: SavingOpts = {}) {
override beforeSaving(opts: SavingOpts = {}) {
if (!opts.skipValidation) {
this.validate();
}

View File

@ -63,7 +63,7 @@ class BBranch extends AbstractBeccaEntity<BBranch> {
return this;
}
init() {
override init() {
if (this.branchId) {
this.becca.branches[this.branchId] = this;
}
@ -115,7 +115,7 @@ class BBranch extends AbstractBeccaEntity<BBranch> {
return this.becca.notes[this.parentNoteId];
}
get isDeleted() {
override get isDeleted() {
return this.branchId == undefined || !(this.branchId in this.becca.branches);
}
@ -204,7 +204,7 @@ class BBranch extends AbstractBeccaEntity<BBranch> {
}
}
beforeSaving() {
override beforeSaving() {
if (!this.noteId || !this.parentNoteId) {
throw new Error(`noteId and parentNoteId are mandatory properties for Branch`);
}

View File

@ -41,7 +41,7 @@ class BEtapiToken extends AbstractBeccaEntity<BEtapiToken> {
this.init();
}
get isDeleted() {
override get isDeleted() {
return !!this._isDeleted;
}
@ -58,7 +58,7 @@ class BEtapiToken extends AbstractBeccaEntity<BEtapiToken> {
}
}
init() {
override init() {
if (this.etapiTokenId) {
this.becca.etapiTokens[this.etapiTokenId] = this;
}
@ -75,7 +75,7 @@ class BEtapiToken extends AbstractBeccaEntity<BEtapiToken> {
};
}
beforeSaving() {
override beforeSaving() {
this.utcDateModified = dateUtils.utcNowDateTime();
super.beforeSaving();

View File

@ -160,7 +160,7 @@ class BNote extends AbstractBeccaEntity<BNote> {
return this;
}
init() {
override init() {
this.parentBranches = [];
this.parents = [];
this.children = [];
@ -318,7 +318,7 @@ class BNote extends AbstractBeccaEntity<BNote> {
}
/** @returns true if the note has string content (not binary) */
hasStringContent() {
override hasStringContent() {
return utils.isStringNote(this.type, this.mime);
}
@ -1557,7 +1557,7 @@ class BNote extends AbstractBeccaEntity<BNote> {
return this.noteId.startsWith("_options");
}
get isDeleted() {
override get isDeleted() {
// isBeingDeleted is relevant only in the transition period when the deletion process has begun, but not yet
// finished (note is still in becca)
return !(this.noteId in this.becca.notes) || this.isBeingDeleted;
@ -1672,7 +1672,7 @@ class BNote extends AbstractBeccaEntity<BNote> {
return utils.formatDownloadTitle(this.title, this.type, this.mime);
}
beforeSaving() {
override beforeSaving() {
super.beforeSaving();
this.becca.addNote(this.noteId, this);
@ -1697,7 +1697,7 @@ class BNote extends AbstractBeccaEntity<BNote> {
};
}
getPojoToSave() {
override getPojoToSave() {
const pojo = this.getPojo();
if (pojo.isProtected) {

View File

@ -46,7 +46,7 @@ class BNoteEmbedding extends AbstractBeccaEntity<BNoteEmbedding> {
this.utcDateModified = row.utcDateModified;
}
beforeSaving() {
override beforeSaving() {
super.beforeSaving();
this.dateModified = dateUtils.localNowDateTime();

View File

@ -37,7 +37,7 @@ class BOption extends AbstractBeccaEntity<BOption> {
this.utcDateModified = row.utcDateModified;
}
beforeSaving() {
override beforeSaving() {
super.beforeSaving();
this.utcDateModified = dateUtils.utcNowDateTime();

View File

@ -75,7 +75,7 @@ class BRevision extends AbstractBeccaEntity<BRevision> {
}
/** @returns true if the note has string content (not binary) */
hasStringContent(): boolean {
override hasStringContent(): boolean {
return utils.isStringNote(this.type, this.mime);
}
@ -179,7 +179,7 @@ class BRevision extends AbstractBeccaEntity<BRevision> {
}
}
beforeSaving() {
override beforeSaving() {
super.beforeSaving();
this.utcDateModified = dateUtils.utcNowDateTime();
@ -204,7 +204,7 @@ class BRevision extends AbstractBeccaEntity<BRevision> {
};
}
getPojoToSave() {
override getPojoToSave() {
const pojo = this.getPojo();
delete pojo.content; // not getting persisted
delete pojo.contentLength; // not getting persisted

View File

@ -13,7 +13,7 @@ import { normalizeMimeTypeForCKEditor } from "@triliumnext/commons";
*/
class CustomMarkdownRenderer extends Renderer {
heading(data: Tokens.Heading): string {
override heading(data: Tokens.Heading): string {
// Treat h1 as raw text.
if (data.depth === 1) {
return `<h1>${data.text}</h1>`;
@ -22,11 +22,11 @@ class CustomMarkdownRenderer extends Renderer {
return super.heading(data).trimEnd();
}
paragraph(data: Tokens.Paragraph): string {
override paragraph(data: Tokens.Paragraph): string {
return super.paragraph(data).trimEnd();
}
code({ text, lang }: Tokens.Code): string {
override code({ text, lang }: Tokens.Code): string {
if (!text) {
return "";
}
@ -41,7 +41,7 @@ class CustomMarkdownRenderer extends Renderer {
return `<pre><code class="language-${ckEditorLanguage}">${text}</code></pre>`;
}
list(token: Tokens.List): string {
override list(token: Tokens.List): string {
let result = super.list(token)
.replace("\n", "") // we replace the first one only.
.trimEnd();
@ -54,13 +54,13 @@ class CustomMarkdownRenderer extends Renderer {
return result;
}
checkbox({ checked }: Tokens.Checkbox): string {
override checkbox({ checked }: Tokens.Checkbox): string {
return '<input type="checkbox"'
+ (checked ? 'checked="checked" ' : '')
+ 'disabled="disabled">';
}
listitem(item: Tokens.ListItem): string {
override listitem(item: Tokens.ListItem): string {
// Handle todo-list in the CKEditor format.
if (item.task) {
let itemBody = '';
@ -91,12 +91,12 @@ class CustomMarkdownRenderer extends Renderer {
return super.listitem(item).trimEnd();
}
image(token: Tokens.Image): string {
override image(token: Tokens.Image): string {
return super.image(token)
.replace(` alt=""`, "");
}
blockquote({ tokens }: Tokens.Blockquote): string {
override blockquote({ tokens }: Tokens.Blockquote): string {
const body = renderer.parser.parse(tokens);
const admonitionMatch = /^<p>\[\!([A-Z]+)\]/.exec(body);

View File

@ -10,7 +10,7 @@ import crypto from "crypto";
* for exact matches when no other providers are available.
*/
export class LocalEmbeddingProvider extends BaseEmbeddingProvider {
name = "local";
override name = "local";
constructor(config: EmbeddingConfig) {
super(config);
@ -60,7 +60,7 @@ export class LocalEmbeddingProvider extends BaseEmbeddingProvider {
/**
* Generate embeddings for multiple texts
*/
async generateBatchEmbeddings(texts: string[]): Promise<Float32Array[]> {
override async generateBatchEmbeddings(texts: string[]): Promise<Float32Array[]> {
const results: Float32Array[] = [];
for (const text of texts) {

View File

@ -10,7 +10,7 @@ import { Ollama } from "ollama";
* Ollama embedding provider implementation using the official Ollama client
*/
export class OllamaEmbeddingProvider extends BaseEmbeddingProvider {
name = "ollama";
override name = "ollama";
private client: Ollama | null = null;
constructor(config: EmbeddingConfig) {
@ -30,7 +30,7 @@ export class OllamaEmbeddingProvider extends BaseEmbeddingProvider {
/**
* Initialize the provider by detecting model capabilities
*/
async initialize(): Promise<void> {
override async initialize(): Promise<void> {
const modelName = this.config.model || "llama3";
try {
// Detect model capabilities
@ -52,7 +52,7 @@ export class OllamaEmbeddingProvider extends BaseEmbeddingProvider {
private async fetchModelCapabilities(modelName: string): Promise<EmbeddingModelInfo | null> {
try {
const client = this.getClient();
// Get model info using the client's show method
const modelData = await client.show({ model: modelName });
@ -169,7 +169,7 @@ export class OllamaEmbeddingProvider extends BaseEmbeddingProvider {
model: modelName,
prompt: "Test"
});
if (embedResponse && Array.isArray(embedResponse.embedding)) {
return embedResponse.embedding.length;
} else {
@ -183,7 +183,7 @@ export class OllamaEmbeddingProvider extends BaseEmbeddingProvider {
/**
* Get the current embedding dimension
*/
getDimension(): number {
override getDimension(): number {
return this.config.dimension;
}
@ -260,7 +260,7 @@ export class OllamaEmbeddingProvider extends BaseEmbeddingProvider {
/**
* More specific implementation of batch size error detection for Ollama
*/
protected isBatchSizeError(error: any): boolean {
protected override isBatchSizeError(error: any): boolean {
const errorMessage = error?.message || '';
const ollamaBatchSizeErrorPatterns = [
'context length', 'token limit', 'out of memory',
@ -279,7 +279,7 @@ export class OllamaEmbeddingProvider extends BaseEmbeddingProvider {
* Note: Ollama API doesn't support batch embedding, so we process them sequentially
* but using the adaptive batch processor to handle rate limits and retries
*/
async generateBatchEmbeddings(texts: string[]): Promise<Float32Array[]> {
override async generateBatchEmbeddings(texts: string[]): Promise<Float32Array[]> {
if (texts.length === 0) {
return [];
}
@ -318,7 +318,7 @@ export class OllamaEmbeddingProvider extends BaseEmbeddingProvider {
* Returns the normalization status for Ollama embeddings
* Ollama embeddings are not guaranteed to be normalized
*/
getNormalizationStatus(): NormalizationStatus {
override getNormalizationStatus(): NormalizationStatus {
return NormalizationStatus.NEVER; // Be conservative and always normalize
}
}

View File

@ -11,7 +11,7 @@ import { PROVIDER_EMBEDDING_CAPABILITIES } from '../../constants/search_constant
* OpenAI embedding provider implementation using the official SDK
*/
export class OpenAIEmbeddingProvider extends BaseEmbeddingProvider {
name = "openai";
override name = "openai";
private client: OpenAI | null = null;
constructor(config: EmbeddingConfig) {
@ -34,7 +34,7 @@ export class OpenAIEmbeddingProvider extends BaseEmbeddingProvider {
/**
* Initialize the provider by detecting model capabilities
*/
async initialize(): Promise<void> {
override async initialize(): Promise<void> {
const modelName = this.config.model || "text-embedding-3-small";
try {
// Initialize client if needed
@ -222,7 +222,7 @@ export class OpenAIEmbeddingProvider extends BaseEmbeddingProvider {
/**
* More specific implementation of batch size error detection for OpenAI
*/
protected isBatchSizeError(error: any): boolean {
protected override isBatchSizeError(error: any): boolean {
const errorMessage = error?.message || '';
const openAIBatchSizeErrorPatterns = [
'batch size', 'too many inputs', 'context length exceeded',
@ -272,7 +272,7 @@ export class OpenAIEmbeddingProvider extends BaseEmbeddingProvider {
* Generate embeddings for multiple texts in a single batch
* OpenAI API supports batch embedding, so we implement a custom version
*/
async generateBatchEmbeddings(texts: string[]): Promise<Float32Array[]> {
override async generateBatchEmbeddings(texts: string[]): Promise<Float32Array[]> {
if (texts.length === 0) {
return [];
}
@ -312,7 +312,7 @@ export class OpenAIEmbeddingProvider extends BaseEmbeddingProvider {
* Returns the normalization status for OpenAI embeddings
* OpenAI embeddings are guaranteed to be normalized to unit length
*/
getNormalizationStatus(): NormalizationStatus {
override getNormalizationStatus(): NormalizationStatus {
return NormalizationStatus.GUARANTEED;
}
}

View File

@ -17,7 +17,7 @@ const VOYAGE_MODEL_DIMENSIONS = Object.entries(PROVIDER_EMBEDDING_CAPABILITIES.V
* Voyage AI embedding provider implementation
*/
export class VoyageEmbeddingProvider extends BaseEmbeddingProvider {
name = "voyage";
override name = "voyage";
constructor(config: EmbeddingConfig) {
super(config);
@ -31,7 +31,7 @@ export class VoyageEmbeddingProvider extends BaseEmbeddingProvider {
/**
* Initialize the provider by detecting model capabilities
*/
async initialize(): Promise<void> {
override async initialize(): Promise<void> {
const modelName = this.config.model || "voyage-2";
try {
// Detect model capabilities
@ -201,7 +201,7 @@ export class VoyageEmbeddingProvider extends BaseEmbeddingProvider {
/**
* More specific implementation of batch size error detection for Voyage AI
*/
protected isBatchSizeError(error: any): boolean {
protected override isBatchSizeError(error: any): boolean {
const errorMessage = error?.message || '';
const voyageBatchSizeErrorPatterns = [
'batch size', 'too many inputs', 'context length exceeded',
@ -217,7 +217,7 @@ export class VoyageEmbeddingProvider extends BaseEmbeddingProvider {
/**
* Generate embeddings for multiple texts in a single batch
*/
async generateBatchEmbeddings(texts: string[]): Promise<Float32Array[]> {
override async generateBatchEmbeddings(texts: string[]): Promise<Float32Array[]> {
if (texts.length === 0) {
return [];
}
@ -279,7 +279,7 @@ export class VoyageEmbeddingProvider extends BaseEmbeddingProvider {
* Returns the normalization status for Voyage embeddings
* Voyage embeddings are generally normalized by the API
*/
getNormalizationStatus(): NormalizationStatus {
override getNormalizationStatus(): NormalizationStatus {
return NormalizationStatus.GUARANTEED;
}
}

View File

@ -87,7 +87,7 @@ export class OpenAIMessageFormatter extends BaseMessageFormatter {
* Clean context content for OpenAI
* OpenAI handles HTML better than Ollama but still benefits from some cleaning
*/
cleanContextContent(content: string): string {
override cleanContextContent(content: string): string {
if (!content) return '';
try {

View File

@ -30,7 +30,7 @@ export class AnthropicService extends BaseAIService {
super('Anthropic');
}
isAvailable(): boolean {
override isAvailable(): boolean {
return super.isAvailable() && !!options.getOption('anthropicApiKey');
}

View File

@ -63,7 +63,7 @@ export class OllamaService extends BaseAIService {
this.formatter = new OllamaMessageFormatter();
}
isAvailable(): boolean {
override isAvailable(): boolean {
return super.isAvailable() && !!options.getOption('ollamaBaseUrl');
}

View File

@ -11,7 +11,7 @@ export class OpenAIService extends BaseAIService {
super('OpenAI');
}
isAvailable(): boolean {
override isAvailable(): boolean {
return super.isAvailable() && !!options.getOption('openaiApiKey');
}