Merge pull request #2282 from TriliumNext/feat/add-llm-model-unit-tests

feat(unit): add unit tests around LLM model names within outgoing req…
This commit is contained in:
Elian Doran 2025-06-11 22:10:36 +03:00 committed by GitHub
commit 57ad6065d8
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
2 changed files with 612 additions and 0 deletions

View File

@@ -94,6 +94,83 @@ describe('configuration_helpers', () => {
fullIdentifier: ''
});
});
// parseModelIdentifier must treat punctuation-heavy model names as opaque
// strings: only a recognized provider prefix is split off, everything else
// is passed through byte-for-byte.
it('should handle model names with periods', () => {
    expect(configHelpers.parseModelIdentifier('gpt-4.1-turbo-preview')).toStrictEqual({
        modelId: 'gpt-4.1-turbo-preview',
        fullIdentifier: 'gpt-4.1-turbo-preview'
    });
});

it('should handle model names with provider prefix and periods', () => {
    // A recognized "provider:" prefix is extracted; the remainder is the model id.
    expect(configHelpers.parseModelIdentifier('openai:gpt-4.1-turbo')).toStrictEqual({
        provider: 'openai',
        modelId: 'gpt-4.1-turbo',
        fullIdentifier: 'openai:gpt-4.1-turbo'
    });
});

it('should handle model names with multiple colons', () => {
    // 'custom' is not a known provider, so no prefix is split off.
    expect(configHelpers.parseModelIdentifier('custom:model:v1.2:latest')).toStrictEqual({
        modelId: 'custom:model:v1.2:latest',
        fullIdentifier: 'custom:model:v1.2:latest'
    });
});

it('should handle Ollama model names with colons', () => {
    // Only the first colon separates the provider; later colons stay in the id.
    expect(configHelpers.parseModelIdentifier('ollama:llama3.1:70b-instruct-q4_K_M')).toStrictEqual({
        provider: 'ollama',
        modelId: 'llama3.1:70b-instruct-q4_K_M',
        fullIdentifier: 'ollama:llama3.1:70b-instruct-q4_K_M'
    });
});

it('should handle model names with slashes', () => {
    expect(configHelpers.parseModelIdentifier('library/mistral:7b-instruct')).toStrictEqual({
        modelId: 'library/mistral:7b-instruct',
        fullIdentifier: 'library/mistral:7b-instruct'
    });
});

it('should handle complex model names with special characters', () => {
    const rawName = 'org/model-v1.2.3:tag@version#variant';
    expect(configHelpers.parseModelIdentifier(rawName)).toStrictEqual({
        modelId: rawName,
        fullIdentifier: rawName
    });
});

it('should handle model names with @ symbols', () => {
    expect(configHelpers.parseModelIdentifier('claude-3.5-sonnet@20241022')).toStrictEqual({
        modelId: 'claude-3.5-sonnet@20241022',
        fullIdentifier: 'claude-3.5-sonnet@20241022'
    });
});

it('should not modify or encode special characters', () => {
    const rawName = 'model!@#$%^&*()_+-=[]{}|;:\'",.<>?/~`';
    expect(configHelpers.parseModelIdentifier(rawName)).toStrictEqual({
        modelId: rawName,
        fullIdentifier: rawName
    });
});
});
describe('createModelConfig', () => {
@@ -155,6 +232,34 @@ describe('configuration_helpers', () => {
expect(result).toBe('llama2');
expect(optionService.getOption).toHaveBeenCalledWith('ollamaDefaultModel');
});
// getDefaultModelForProvider should surface the stored option value verbatim,
// no matter what punctuation the configured model name contains.
it('should handle OpenAI model names with periods', async () => {
    const configuredModel = 'gpt-4.1-turbo-preview';
    vi.mocked(optionService.getOption).mockReturnValue(configuredModel);
    await expect(configHelpers.getDefaultModelForProvider('openai')).resolves.toBe(configuredModel);
});

it('should handle Anthropic model names with periods and @ symbols', async () => {
    const configuredModel = 'claude-3.5-sonnet@20241022';
    vi.mocked(optionService.getOption).mockReturnValue(configuredModel);
    await expect(configHelpers.getDefaultModelForProvider('anthropic')).resolves.toBe(configuredModel);
});

it('should handle Ollama model names with colons and slashes', async () => {
    const configuredModel = 'library/llama3.1:70b-instruct-q4_K_M';
    vi.mocked(optionService.getOption).mockReturnValue(configuredModel);
    await expect(configHelpers.getDefaultModelForProvider('ollama')).resolves.toBe(configuredModel);
});
});
describe('getProviderSettings', () => {
@@ -381,4 +486,122 @@
expect(() => configHelpers.clearConfigurationCache()).not.toThrow();
});
});
describe('getValidModelConfig', () => {
    it('should handle model names with special characters', async () => {
        const configuredModel = 'gpt-4.1-turbo@latest';
        // Queue option reads in the order the helper performs them.
        vi.mocked(optionService.getOption)
            .mockReturnValueOnce(configuredModel) // openaiDefaultModel
            .mockReturnValueOnce('test-key')      // openaiApiKey
            .mockReturnValueOnce('')              // openaiBaseUrl
            .mockReturnValueOnce('');             // openaiDefaultModel

        const config = await configHelpers.getValidModelConfig('openai');

        expect(config).toStrictEqual({
            model: configuredModel,
            provider: 'openai'
        });
    });

    it('should handle Anthropic model with complex naming', async () => {
        const configuredModel = 'claude-3.5-sonnet-20241022';
        vi.mocked(optionService.getOption)
            .mockReturnValueOnce(configuredModel) // anthropicDefaultModel
            .mockReturnValueOnce('anthropic-key') // anthropicApiKey
            .mockReturnValueOnce('')              // anthropicBaseUrl
            .mockReturnValueOnce('');             // anthropicDefaultModel

        const config = await configHelpers.getValidModelConfig('anthropic');

        expect(config).toStrictEqual({
            model: configuredModel,
            provider: 'anthropic'
        });
    });

    it('should handle Ollama model with colons', async () => {
        const configuredModel = 'custom/llama3.1:70b-q4_K_M@latest';
        vi.mocked(optionService.getOption)
            .mockReturnValueOnce(configuredModel)          // ollamaDefaultModel
            .mockReturnValueOnce('http://localhost:11434') // ollamaBaseUrl
            .mockReturnValueOnce('');                      // ollamaDefaultModel

        const config = await configHelpers.getValidModelConfig('ollama');

        expect(config).toStrictEqual({
            model: configuredModel,
            provider: 'ollama'
        });
    });
});
describe('getSelectedModelConfig', () => {
    it('should preserve OpenAI model names with special characters', async () => {
        const configuredModel = 'gpt-4.1-turbo-preview@2024';
        // Queue option reads in the order the helper performs them.
        vi.mocked(optionService.getOption)
            .mockReturnValueOnce('openai')        // aiSelectedProvider
            .mockReturnValueOnce(configuredModel) // openaiDefaultModel
            .mockReturnValueOnce('test-key')      // openaiApiKey
            .mockReturnValueOnce('')              // openaiBaseUrl
            .mockReturnValueOnce('');             // openaiDefaultModel

        const config = await configHelpers.getSelectedModelConfig();

        expect(config).toStrictEqual({
            model: configuredModel,
            provider: 'openai'
        });
    });

    it('should handle model names with URL-like patterns', async () => {
        const configuredModel = 'https://models.example.com/gpt-4.1';
        vi.mocked(optionService.getOption)
            .mockReturnValueOnce('openai')        // aiSelectedProvider
            .mockReturnValueOnce(configuredModel) // openaiDefaultModel
            .mockReturnValueOnce('test-key')      // openaiApiKey
            .mockReturnValueOnce('')              // openaiBaseUrl
            .mockReturnValueOnce('');             // openaiDefaultModel

        const config = await configHelpers.getSelectedModelConfig();

        expect(config).toStrictEqual({
            model: configuredModel,
            provider: 'openai'
        });
    });

    it('should handle model names that look like file paths', async () => {
        const configuredModel = '/models/custom/gpt-4.1.safetensors';
        vi.mocked(optionService.getOption)
            .mockReturnValueOnce('ollama')                 // aiSelectedProvider
            .mockReturnValueOnce(configuredModel)          // ollamaDefaultModel
            .mockReturnValueOnce('http://localhost:11434') // ollamaBaseUrl
            .mockReturnValueOnce('');                      // ollamaDefaultModel

        const config = await configHelpers.getSelectedModelConfig();

        expect(config).toStrictEqual({
            model: configuredModel,
            provider: 'ollama'
        });
    });

    it('should handle model names with all possible special characters', async () => {
        const configuredModel = 'model!@#$%^&*()_+-=[]{}|;:\'",.<>?/~`';
        vi.mocked(optionService.getOption)
            .mockReturnValueOnce('anthropic')     // aiSelectedProvider
            .mockReturnValueOnce(configuredModel) // anthropicDefaultModel
            .mockReturnValueOnce('test-key')      // anthropicApiKey
            .mockReturnValueOnce('')              // anthropicBaseUrl
            .mockReturnValueOnce('');             // anthropicDefaultModel

        const config = await configHelpers.getSelectedModelConfig();

        expect(config).toStrictEqual({
            model: configuredModel,
            provider: 'anthropic'
        });
    });
});
});

View File

@@ -0,0 +1,389 @@
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { OpenAIService } from './openai_service.js';
import { AnthropicService } from './anthropic_service.js';
import { OllamaService } from './ollama_service.js';
import type { ChatCompletionOptions } from '../ai_interface.js';
import * as providers from './providers.js';
import options from '../../options.js';
// Mock dependencies
// The option store and logger are stubbed so the services under test never
// touch real configuration or emit real log output.
vi.mock('../../options.js', () => ({
default: {
getOption: vi.fn(),
getOptionBool: vi.fn()
}
}));
vi.mock('../../log.js', () => ({
default: {
info: vi.fn(),
error: vi.fn(),
warn: vi.fn()
}
}));
// Each vendor SDK is replaced with an inert double exposing only the members
// the tests spy on (chat.completions.create / messages.create / chat, show).
// NOTE: vitest hoists these vi.mock calls above the file's imports, so the
// mocked modules are in place before the services load them.
vi.mock('openai', () => ({
default: class MockOpenAI {
chat = {
completions: {
create: vi.fn()
}
};
}
}));
vi.mock('@anthropic-ai/sdk', () => ({
default: class MockAnthropic {
messages = {
create: vi.fn()
};
}
}));
vi.mock('ollama', () => ({
Ollama: class MockOllama {
chat = vi.fn();
show = vi.fn();
}
}));
describe('LLM Model Selection with Special Characters', () => {
beforeEach(() => {
    vi.clearAllMocks();

    // Seed a baseline configuration for every test; keys missing from the
    // map resolve to the empty string.
    const baseline: Record<string, string> = {
        aiEnabled: 'true',
        aiTemperature: '0.7',
        aiSystemPrompt: 'You are a helpful assistant.',
        openaiApiKey: 'test-api-key',
        openaiBaseUrl: 'https://api.openai.com/v1',
        anthropicApiKey: 'test-anthropic-key',
        anthropicBaseUrl: 'https://api.anthropic.com',
        ollamaBaseUrl: 'http://localhost:11434'
    };
    vi.mocked(options.getOption).mockImplementation((key: string) => baseline[key] ?? '');
    vi.mocked(options.getOptionBool).mockReturnValue(true);
});
describe('OpenAI Model Names', () => {
    it('should correctly handle model names with periods', async () => {
        const modelName = 'gpt-4.1-turbo-preview';
        vi.mocked(options.getOption).mockImplementation((key: string) => {
            if (key === 'openaiDefaultModel') return modelName;
            return '';
        });

        const service = new OpenAIService();
        const opts: ChatCompletionOptions = {
            stream: false
        };

        // Spy on getOpenAIOptions to verify the configured model name flows
        // into the request options unchanged.
        const getOpenAIOptionsSpy = vi.spyOn(providers, 'getOpenAIOptions');

        try {
            await service.generateChatCompletion([{ role: 'user', content: 'test' }], opts);
        } catch {
            // Expected to fail: the OpenAI SDK is mocked.
        }

        expect(getOpenAIOptionsSpy).toHaveBeenCalledWith(opts);
        const result = getOpenAIOptionsSpy.mock.results[0].value;
        expect(result.model).toBe(modelName);
    });

    it('should handle model names with slashes', async () => {
        const modelName = 'openai/gpt-4/turbo-2024';
        vi.mocked(options.getOption).mockImplementation((key: string) => {
            if (key === 'openaiDefaultModel') return modelName;
            return '';
        });

        const service = new OpenAIService();
        // Here the model is passed explicitly instead of via the default option.
        const opts: ChatCompletionOptions = {
            model: modelName,
            stream: false
        };

        const getOpenAIOptionsSpy = vi.spyOn(providers, 'getOpenAIOptions');

        try {
            await service.generateChatCompletion([{ role: 'user', content: 'test' }], opts);
        } catch {
            // Expected to fail: the OpenAI SDK is mocked.
        }

        const result = getOpenAIOptionsSpy.mock.results[0].value;
        expect(result.model).toBe(modelName);
    });

    it('should handle model names with colons', async () => {
        const modelName = 'custom:gpt-4:finetuned';
        const opts: ChatCompletionOptions = {
            model: modelName,
            stream: false
        };

        // The option builder is called directly here, so no spy is needed.
        // (An unused vi.spyOn declaration was removed from this test.)
        const openaiOptions = providers.getOpenAIOptions(opts);
        expect(openaiOptions.model).toBe(modelName);
    });

    it('should handle model names with underscores and hyphens', async () => {
        const modelName = 'gpt-4_turbo-preview_v2.1';
        const opts: ChatCompletionOptions = {
            model: modelName,
            stream: false
        };

        const openaiOptions = providers.getOpenAIOptions(opts);
        expect(openaiOptions.model).toBe(modelName);
    });

    it('should handle model names with special characters in API request', async () => {
        const modelName = 'gpt-4.1-turbo@latest';
        vi.mocked(options.getOption).mockImplementation((key: string) => {
            if (key === 'openaiDefaultModel') return modelName;
            if (key === 'openaiApiKey') return 'test-key';
            if (key === 'openaiBaseUrl') return 'https://api.openai.com/v1';
            return '';
        });

        const service = new OpenAIService();
        // Reach into the private client factory so the outgoing payload can be
        // spied on. NOTE(review): assumes generateChatCompletion reuses the
        // client returned by getClient('test-key') — confirm against the service.
        const client = (service as any).getClient('test-key');
        const createSpy = vi.spyOn(client.chat.completions, 'create');

        try {
            await service.generateChatCompletion(
                [{ role: 'user', content: 'test' }],
                { stream: false }
            );
        } catch {
            // Expected to fail: the OpenAI SDK is mocked.
        }

        expect(createSpy).toHaveBeenCalledWith(
            expect.objectContaining({
                model: modelName
            })
        );
    });
});
describe('Anthropic Model Names', () => {
    it('should correctly handle Anthropic model names with periods', async () => {
        const model = 'claude-3.5-sonnet-20241022';
        vi.mocked(options.getOption).mockImplementation((key: string) => {
            if (key === 'anthropicDefaultModel') return model;
            if (key === 'anthropicApiKey') return 'test-key';
            return '';
        });

        // No explicit model in the options: the configured default must be used.
        const requestOptions = providers.getAnthropicOptions({ stream: false });
        expect(requestOptions.model).toBe(model);
    });

    it('should handle Anthropic model names with colons', async () => {
        const model = 'anthropic:claude-3:opus';
        const requestOptions = providers.getAnthropicOptions({ model, stream: false });
        expect(requestOptions.model).toBe(model);
    });

    it('should handle Anthropic model names in API request', async () => {
        const model = 'claude-3.5-sonnet@beta';
        vi.mocked(options.getOption).mockImplementation((key: string) => {
            if (key === 'anthropicDefaultModel') return model;
            if (key === 'anthropicApiKey') return 'test-key';
            if (key === 'anthropicBaseUrl') return 'https://api.anthropic.com';
            return '';
        });

        const service = new AnthropicService();
        // Reach into the private client factory so the outgoing payload can be spied on.
        const client = (service as any).getClient('test-key');
        const createSpy = vi.spyOn(client.messages, 'create');

        try {
            await service.generateChatCompletion(
                [{ role: 'user', content: 'test' }],
                { stream: false }
            );
        } catch {
            // Expected to fail: the Anthropic SDK is mocked.
        }

        expect(createSpy).toHaveBeenCalledWith(
            expect.objectContaining({ model })
        );
    });
});
describe('Ollama Model Names', () => {
    it('should correctly handle Ollama model names with colons', async () => {
        const model = 'llama3.1:70b-instruct-q4_K_M';
        vi.mocked(options.getOption).mockImplementation((key: string) => {
            if (key === 'ollamaDefaultModel') return model;
            if (key === 'ollamaBaseUrl') return 'http://localhost:11434';
            return '';
        });

        // No explicit model: the configured Ollama default must be used.
        const requestOptions = await providers.getOllamaOptions({ stream: false });
        expect(requestOptions.model).toBe(model);
    });

    it('should handle Ollama model names with slashes', async () => {
        const model = 'library/mistral:7b-instruct-v0.3';
        const requestOptions = await providers.getOllamaOptions({ model, stream: false });
        expect(requestOptions.model).toBe(model);
    });

    it('should handle Ollama model names with special characters in options', async () => {
        const defaultModel = 'custom/llama3.1:70b-q4_K_M@latest';
        vi.mocked(options.getOption).mockImplementation((key: string) => {
            if (key === 'ollamaDefaultModel') return defaultModel;
            if (key === 'ollamaBaseUrl') return 'http://localhost:11434';
            return '';
        });

        // Without an explicit model the configured default is preserved...
        const fromDefault = await providers.getOllamaOptions({ stream: false });
        expect(fromDefault.model).toBe(defaultModel);

        // ...and an explicit model in the options takes precedence, unmodified.
        const fromOptions = await providers.getOllamaOptions({
            model: 'another/model:v2.0@beta',
            stream: false
        });
        expect(fromOptions.model).toBe('another/model:v2.0@beta');
    });
});
describe('Model Name Edge Cases', () => {
    it('should handle empty model names gracefully', () => {
        // An empty model string is a configuration error, not a silent default.
        expect(() => providers.getOpenAIOptions({ model: '', stream: false }))
            .toThrow('No OpenAI model configured');
    });

    it('should handle model names with unicode characters', async () => {
        const model = 'gpt-4-日本語-model';
        expect(providers.getOpenAIOptions({ model, stream: false }).model).toBe(model);
    });

    it('should handle model names with spaces (encoded)', async () => {
        const model = 'custom model v2.1';
        expect(providers.getOpenAIOptions({ model, stream: false }).model).toBe(model);
    });

    it('should preserve exact model name without transformation', async () => {
        const model = 'org/model-v1.2.3:tag@version#variant';
        const opts: ChatCompletionOptions = { model, stream: false };

        // All three providers must pass the name through verbatim.
        expect(providers.getOpenAIOptions(opts).model).toBe(model);
        expect(providers.getAnthropicOptions(opts).model).toBe(model);
        expect((await providers.getOllamaOptions(opts)).model).toBe(model);
    });
});
describe('Model Configuration Parsing', () => {
    it('should not confuse provider prefix with model name containing colons', async () => {
        // 'custom' is not a recognized provider, so the colons stay in the name.
        const model = 'custom:model:v1.2';
        expect(providers.getOpenAIOptions({ model, stream: false }).model).toBe(model);
    });

    it('should handle provider prefix correctly', async () => {
        // Even a provider-looking prefix is kept: the full string is the model.
        const model = 'openai:gpt-4.1-turbo';
        expect(providers.getOpenAIOptions({ model, stream: false }).model).toBe(model);
    });
});
describe('Integration with REST API', () => {
    it('should pass model names correctly through REST chat service', async () => {
        const modelName = 'gpt-4.1-turbo-preview@latest';

        // Replace the configuration helpers for the dynamic import below.
        // NOTE(review): because the module is fully mocked, this asserts the
        // mock's own return value rather than real parsing logic — it mainly
        // pins the shape of the contract, not the implementation.
        vi.doMock('../config/configuration_helpers.js', () => ({
            getSelectedModelConfig: vi.fn().mockResolvedValue({
                model: modelName,
                provider: 'openai'
            }),
            isAIEnabled: vi.fn().mockResolvedValue(true)
        }));

        try {
            const { getSelectedModelConfig } = await import('../config/configuration_helpers.js');
            const config = await getSelectedModelConfig();
            expect(config?.model).toBe(modelName);
        } finally {
            // Undo the doMock so later dynamic imports in this file get the
            // real module instead of this test's stub.
            vi.doUnmock('../config/configuration_helpers.js');
        }
    });
});
});