// Notes/src/services/llm/providers/anthropic_service.ts
import options from '../../options.js';
import { BaseAIService } from '../base_ai_service.js';
import type { ChatCompletionOptions, ChatResponse, Message } from '../ai_interface.js';
import { PROVIDER_CONSTANTS } from '../constants/provider_constants.js';
2025-03-02 19:39:10 -08:00
export class AnthropicService extends BaseAIService {
2025-03-26 04:13:04 +00:00
// Map of simplified model names to full model names with versions
private static MODEL_MAPPING: Record<string, string> = {
'claude-3-opus': 'claude-3-opus-20240229',
'claude-3-sonnet': 'claude-3-sonnet-20240229',
'claude-3-haiku': 'claude-3-haiku-20240307',
'claude-2': 'claude-2.1'
};
2025-03-02 19:39:10 -08:00
constructor() {
super('Anthropic');
}
isAvailable(): boolean {
return super.isAvailable() && !!options.getOption('anthropicApiKey');
}
async generateChatCompletion(messages: Message[], opts: ChatCompletionOptions = {}): Promise<ChatResponse> {
if (!this.isAvailable()) {
throw new Error('Anthropic service is not available. Check API key and AI settings.');
}
const apiKey = options.getOption('anthropicApiKey');
const baseUrl = options.getOption('anthropicBaseUrl') || PROVIDER_CONSTANTS.ANTHROPIC.BASE_URL;
const model = opts.model || options.getOption('anthropicDefaultModel') || PROVIDER_CONSTANTS.ANTHROPIC.DEFAULT_MODEL;
2025-03-26 04:13:04 +00:00
2025-03-02 19:39:10 -08:00
const temperature = opts.temperature !== undefined
? opts.temperature
: parseFloat(options.getOption('aiTemperature') || '0.7');
const systemPrompt = this.getSystemPrompt(opts.systemPrompt || options.getOption('aiSystemPrompt'));
// Format for Anthropic's API
const formattedMessages = this.formatMessages(messages, systemPrompt);
try {
2025-03-26 04:13:04 +00:00
// Ensure base URL doesn't already include '/v1' and build the complete endpoint
const cleanBaseUrl = baseUrl.replace(/\/+$/, '').replace(/\/v1$/, '');
const endpoint = `${cleanBaseUrl}/v1/messages`;
console.log(`Anthropic API endpoint: ${endpoint}`);
console.log(`Using model: ${model}`);
2025-03-02 19:39:10 -08:00
const response = await fetch(endpoint, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
2025-03-26 04:13:04 +00:00
'X-Api-Key': apiKey,
'anthropic-version': PROVIDER_CONSTANTS.ANTHROPIC.API_VERSION,
'anthropic-beta': PROVIDER_CONSTANTS.ANTHROPIC.BETA_VERSION
2025-03-02 19:39:10 -08:00
},
body: JSON.stringify({
model,
messages: formattedMessages.messages,
system: formattedMessages.system,
temperature,
max_tokens: opts.maxTokens || 4000,
})
});
if (!response.ok) {
const errorBody = await response.text();
2025-03-26 04:13:04 +00:00
console.error(`Anthropic API error (${response.status}): ${errorBody}`);
2025-03-02 19:39:10 -08:00
throw new Error(`Anthropic API error: ${response.status} ${response.statusText} - ${errorBody}`);
}
const data = await response.json();
return {
text: data.content[0].text,
model: data.model,
provider: this.getName(),
usage: {
// Anthropic doesn't provide token usage in the same format as OpenAI
// but we can still estimate based on input/output length
totalTokens: data.usage?.input_tokens + data.usage?.output_tokens
}
};
} catch (error) {
console.error('Anthropic service error:', error);
throw error;
}
}
private formatMessages(messages: Message[], systemPrompt: string): { messages: any[], system: string } {
// Extract system messages
const systemMessages = messages.filter(m => m.role === 'system');
const nonSystemMessages = messages.filter(m => m.role !== 'system');
// Combine all system messages with our default
const combinedSystemPrompt = [systemPrompt]
.concat(systemMessages.map(m => m.content))
.join('\n\n');
// Format remaining messages for Anthropic's API
const formattedMessages = nonSystemMessages.map(m => ({
2025-03-26 04:13:04 +00:00
role: m.role === 'user' ? 'user' : 'assistant',
2025-03-02 19:39:10 -08:00
content: m.content
}));
return {
messages: formattedMessages,
system: combinedSystemPrompt
};
}
}