import options from '../../options.js';
import { BaseAIService } from '../base_ai_service.js';
import type { ChatCompletionOptions, ChatResponse, Message, StreamChunk } from '../ai_interface.js';
import { getOpenAIOptions } from './providers.js';
import OpenAI from 'openai';

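/**
 * Chat completion service backed by the official `openai` SDK. The API key,
 * model, and sampling defaults are resolved through the central provider
 * manager (`getOpenAIOptions()`) and the options store.
 */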
export class OpenAIService extends BaseAIService {
    private openai: OpenAI | null = null;

    constructor() {
        super('OpenAI');
    }

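    /**
     * Available only when the base AI checks pass and an OpenAI API key is configured.
     */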
    isAvailable(): boolean {
        return super.isAvailable() && !!options.getOption('openaiApiKey');
    }

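    /**
     * Lazily creates and caches the OpenAI client. Note that the client is
     * built once per service instance, so a changed API key or base URL only
     * takes effect on a fresh instance.
     */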
    private getClient(apiKey: string, baseUrl?: string): OpenAI {
        if (!this.openai) {
            this.openai = new OpenAI({
                apiKey,
                baseURL: baseUrl
            });
        }
        return this.openai;
    }

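    /**
     * Send a chat completion request to OpenAI.
     *
     * When streaming is enabled, the returned `ChatResponse` starts with empty
     * `text` and exposes a `stream(callback)` handler; the callback receives
     * each `StreamChunk` and the handler resolves with the complete text.
     * Otherwise the full text is returned directly.
     *
     * Illustrative only (actual call sites live elsewhere in the codebase), a
     * caller might consume the stream roughly like this:
     *
     *     const response = await service.generateChatCompletion(messages, { stream: true });
     *     const fullText = await response.stream?.(async (chunk) => {
     *         console.log(chunk.text);
     *     });
     */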
    async generateChatCompletion(messages: Message[], opts: ChatCompletionOptions = {}): Promise<ChatResponse> {
        if (!this.isAvailable()) {
            throw new Error('OpenAI service is not available. Check API key and AI settings.');
        }

        // Get provider-specific options from the central provider manager
        const providerOptions = getOpenAIOptions(opts);

        // Initialize the OpenAI client
        const client = this.getClient(providerOptions.apiKey, providerOptions.baseUrl);

        const systemPrompt = this.getSystemPrompt(providerOptions.systemPrompt || options.getOption('aiSystemPrompt'));

        // Prepend a system message unless the caller already supplied one
        const systemMessageExists = messages.some(m => m.role === 'system');
        const messagesWithSystem = systemMessageExists
            ? messages
            : [{ role: 'system', content: systemPrompt }, ...messages];

        try {
            // Build the request parameters for the OpenAI SDK
            const params: OpenAI.Chat.ChatCompletionCreateParams = {
                model: providerOptions.model,
                messages: messagesWithSystem as OpenAI.Chat.ChatCompletionMessageParam[],
                temperature: providerOptions.temperature,
                max_tokens: providerOptions.max_tokens,
                stream: providerOptions.stream,
                top_p: providerOptions.top_p,
                frequency_penalty: providerOptions.frequency_penalty,
                presence_penalty: providerOptions.presence_penalty
            };

            // Attach tool definitions when tool calling is enabled
            if (providerOptions.enableTools && providerOptions.tools && providerOptions.tools.length > 0) {
                params.tools = providerOptions.tools as OpenAI.Chat.ChatCompletionTool[];
            }

            if (providerOptions.tool_choice) {
                params.tool_choice = providerOptions.tool_choice as OpenAI.Chat.ChatCompletionToolChoiceOption;
            }

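            // OpenAI streams tool calls as incremental deltas: each delta carries an
            // `index` identifying the call, with the id/type/name typically arriving
            // on the first chunk and the JSON `arguments` string arriving in
            // fragments that must be concatenated across chunks.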
            if (providerOptions.stream) {
                params.stream = true;

                // Open the stream via the SDK
                const stream = await client.chat.completions.create(params);

                // Closure-scoped accumulator for tool calls, indexed by delta `index`
                // (the array may therefore be sparse)
                const accumulatedToolCalls: any[] = [];

                // Return a response whose stream handler will be invoked by the caller
                const response: ChatResponse = {
                    text: '', // initially empty; populated as the stream is consumed
                    model: params.model,
                    provider: this.getName(),
                    // Populated incrementally as tool call deltas are accumulated
                    tool_calls: [],
                    stream: async (callback) => {
                        let completeText = '';

                        try {
                            // Process the stream chunk by chunk
                            if (Symbol.asyncIterator in stream) {
                                for await (const chunk of stream as AsyncIterable<OpenAI.Chat.ChatCompletionChunk>) {
                                    const content = chunk.choices[0]?.delta?.content || '';
                                    const isDone = !!chunk.choices[0]?.finish_reason;

                                    // Tool call deltas, if any, for this chunk
                                    const deltaToolCalls = chunk.choices[0]?.delta?.tool_calls;

                                    if (deltaToolCalls) {
                                        // Merge this chunk's deltas into the accumulator
                                        for (const deltaToolCall of deltaToolCalls) {
                                            const toolCallIndex = deltaToolCall.index;

                                            // First delta for this call: initialize the entry
                                            if (!accumulatedToolCalls[toolCallIndex]) {
                                                accumulatedToolCalls[toolCallIndex] = {
                                                    id: deltaToolCall.id || `call_${toolCallIndex}`,
                                                    type: deltaToolCall.type || 'function',
                                                    function: {
                                                        name: '',
                                                        arguments: ''
                                                    }
                                                };
                                            }

                                            // Update the function name if present
                                            if (deltaToolCall.function?.name) {
                                                accumulatedToolCalls[toolCallIndex].function.name = deltaToolCall.function.name;
                                            }

                                            // Append argument fragments if present
                                            if (deltaToolCall.function?.arguments) {
                                                accumulatedToolCalls[toolCallIndex].function.arguments += deltaToolCall.function.arguments;
                                            }
                                        }

                                        // Keep the response's tool_calls in sync with the accumulator,
                                        // dropping any holes in the sparse array
                                        response.tool_calls = accumulatedToolCalls.filter(Boolean);
                                    }

                                    if (content) {
                                        completeText += content;
                                    }

                                    // Forward the chunk to the caller, including the raw SDK payload
                                    const streamChunk: StreamChunk & { raw: any } = {
                                        text: content,
                                        done: isDone,
                                        raw: chunk
                                    };

                                    // Expose accumulated tool calls through the raw payload for
                                    // compatibility with the tool execution display
                                    if (accumulatedToolCalls.length > 0) {
                                        streamChunk.raw = {
                                            ...streamChunk.raw,
                                            tool_calls: accumulatedToolCalls.filter(Boolean)
                                        };
                                    }

                                    await callback(streamChunk);

                                    if (isDone) {
                                        break;
                                    }
                                }
                            } else {
                                // Fallback: the SDK returned a regular completion instead of a stream
                                console.warn('Stream is not iterable, falling back to non-streaming response');

                                if ('choices' in stream) {
                                    const content = stream.choices[0]?.message?.content || '';
                                    completeText = content;

                                    // Pick up any tool calls from the non-streaming response
                                    const toolCalls = stream.choices[0]?.message?.tool_calls;
                                    if (toolCalls) {
                                        response.tool_calls = toolCalls;
                                    }

                                    await callback({
                                        text: content,
                                        done: true,
                                        raw: stream,
                                        tool_calls: toolCalls
                                    });
                                }
                            }
                        } catch (error) {
                            console.error('Error processing stream:', error);
                            throw error;
                        }

                        // Record the complete text on the response and return it
                        response.text = completeText;
                        return completeText;
                    }
                };

                return response;
            } else {
                // Non-streaming request: wait for the full completion
                params.stream = false;

                const completion = await client.chat.completions.create(params);

                if (!('choices' in completion)) {
                    throw new Error('Unexpected response format from OpenAI API');
                }

                return {
                    text: completion.choices[0].message.content || '',
                    model: completion.model,
                    provider: this.getName(),
                    usage: {
                        promptTokens: completion.usage?.prompt_tokens,
                        completionTokens: completion.usage?.completion_tokens,
                        totalTokens: completion.usage?.total_tokens
                    },
                    tool_calls: completion.choices[0].message.tool_calls
                };
            }
        } catch (error) {
            console.error('OpenAI service error:', error);
            throw error;
        }
    }
}