Merge branch 'TriliumNext:develop' into nginx

Author: hulmgulm, 2025-06-08 09:52:18 +02:00 (committed by GitHub)
Commit: 765601bb75
15 changed files with 522 additions and 373 deletions

View File

@@ -1260,6 +1260,7 @@
},
"create_new_ai_chat": "Create new AI Chat",
"configuration_warnings": "There are some issues with your AI configuration. Please check your settings.",
"experimental_warning": "The LLM feature is currently experimental - you have been warned.",
"selected_provider": "Selected Provider",
"selected_provider_description": "Choose the AI provider for chat and completion features",
"select_model": "Select model...",

View File

@@ -350,6 +350,115 @@ export default class LlmChatPanel extends BasicWidget {
}
}
/**
* Save current chat data to a specific note ID
*/
async saveCurrentDataToSpecificNote(targetNoteId: string | null) {
if (!this.onSaveData || !targetNoteId) {
console.warn('Cannot save chat data: no saveData callback or no targetNoteId available');
return;
}
try {
// Extract current tool execution steps if any exist
const toolSteps = extractInChatToolSteps(this.noteContextChatMessages);
// Get tool executions from both UI and any cached executions in metadata
let toolExecutions: Array<{
id: string;
name: string;
arguments: any;
result: any;
error?: string;
timestamp: string;
}> = [];
// First include any tool executions already in metadata (from streaming events)
if (this.metadata?.toolExecutions && Array.isArray(this.metadata.toolExecutions)) {
toolExecutions = [...this.metadata.toolExecutions];
console.log(`Including ${toolExecutions.length} tool executions from metadata`);
}
// Also extract any visible tool steps from the UI
const extractedExecutions = toolSteps.map(step => {
// Parse tool execution information
if (step.type === 'tool-execution') {
try {
const content = JSON.parse(step.content);
return {
id: content.toolCallId || `tool-${Date.now()}-${Math.random().toString(36).substring(2, 7)}`,
name: content.tool || 'unknown',
arguments: content.args || {},
result: content.result || {},
error: content.error,
timestamp: new Date().toISOString()
};
} catch (e) {
// If we can't parse it, create a basic record
return {
id: `tool-${Date.now()}-${Math.random().toString(36).substring(2, 7)}`,
name: 'unknown',
arguments: {},
result: step.content,
timestamp: new Date().toISOString()
};
}
} else if (step.type === 'result' && step.name) {
// Handle result steps with a name
return {
id: `tool-${Date.now()}-${Math.random().toString(36).substring(2, 7)}`,
name: step.name,
arguments: {},
result: step.content,
timestamp: new Date().toISOString()
};
}
return {
id: `tool-${Date.now()}-${Math.random().toString(36).substring(2, 7)}`,
name: 'unknown',
arguments: {},
result: 'Unrecognized tool step',
timestamp: new Date().toISOString()
};
});
// Merge the tool executions, keeping only unique IDs
const existingIds = new Set(toolExecutions.map((t: {id: string}) => t.id));
for (const exec of extractedExecutions) {
if (!existingIds.has(exec.id)) {
toolExecutions.push(exec);
existingIds.add(exec.id);
}
}
const dataToSave = {
messages: this.messages,
noteId: targetNoteId,
chatNoteId: targetNoteId, // For backward compatibility
toolSteps: toolSteps,
// Add sources if we have them
sources: this.sources || [],
// Add metadata
metadata: {
model: this.metadata?.model || undefined,
provider: this.metadata?.provider || undefined,
temperature: this.metadata?.temperature || 0.7,
lastUpdated: new Date().toISOString(),
// Add tool executions
toolExecutions: toolExecutions
}
};
console.log(`Saving chat data to specific note ${targetNoteId}, ${toolSteps.length} tool steps, ${this.sources?.length || 0} sources, ${toolExecutions.length} tool executions`);
// Save the data to the note attribute via the callback
// This is the ONLY place we should save data, letting the container widget handle persistence
await this.onSaveData(dataToSave);
} catch (error) {
console.error('Error saving chat data to specific note:', error);
}
}
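The merge above treats executions captured during streaming (already in metadata) as authoritative and appends UI-extracted steps only when their ID is unseen. A minimal standalone sketch of that dedupe, using a hypothetical ToolExecution shape that mirrors the fields used above:

interface ToolExecution {
    id: string;
    name: string;
    arguments: unknown;
    result: unknown;
    error?: string;
    timestamp: string;
}

function mergeToolExecutions(fromMetadata: ToolExecution[], fromUi: ToolExecution[]): ToolExecution[] {
    // Streaming-sourced executions win; UI steps are appended only for unseen IDs.
    const seen = new Set(fromMetadata.map(exec => exec.id));
    const merged = [...fromMetadata];
    for (const exec of fromUi) {
        if (!seen.has(exec.id)) {
            merged.push(exec);
            seen.add(exec.id);
        }
    }
    return merged;
}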
/**
* Load saved chat data from the note attribute
*/
@@ -867,8 +976,8 @@ export default class LlmChatPanel extends BasicWidget {
this.showSources(postResponse.sources);
}
// Process the assistant response
this.processAssistantResponse(postResponse.content, postResponse);
// Process the assistant response with original chat note ID
this.processAssistantResponse(postResponse.content, postResponse, this.noteId);
hideLoadingIndicator(this.loadingIndicator);
return true;
@@ -884,7 +993,7 @@ export default class LlmChatPanel extends BasicWidget {
/**
* Process an assistant response - add to UI and save
*/
private async processAssistantResponse(content: string, fullResponse?: any) {
private async processAssistantResponse(content: string, fullResponse?: any, originalChatNoteId?: string | null) {
// Add the response to the chat UI
this.addMessageToChat('assistant', content);
@@ -910,8 +1019,8 @@ export default class LlmChatPanel extends BasicWidget {
];
}
// Save to note
this.saveCurrentData().catch(err => {
// Save to note - use original chat note ID if provided
this.saveCurrentDataToSpecificNote(originalChatNoteId || this.noteId).catch(err => {
console.error("Failed to save assistant response to note:", err);
});
}
@@ -936,12 +1045,15 @@ export default class LlmChatPanel extends BasicWidget {
timestamp: string;
}> = [];
// Store the original chat note ID to ensure we save to the correct note even if user switches
const originalChatNoteId = this.noteId;
return setupStreamingResponse(
this.noteId,
messageParams,
// Content update handler
(content: string, isDone: boolean = false) => {
this.updateStreamingUI(content, isDone);
this.updateStreamingUI(content, isDone, originalChatNoteId);
// Update session data with additional metadata when streaming is complete
if (isDone) {
@@ -1067,13 +1179,13 @@ export default class LlmChatPanel extends BasicWidget {
/**
* Update the UI with streaming content
*/
private updateStreamingUI(assistantResponse: string, isDone: boolean = false) {
private updateStreamingUI(assistantResponse: string, isDone: boolean = false, originalChatNoteId?: string | null) {
// Track if we have a streaming message in progress
const hasStreamingMessage = !!this.noteContextChatMessages.querySelector('.assistant-message.streaming');
// Create a new message element or use the existing streaming one
let assistantMessageEl: HTMLElement;
if (hasStreamingMessage) {
// Use the existing streaming message
assistantMessageEl = this.noteContextChatMessages.querySelector('.assistant-message.streaming')!;
@@ -1103,7 +1215,7 @@ export default class LlmChatPanel extends BasicWidget {
if (isDone) {
// Remove the streaming class to mark this message as complete
assistantMessageEl.classList.remove('streaming');
// Apply syntax highlighting
formatCodeBlocks($(assistantMessageEl as HTMLElement));
@@ -1118,8 +1230,8 @@ export default class LlmChatPanel extends BasicWidget {
timestamp: new Date()
});
// Save the updated message list
this.saveCurrentData();
// Save the updated message list to the original chat note
this.saveCurrentDataToSpecificNote(originalChatNoteId || this.noteId);
}
// Scroll to bottom

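Taken together, these panel changes pin every save to the chat note that started the stream. A reduced sketch of the pattern (hypothetical helper with simplified signatures; the real logic lives in the methods above):

function streamThenSave(
    currentNoteId: () => string | null,
    save: (noteId: string) => Promise<void>,
    stream: (onDone: () => void) => void
): void {
    // Pin the target before any async work begins.
    const originalChatNoteId = currentNoteId();
    stream(() => {
        // currentNoteId() may differ by now if the user switched notes while
        // tokens were arriving; prefer the pinned ID, as the
        // saveCurrentDataToSpecificNote(originalChatNoteId || this.noteId) calls above do.
        const target = originalChatNoteId ?? currentNoteId();
        if (target) {
            save(target).catch(err => console.error("Failed to save chat data:", err));
        }
    });
}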
View File

@@ -2,6 +2,7 @@
* Validation functions for LLM Chat
*/
import options from "../../services/options.js";
import { t } from "../../services/i18n.js";
/**
* Validate providers configuration
@@ -37,6 +38,9 @@ export async function validateProviders(validationWarning: HTMLElement): Promise
// Check for configuration issues with providers in the precedence list
const configIssues: string[] = [];
// Always add experimental warning as the first item
configIssues.push(t("ai_llm.experimental_warning"));
// Check each provider in the precedence list for proper configuration
for (const provider of precedenceList) {
if (provider === 'openai') {

View File

@@ -182,17 +182,30 @@ export default class AiChatTypeWidget extends TypeWidget {
// Save chat data to the note
async saveData(data: any) {
if (!this.note) {
// If we have a noteId in the data, that's the AI Chat note we should save to
// This happens when the chat panel is saving its conversation
const targetNoteId = data.noteId;
// If no noteId in data, use the current note (for new chats)
const noteIdToUse = targetNoteId || this.note?.noteId;
if (!noteIdToUse) {
console.warn("Cannot save AI Chat data: no note ID available");
return;
}
try {
console.log(`AiChatTypeWidget: Saving data for note ${this.note.noteId}`);
console.log(`AiChatTypeWidget: Saving data for note ${noteIdToUse} (current note: ${this.note?.noteId}, data.noteId: ${data.noteId})`);
// Safety check: if we have both IDs and they don't match, warn about it
if (targetNoteId && this.note?.noteId && targetNoteId !== this.note.noteId) {
console.warn(`Note ID mismatch: saving to ${targetNoteId} but current note is ${this.note.noteId}`);
}
// Format the data properly - this is the canonical format of the data
const formattedData = {
messages: data.messages || [],
noteId: this.note.noteId, // Always use the note's own ID
noteId: noteIdToUse, // Always preserve the correct note ID
toolSteps: data.toolSteps || [],
sources: data.sources || [],
metadata: {
@@ -201,8 +214,8 @@ export default class AiChatTypeWidget extends TypeWidget {
}
};
// Save the data to the note
await server.put(`notes/${this.note.noteId}/data`, {
// Save the data to the correct note
await server.put(`notes/${noteIdToUse}/data`, {
content: JSON.stringify(formattedData, null, 2)
});
} catch (e) {

View File

@@ -48,7 +48,7 @@ export default class AiSettingsWidget extends OptionsWidget {
if (optionName === 'aiEnabled') {
try {
const isEnabled = value === 'true';
if (isEnabled) {
toastService.showMessage(t("ai_llm.ai_enabled") || "AI features enabled");
} else {
@@ -203,6 +203,11 @@ export default class AiSettingsWidget extends OptionsWidget {
// Get selected provider
const selectedProvider = this.$widget.find('.ai-selected-provider').val() as string;
// Start with experimental warning
const allWarnings = [
t("ai_llm.experimental_warning")
];
// Check for selected provider configuration
const providerWarnings: string[] = [];
if (selectedProvider === 'openai') {
@@ -222,10 +227,8 @@ export default class AiSettingsWidget extends OptionsWidget {
}
}
// Combine all warnings
const allWarnings = [
...providerWarnings
];
// Add provider warnings to all warnings
allWarnings.push(...providerWarnings);
// Show or hide warnings
if (allWarnings.length > 0) {

View File

@@ -0,0 +1,3 @@
TRILIUM_ENV=dev
TRILIUM_RESOURCE_DIR=./apps/server/dist
TRILIUM_PUBLIC_SERVER=http://localhost:4200
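Presumably these are consumed through the standard Node environment; a hypothetical read (the fallback values here are illustrative, not from the commit):

const env = process.env.TRILIUM_ENV ?? "prod";               // "dev" in this file
const resourceDir = process.env.TRILIUM_RESOURCE_DIR ?? "."; // built server assets
const publicServer = process.env.TRILIUM_PUBLIC_SERVER;      // e.g. the client dev server URL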

View File

@@ -11,25 +11,25 @@
"@types/archiver": "6.0.3",
"@types/better-sqlite3": "7.6.13",
"@types/cls-hooked": "4.3.9",
"@types/compression": "1.8.0",
"@types/cookie-parser": "1.4.8",
"@types/compression": "1.8.1",
"@types/cookie-parser": "1.4.9",
"@types/debounce": "1.2.4",
"@types/ejs": "3.1.5",
"@types/escape-html": "1.0.4",
"@types/express-http-proxy": "1.6.6",
"@types/express-session": "1.18.1",
"@types/express-session": "1.18.2",
"@types/fs-extra": "11.0.4",
"@types/html": "1.0.4",
"@types/ini": "4.1.1",
"@types/js-yaml": "4.0.9",
"@types/jsdom": "21.1.7",
"@types/mime-types": "3.0.0",
"@types/multer": "1.4.12",
"@types/mime-types": "3.0.1",
"@types/multer": "1.4.13",
"@types/safe-compare": "1.1.2",
"@types/sanitize-html": "2.16.0",
"@types/sax": "1.2.7",
"@types/serve-favicon": "2.5.7",
"@types/serve-static": "1.15.7",
"@types/serve-static": "1.15.8",
"@types/session-file-store": "1.2.5",
"@types/stream-throttle": "0.1.4",
"@types/supertest": "6.0.3",
@@ -129,6 +129,23 @@
"runBuildTargetDependencies": false
}
},
"serve-nodir": {
"executor": "@nx/js:node",
"dependsOn": [
{
"projects": [
"client"
],
"target": "serve"
},
"build-without-client"
],
"continuous": true,
"options": {
"buildTarget": "server:build-without-client:development",
"runBuildTargetDependencies": false
}
},
"edit-integration-db": {
"executor": "@nx/js:node",
"dependsOn": [

View File

@@ -3,8 +3,8 @@ import build from "./build.js";
import packageJson from "../../package.json" with { type: "json" };
import dataDir from "./data_dir.js";
const APP_DB_VERSION = 231;
const SYNC_VERSION = 35;
const APP_DB_VERSION = 232;
const SYNC_VERSION = 36;
const CLIPPER_PROTOCOL_VERSION = "1.0";
export default {

View File

@@ -40,8 +40,8 @@ interface NoteContext {
}
export class AIServiceManager implements IAIServiceManager {
private services: Partial<Record<ServiceProviders, AIService>> = {};
private currentService: AIService | null = null;
private currentProvider: ServiceProviders | null = null;
private initialized = false;
constructor() {
@@ -50,9 +50,8 @@ export class AIServiceManager implements IAIServiceManager {
log.error(`Error initializing LLM tools during AIServiceManager construction: ${error.message || String(error)}`);
});
// Set up event listener for provider changes
this.setupProviderChangeListener();
// Removed complex provider change listener - we'll read options fresh each time
this.initialized = true;
}
@@ -140,15 +139,15 @@
*/
async getOrCreateAnyService(): Promise<AIService> {
this.ensureInitialized();
// Get the selected provider using the new configuration system
const selectedProvider = await this.getSelectedProviderAsync();
if (!selectedProvider) {
throw new Error('No AI provider is selected. Please select a provider (OpenAI, Anthropic, or Ollama) in your AI settings.');
}
try {
const service = await this.getOrCreateChatProvider(selectedProvider);
if (service) {
@@ -166,7 +165,7 @@
*/
isAnyServiceAvailable(): boolean {
this.ensureInitialized();
// Check if we have the selected provider available
return this.getAvailableProviders().length > 0;
}
@@ -174,43 +173,37 @@
/**
* Get list of available providers
*/
getAvailableProviders(): ServiceProviders[] {
this.ensureInitialized();
const allProviders: ServiceProviders[] = ['openai', 'anthropic', 'ollama'];
const availableProviders: ServiceProviders[] = [];
for (const providerName of allProviders) {
// Use a sync approach - check if we can create the provider
const service = this.services[providerName];
if (service && service.isAvailable()) {
availableProviders.push(providerName);
} else {
// For providers not yet created, check configuration to see if they would be available
try {
switch (providerName) {
case 'openai':
if (options.getOption('openaiApiKey')) {
availableProviders.push(providerName);
}
break;
case 'anthropic':
if (options.getOption('anthropicApiKey')) {
availableProviders.push(providerName);
}
break;
case 'ollama':
if (options.getOption('ollamaBaseUrl')) {
availableProviders.push(providerName);
}
break;
}
} catch (error) {
// Ignore configuration errors, provider just won't be available
// Check configuration to see if provider would be available
try {
switch (providerName) {
case 'openai':
if (options.getOption('openaiApiKey') || options.getOption('openaiBaseUrl')) {
availableProviders.push(providerName);
}
break;
case 'anthropic':
if (options.getOption('anthropicApiKey')) {
availableProviders.push(providerName);
}
break;
case 'ollama':
if (options.getOption('ollamaBaseUrl')) {
availableProviders.push(providerName);
}
break;
}
} catch (error) {
// Ignore configuration errors, provider just won't be available
}
}
return availableProviders;
}
@@ -234,11 +227,11 @@
// Get the selected provider
const selectedProvider = await this.getSelectedProviderAsync();
if (!selectedProvider) {
throw new Error('No AI provider is selected. Please select a provider in your AI settings.');
}
// Check if the selected provider is available
const availableProviders = this.getAvailableProviders();
if (!availableProviders.includes(selectedProvider)) {
@@ -379,47 +372,68 @@
}
/**
* Get or create a chat provider on-demand with inline validation
* Clear the current provider (forces recreation on next access)
*/
public clearCurrentProvider(): void {
this.currentService = null;
this.currentProvider = null;
log.info('Cleared current provider - will be recreated on next access');
}
/**
* Get or create the current provider instance - only one instance total
*/
private async getOrCreateChatProvider(providerName: ServiceProviders): Promise<AIService | null> {
// Return existing provider if already created
if (this.services[providerName]) {
return this.services[providerName];
// If provider type changed, clear the old one
if (this.currentProvider && this.currentProvider !== providerName) {
log.info(`Provider changed from ${this.currentProvider} to ${providerName}, clearing old service`);
this.currentService = null;
this.currentProvider = null;
}
// Create and validate provider on-demand
// Return existing service if it matches and is available
if (this.currentService && this.currentProvider === providerName && this.currentService.isAvailable()) {
return this.currentService;
}
// Clear invalid service
if (this.currentService) {
this.currentService = null;
this.currentProvider = null;
}
// Create new service for the requested provider
try {
let service: AIService | null = null;
switch (providerName) {
case 'openai': {
const apiKey = options.getOption('openaiApiKey');
const baseUrl = options.getOption('openaiBaseUrl');
if (!apiKey && !baseUrl) return null;
service = new OpenAIService();
// Validate by checking if it's available
if (!service.isAvailable()) {
throw new Error('OpenAI service not available');
}
break;
}
case 'anthropic': {
const apiKey = options.getOption('anthropicApiKey');
if (!apiKey) return null;
service = new AnthropicService();
if (!service.isAvailable()) {
throw new Error('Anthropic service not available');
}
break;
}
case 'ollama': {
const baseUrl = options.getOption('ollamaBaseUrl');
if (!baseUrl) return null;
service = new OllamaService();
if (!service.isAvailable()) {
throw new Error('Ollama service not available');
@@ -427,9 +441,12 @@ export class AIServiceManager implements IAIServiceManager {
break;
}
}
if (service) {
this.services[providerName] = service;
// Cache the new service
this.currentService = service;
this.currentProvider = providerName;
log.info(`Created and cached new ${providerName} service`);
return service;
}
} catch (error: any) {
@@ -630,28 +647,47 @@
* Check if a specific provider is available
*/
isProviderAvailable(provider: string): boolean {
return this.services[provider as ServiceProviders]?.isAvailable() ?? false;
// Check if this is the current provider and if it's available
if (this.currentProvider === provider && this.currentService) {
return this.currentService.isAvailable();
}
// For other providers, check configuration
try {
switch (provider) {
case 'openai':
return !!(options.getOption('openaiApiKey') || options.getOption('openaiBaseUrl'));
case 'anthropic':
return !!options.getOption('anthropicApiKey');
case 'ollama':
return !!options.getOption('ollamaBaseUrl');
default:
return false;
}
} catch {
return false;
}
}
/**
* Get metadata about a provider
*/
getProviderMetadata(provider: string): ProviderMetadata | null {
const service = this.services[provider as ServiceProviders];
if (!service) {
return null;
// Only return metadata if this is the current active provider
if (this.currentProvider === provider && this.currentService) {
return {
name: provider,
capabilities: {
chat: true,
streaming: true,
functionCalling: provider === 'openai' // Only OpenAI has function calling
},
models: ['default'], // Placeholder, could be populated from the service
defaultModel: 'default'
};
}
return {
name: provider,
capabilities: {
chat: true,
streaming: true,
functionCalling: provider === 'openai' // Only OpenAI has function calling
},
models: ['default'], // Placeholder, could be populated from the service
defaultModel: 'default'
};
return null;
}
@@ -665,67 +701,8 @@
return String(error);
}
/**
* Set up event listener for provider changes
*/
private setupProviderChangeListener(): void {
// List of AI-related options that should trigger service recreation
const aiRelatedOptions = [
'aiEnabled',
'aiSelectedProvider',
'openaiApiKey',
'openaiBaseUrl',
'openaiDefaultModel',
'anthropicApiKey',
'anthropicBaseUrl',
'anthropicDefaultModel',
'ollamaBaseUrl',
'ollamaDefaultModel'
];
eventService.subscribe(['entityChanged'], async ({ entityName, entity }) => {
if (entityName === 'options' && entity && aiRelatedOptions.includes(entity.name)) {
log.info(`AI-related option '${entity.name}' changed, recreating LLM services`);
// Special handling for aiEnabled toggle
if (entity.name === 'aiEnabled') {
const isEnabled = entity.value === 'true';
if (isEnabled) {
log.info('AI features enabled, initializing AI service');
// Initialize the AI service
await this.initialize();
} else {
log.info('AI features disabled, clearing providers');
// Clear chat providers
this.services = {};
}
} else {
// For other AI-related options, recreate services on-demand
await this.recreateServices();
}
}
});
}
/**
* Recreate LLM services when provider settings change
*/
private async recreateServices(): Promise<void> {
try {
log.info('Recreating LLM services due to configuration change');
// Clear configuration cache first
clearConfigurationCache();
// Clear existing chat providers (they will be recreated on-demand)
this.services = {};
log.info('LLM services recreated successfully');
} catch (error) {
log.error(`Error recreating LLM services: ${this.handleError(error)}`);
}
}
// Removed complex event listener and cache invalidation logic
// Services will be created fresh when needed by reading current options
}

View File

@@ -1,4 +1,3 @@
import configurationManager from './configuration_manager.js';
import optionService from '../../options.js';
import log from '../../log.js';
import type {
@@ -13,7 +12,7 @@ import type {
*/
/**
* Get the selected AI provider
* Get the selected AI provider - always fresh from options
*/
export async function getSelectedProvider(): Promise<ProviderType | null> {
const providerOption = optionService.getOption('aiSelectedProvider');
@@ -25,38 +24,100 @@ export async function getSelectedProvider(): Promise<ProviderType | null> {
* Parse a model identifier (handles "provider:model" format)
*/
export function parseModelIdentifier(modelString: string): ModelIdentifier {
return configurationManager.parseModelIdentifier(modelString);
if (!modelString) {
return {
modelId: '',
fullIdentifier: ''
};
}
const parts = modelString.split(':');
if (parts.length === 1) {
// No provider prefix, just model name
return {
modelId: modelString,
fullIdentifier: modelString
};
}
// Check if first part is a known provider
const potentialProvider = parts[0].toLowerCase();
const knownProviders: ProviderType[] = ['openai', 'anthropic', 'ollama'];
if (knownProviders.includes(potentialProvider as ProviderType)) {
// Provider prefix format
const provider = potentialProvider as ProviderType;
const modelId = parts.slice(1).join(':'); // Rejoin in case model has colons
return {
provider,
modelId,
fullIdentifier: modelString
};
}
// Not a provider prefix, treat whole string as model name
return {
modelId: modelString,
fullIdentifier: modelString
};
}
/**
* Create a model configuration from a model string
*/
export function createModelConfig(modelString: string, defaultProvider?: ProviderType): ModelConfig {
return configurationManager.createModelConfig(modelString, defaultProvider);
const identifier = parseModelIdentifier(modelString);
const provider = identifier.provider || defaultProvider || 'openai'; // fallback to openai if no provider specified
return {
provider,
modelId: identifier.modelId,
displayName: identifier.fullIdentifier
};
}
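A few worked inputs for the two helpers above; the expected results follow directly from the parsing rules, not from test fixtures:

parseModelIdentifier('ollama:llama3:8b-instruct');
// -> { provider: 'ollama', modelId: 'llama3:8b-instruct', fullIdentifier: 'ollama:llama3:8b-instruct' }
//    (only the first segment is checked against known providers; later colons stay in the model ID)

parseModelIdentifier('gpt-4o');
// -> { modelId: 'gpt-4o', fullIdentifier: 'gpt-4o' }
//    (no provider prefix, so the whole string is the model ID)

createModelConfig('claude-3-5-sonnet', 'anthropic');
// -> { provider: 'anthropic', modelId: 'claude-3-5-sonnet', displayName: 'claude-3-5-sonnet' }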
/**
* Get the default model for a specific provider
* Get the default model for a specific provider - always fresh from options
*/
export async function getDefaultModelForProvider(provider: ProviderType): Promise<string | undefined> {
const config = await configurationManager.getAIConfig();
return config.defaultModels[provider]; // This can now be undefined
const optionKey = `${provider}DefaultModel` as const;
return optionService.getOption(optionKey) || undefined;
}
/**
* Get provider settings for a specific provider
* Get provider settings for a specific provider - always fresh from options
*/
export async function getProviderSettings(provider: ProviderType) {
const config = await configurationManager.getAIConfig();
return config.providerSettings[provider];
switch (provider) {
case 'openai':
return {
apiKey: optionService.getOption('openaiApiKey'),
baseUrl: optionService.getOption('openaiBaseUrl'),
defaultModel: optionService.getOption('openaiDefaultModel')
};
case 'anthropic':
return {
apiKey: optionService.getOption('anthropicApiKey'),
baseUrl: optionService.getOption('anthropicBaseUrl'),
defaultModel: optionService.getOption('anthropicDefaultModel')
};
case 'ollama':
return {
baseUrl: optionService.getOption('ollamaBaseUrl'),
defaultModel: optionService.getOption('ollamaDefaultModel')
};
default:
return {};
}
}
/**
* Check if AI is enabled
* Check if AI is enabled - always fresh from options
*/
export async function isAIEnabled(): Promise<boolean> {
const config = await configurationManager.getAIConfig();
return config.enabled;
return optionService.getOptionBool('aiEnabled');
}
/**
@@ -82,7 +143,7 @@ export async function isProviderConfigured(provider: ProviderType): Promise<boolean> {
*/
export async function getAvailableSelectedProvider(): Promise<ProviderType | null> {
const selectedProvider = await getSelectedProvider();
if (!selectedProvider) {
return null; // No provider selected
}
@@ -95,17 +156,51 @@ export async function getAvailableSelectedProvider(): Promise<ProviderType | null> {
}
/**
* Validate the current AI configuration
* Validate the current AI configuration - simplified validation
*/
export async function validateConfiguration() {
return configurationManager.validateConfig();
const result = {
isValid: true,
errors: [] as string[],
warnings: [] as string[]
};
const aiEnabled = await isAIEnabled();
if (!aiEnabled) {
result.warnings.push('AI features are disabled');
return result;
}
const selectedProvider = await getSelectedProvider();
if (!selectedProvider) {
result.errors.push('No AI provider selected');
result.isValid = false;
return result;
}
// Validate provider-specific settings
const settings = await getProviderSettings(selectedProvider);
if (selectedProvider === 'openai' && !(settings as any)?.apiKey) {
result.warnings.push('OpenAI API key is not configured');
}
if (selectedProvider === 'anthropic' && !(settings as any)?.apiKey) {
result.warnings.push('Anthropic API key is not configured');
}
if (selectedProvider === 'ollama' && !(settings as any)?.baseUrl) {
result.warnings.push('Ollama base URL is not configured');
}
return result;
}
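A typical (hypothetical) caller of the simplified validator, using the log import already present in this module:

const validation = await validateConfiguration();
if (!validation.isValid) {
    log.error(`AI configuration invalid: ${validation.errors.join('; ')}`);
}
for (const warning of validation.warnings) {
    log.info(`AI configuration warning: ${warning}`);
}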
/**
* Clear cached configuration (use when settings change)
* Clear cached configuration (no-op since we removed caching)
*/
export function clearConfigurationCache(): void {
configurationManager.clearCache();
// No caching anymore, so nothing to clear
}
/**
@@ -136,7 +231,7 @@ export async function getValidModelConfig(provider: ProviderType): Promise<{ model: string; provider: ProviderType } | null> {
*/
export async function getSelectedModelConfig(): Promise<{ model: string; provider: ProviderType } | null> {
const selectedProvider = await getSelectedProvider();
if (!selectedProvider) {
return null; // No provider selected
}

View File

@@ -21,11 +21,6 @@ import type {
*/
export class ConfigurationManager {
private static instance: ConfigurationManager | null = null;
private cachedConfig: AIConfig | null = null;
private lastConfigUpdate: number = 0;
// Cache for 5 minutes to avoid excessive option reads
private static readonly CACHE_DURATION = 5 * 60 * 1000;
private constructor() {}
@@ -37,14 +32,9 @@
}
/**
* Get the complete AI configuration
* Get the complete AI configuration - always fresh, no caching
*/
public async getAIConfig(): Promise<AIConfig> {
const now = Date.now();
if (this.cachedConfig && (now - this.lastConfigUpdate) < ConfigurationManager.CACHE_DURATION) {
return this.cachedConfig;
}
try {
const config: AIConfig = {
enabled: await this.getAIEnabled(),
@@ -53,8 +43,6 @@
providerSettings: await this.getProviderSettings()
};
this.cachedConfig = config;
this.lastConfigUpdate = now;
return config;
} catch (error) {
log.error(`Error loading AI configuration: ${error}`);
@@ -263,14 +251,6 @@
return result;
}
/**
* Clear cached configuration (force reload on next access)
*/
public clearCache(): void {
this.cachedConfig = null;
this.lastConfigUpdate = 0;
}
// Private helper methods
private async getAIEnabled(): Promise<boolean> {

View File

@@ -111,19 +111,13 @@ export class ModelSelectionStage extends BasePipelineStage<ModelSelectionInput,
const { getValidModelConfig } = await import('../../config/configuration_helpers.js');
const modelConfig = await getValidModelConfig(selectedProvider);
if (modelConfig) {
// We have a valid configured model
updatedOptions.model = modelConfig.model;
} else {
// No model configured, try to fetch and set a default from the service
const fetchedModel = await this.fetchAndSetDefaultModel(selectedProvider);
if (!fetchedModel) {
throw new Error(`No default model configured for provider ${selectedProvider}. Please set a default model in your AI settings or ensure the provider service is available.`);
}
// Use the fetched model
updatedOptions.model = fetchedModel;
if (!modelConfig) {
throw new Error(`No default model configured for provider ${selectedProvider}. Please set a default model in your AI settings.`);
}
// Use the configured model
updatedOptions.model = modelConfig.model;
log.info(`Selected provider: ${selectedProvider}, model: ${updatedOptions.model}`);
// Determine query complexity
@@ -183,20 +177,8 @@ export default class ModelSelectionStage extends BasePipelineStage<ModelSelectionInput,
return;
}
// If no provider could be determined, try to use precedence
// Use the explicitly provided provider - no automatic fallbacks
let selectedProvider = provider;
if (!selectedProvider) {
// List of providers in precedence order
const providerPrecedence = ['anthropic', 'openai', 'ollama'];
// Find the first available provider
for (const p of providerPrecedence) {
if (aiServiceManager.isProviderAvailable(p)) {
selectedProvider = p as ServiceProviders;
break;
}
}
}
// Set the provider metadata in the options
if (selectedProvider) {
@@ -218,47 +200,7 @@ export default class ModelSelectionStage extends BasePipelineStage<ModelSelectionInput,
}
}
/**
* Determine model based on selected provider using the new configuration system
* This method is now simplified and delegates to the main model selection logic
*/
private async determineDefaultModel(input: ModelSelectionInput): Promise<string> {
try {
// Use the same logic as the main process method
const { getValidModelConfig, getSelectedProvider } = await import('../../config/configuration_helpers.js');
const selectedProvider = await getSelectedProvider();
if (!selectedProvider) {
throw new Error('No AI provider is selected. Please select a provider in your AI settings.');
}
// Check if the provider is available through the service manager
if (!aiServiceManager.isProviderAvailable(selectedProvider)) {
throw new Error(`Selected provider ${selectedProvider} is not available`);
}
// Try to get a valid model config
const modelConfig = await getValidModelConfig(selectedProvider);
if (!modelConfig) {
throw new Error(`No default model configured for provider ${selectedProvider}. Please configure a default model in your AI settings.`);
}
// Set provider metadata
if (!input.options.providerMetadata) {
input.options.providerMetadata = {
provider: selectedProvider as 'openai' | 'anthropic' | 'ollama' | 'local',
modelId: modelConfig.model
};
}
log.info(`Selected default model ${modelConfig.model} from provider ${selectedProvider}`);
return modelConfig.model;
} catch (error) {
log.error(`Error determining default model: ${error}`);
throw error; // Don't provide fallback defaults, let the error propagate
}
}
/**
* Get estimated context window for Ollama models
@@ -283,48 +225,5 @@ export default class ModelSelectionStage extends BasePipelineStage<ModelSelectionInput,
}
}
/**
* Use AI service manager to get a configured model for the provider
* This eliminates duplication and uses the existing service layer
*/
private async fetchAndSetDefaultModel(provider: ProviderType): Promise<string | null> {
try {
log.info(`Getting default model for provider ${provider} using AI service manager`);
// Use the existing AI service manager instead of duplicating API calls
const service = await aiServiceManager.getInstance().getService(provider);
if (!service || !service.isAvailable()) {
log.info(`Provider ${provider} service is not available`);
return null;
}
// Check if the service has a method to get available models
if (typeof (service as any).getAvailableModels === 'function') {
try {
const models = await (service as any).getAvailableModels();
if (models && models.length > 0) {
// Use the first available model - no hardcoded preferences
const selectedModel = models[0];
// Import server-side options to update the default model
const optionService = (await import('../../../options.js')).default;
const optionKey = `${provider}DefaultModel` as const;
await optionService.setOption(optionKey, selectedModel);
log.info(`Set default ${provider} model to: ${selectedModel}`);
return selectedModel;
}
} catch (modelError) {
log.error(`Error fetching models from ${provider} service: ${modelError}`);
}
}
log.info(`Provider ${provider} does not support dynamic model fetching`);
return null;
} catch (error) {
log.error(`Error getting default model for provider ${provider}: ${error}`);
return null;
}
}
}

View File

@@ -26,7 +26,11 @@ export function getOpenAIOptions(
}
const baseUrl = options.getOption('openaiBaseUrl') || PROVIDER_CONSTANTS.OPENAI.BASE_URL;
const modelName = opts.model || options.getOption('openaiDefaultModel') || PROVIDER_CONSTANTS.OPENAI.DEFAULT_MODEL;
const modelName = opts.model || options.getOption('openaiDefaultModel');
if (!modelName) {
throw new Error('No OpenAI model configured. Please set a default model in your AI settings.');
}
// Create provider metadata
const providerMetadata: ModelMetadata = {
@@ -87,7 +91,11 @@ export function getAnthropicOptions(
}
const baseUrl = options.getOption('anthropicBaseUrl') || PROVIDER_CONSTANTS.ANTHROPIC.BASE_URL;
const modelName = opts.model || options.getOption('anthropicDefaultModel') || PROVIDER_CONSTANTS.ANTHROPIC.DEFAULT_MODEL;
const modelName = opts.model || options.getOption('anthropicDefaultModel');
if (!modelName) {
throw new Error('No Anthropic model configured. Please set a default model in your AI settings.');
}
// Create provider metadata
const providerMetadata: ModelMetadata = {
@@ -150,8 +158,12 @@
throw new Error('Ollama API URL is not configured');
}
// Get the model name - no prefix handling needed now
let modelName = opts.model || options.getOption('ollamaDefaultModel') || 'llama3';
// Get the model name - no defaults, must be configured by user
let modelName = opts.model || options.getOption('ollamaDefaultModel');
if (!modelName) {
throw new Error('No Ollama model configured. Please set a default model in your AI settings.');
}
// Create provider metadata
const providerMetadata: ModelMetadata = {
@@ -249,4 +261,4 @@ async function getOllamaModelContextWindow(modelName: string): Promise<number> {
log.info(`Error getting context window for model ${modelName}: ${error}`);
return MODEL_CAPABILITIES['default'].contextWindowTokens; // Default fallback
}
}
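The net effect across all three providers is fail-fast model resolution. A sketch of what a caller can now expect from the Ollama options builder (presumably getOllamaOptions, mirroring the two functions above; hypothetical call shape, only the error message is taken from the diff):

try {
    // Neither opts.model nor the ollamaDefaultModel option is set:
    await getOllamaOptions({});
} catch (e) {
    // -> "No Ollama model configured. Please set a default model in your AI settings."
}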

View File

@@ -82,6 +82,26 @@ function setOption<T extends OptionNames>(name: T, value: string | OptionDefinit
} else {
createOption(name, value, false);
}
// Clear current AI provider when AI-related options change
const aiOptions = [
'aiSelectedProvider', 'openaiApiKey', 'openaiBaseUrl', 'openaiDefaultModel',
'anthropicApiKey', 'anthropicBaseUrl', 'anthropicDefaultModel',
'ollamaBaseUrl', 'ollamaDefaultModel'
];
if (aiOptions.includes(name)) {
// Import dynamically to avoid circular dependencies
setImmediate(async () => {
try {
const aiServiceManager = (await import('./llm/ai_service_manager.js')).default;
aiServiceManager.getInstance().clearCurrentProvider();
console.log(`Cleared AI provider after ${name} option changed`);
} catch (error) {
console.log(`Could not clear AI provider: ${error}`);
}
});
}
}
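End to end, a settings change now propagates like this (hypothetical sequence stitched together from the diffs above; exact call sites may differ):

// 1. A settings save calls setOption('openaiDefaultModel', 'gpt-4o-mini'), hitting the hook above.
// 2. The setImmediate callback runs aiServiceManager.getInstance().clearCurrentProvider(),
//    discarding the cached service and provider name.
// 3. The next chat request reaches getOrCreateChatProvider(), finds nothing cached,
//    re-reads the fresh options, and builds a new service instance.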
/**

pnpm-lock.yaml (generated; 133 lines changed)
View File

@@ -77,7 +77,7 @@ importers:
version: link:apps/server
'@types/express':
specifier: ^5.0.0
version: 5.0.2
version: 5.0.3
'@types/node':
specifier: 22.15.30
version: 22.15.30
@@ -495,11 +495,11 @@ importers:
specifier: 4.3.9
version: 4.3.9
'@types/compression':
specifier: 1.8.0
version: 1.8.0
specifier: 1.8.1
version: 1.8.1
'@types/cookie-parser':
specifier: 1.4.8
version: 1.4.8(@types/express@5.0.2)
specifier: 1.4.9
version: 1.4.9(@types/express@5.0.3)
'@types/debounce':
specifier: 1.2.4
version: 1.2.4
@@ -513,8 +513,8 @@ importers:
specifier: 1.6.6
version: 1.6.6
'@types/express-session':
specifier: 1.18.1
version: 1.18.1
specifier: 1.18.2
version: 1.18.2
'@types/fs-extra':
specifier: 11.0.4
version: 11.0.4
@@ -531,11 +531,11 @@ importers:
specifier: 21.1.7
version: 21.1.7
'@types/mime-types':
specifier: 3.0.0
version: 3.0.0
specifier: 3.0.1
version: 3.0.1
'@types/multer':
specifier: 1.4.12
version: 1.4.12
specifier: 1.4.13
version: 1.4.13
'@types/safe-compare':
specifier: 1.1.2
version: 1.1.2
@@ -549,8 +549,8 @@ importers:
specifier: 2.5.7
version: 2.5.7
'@types/serve-static':
specifier: 1.15.7
version: 1.15.7
specifier: 1.15.8
version: 1.15.8
'@types/session-file-store':
specifier: 1.2.5
version: 1.2.5
@@ -4349,8 +4349,8 @@ packages:
'@types/better-sqlite3@7.6.13':
resolution: {integrity: sha512-NMv9ASNARoKksWtsq/SHakpYAYnhBrQgGD8zkLYk/jaK8jUGn08CfEdTRgYhMypUQAfzSP8W6gNLe0q19/t4VA==}
'@types/body-parser@1.19.5':
resolution: {integrity: sha512-fB3Zu92ucau0iQ0JMCFQE7b/dv8Ot07NI3KaZIkIUNXq82k4eBAqUaneXfleGY9JWskeS9y+u0nXMyspcuQrCg==}
'@types/body-parser@1.19.6':
resolution: {integrity: sha512-HLFeCYgz89uk22N5Qg3dvGvsv46B8GLvKKo1zKG4NybA8U2DiEO3w9lqGg29t/tfLRJpJ6iQxnVw4OnB7MoM9g==}
'@types/bonjour@3.5.13':
resolution: {integrity: sha512-z9fJ5Im06zvUL548KvYNecEVlA7cVDkGUi6kZusb04mpyEFKCIZJvloCcmpmLaIahDpOQGHaHmG6imtPMmPXGQ==}
@@ -4373,8 +4373,8 @@ packages:
'@types/color-name@1.1.5':
resolution: {integrity: sha512-j2K5UJqGTxeesj6oQuGpMgifpT5k9HprgQd8D1Y0lOFqKHl3PJu5GMeS4Y5EgjS55AE6OQxf8mPED9uaGbf4Cg==}
'@types/compression@1.8.0':
resolution: {integrity: sha512-g4vmPIwbTii9dX1HVioHbOolubEaf4re4vDxuzpKrzz9uI7uarBExi9begX0cXyIB85jXZ5X2A/v8rsHZxSAPw==}
'@types/compression@1.8.1':
resolution: {integrity: sha512-kCFuWS0ebDbmxs0AXYn6e2r2nrGAb5KwQhknjSPSPgJcGd8+HVSILlUyFhGqML2gk39HcG7D1ydW9/qpYkN00Q==}
'@types/connect-history-api-fallback@1.5.4':
resolution: {integrity: sha512-n6Cr2xS1h4uAulPRdlw6Jl6s1oG8KrVilPN2yUITEs+K48EzMJJ3W1xy8K5eWuFvjp3R74AOIGSmp2UfBJ8HFw==}
@@ -4382,8 +4382,8 @@ packages:
'@types/connect@3.4.38':
resolution: {integrity: sha512-K6uROf1LD88uDQqJCktA4yzL1YYAK6NgfsI0v/mTgyPKWsX1CnJ0XPSDhViejru1GcRkLWb8RlzFYJRqGUbaug==}
'@types/cookie-parser@1.4.8':
resolution: {integrity: sha512-l37JqFrOJ9yQfRQkljb41l0xVphc7kg5JTjjr+pLRZ0IyZ49V4BQ8vbF4Ut2C2e+WH4al3xD3ZwYwIUfnbT4NQ==}
'@types/cookie-parser@1.4.9':
resolution: {integrity: sha512-tGZiZ2Gtc4m3wIdLkZ8mkj1T6CEHb35+VApbL2T14Dew8HA7c+04dmKqsKRNC+8RJPm16JEK0tFSwdZqubfc4g==}
peerDependencies:
'@types/express': '*'
@@ -4523,14 +4523,14 @@ packages:
'@types/express-serve-static-core@5.0.6':
resolution: {integrity: sha512-3xhRnjJPkULekpSzgtoNYYcTWgEZkp4myc+Saevii5JPnHNvHMRlBSHDbs7Bh1iPPoVTERHEZXyhyLbMEsExsA==}
'@types/express-session@1.18.1':
resolution: {integrity: sha512-S6TkD/lljxDlQ2u/4A70luD8/ZxZcrU5pQwI1rVXCiaVIywoFgbA+PIUNDjPhQpPdK0dGleLtYc/y7XWBfclBg==}
'@types/express-session@1.18.2':
resolution: {integrity: sha512-k+I0BxwVXsnEU2hV77cCobC08kIsn4y44C3gC0b46uxZVMaXA04lSPgRLR/bSL2w0t0ShJiG8o4jPzRG/nscFg==}
'@types/express@4.17.22':
resolution: {integrity: sha512-eZUmSnhRX9YRSkplpz0N+k6NljUUn5l3EWZIKZvYzhvMphEuNiyyy1viH/ejgt66JWgALwC/gtSUAeQKtSwW/w==}
'@types/express@4.17.23':
resolution: {integrity: sha512-Crp6WY9aTYP3qPi2wGDo9iUe/rceX01UMhnF1jmwDcKCFM6cx7YhGP/Mpr3y9AASpfHixIG0E6azCcL5OcDHsQ==}
'@types/express@5.0.2':
resolution: {integrity: sha512-BtjL3ZwbCQriyb0DGw+Rt12qAXPiBTPs815lsUvtt1Grk0vLRMZNMUZ741d5rjk+UQOxfDiBZ3dxpX00vSkK3g==}
'@types/express@5.0.3':
resolution: {integrity: sha512-wGA0NX93b19/dZC1J18tKWVIYWyyF2ZjT9vin/NRu0qzzvfVzWjs04iq2rQ3H65vCTQYlRqs3YHfY7zjdV+9Kw==}
'@types/fs-extra@11.0.4':
resolution: {integrity: sha512-yTbItCNreRooED33qjunPthRcSjERP1r4MqCZc7wv0u2sUkzTFp45tgUfS5+r7FrZPdmCCNflLhVSP/o+SemsQ==}
@@ -4616,6 +4616,9 @@ packages:
'@types/mime-types@3.0.0':
resolution: {integrity: sha512-9gFWMsVgEtbsD6yY/2z8pAtnZhdRKl4Q9xmKQJy5gv0fMpzJeeWtQyd7WpdhaIbRSwPCfnjXOsNMcoQvu5giGg==}
'@types/mime-types@3.0.1':
resolution: {integrity: sha512-xRMsfuQbnRq1Ef+C+RKaENOxXX87Ygl38W1vDfPHRku02TgQr+Qd8iivLtAMcR0KF5/29xlnFihkTlbqFrGOVQ==}
'@types/mime@1.3.5':
resolution: {integrity: sha512-/pyBZWSLD2n0dcHE3hq8s8ZvcETHtEuF+3E7XVt0Ig2nvsVQXdghHVcEkIWjy9A0wKfTn97a/PSDYohKIlnP/w==}
@@ -4625,8 +4628,8 @@ packages:
'@types/ms@2.1.0':
resolution: {integrity: sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==}
'@types/multer@1.4.12':
resolution: {integrity: sha512-pQ2hoqvXiJt2FP9WQVLPRO+AmiIm/ZYkavPlIQnx282u4ZrVdztx0pkh3jjpQt0Kz+YI0YhSG264y08UJKoUQg==}
'@types/multer@1.4.13':
resolution: {integrity: sha512-bhhdtPw7JqCiEfC9Jimx5LqX9BDIPJEh2q/fQ4bqbBPtyEZYr3cvF22NwG0DmPZNYA0CAf2CnqDB4KIGGpJcaw==}
'@types/node-forge@1.3.11':
resolution: {integrity: sha512-FQx220y22OKNTqaByeBGqHWYz4cl94tpcxeFdvBo3wjG6XPBuZ0BNgNZRV5J5TFmmcsJ4IzsLkmGRiQbnYsBEQ==}
@@ -4687,14 +4690,17 @@ packages:
'@types/send@0.17.4':
resolution: {integrity: sha512-x2EM6TJOybec7c52BX0ZspPodMsQUd5L6PRwOunVyVUhXiBSKf3AezDL8Dgvgt5o0UfKNfuA0eMLr2wLT4AiBA==}
'@types/send@0.17.5':
resolution: {integrity: sha512-z6F2D3cOStZvuk2SaP6YrwkNO65iTZcwA2ZkSABegdkAh/lf+Aa/YQndZVfmEXT5vgAp6zv06VQ3ejSVjAny4w==}
'@types/serve-favicon@2.5.7':
resolution: {integrity: sha512-z9TNUQXdQ+W/OJMP1e3KOYUZ99qJS4+ZfFOIrPGImcayqKoyifbJSEFkVq1MCKBbqjMZpjPj3B5ilrQAR2+TOw==}
'@types/serve-index@1.9.4':
resolution: {integrity: sha512-qLpGZ/c2fhSs5gnYsQxtDEq3Oy8SXPClIXkW5ghvAvsNuVSA8k+gCONcUCS/UjLEYvYps+e8uBtfgXgvhwfNug==}
'@types/serve-static@1.15.7':
resolution: {integrity: sha512-W8Ym+h8nhuRwaKPaDw34QUkwsGi6Rc4yYqvKFo5rm2FUEhCFbzVWrxXUxuKK8TASjWsysJY0nsmNCGhCOIsrOw==}
'@types/serve-static@1.15.8':
resolution: {integrity: sha512-roei0UY3LhpOJvjbIP6ZZFngyLKl5dskOtDhxY5THRSpO+ZI+nzJ+m5yUMzGrp89YRa7lvknKkMYjqQFGwA7Sg==}
'@types/session-file-store@1.2.5':
resolution: {integrity: sha512-xjIyh40IznXLrvbAY/nmxu5cMcPcE3ZoDrSDvd02m6p8UjUgOtZAGI7Os5DDd6THuxClLWNhFo/awy1tYp64Bg==}
@@ -17395,7 +17401,7 @@ snapshots:
dependencies:
'@types/node': 22.15.21
'@types/body-parser@1.19.5':
'@types/body-parser@1.19.6':
dependencies:
'@types/connect': 3.4.38
'@types/node': 22.15.30
@@ -17429,10 +17435,10 @@ snapshots:
'@types/color-name@1.1.5': {}
'@types/compression@1.8.0':
'@types/compression@1.8.1':
dependencies:
'@types/express': 4.17.22
'@types/node': 22.15.21
'@types/express': 5.0.3
'@types/node': 22.15.30
'@types/connect-history-api-fallback@1.5.4':
dependencies:
@@ -17443,9 +17449,9 @@ snapshots:
dependencies:
'@types/node': 22.15.30
'@types/cookie-parser@1.4.8(@types/express@5.0.2)':
'@types/cookie-parser@1.4.9(@types/express@5.0.3)':
dependencies:
'@types/express': 5.0.2
'@types/express': 5.0.3
'@types/cookie@0.6.0':
optional: true
@@ -17603,31 +17609,31 @@ snapshots:
'@types/express-http-proxy@1.6.6':
dependencies:
'@types/express': 5.0.2
'@types/express': 5.0.3
'@types/express-serve-static-core@5.0.6':
dependencies:
'@types/node': 22.15.30
'@types/qs': 6.14.0
'@types/range-parser': 1.2.7
'@types/send': 0.17.4
'@types/send': 0.17.5
'@types/express-session@1.18.1':
'@types/express-session@1.18.2':
dependencies:
'@types/express': 5.0.2
'@types/express': 5.0.3
'@types/express@4.17.22':
'@types/express@4.17.23':
dependencies:
'@types/body-parser': 1.19.5
'@types/body-parser': 1.19.6
'@types/express-serve-static-core': 5.0.6
'@types/qs': 6.14.0
'@types/serve-static': 1.15.7
'@types/serve-static': 1.15.8
'@types/express@5.0.2':
'@types/express@5.0.3':
dependencies:
'@types/body-parser': 1.19.5
'@types/body-parser': 1.19.6
'@types/express-serve-static-core': 5.0.6
'@types/serve-static': 1.15.7
'@types/serve-static': 1.15.8
'@types/fs-extra@11.0.4':
dependencies:
@@ -17722,15 +17728,17 @@ snapshots:
'@types/mime-types@3.0.0': {}
'@types/mime-types@3.0.1': {}
'@types/mime@1.3.5': {}
'@types/minimatch@5.1.2': {}
'@types/ms@2.1.0': {}
'@types/multer@1.4.12':
'@types/multer@1.4.13':
dependencies:
'@types/express': 5.0.2
'@types/express': 5.0.3
'@types/node-forge@1.3.11':
dependencies:
@@ -17795,24 +17803,29 @@ snapshots:
'@types/mime': 1.3.5
'@types/node': 22.15.30
'@types/send@0.17.5':
dependencies:
'@types/mime': 1.3.5
'@types/node': 22.15.30
'@types/serve-favicon@2.5.7':
dependencies:
'@types/express': 5.0.2
'@types/express': 5.0.3
'@types/serve-index@1.9.4':
dependencies:
'@types/express': 5.0.2
'@types/express': 5.0.3
'@types/serve-static@1.15.7':
'@types/serve-static@1.15.8':
dependencies:
'@types/http-errors': 2.0.4
'@types/node': 22.15.21
'@types/node': 22.15.30
'@types/send': 0.17.4
'@types/session-file-store@1.2.5':
dependencies:
'@types/express': 5.0.2
'@types/express-session': 1.18.1
'@types/express': 5.0.3
'@types/express-session': 1.18.2
'@types/sinonjs__fake-timers@8.1.5': {}
@@ -17845,8 +17858,8 @@ snapshots:
'@types/swagger-ui-express@4.1.8':
dependencies:
'@types/express': 5.0.2
'@types/serve-static': 1.15.7
'@types/express': 5.0.3
'@types/serve-static': 1.15.8
'@types/tmp@0.2.6': {}
@@ -21818,7 +21831,7 @@ snapshots:
transitivePeerDependencies:
- supports-color
http-proxy-middleware@2.0.9(@types/express@4.17.22):
http-proxy-middleware@2.0.9(@types/express@4.17.23):
dependencies:
'@types/http-proxy': 1.17.16
http-proxy: 1.18.1
@@ -21826,7 +21839,7 @@ snapshots:
is-plain-obj: 3.0.0
micromatch: 4.0.8
optionalDependencies:
'@types/express': 4.17.22
'@types/express': 4.17.23
transitivePeerDependencies:
- debug
@@ -27461,10 +27474,10 @@ snapshots:
dependencies:
'@types/bonjour': 3.5.13
'@types/connect-history-api-fallback': 1.5.4
'@types/express': 4.17.22
'@types/express': 4.17.23
'@types/express-serve-static-core': 5.0.6
'@types/serve-index': 1.9.4
'@types/serve-static': 1.15.7
'@types/serve-static': 1.15.8
'@types/sockjs': 0.3.36
'@types/ws': 8.18.1
ansi-html-community: 0.0.8
@@ -27475,7 +27488,7 @@ snapshots:
connect-history-api-fallback: 2.0.0
express: 4.21.2
graceful-fs: 4.2.11
http-proxy-middleware: 2.0.9(@types/express@4.17.22)
http-proxy-middleware: 2.0.9(@types/express@4.17.23)
ipaddr.js: 2.2.0
launch-editor: 2.10.0
open: 10.1.1