diff --git a/apps/server/src/routes/api/llm.spec.ts b/apps/server/src/routes/api/llm.spec.ts
index 3703590f5..c19df825f 100644
--- a/apps/server/src/routes/api/llm.spec.ts
+++ b/apps/server/src/routes/api/llm.spec.ts
@@ -323,7 +323,7 @@ describe("LLM API Tests", () => {
         const options = (await import("../../services/options.js")).default;
 
         // Setup default mock behaviors
-        options.getOptionBool.mockReturnValue(true); // AI enabled
+        (options.getOptionBool as any).mockReturnValue(true); // AI enabled
         mockAiServiceManager.getOrCreateAnyService.mockResolvedValue({});
         mockGetSelectedModelConfig.mockResolvedValue({
             model: 'test-model',
@@ -466,8 +466,8 @@ describe("LLM API Tests", () => {
                         getContent: () => 'Root note content for testing'
                     })
                 })
-            };
-            vi.mocked(await import('../../becca/becca.js')).default = mockBecca;
+            } as any;
+            (await import('../../becca/becca.js') as any).default = mockBecca;
 
             // Setup streaming with mention context
             mockChatPipelineExecute.mockImplementation(async (input) => {
@@ -628,7 +628,7 @@ describe("LLM API Tests", () => {
         it("should handle AI disabled state", async () => {
             // Import options service to access mock
            const options = (await import("../../services/options.js")).default;
-            options.getOptionBool.mockReturnValue(false); // AI disabled
+            (options.getOptionBool as any).mockReturnValue(false); // AI disabled
 
             const response = await supertest(app)
                 .post(`/api/llm/chat/${testChatId}/messages/stream`)
@@ -740,7 +740,7 @@ describe("LLM API Tests", () => {
             const ws = (await import("../../services/ws.js")).default;
 
             // Verify multiple chunks were sent
-            const streamCalls = ws.sendMessageToAllClients.mock.calls.filter(
+            const streamCalls = (ws.sendMessageToAllClients as any).mock.calls.filter(
                 call => call[0].type === 'llm-stream' && call[0].content
             );
             expect(streamCalls.length).toBeGreaterThan(5);
diff --git a/apps/server/src/services/llm/pipeline/chat_pipeline.spec.ts b/apps/server/src/services/llm/pipeline/chat_pipeline.spec.ts
index d73a2833e..68eb814c1 100644
--- a/apps/server/src/services/llm/pipeline/chat_pipeline.spec.ts
+++ b/apps/server/src/services/llm/pipeline/chat_pipeline.spec.ts
@@ -269,7 +269,7 @@ describe('ChatPipeline', () => {
 
         it('should handle tool calling iterations', async () => {
             // Mock LLM response to include tool calls
-            pipeline.stages.llmCompletion.execute.mockResolvedValue({
+            (pipeline.stages.llmCompletion.execute as any).mockResolvedValue({
                 response: {
                     text: 'Hello! How can I help you?',
                     role: 'assistant',
@@ -279,7 +279,7 @@ describe('ChatPipeline', () => {
             });
 
             // Mock tool calling to require iteration then stop
-            pipeline.stages.toolCalling.execute
+            (pipeline.stages.toolCalling.execute as any)
                 .mockResolvedValueOnce({ needsFollowUp: true, messages: [] })
                 .mockResolvedValueOnce({ needsFollowUp: false, messages: [] });
 
@@ -290,7 +290,7 @@ describe('ChatPipeline', () => {
 
         it('should respect max tool call iterations', async () => {
             // Mock LLM response to include tool calls
-            pipeline.stages.llmCompletion.execute.mockResolvedValue({
+            (pipeline.stages.llmCompletion.execute as any).mockResolvedValue({
                 response: {
                     text: 'Hello! How can I help you?',
                     role: 'assistant',
@@ -300,7 +300,7 @@ describe('ChatPipeline', () => {
             });
 
             // Mock tool calling to always require iteration
-            pipeline.stages.toolCalling.execute.mockResolvedValue({ needsFollowUp: true, messages: [] });
+            (pipeline.stages.toolCalling.execute as any).mockResolvedValue({ needsFollowUp: true, messages: [] });
 
             await pipeline.execute(input);
 
@@ -309,7 +309,7 @@ describe('ChatPipeline', () => {
         });
 
         it('should handle stage errors gracefully', async () => {
-            pipeline.stages.modelSelection.execute.mockRejectedValueOnce(new Error('Model selection failed'));
+            (pipeline.stages.modelSelection.execute as any).mockRejectedValueOnce(new Error('Model selection failed'));
 
             await expect(pipeline.execute(input)).rejects.toThrow('Model selection failed');
         });
@@ -408,7 +408,7 @@ describe('ChatPipeline', () => {
         };
 
         it('should propagate errors from stages', async () => {
-            pipeline.stages.modelSelection.execute.mockRejectedValueOnce(new Error('Model selection failed'));
+            (pipeline.stages.modelSelection.execute as any).mockRejectedValueOnce(new Error('Model selection failed'));
 
             await expect(pipeline.execute(input)).rejects.toThrow('Model selection failed');
         });
diff --git a/apps/server/src/services/llm/providers/integration/streaming.spec.ts b/apps/server/src/services/llm/providers/integration/streaming.spec.ts
index 2a363f25b..8c38ecfeb 100644
--- a/apps/server/src/services/llm/providers/integration/streaming.spec.ts
+++ b/apps/server/src/services/llm/providers/integration/streaming.spec.ts
@@ -255,7 +255,7 @@ describe('Provider Streaming Integration Tests', () => {
                 // Anthropic format needs conversion to our standard format
                 if (chunk.type === 'content_block_delta') {
                     yield {
-                        message: { content: chunk.delta.text },
+                        message: { content: chunk.delta?.text || '' },
                         done: false
                     };
                 } else if (chunk.type === 'message_stop') {
diff --git a/apps/server/src/services/llm/streaming/error_handling.spec.ts b/apps/server/src/services/llm/streaming/error_handling.spec.ts
index 1b074be73..22058aea2 100644
--- a/apps/server/src/services/llm/streaming/error_handling.spec.ts
+++ b/apps/server/src/services/llm/streaming/error_handling.spec.ts
@@ -3,7 +3,7 @@ import { processProviderStream, StreamProcessor } from '../providers/stream_hand
 import type { ProviderStreamOptions } from '../providers/stream_handler.js';
 
 // Mock log service
-vi.mock('../log.js', () => ({
+vi.mock('../../log.js', () => ({
     default: {
         info: vi.fn(),
         error: vi.fn(),
@@ -17,7 +17,7 @@ describe('Streaming Error Handling Tests', () => {
 
     beforeEach(async () => {
         vi.clearAllMocks();
-        log = (await import('../log.js')).default;
+        log = (await import('../../log.js')).default;
         mockOptions = {
             providerName: 'ErrorTestProvider',
             modelName: 'error-test-model'
@@ -147,7 +147,7 @@ describe('Streaming Error Handling Tests', () => {
                 }
             };
 
-            const hangingCallback = vi.fn(async () => {
+            const hangingCallback = vi.fn(async (): Promise => {
                 // Never resolves
                 return new Promise(() => {});
             });
diff --git a/apps/server/src/services/llm/streaming/tool_execution.spec.ts b/apps/server/src/services/llm/streaming/tool_execution.spec.ts
index ec5e8021f..e6b383701 100644
--- a/apps/server/src/services/llm/streaming/tool_execution.spec.ts
+++ b/apps/server/src/services/llm/streaming/tool_execution.spec.ts
@@ -3,7 +3,7 @@ import { processProviderStream, StreamProcessor } from '../providers/stream_hand
 import type { ProviderStreamOptions } from '../providers/stream_handler.js';
 
 // Mock log service
-vi.mock('../log.js', () => ({
+vi.mock('../../log.js', () => ({
     default: {
         info: vi.fn(),
         error: vi.fn(),
@@ -623,7 +623,7 @@ describe('Tool Execution During Streaming Tests', () => {
 
     describe('Tool Call Logging and Debugging', () => {
         it('should log tool call detection', async () => {
-            const log = (await import('../log.js')).default;
+            const log = (await import('../../log.js')).default;
 
             const toolChunk = {
                 message: {