fix(unit): resolve type errors

perf3ct 2025-06-08 21:03:07 +00:00
parent 0c44dd0e63
commit 224cae6db2
GPG Key ID: 569C4EEC436F5232 (no known key found for this signature in database)
5 changed files with 17 additions and 17 deletions

View File

@@ -323,7 +323,7 @@ describe("LLM API Tests", () => {
         const options = (await import("../../services/options.js")).default;
         // Setup default mock behaviors
-        options.getOptionBool.mockReturnValue(true); // AI enabled
+        (options.getOptionBool as any).mockReturnValue(true); // AI enabled
         mockAiServiceManager.getOrCreateAnyService.mockResolvedValue({});
         mockGetSelectedModelConfig.mockResolvedValue({
             model: 'test-model',
@@ -466,8 +466,8 @@
                     getContent: () => 'Root note content for testing'
                 })
             })
-        };
-        vi.mocked(await import('../../becca/becca.js')).default = mockBecca;
+        } as any;
+        (await import('../../becca/becca.js') as any).default = mockBecca;
         // Setup streaming with mention context
         mockChatPipelineExecute.mockImplementation(async (input) => {
@@ -628,7 +628,7 @@
         it("should handle AI disabled state", async () => {
             // Import options service to access mock
             const options = (await import("../../services/options.js")).default;
-            options.getOptionBool.mockReturnValue(false); // AI disabled
+            (options.getOptionBool as any).mockReturnValue(false); // AI disabled
             const response = await supertest(app)
                 .post(`/api/llm/chat/${testChatId}/messages/stream`)
@@ -740,7 +740,7 @@
             const ws = (await import("../../services/ws.js")).default;
             // Verify multiple chunks were sent
-            const streamCalls = ws.sendMessageToAllClients.mock.calls.filter(
+            const streamCalls = (ws.sendMessageToAllClients as any).mock.calls.filter(
                 call => call[0].type === 'llm-stream' && call[0].content
             );
             expect(streamCalls.length).toBeGreaterThan(5);
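
The fixes in this file are all the same one-line pattern: the services are imported with their real types, so TypeScript rejects Vitest's mock-only helpers (mockReturnValue, .mock.calls) on them, and an `as any` cast widens the type at each call site. A minimal sketch of the pattern, with vi.mocked shown as the stricter alternative; the service shape below is illustrative, not the project's actual module:

import { vi } from 'vitest';

// Stand-in for the real options service: at runtime it is a vi.fn(),
// but its declared type is the plain function the module exports.
const options = {
    getOptionBool: vi.fn() as unknown as (name: string) => boolean,
};

// Typed as a plain function, mock helpers fail to type-check, so the
// commit widens the type at the call site:
(options.getOptionBool as any).mockReturnValue(true);

// A stricter alternative that keeps the helper's arguments typed:
vi.mocked(options.getOptionBool).mockReturnValue(true);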

View File

@@ -269,7 +269,7 @@ describe('ChatPipeline', () => {
     it('should handle tool calling iterations', async () => {
         // Mock LLM response to include tool calls
-        pipeline.stages.llmCompletion.execute.mockResolvedValue({
+        (pipeline.stages.llmCompletion.execute as any).mockResolvedValue({
             response: {
                 text: 'Hello! How can I help you?',
                 role: 'assistant',
@@ -279,7 +279,7 @@
         });
         // Mock tool calling to require iteration then stop
-        pipeline.stages.toolCalling.execute
+        (pipeline.stages.toolCalling.execute as any)
             .mockResolvedValueOnce({ needsFollowUp: true, messages: [] })
             .mockResolvedValueOnce({ needsFollowUp: false, messages: [] });
@@ -290,7 +290,7 @@
     it('should respect max tool call iterations', async () => {
         // Mock LLM response to include tool calls
-        pipeline.stages.llmCompletion.execute.mockResolvedValue({
+        (pipeline.stages.llmCompletion.execute as any).mockResolvedValue({
             response: {
                 text: 'Hello! How can I help you?',
                 role: 'assistant',
@@ -300,7 +300,7 @@
         });
         // Mock tool calling to always require iteration
-        pipeline.stages.toolCalling.execute.mockResolvedValue({ needsFollowUp: true, messages: [] });
+        (pipeline.stages.toolCalling.execute as any).mockResolvedValue({ needsFollowUp: true, messages: [] });
         await pipeline.execute(input);
@@ -309,7 +309,7 @@
     });
     it('should handle stage errors gracefully', async () => {
-        pipeline.stages.modelSelection.execute.mockRejectedValueOnce(new Error('Model selection failed'));
+        (pipeline.stages.modelSelection.execute as any).mockRejectedValueOnce(new Error('Model selection failed'));
         await expect(pipeline.execute(input)).rejects.toThrow('Model selection failed');
     });
@@ -408,7 +408,7 @@
     };
     it('should propagate errors from stages', async () => {
-        pipeline.stages.modelSelection.execute.mockRejectedValueOnce(new Error('Model selection failed'));
+        (pipeline.stages.modelSelection.execute as any).mockRejectedValueOnce(new Error('Model selection failed'));
         await expect(pipeline.execute(input)).rejects.toThrow('Model selection failed');
     });
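
The iteration tests above steer the pipeline loop purely through mock scheduling: chained mockResolvedValueOnce answers make toolCalling request one follow-up and then stop, while a plain mockResolvedValue answers every call the same way and forces the max-iterations cap to end the loop instead. A minimal sketch of that scheduling, with the stage shape assumed from these tests:

import { vi } from 'vitest';

// Assumed shape of a pipeline stage, mirroring pipeline.stages.toolCalling.
const toolCalling = { execute: vi.fn() };

// One follow-up, then stop: the pipeline should run exactly two iterations.
(toolCalling.execute as any)
    .mockResolvedValueOnce({ needsFollowUp: true, messages: [] })
    .mockResolvedValueOnce({ needsFollowUp: false, messages: [] });

// Always ask for a follow-up: only the iteration cap can end the loop.
// (toolCalling.execute as any).mockResolvedValue({ needsFollowUp: true, messages: [] });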

View File

@@ -255,7 +255,7 @@ describe('Provider Streaming Integration Tests', () => {
         // Anthropic format needs conversion to our standard format
         if (chunk.type === 'content_block_delta') {
             yield {
-                message: { content: chunk.delta.text },
+                message: { content: chunk.delta?.text || '' },
                 done: false
             };
         } else if (chunk.type === 'message_stop') {
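
This is the only non-cast change in the commit: content_block_delta events are not guaranteed to carry a delta payload in the mocked stream, so chunk.delta.text fails under strict null checks and can throw at runtime. Optional chaining with an empty-string fallback keeps the conversion total. A sketch of the converter under assumed chunk types; the message_stop branch is inferred, since the hunk cuts off there:

// Assumed minimal shape of an Anthropic-style stream event.
interface AnthropicChunk {
    type: 'content_block_delta' | 'message_stop';
    delta?: { text?: string };
}

async function* toStandardFormat(chunks: AsyncIterable<AnthropicChunk>) {
    for await (const chunk of chunks) {
        if (chunk.type === 'content_block_delta') {
            // delta (or delta.text) may be absent; fall back to ''.
            yield { message: { content: chunk.delta?.text || '' }, done: false };
        } else if (chunk.type === 'message_stop') {
            // Inferred: signal completion in the standard format.
            yield { message: { content: '' }, done: true };
        }
    }
}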

View File

@@ -3,7 +3,7 @@ import { processProviderStream, StreamProcessor } from '../providers/stream_hand
 import type { ProviderStreamOptions } from '../providers/stream_handler.js';
 // Mock log service
-vi.mock('../log.js', () => ({
+vi.mock('../../log.js', () => ({
     default: {
         info: vi.fn(),
         error: vi.fn(),
@@ -17,7 +17,7 @@ describe('Streaming Error Handling Tests', () => {
     beforeEach(async () => {
         vi.clearAllMocks();
-        log = (await import('../log.js')).default;
+        log = (await import('../../log.js')).default;
         mockOptions = {
             providerName: 'ErrorTestProvider',
             modelName: 'error-test-model'
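
The two path changes in this file have to agree: vi.mock matches on the exact module specifier, so mocking '../log.js' while the code under test resolves '../../log.js' leaves the real logger in place, and the dynamic import in beforeEach must use the same corrected specifier to hand the test the mocked instance. A minimal sketch of that pairing, using the paths from the hunks above:

import { vi } from 'vitest';

// The specifier must match the id the code under test imports.
vi.mock('../../log.js', () => ({
    default: { info: vi.fn(), error: vi.fn() },
}));

// Importing through the same id resolves to the mocked module, so
// assertions such as expect(log.error).toHaveBeenCalled() see the calls.
const log = (await import('../../log.js')).default;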
@@ -147,7 +147,7 @@
         }
     };
-    const hangingCallback = vi.fn(async () => {
+    const hangingCallback = vi.fn(async (): Promise<void> => {
         // Never resolves
         return new Promise(() => {});
     });
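
The annotation on hangingCallback is the actual type fix: without it, new Promise(() => {}) infers as Promise<unknown>, so the async callback's return type no longer matches a slot expecting Promise<void>; with the annotation, the bare promise is contextually typed as Promise<void>. A minimal sketch, with the callback slot's expected type as an assumption:

import { vi } from 'vitest';

// Assumed shape of the slot the stream handler exposes for chunk callbacks.
type ChunkCallback = () => Promise<void>;

const hangingCallback: ChunkCallback = vi.fn(async (): Promise<void> => {
    // Never resolves, simulating a consumer that stalls mid-stream.
    // The annotation contextually types this as new Promise<void>(...).
    return new Promise(() => {});
});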

View File

@@ -3,7 +3,7 @@ import { processProviderStream, StreamProcessor } from '../providers/stream_hand
 import type { ProviderStreamOptions } from '../providers/stream_handler.js';
 // Mock log service
-vi.mock('../log.js', () => ({
+vi.mock('../../log.js', () => ({
     default: {
         info: vi.fn(),
         error: vi.fn(),
@@ -623,7 +623,7 @@ describe('Tool Execution During Streaming Tests', () => {
     describe('Tool Call Logging and Debugging', () => {
         it('should log tool call detection', async () => {
-            const log = (await import('../log.js')).default;
+            const log = (await import('../../log.js')).default;
             const toolChunk = {
                 message: {