mirror of https://github.com/TriliumNext/Notes.git (synced 2025-07-27 10:02:59 +08:00)

commit 1dce202d21
parent 2d5754c005

    test(server): try to reduce number to avoid CI crashing
@@ -70,7 +70,7 @@ describe('Provider Streaming Integration Tests', () => {
             expect(result.completeText).toBe('Hello world!');
             expect(result.chunkCount).toBe(4);
             expect(receivedChunks.length).toBeGreaterThan(0);

             // Verify callback received content chunks
             const contentChunks = receivedChunks.filter(c => c.text);
             expect(contentChunks.length).toBe(3);
@@ -101,10 +101,10 @@ describe('Provider Streaming Integration Tests', () => {
                     choices: [{ delta: { content: 'The answer is 4' } }],
                     model: 'gpt-3.5-turbo'
                 },
                 {
                     choices: [{ finish_reason: 'stop' }],
                     model: 'gpt-3.5-turbo',
                     done: true
                 }
             ];

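A note on the hunk above: the mock chunks follow the OpenAI streaming shape, where text arrives in choices[].delta.content and the terminal chunk carries finish_reason: 'stop'. A minimal sketch of how a consumer could assemble completeText from such chunks (the OpenAiStyleChunk type and accumulateText helper are illustrative assumptions, not the project's actual code):

    // Illustrative only: accumulate streamed text from OpenAI-style delta chunks.
    interface OpenAiStyleChunk {
        choices: Array<{
            delta?: { content?: string };
            finish_reason?: string;
        }>;
        model?: string;
    }

    function accumulateText(chunks: OpenAiStyleChunk[]): string {
        let completeText = '';
        for (const chunk of chunks) {
            for (const choice of chunk.choices) {
                // Content chunks carry delta.content; the final chunk usually
                // has finish_reason === 'stop' and no delta text to append.
                completeText += choice.delta?.content ?? '';
            }
        }
        return completeText;
    }

    // e.g. accumulateText(mockChunks) would yield 'The answer is 4' here.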
@@ -174,7 +174,7 @@ describe('Provider Streaming Integration Tests', () => {

             expect(result.completeText).toBe('The weather today is sunny.');
             expect(result.chunkCount).toBe(4);

             // Verify final chunk has usage stats
             expect(result.finalChunk.prompt_eval_count).toBe(15);
             expect(result.finalChunk.eval_count).toBe(8);
@@ -319,7 +319,7 @@ describe('Provider Streaming Integration Tests', () => {
             );

             expect(result.completeText).toBe('Based on my analysis, the answer is 42.');

             // Verify thinking states were captured
             const thinkingChunks = receivedChunks.filter(c => c.chunk?.message?.thinking);
             expect(thinkingChunks.length).toBe(2);
@@ -332,7 +332,7 @@ describe('Provider Streaming Integration Tests', () => {
                 async *[Symbol.asyncIterator]() {
                     yield { message: { content: 'Starting...' } };
                     // Simulate timeout
                     await new Promise((_, reject) =>
                         setTimeout(() => reject(new Error('Request timeout')), 100)
                     );
                 }
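The timeout hunk works because rejecting a promise inside an async generator makes the consumer's for-await loop throw, which is how the test simulates a provider timing out mid-stream. A self-contained sketch of that pattern (consumeStream is a hypothetical stand-in for the project's processProviderStream, whose signature is not shown in this diff):

    // Mock stream that emits one chunk, then fails like a timed-out request.
    interface StreamChunk {
        message?: { content?: string };
        done?: boolean;
    }

    const timeoutIterator: AsyncIterable<StreamChunk> = {
        async *[Symbol.asyncIterator]() {
            yield { message: { content: 'Starting...' } };
            // The rejection surfaces as a thrown error in the for-await below.
            await new Promise<never>((_, reject) =>
                setTimeout(() => reject(new Error('Request timeout')), 100)
            );
        }
    };

    async function consumeStream(iterable: AsyncIterable<StreamChunk>): Promise<string> {
        let text = '';
        for await (const chunk of iterable) {
            text += chunk.message?.content ?? '';
        }
        return text;
    }

    // In a test: await expect(consumeStream(timeoutIterator)).rejects.toThrow('Request timeout');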
@@ -481,7 +481,7 @@ describe('Provider Streaming Integration Tests', () => {
     describe('Memory Management', () => {
         it('should not leak memory during long streaming sessions', async () => {
             // Reduced chunk count for CI stability - still tests memory management
-            const chunkCount = 1000; // Reduced from 10000
+            const chunkCount = 500; // Reduced from 10000
             const longSessionIterator = {
                 async *[Symbol.asyncIterator]() {
                     for (let i = 0; i < chunkCount; i++) {
@@ -489,7 +489,7 @@ describe('Provider Streaming Integration Tests', () => {
                             message: { content: `Chunk ${i} with some additional content to increase memory usage` },
                             done: i === (chunkCount - 1)
                         };

                         // Periodic yield to event loop to prevent blocking
                         if (i % 50 === 0) { // More frequent yields for shorter test
                             await new Promise(resolve => setImmediate(resolve));
@@ -499,16 +499,16 @@ describe('Provider Streaming Integration Tests', () => {
             };

             const initialMemory = process.memoryUsage();

             const result = await processProviderStream(
                 longSessionIterator,
                 mockProviderOptions
             );

             const finalMemory = process.memoryUsage();

             expect(result.chunkCount).toBe(chunkCount);

             // Memory increase should be reasonable (less than 20MB for smaller test)
             const memoryIncrease = finalMemory.heapUsed - initialMemory.heapUsed;
             expect(memoryIncrease).toBeLessThan(20 * 1024 * 1024);
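The memory test in the last three hunks combines three ideas: generate many chunks from an async iterator, periodically yield back to the event loop with setImmediate so the loop cannot starve timers or I/O, and compare process.memoryUsage().heapUsed before and after consuming the stream. A runnable Node.js sketch under those assumptions (the consumer loop here replaces the project's processProviderStream and mockProviderOptions, which are not shown in this diff):

    // Node.js sketch: long streaming session with bounded heap growth.
    interface StreamChunk {
        message: { content: string };
        done: boolean;
    }

    function makeLongSessionIterator(chunkCount: number): AsyncIterable<StreamChunk> {
        return {
            async *[Symbol.asyncIterator]() {
                for (let i = 0; i < chunkCount; i++) {
                    yield {
                        message: { content: `Chunk ${i} with some additional content` },
                        done: i === chunkCount - 1
                    };
                    // Hand control back to the event loop every 50 chunks.
                    if (i % 50 === 0) {
                        await new Promise(resolve => setImmediate(resolve));
                    }
                }
            }
        };
    }

    async function main() {
        const initialMemory = process.memoryUsage();
        let chunksSeen = 0;
        for await (const chunk of makeLongSessionIterator(500)) {
            chunksSeen += 1; // consume without retaining chunks, so they stay collectable
            if (chunk.done) break;
        }
        const finalMemory = process.memoryUsage();
        const memoryIncrease = finalMemory.heapUsed - initialMemory.heapUsed;
        // Heap growth should stay well under the test's 20MB budget.
        console.log({ chunksSeen, memoryIncrease });
    }

    main();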
@@ -581,4 +581,4 @@ describe('Provider Streaming Integration Tests', () => {
             )).rejects.toThrow('Invalid stream iterator');
         });
     });
 });