// Performance-metrics fixture: a suite of tests with known, distinct timings
// used to verify tapbundle's timing/metrics reporting (average, slowest,
// failed-test contribution, hook exclusion, parallel timing).
import { tap, expect } from '../../ts_tapbundle/index.js';
|
|
|
|
// Create tests with known, distinct timing patterns to verify metrics calculation
|
|
tap.test('metric test 1 - 10ms baseline', async (tools) => {
|
|
await tools.delayFor(10);
|
|
expect(true).toBeTrue();
|
|
});
|
|
|
|
tap.test('metric test 2 - 20ms double baseline', async (tools) => {
|
|
await tools.delayFor(20);
|
|
expect(true).toBeTrue();
|
|
});
|
|
|
|
tap.test('metric test 3 - 30ms triple baseline', async (tools) => {
|
|
await tools.delayFor(30);
|
|
expect(true).toBeTrue();
|
|
});
|
|
|
|
tap.test('metric test 4 - 40ms quadruple baseline', async (tools) => {
|
|
await tools.delayFor(40);
|
|
expect(true).toBeTrue();
|
|
});
|
|
|
|
tap.test('metric test 5 - 50ms quintuple baseline', async (tools) => {
|
|
await tools.delayFor(50);
|
|
expect(true).toBeTrue();
|
|
});
|
|
|
|
// Test that should be the slowest
|
|
tap.test('metric test slowest - 200ms intentionally slow', async (tools) => {
|
|
await tools.delayFor(200);
|
|
expect(true).toBeTrue();
|
|
});
|
|
|
|
// Tests to verify edge cases in average calculation
|
|
tap.test('metric test fast 1 - minimal work', async () => {
|
|
expect(1).toEqual(1);
|
|
});
|
|
|
|
tap.test('metric test fast 2 - minimal work', async () => {
|
|
expect(2).toEqual(2);
|
|
});
|
|
|
|
tap.test('metric test fast 3 - minimal work', async () => {
|
|
expect(3).toEqual(3);
|
|
});
|
|
|
|
// Test to verify that failed tests still contribute to timing metrics
|
|
tap.test('metric test that fails - 60ms before failure', async (tools) => {
|
|
await tools.delayFor(60);
|
|
expect(true).toBeFalse(); // This will fail
|
|
});
|
|
|
|
// Describe block with timing to test aggregation
|
|
tap.describe('performance metrics in describe block', () => {
|
|
tap.test('described test 1 - 15ms', async (tools) => {
|
|
await tools.delayFor(15);
|
|
expect(true).toBeTrue();
|
|
});
|
|
|
|
tap.test('described test 2 - 25ms', async (tools) => {
|
|
await tools.delayFor(25);
|
|
expect(true).toBeTrue();
|
|
});
|
|
|
|
tap.test('described test 3 - 35ms', async (tools) => {
|
|
await tools.delayFor(35);
|
|
expect(true).toBeTrue();
|
|
});
|
|
});
|
|
|
|
// Test timing with hooks
|
|
tap.describe('performance with hooks', () => {
|
|
let hookTime = 0;
|
|
|
|
tap.beforeEach(async () => {
|
|
// Hooks shouldn't count toward test time
|
|
await new Promise(resolve => setTimeout(resolve, 10));
|
|
hookTime += 10;
|
|
});
|
|
|
|
tap.afterEach(async () => {
|
|
// Hooks shouldn't count toward test time
|
|
await new Promise(resolve => setTimeout(resolve, 10));
|
|
hookTime += 10;
|
|
});
|
|
|
|
tap.test('test with hooks 1 - should only count test time', async (tools) => {
|
|
await tools.delayFor(30);
|
|
expect(true).toBeTrue();
|
|
// Test time should be ~30ms, not 50ms (including hooks)
|
|
});
|
|
|
|
tap.test('test with hooks 2 - should only count test time', async (tools) => {
|
|
await tools.delayFor(40);
|
|
expect(true).toBeTrue();
|
|
// Test time should be ~40ms, not 60ms (including hooks)
|
|
});
|
|
});
|
|
|
|
// Parallel tests to verify timing is captured correctly
|
|
tap.describe('parallel timing verification', () => {
|
|
const startTimes: Map<string, number> = new Map();
|
|
const endTimes: Map<string, number> = new Map();
|
|
|
|
tap.testParallel('parallel metric 1 - 80ms', async (tools) => {
|
|
startTimes.set('p1', Date.now());
|
|
await tools.delayFor(80);
|
|
endTimes.set('p1', Date.now());
|
|
expect(true).toBeTrue();
|
|
});
|
|
|
|
tap.testParallel('parallel metric 2 - 90ms', async (tools) => {
|
|
startTimes.set('p2', Date.now());
|
|
await tools.delayFor(90);
|
|
endTimes.set('p2', Date.now());
|
|
expect(true).toBeTrue();
|
|
});
|
|
|
|
tap.testParallel('parallel metric 3 - 100ms', async (tools) => {
|
|
startTimes.set('p3', Date.now());
|
|
await tools.delayFor(100);
|
|
endTimes.set('p3', Date.now());
|
|
expect(true).toBeTrue();
|
|
});
|
|
|
|
tap.test('verify parallel execution', async () => {
|
|
// This test runs after parallel tests
|
|
// Verify they actually ran in parallel by checking overlapping times
|
|
if (startTimes.size === 3 && endTimes.size === 3) {
|
|
const p1Start = startTimes.get('p1')!;
|
|
const p2Start = startTimes.get('p2')!;
|
|
const p3Start = startTimes.get('p3')!;
|
|
const p1End = endTimes.get('p1')!;
|
|
const p2End = endTimes.get('p2')!;
|
|
const p3End = endTimes.get('p3')!;
|
|
|
|
// Start times should be very close (within 50ms)
|
|
expect(Math.abs(p1Start - p2Start)).toBeLessThan(50);
|
|
expect(Math.abs(p2Start - p3Start)).toBeLessThan(50);
|
|
|
|
// There should be overlap in execution
|
|
const p1Overlaps = p1Start < p2End && p1End > p2Start;
|
|
const p2Overlaps = p2Start < p3End && p2End > p3Start;
|
|
|
|
expect(p1Overlaps || p2Overlaps).toBeTrue();
|
|
} else {
|
|
// Skip verification if parallel tests didn't run yet
|
|
expect(true).toBeTrue();
|
|
}
|
|
});
|
|
});
|
|
|
|
// Test to ensure average calculation handles mixed timing correctly
|
|
tap.test('final metrics test - 5ms minimal', async (tools) => {
|
|
await tools.delayFor(5);
|
|
expect(true).toBeTrue();
|
|
|
|
console.log('\n📊 Expected Performance Metrics Summary:');
|
|
console.log('- Tests include a mix of durations from <1ms to 200ms');
|
|
console.log('- Slowest test should be "metric test slowest" at ~200ms');
|
|
console.log('- Average should be calculated from individual test times');
|
|
console.log('- Failed test should still contribute its 60ms to timing');
|
|
console.log('- Parallel tests should show their individual times (80ms, 90ms, 100ms)');
|
|
});
|
|
|
|
tap.start(); |