Compare commits

...

6 Commits

11 changed files with 770 additions and 27 deletions

View File

@@ -1,5 +1,27 @@
# Changelog
## 2025-05-26 - 2.3.0 - feat(cli)
Add '--version' option and warn against global tstest usage in the tstest project
- Introduced a new '--version' CLI flag that prints the version from package.json
- Added logic in ts/index.ts to detect if tstest is run globally within its own project and issue a warning
- Added .claude/settings.local.json to configure allowed permissions for various commands
## 2025-05-26 - 2.2.6 - fix(tstest)
Improve timeout warning timer management and summary output formatting in the test runner.
- Removed the global timeoutWarningTimer and replaced it with local warning timers in runInNode and runInChrome methods.
- Added warnings when test files run for over one minute if no timeout is specified.
- Ensured proper clearing of warning timers on successful completion or timeout.
- Enhanced quiet mode summary output to clearly display passed and failed test counts.
## 2025-05-26 - 2.2.5 - fix(protocol)
Fix inline timing metadata parsing and enhance test coverage for performance metrics and timing edge cases
- Updated the protocol parser to correctly parse inline key:value pairs while excluding prefixed formats (META:, SKIP:, TODO:, EVENT:)
- Added new tests for performance metrics, timing edge cases, and protocol timing to verify accurate timing capture and retry handling
- Expanded documentation in readme.hints.md to detail the updated timing implementation and parser fixes
## 2025-05-26 - 2.2.4 - fix(logging)
Improve performance metrics reporting and add local permissions configuration

View File

@@ -1,6 +1,6 @@
{
"name": "@git.zone/tstest",
- "version": "2.2.4",
+ "version": "2.3.0",
"private": false,
"description": "a test utility to run tests that match test/**/*.ts",
"exports": {

View File

@@ -266,3 +266,58 @@ Previously reported issues with these methods have been resolved:
- Both regular and parallel-only tests are supported
These fixes ensure accurate test counts and proper TAP-compliant output for all test states.
## Test Timing Implementation
### Timing Architecture
Test timing is captured using `@push.rocks/smarttime`'s `HrtMeasurement` class, which provides high-resolution timing (see the sketch after this list):
1. **Timing Capture**:
- Each `TapTest` instance has its own `HrtMeasurement`
- Timer starts immediately before test function execution
- Timer stops after test completes (or fails/times out)
- Millisecond precision is used for reporting
2. **Protocol Integration**:
- Timing is embedded in TAP output using Protocol V2 markers
- Inline format for simple timing: `ok 1 - test name ⟦TSTEST:time:123⟧`
- Block format for complex metadata: `⟦TSTEST:META:{"time":456,"file":"test.ts"}⟧`
3. **Performance Metrics Calculation**:
- Average is calculated from sum of individual test times, not total runtime
- Slowest test detection prefers tests with >0ms duration
- Failed tests still contribute their execution time to metrics
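A minimal sketch of how these pieces fit together (the runner loop here is simplified and hypothetical; only `HrtMeasurement` and `ProtocolEmitter.emitTest` mirror APIs actually exercised by the protocol tests in this compare):

```typescript
import { HrtMeasurement } from '@push.rocks/smarttime';
import { ProtocolEmitter } from '../../ts_tapbundle_protocol/index.js';

// Hypothetical runner step: times one test and emits its TAP line(s)
async function runTimedTest(
  testNumber: number,
  description: string,
  testFunction: () => Promise<void>,
): Promise<string[]> {
  const measurement = new HrtMeasurement();
  measurement.start(); // starts immediately before the test function
  let ok = true;
  try {
    await testFunction();
  } catch {
    ok = false; // failed tests still report their execution time
  }
  measurement.stop(); // stops after completion or failure
  // Simple timing is emitted inline, e.g. `ok 1 - test name ⟦TSTEST:time:123⟧`
  return new ProtocolEmitter().emitTest({
    ok,
    testNumber,
    description,
    metadata: { time: measurement.milliSeconds },
  });
}
```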
### Edge Cases and Considerations
1. **Sub-millisecond Tests**:
- Very fast tests may report 0ms due to millisecond rounding
- Performance metrics handle this by showing "All tests completed in <1ms" when appropriate (see the sketch after this list)
2. **Special Test States**:
- **Skipped tests**: Report 0ms (not executed)
- **Todo tests**: Report 0ms (not executed)
- **Failed tests**: Report actual execution time before failure
- **Timeout tests**: Report time until timeout occurred
3. **Parallel Test Timing**:
- Each parallel test tracks its own execution time independently
- Parallel tests may have overlapping execution periods
- Total suite time reflects wall-clock time, not sum of test times
4. **Hook Timing**:
- `beforeEach`/`afterEach` hooks are not included in individual test times
- Only the actual test function execution is measured
5. **Retry Timing**:
- When tests retry, only the final attempt's duration is reported
- Each retry attempt emits separate `test:started` events
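The metrics rules above can be illustrated with a small helper (a hypothetical sketch of the described behavior, not the runner's actual implementation):

```typescript
interface TimedResult {
  name: string;
  timeMs: number; // 0 for skipped/todo tests and for sub-millisecond rounding
}

function performanceMetrics(results: TimedResult[]) {
  // Average is calculated from the sum of individual test times, not wall-clock suite time
  const totalMs = results.reduce((sum, result) => sum + result.timeMs, 0);
  const averageMs = results.length > 0 ? totalMs / results.length : 0;
  // Slowest-test detection prefers tests with >0ms duration
  const measurable = results.filter((result) => result.timeMs > 0);
  if (measurable.length === 0) {
    return { averageMs, summary: 'All tests completed in <1ms' };
  }
  const slowest = measurable.reduce((a, b) => (b.timeMs > a.timeMs ? b : a));
  return { averageMs, slowest };
}
```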
### Parser Fix for Timing Metadata
The protocol parser was fixed to correctly handle inline timing metadata:
- Changed condition from `!simpleMatch[1].includes(':')` to check for simple key:value pairs
- Excludes prefixed formats (META:, SKIP:, TODO:, EVENT:) while parsing simple formats like `time:250`
This ensures timing metadata is correctly extracted and displayed in test results.
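Distilled into a standalone predicate (a sketch for clarity; `isSimpleKeyValue` is a hypothetical name, and the actual condition is inlined in the parser — see the `ProtocolParser` diff at the end of this compare):

```typescript
// Accept simple key:value payloads such as `time:250`, but leave
// prefixed payloads to their dedicated handlers.
function isSimpleKeyValue(payload: string): boolean {
  const prefixes = ['META:', 'SKIP:', 'TODO:', 'EVENT:'];
  return payload.includes(':') && !prefixes.some((prefix) => payload.includes(prefix));
}

isSimpleKeyValue('time:250');          // true  -> parsed as key:value pairs
isSimpleKeyValue('time:200,retry:2');  // true  -> comma-separated pairs
isSimpleKeyValue('META:{"time":456}'); // false -> handled by the META: path
```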

View File

@@ -0,0 +1,167 @@
import { tap, expect } from '../../ts_tapbundle/index.js';
// Create tests with known, distinct timing patterns to verify metrics calculation
tap.test('metric test 1 - 10ms baseline', async (tools) => {
await tools.delayFor(10);
expect(true).toBeTrue();
});
tap.test('metric test 2 - 20ms double baseline', async (tools) => {
await tools.delayFor(20);
expect(true).toBeTrue();
});
tap.test('metric test 3 - 30ms triple baseline', async (tools) => {
await tools.delayFor(30);
expect(true).toBeTrue();
});
tap.test('metric test 4 - 40ms quadruple baseline', async (tools) => {
await tools.delayFor(40);
expect(true).toBeTrue();
});
tap.test('metric test 5 - 50ms quintuple baseline', async (tools) => {
await tools.delayFor(50);
expect(true).toBeTrue();
});
// Test that should be the slowest
tap.test('metric test slowest - 200ms intentionally slow', async (tools) => {
await tools.delayFor(200);
expect(true).toBeTrue();
});
// Tests to verify edge cases in average calculation
tap.test('metric test fast 1 - minimal work', async () => {
expect(1).toEqual(1);
});
tap.test('metric test fast 2 - minimal work', async () => {
expect(2).toEqual(2);
});
tap.test('metric test fast 3 - minimal work', async () => {
expect(3).toEqual(3);
});
// Test to verify that failed tests still contribute to timing metrics
tap.test('metric test that fails - 60ms before failure', async (tools) => {
await tools.delayFor(60);
expect(true).toBeFalse(); // This will fail
});
// Describe block with timing to test aggregation
tap.describe('performance metrics in describe block', () => {
tap.test('described test 1 - 15ms', async (tools) => {
await tools.delayFor(15);
expect(true).toBeTrue();
});
tap.test('described test 2 - 25ms', async (tools) => {
await tools.delayFor(25);
expect(true).toBeTrue();
});
tap.test('described test 3 - 35ms', async (tools) => {
await tools.delayFor(35);
expect(true).toBeTrue();
});
});
// Test timing with hooks
tap.describe('performance with hooks', () => {
let hookTime = 0;
tap.beforeEach(async () => {
// Hooks shouldn't count toward test time
await new Promise(resolve => setTimeout(resolve, 10));
hookTime += 10;
});
tap.afterEach(async () => {
// Hooks shouldn't count toward test time
await new Promise(resolve => setTimeout(resolve, 10));
hookTime += 10;
});
tap.test('test with hooks 1 - should only count test time', async (tools) => {
await tools.delayFor(30);
expect(true).toBeTrue();
// Test time should be ~30ms, not 50ms (including hooks)
});
tap.test('test with hooks 2 - should only count test time', async (tools) => {
await tools.delayFor(40);
expect(true).toBeTrue();
// Test time should be ~40ms, not 60ms (including hooks)
});
});
// Parallel tests to verify timing is captured correctly
tap.describe('parallel timing verification', () => {
const startTimes: Map<string, number> = new Map();
const endTimes: Map<string, number> = new Map();
tap.testParallel('parallel metric 1 - 80ms', async (tools) => {
startTimes.set('p1', Date.now());
await tools.delayFor(80);
endTimes.set('p1', Date.now());
expect(true).toBeTrue();
});
tap.testParallel('parallel metric 2 - 90ms', async (tools) => {
startTimes.set('p2', Date.now());
await tools.delayFor(90);
endTimes.set('p2', Date.now());
expect(true).toBeTrue();
});
tap.testParallel('parallel metric 3 - 100ms', async (tools) => {
startTimes.set('p3', Date.now());
await tools.delayFor(100);
endTimes.set('p3', Date.now());
expect(true).toBeTrue();
});
tap.test('verify parallel execution', async () => {
// This test runs after parallel tests
// Verify they actually ran in parallel by checking overlapping times
if (startTimes.size === 3 && endTimes.size === 3) {
const p1Start = startTimes.get('p1')!;
const p2Start = startTimes.get('p2')!;
const p3Start = startTimes.get('p3')!;
const p1End = endTimes.get('p1')!;
const p2End = endTimes.get('p2')!;
const p3End = endTimes.get('p3')!;
// Start times should be very close (within 50ms)
expect(Math.abs(p1Start - p2Start)).toBeLessThan(50);
expect(Math.abs(p2Start - p3Start)).toBeLessThan(50);
// There should be overlap in execution
const p1Overlaps = p1Start < p2End && p1End > p2Start;
const p2Overlaps = p2Start < p3End && p2End > p3Start;
expect(p1Overlaps || p2Overlaps).toBeTrue();
} else {
// Skip verification if parallel tests didn't run yet
expect(true).toBeTrue();
}
});
});
// Test to ensure average calculation handles mixed timing correctly
tap.test('final metrics test - 5ms minimal', async (tools) => {
await tools.delayFor(5);
expect(true).toBeTrue();
console.log('\n📊 Expected Performance Metrics Summary:');
console.log('- Tests include a mix of durations from <1ms to 200ms');
console.log('- Slowest test should be "metric test slowest" at ~200ms');
console.log('- Average should be calculated from individual test times');
console.log('- Failed test should still contribute its 60ms to timing');
console.log('- Parallel tests should show their individual times (80ms, 90ms, 100ms)');
});
tap.start();

View File

@@ -0,0 +1,214 @@
import { tap, expect } from '../../ts_tapbundle/index.js';
tap.test('ultra-fast test - should capture sub-millisecond timing', async () => {
// This test does almost nothing, should complete in < 1ms
const x = 1 + 1;
expect(x).toEqual(2);
});
tap.test('test with exact 1ms delay', async (tools) => {
const start = Date.now();
await tools.delayFor(1);
const elapsed = Date.now() - start;
// Should be at least 1ms but could be more due to event loop
expect(elapsed).toBeGreaterThanOrEqual(1);
});
tap.test('test with 10ms delay', async (tools) => {
await tools.delayFor(10);
expect(true).toBeTrue();
});
tap.test('test with 100ms delay', async (tools) => {
await tools.delayFor(100);
expect(true).toBeTrue();
});
tap.test('test with 250ms delay', async (tools) => {
await tools.delayFor(250);
expect(true).toBeTrue();
});
tap.test('test with 500ms delay', async (tools) => {
await tools.delayFor(500);
expect(true).toBeTrue();
});
tap.test('test with variable processing time', async (tools) => {
// Simulate variable processing
const iterations = 1000000;
let sum = 0;
for (let i = 0; i < iterations; i++) {
sum += Math.sqrt(i);
}
expect(sum).toBeGreaterThan(0);
// Add a small delay to ensure measurable time
await tools.delayFor(5);
});
tap.test('test with multiple async operations', async () => {
// Multiple promises in parallel
const results = await Promise.all([
new Promise(resolve => setTimeout(() => resolve(1), 10)),
new Promise(resolve => setTimeout(() => resolve(2), 20)),
new Promise(resolve => setTimeout(() => resolve(3), 30))
]);
expect(results).toEqual([1, 2, 3]);
// This should take at least 30ms (the longest delay)
});
tap.test('test with synchronous heavy computation', async () => {
// Heavy synchronous computation
const fibonacci = (n: number): number => {
if (n <= 1) return n;
return fibonacci(n - 1) + fibonacci(n - 2);
};
// Calculate fibonacci(30) - should take measurable time
const result = fibonacci(30);
expect(result).toEqual(832040);
});
// Test with retry to see if timing accumulates correctly
tap.retry(2).test('test with retry - fails first then passes', async (tools) => {
// Get or initialize retry count
const retryCount = tools.context.get('retryCount') || 0;
tools.context.set('retryCount', retryCount + 1);
await tools.delayFor(50);
if (retryCount === 0) {
throw new Error('First attempt fails');
}
expect(retryCount).toEqual(1);
});
// Test timeout handling
tap.timeout(100).test('test with timeout - should complete just in time', async (tools) => {
await tools.delayFor(80); // Just under the timeout
expect(true).toBeTrue();
});
// Skip test - should show 0ms
tap.skip.test('skipped test - should report 0ms', async (tools) => {
await tools.delayFor(1000); // This won't execute
expect(true).toBeTrue();
});
// Todo test - should show 0ms
tap.todo.test('todo test - should report 0ms', async (tools) => {
await tools.delayFor(1000); // This won't execute
expect(true).toBeTrue();
});
// Test with skip inside
tap.test('test that skips conditionally - should show time until skip', async (tools) => {
await tools.delayFor(25);
const shouldSkip = true;
if (shouldSkip) {
tools.skip('Skipping after 25ms');
}
// This won't execute
await tools.delayFor(1000);
expect(true).toBeTrue();
});
// Test with very precise timing
tap.test('test with precise timing measurements', async (tools) => {
const measurements: number[] = [];
for (let i = 0; i < 5; i++) {
const start = process.hrtime.bigint();
await tools.delayFor(10);
const end = process.hrtime.bigint();
const durationMs = Number(end - start) / 1_000_000;
measurements.push(durationMs);
}
// All measurements should be at least 10ms
measurements.forEach(m => {
expect(m).toBeGreaterThanOrEqual(10);
});
// But not too much more (accounting for timer precision)
measurements.forEach(m => {
expect(m).toBeLessThan(20);
});
});
// Test that intentionally has 0 actual work
tap.test('empty test - absolute minimum execution time', async () => {
// Literally nothing
});
// Test with promise that resolves immediately
tap.test('test with immediate promise resolution', async () => {
await Promise.resolve();
expect(true).toBeTrue();
});
// Test with microtask queue
tap.test('test with microtask queue processing', async () => {
let value = 0;
await Promise.resolve().then(() => {
value = 1;
return Promise.resolve();
}).then(() => {
value = 2;
return Promise.resolve();
}).then(() => {
value = 3;
});
expect(value).toEqual(3);
});
// Test to verify timing accumulation in describe blocks
tap.describe('timing in describe blocks', () => {
let startTime: number;
tap.beforeEach(async () => {
startTime = Date.now();
await new Promise(resolve => setTimeout(resolve, 5));
});
tap.afterEach(async () => {
await new Promise(resolve => setTimeout(resolve, 5));
});
tap.test('first test in describe', async (tools) => {
await tools.delayFor(10);
const elapsed = Date.now() - startTime;
expect(elapsed).toBeGreaterThanOrEqual(10);
});
tap.test('second test in describe', async (tools) => {
await tools.delayFor(20);
const elapsed = Date.now() - startTime;
expect(elapsed).toBeGreaterThanOrEqual(20);
});
});
// Parallel tests to see timing differences
tap.testParallel('parallel test 1 - 100ms', async (tools) => {
await tools.delayFor(100);
expect(true).toBeTrue();
});
tap.testParallel('parallel test 2 - 50ms', async (tools) => {
await tools.delayFor(50);
expect(true).toBeTrue();
});
tap.testParallel('parallel test 3 - 150ms', async (tools) => {
await tools.delayFor(150);
expect(true).toBeTrue();
});
tap.start();

View File

@@ -0,0 +1,204 @@
import { tap, expect } from '../../ts_tapbundle/index.js';
import { ProtocolParser, ProtocolEmitter } from '../../ts_tapbundle_protocol/index.js';
// Test the protocol's ability to emit and parse timing metadata
tap.test('protocol should correctly emit timing metadata', async () => {
const emitter = new ProtocolEmitter();
const testResult = {
ok: true,
testNumber: 1,
description: 'test with timing',
metadata: {
time: 123
}
};
const lines = emitter.emitTest(testResult);
// Should have inline timing metadata
expect(lines.length).toEqual(1);
expect(lines[0]).toInclude('⟦TSTEST:time:123⟧');
});
tap.test('protocol should correctly parse timing metadata', async () => {
const parser = new ProtocolParser();
const line = 'ok 1 - test with timing ⟦TSTEST:time:456⟧';
const messages = parser.parseLine(line);
expect(messages.length).toEqual(1);
expect(messages[0].type).toEqual('test');
const content = messages[0].content as any;
expect(content.metadata).toBeDefined();
expect(content.metadata.time).toEqual(456);
});
tap.test('protocol should handle 0ms timing', async () => {
const parser = new ProtocolParser();
const line = 'ok 1 - ultra fast test ⟦TSTEST:time:0⟧';
const messages = parser.parseLine(line);
const content = messages[0].content as any;
expect(content.metadata.time).toEqual(0);
});
tap.test('protocol should handle large timing values', async () => {
const parser = new ProtocolParser();
const line = 'ok 1 - slow test ⟦TSTEST:time:999999⟧';
const messages = parser.parseLine(line);
const content = messages[0].content as any;
expect(content.metadata.time).toEqual(999999);
});
tap.test('protocol should handle timing with other metadata', async () => {
const emitter = new ProtocolEmitter();
const testResult = {
ok: true,
testNumber: 1,
description: 'complex test',
metadata: {
time: 789,
file: 'test.ts',
tags: ['slow', 'integration']
}
};
const lines = emitter.emitTest(testResult);
// Should use block metadata format for complex metadata
expect(lines.length).toBeGreaterThan(1);
expect(lines[1]).toInclude('META:');
expect(lines[1]).toInclude('"time":789');
});
tap.test('protocol should parse timing from block metadata', async () => {
const parser = new ProtocolParser();
const lines = [
'ok 1 - complex test',
'⟦TSTEST:META:{"time":321,"file":"test.ts"}⟧'
];
let testResult: any;
for (const line of lines) {
const messages = parser.parseLine(line);
if (messages.length > 0 && messages[0].type === 'test') {
testResult = messages[0].content;
}
}
expect(testResult).toBeDefined();
expect(testResult.metadata).toBeUndefined(); // Metadata comes separately in block format
});
tap.test('timing for skipped tests should be 0 or missing', async () => {
const emitter = new ProtocolEmitter();
const testResult = {
ok: true,
testNumber: 1,
description: 'skipped test',
directive: {
type: 'skip' as const,
reason: 'Not ready'
},
metadata: {
time: 0
}
};
const lines = emitter.emitTest(testResult);
expect(lines[0]).toInclude('# SKIP');
// If time is 0, it might be included or omitted
if (lines[0].includes('⟦TSTEST:')) {
expect(lines[0]).toInclude('time:0');
}
});
tap.test('protocol should handle fractional milliseconds', async () => {
const emitter = new ProtocolEmitter();
// Even though we use integers, test that the protocol handles them correctly
const testResult = {
ok: true,
testNumber: 1,
description: 'precise test',
metadata: {
time: 123 // Protocol uses integers for milliseconds
}
};
const lines = emitter.emitTest(testResult);
expect(lines[0]).toInclude('time:123');
});
tap.test('protocol should handle timing in retry scenarios', async () => {
const emitter = new ProtocolEmitter();
const testResult = {
ok: true,
testNumber: 1,
description: 'retry test',
metadata: {
time: 200,
retry: 2
}
};
const lines = emitter.emitTest(testResult);
// Should include both time and retry
expect(lines[0]).toMatch(/time:200.*retry:2|retry:2.*time:200/);
});
// Test actual timing capture
tap.test('HrtMeasurement should capture accurate timing', async (tools) => {
// Import HrtMeasurement
const { HrtMeasurement } = await import('@push.rocks/smarttime');
const measurement = new HrtMeasurement();
measurement.start();
await tools.delayFor(50);
measurement.stop();
// Should be at least 50ms
expect(measurement.milliSeconds).toBeGreaterThanOrEqual(50);
// But not too much more (allow for some overhead)
expect(measurement.milliSeconds).toBeLessThan(100);
});
tap.test('multiple timing measurements should be independent', async (tools) => {
const { HrtMeasurement } = await import('@push.rocks/smarttime');
const measurement1 = new HrtMeasurement();
const measurement2 = new HrtMeasurement();
measurement1.start();
await tools.delayFor(25);
measurement2.start();
await tools.delayFor(25);
measurement1.stop();
await tools.delayFor(25);
measurement2.stop();
// measurement1 should be ~50ms (25ms + 25ms)
expect(measurement1.milliSeconds).toBeGreaterThanOrEqual(50);
expect(measurement1.milliSeconds).toBeLessThan(70);
// measurement2 should be ~50ms (25ms + 25ms)
expect(measurement2.milliSeconds).toBeGreaterThanOrEqual(50);
expect(measurement2.milliSeconds).toBeLessThan(70);
});
tap.start();

View File

@@ -3,6 +3,6 @@
*/
export const commitinfo = {
name: '@git.zone/tstest',
- version: '2.2.4',
+ version: '2.3.0',
description: 'a test utility to run tests that match test/**/*.ts'
}

View File

@@ -8,6 +8,40 @@ export enum TestExecutionMode {
}
export const runCli = async () => {
// Check if we're using global tstest in the tstest project itself
try {
const packageJsonPath = `${process.cwd()}/package.json`;
const fs = await import('fs');
if (fs.existsSync(packageJsonPath)) {
const packageJson = JSON.parse(fs.readFileSync(packageJsonPath, 'utf8'));
if (packageJson.name === '@git.zone/tstest') {
// Check if we're running from a global installation
const execPath = process.argv[1];
// Debug: log the paths (uncomment for debugging)
// console.log('DEBUG: Checking global tstest usage...');
// console.log('execPath:', execPath);
// console.log('cwd:', process.cwd());
// console.log('process.argv:', process.argv);
// Check if this is running from a global installation
const isLocalCli = execPath.includes(process.cwd());
const isGlobalPnpm = process.argv.some(arg => arg.includes('.pnpm') && !arg.includes(process.cwd()));
const isGlobalNpm = process.argv.some(arg => arg.includes('npm/node_modules') && !arg.includes(process.cwd()));
if (!isLocalCli && (isGlobalPnpm || isGlobalNpm || !execPath.includes('node_modules'))) {
console.error('\n⚠ WARNING: You are using a globally installed tstest in the tstest project itself!');
console.error(' This means you are NOT testing your local changes.');
console.error(' Please use one of these commands instead:');
console.error(' • node cli.js <test-path>');
console.error(' • pnpm test <test-path>');
console.error(' • ./cli.js <test-path> (if executable)\n');
}
}
}
} catch (error) {
// Silently ignore any errors in this check
}
// Parse command line arguments
const args = process.argv.slice(2);
const logOptions: LogOptions = {};
@@ -24,6 +58,18 @@ export const runCli = async () => {
const arg = args[i];
switch (arg) {
case '--version':
// Get version from package.json
try {
const fs = await import('fs');
const packagePath = new URL('../package.json', import.meta.url).pathname;
const packageData = JSON.parse(await fs.promises.readFile(packagePath, 'utf8'));
console.log(`tstest version ${packageData.version}`);
} catch (error) {
console.log('tstest version unknown');
}
process.exit(0);
break;
case '--quiet':
case '-q':
logOptions.quiet = true;
@@ -115,6 +161,7 @@ export const runCli = async () => {
console.error('You must specify a test directory/file/pattern as an argument. Please try again.');
console.error('\nUsage: tstest <path> [options]');
console.error('\nOptions:');
console.error(' --version Show version information');
console.error(' --quiet, -q Minimal output');
console.error(' --verbose, -v Verbose output');
console.error(' --no-color Disable colored output');

View File

@@ -18,7 +18,6 @@ export class TsTest {
public startFromFile: number | null;
public stopAtFile: number | null;
public timeoutSeconds: number | null;
- private timeoutWarningTimer: NodeJS.Timeout | null = null;
public smartshellInstance = new plugins.smartshell.Smartshell({
executor: 'bash',
@@ -45,15 +44,6 @@
await this.movePreviousLogFiles();
}
- // Start timeout warning timer if no timeout was specified
- if (this.timeoutSeconds === null) {
- this.timeoutWarningTimer = setTimeout(() => {
- this.logger.warning('Test is running for more than 1 minute.');
- this.logger.warning('Consider using --timeout option to set a timeout for test files.');
- this.logger.warning('Example: tstest test --timeout=300 (for 5 minutes)');
- }, 60000); // 1 minute
- }
const testGroups = await this.testDir.getTestFileGroups();
const allFiles = [...testGroups.serial, ...Object.values(testGroups.parallelGroups).flat()];
@@ -92,12 +82,6 @@
}
}
- // Clear the timeout warning timer if it was set
- if (this.timeoutWarningTimer) {
- clearTimeout(this.timeoutWarningTimer);
- this.timeoutWarningTimer = null;
- }
tapCombinator.evaluate();
}
@@ -272,6 +256,19 @@ import '${absoluteTestFile.replace(/\\/g, '/')}';
execResultStreaming.childProcess.on('error', cleanup);
}
// Start warning timer if no timeout was specified
let warningTimer: NodeJS.Timeout | null = null;
if (this.timeoutSeconds === null) {
warningTimer = setTimeout(() => {
console.error('');
console.error(cs('⚠️ WARNING: Test file has been running for more than 1 minute', 'orange'));
console.error(cs(` File: ${fileNameArg}`, 'orange'));
console.error(cs(' Consider using --timeout option to set a timeout for test files.', 'orange'));
console.error(cs(' Example: tstest test --timeout=300 (for 5 minutes)', 'orange'));
console.error('');
}, 60000); // 1 minute
}
// Handle timeout if specified
if (this.timeoutSeconds !== null) {
const timeoutMs = this.timeoutSeconds * 1000;
@@ -293,6 +290,10 @@ import '${absoluteTestFile.replace(/\\/g, '/')}';
// Clear timeout if test completed successfully
clearTimeout(timeoutId);
} catch (error) {
// Clear warning timer if it was set
if (warningTimer) {
clearTimeout(warningTimer);
}
// Handle timeout error
tapParser.handleTimeout(this.timeoutSeconds);
// Ensure entire process tree is killed if still running
@@ -307,6 +308,11 @@ import '${absoluteTestFile.replace(/\\/g, '/')}';
await tapParser.handleTapProcess(execResultStreaming.childProcess);
}
// Clear warning timer if it was set
if (warningTimer) {
clearTimeout(warningTimer);
}
return tapParser;
}
@@ -425,6 +431,19 @@ import '${absoluteTestFile.replace(/\\/g, '/')}';
}
);
// Start warning timer if no timeout was specified
let warningTimer: NodeJS.Timeout | null = null;
if (this.timeoutSeconds === null) {
warningTimer = setTimeout(() => {
console.error('');
console.error(cs('⚠️ WARNING: Test file has been running for more than 1 minute', 'orange'));
console.error(cs(` File: ${fileNameArg}`, 'orange'));
console.error(cs(' Consider using --timeout option to set a timeout for test files.', 'orange'));
console.error(cs(' Example: tstest test --timeout=300 (for 5 minutes)', 'orange'));
console.error('');
}, 60000); // 1 minute
}
// Handle timeout if specified
if (this.timeoutSeconds !== null) {
const timeoutMs = this.timeoutSeconds * 1000;
@@ -444,6 +463,10 @@ import '${absoluteTestFile.replace(/\\/g, '/')}';
// Clear timeout if test completed successfully
clearTimeout(timeoutId);
} catch (error) {
// Clear warning timer if it was set
if (warningTimer) {
clearTimeout(warningTimer);
}
// Handle timeout error
tapParser.handleTimeout(this.timeoutSeconds);
}
@@ -451,6 +474,11 @@ import '${absoluteTestFile.replace(/\\/g, '/')}';
await evaluatePromise;
}
// Clear warning timer if it was set
if (warningTimer) {
clearTimeout(warningTimer);
}
// Always clean up resources, even on timeout
try {
await this.smartbrowserInstance.stop();
@@ -488,10 +516,10 @@ import '${absoluteTestFile.replace(/\\/g, '/')}';
try {
// Delete 00err and 00diff directories if they exist
- if (await plugins.smartfile.fs.isDirectory(errDir)) {
+ if (plugins.smartfile.fs.isDirectorySync(errDir)) {
plugins.smartfile.fs.removeSync(errDir);
}
- if (await plugins.smartfile.fs.isDirectory(diffDir)) {
+ if (plugins.smartfile.fs.isDirectorySync(diffDir)) {
plugins.smartfile.fs.removeSync(diffDir);
}
}

View File

@@ -242,9 +242,11 @@ export class TsTestLogger {
if (!this.options.quiet) {
const total = passed + failed;
- const status = failed === 0 ? 'PASSED' : 'FAILED';
- const color = failed === 0 ? 'green' : 'red';
- this.log(this.format(` Summary: ${passed}/${total} ${status}`, color));
+ if (failed === 0) {
+ this.log(this.format(` Summary: ${passed}/${total} PASSED`, 'green'));
+ } else {
+ this.log(this.format(` Summary: ${passed} passed, ${failed} failed of ${total} tests`, 'red'));
+ }
}
// If using --logfile, handle error copy and diff detection
@@ -390,7 +392,11 @@ export class TsTestLogger {
if (this.options.quiet) {
const status = summary.totalFailed === 0 ? 'PASSED' : 'FAILED';
- this.log(`\nSummary: ${summary.totalPassed}/${summary.totalTests} | ${totalDuration}ms | ${status}`);
+ if (summary.totalFailed === 0) {
+ this.log(`\nSummary: ${summary.totalPassed}/${summary.totalTests} | ${totalDuration}ms | ${status}`);
+ } else {
+ this.log(`\nSummary: ${summary.totalPassed} passed, ${summary.totalFailed} failed of ${summary.totalTests} tests | ${totalDuration}ms | ${status}`);
+ }
return;
}

View File

@@ -269,8 +269,8 @@ export class ProtocolParser {
// Extract simple key:value pairs
const simpleMatch = line.match(new RegExp(`${this.escapeRegex(PROTOCOL_MARKERS.START)}([^${this.escapeRegex(PROTOCOL_MARKERS.END)}]+)${this.escapeRegex(PROTOCOL_MARKERS.END)}`));
- if (simpleMatch && !simpleMatch[1].includes(':')) {
- // Not a prefixed format, might be key:value pairs
+ if (simpleMatch && simpleMatch[1].includes(':') && !simpleMatch[1].includes('META:') && !simpleMatch[1].includes('SKIP:') && !simpleMatch[1].includes('TODO:') && !simpleMatch[1].includes('EVENT:')) {
+ // This is a simple key:value format (not a prefixed format)
const pairs = simpleMatch[1].split(',');
for (const pair of pairs) {
const [key, value] = pair.split(':');