Compare commits
89 Commits
SHA1 | Author | Date | |
---|---|---|---|
c26145205f | |||
82fc22653b | |||
3d85f54be0 | |||
9464c17c15 | |||
91b99ce304 | |||
899045e6aa | |||
845f146e91 | |||
d1f8652fc7 | |||
f717078558 | |||
d2c0e533b5 | |||
d3c7fce595 | |||
570e2d6b3b | |||
b7f4b7b3b8 | |||
424046b0de | |||
0f762f2063 | |||
82757c4abc | |||
7aaeed0dc6 | |||
c98bd85829 | |||
33d2ff1d4f | |||
91880f8d42 | |||
7b1732abcc | |||
7d09b39f2b | |||
96efba5903 | |||
3c535a8a77 | |||
0954265095 | |||
e1d90589bc | |||
33f705d961 | |||
13b11ab1bf | |||
63280e4a9a | |||
23addc2d2f | |||
3649114c8d | |||
2841aba8a4 | |||
31bf090410 | |||
b525754035 | |||
aa10fc4ab3 | |||
3eb8ef22e5 | |||
763dc89f59 | |||
e0d8ede450 | |||
27c950c1a1 | |||
83b324b09f | |||
63a2879cb4 | |||
1a375fa689 | |||
c48887a820 | |||
02aeb8195e | |||
53d3dc55e6 | |||
a82fdc0f26 | |||
cfcb99de76 | |||
a3a4ded41e | |||
03d478d6ff | |||
77e53bd68a | |||
946e467c26 | |||
f452a58fff | |||
2b01d949f2 | |||
1c5cf46ba9 | |||
b28e2eace3 | |||
cc388f1408 | |||
bac2f852c5 | |||
d9e0f1f758 | |||
42cd08eb1c | |||
553d5f0df7 | |||
6cc883dede | |||
fa9abbc4db | |||
56f0f0be16 | |||
dc0f859fad | |||
78ffad2f7d | |||
3fc4cee2b1 | |||
a57edeef64 | |||
1f73751a8c | |||
90741ed917 | |||
962fa2cd4d | |||
c085a20a4f | |||
1f355a10a1 | |||
a73ce99564 | |||
64f825091d | |||
5ddc2d2de0 | |||
85fec03878 | |||
61c3226156 | |||
f0bf778810 | |||
a8e9f67810 | |||
4cce132472 | |||
dc250804f5 | |||
9669445646 | |||
928d9d0616 | |||
3655b2f734 | |||
6712ff6b07 | |||
ef5efc0a93 | |||
f305547116 | |||
033a0a806c | |||
7f87c24ad8 |
**.gitignore** (vendored, 2 lines changed)

```diff
@@ -17,4 +17,4 @@ node_modules/
-dist/
+dist_*/
 
 # custom
```
**.gitlab-ci.yml** (deleted, 128 lines)

```yaml
# gitzone ci_default
image: registry.gitlab.com/hosttoday/ht-docker-node:npmci

cache:
  paths:
    - .npmci_cache/
  key: '$CI_BUILD_STAGE'

stages:
  - security
  - test
  - release
  - metadata

before_script:
  - pnpm install -g pnpm
  - pnpm install -g @shipzone/npmci
  - npmci npm prepare

# ====================
# security stage
# ====================
# ====================
# security stage
# ====================
auditProductionDependencies:
  image: registry.gitlab.com/hosttoday/ht-docker-node:npmci
  stage: security
  script:
    - npmci command npm config set registry https://registry.npmjs.org
    - npmci command pnpm audit --audit-level=high --prod
  tags:
    - lossless
    - docker
  allow_failure: true

auditDevDependencies:
  image: registry.gitlab.com/hosttoday/ht-docker-node:npmci
  stage: security
  script:
    - npmci command npm config set registry https://registry.npmjs.org
    - npmci command pnpm audit --audit-level=high --dev
  tags:
    - lossless
    - docker
  allow_failure: true

# ====================
# test stage
# ====================

testStable:
  stage: test
  script:
    - npmci node install stable
    - npmci npm install
    - npmci npm test
  coverage: /\d+.?\d+?\%\s*coverage/
  tags:
    - docker

testBuild:
  stage: test
  script:
    - npmci node install stable
    - npmci npm install
    - npmci command npm run build
  coverage: /\d+.?\d+?\%\s*coverage/
  tags:
    - docker

release:
  stage: release
  script:
    - npmci node install stable
    - npmci npm publish
  only:
    - tags
  tags:
    - lossless
    - docker
    - notpriv

# ====================
# metadata stage
# ====================
codequality:
  stage: metadata
  allow_failure: true
  only:
    - tags
  script:
    - npmci command npm install -g typescript
    - npmci npm prepare
    - npmci npm install
  tags:
    - lossless
    - docker
    - priv

trigger:
  stage: metadata
  script:
    - npmci trigger
  only:
    - tags
  tags:
    - lossless
    - docker
    - notpriv

pages:
  stage: metadata
  script:
    - npmci node install stable
    - npmci npm install
    - npmci command npm run buildDocs
  tags:
    - lossless
    - docker
    - notpriv
  only:
    - tags
  artifacts:
    expire_in: 1 week
    paths:
      - public
  allow_failure: true
```
**changelog.md** (new file, 411 lines)

# Changelog

## 2025-05-26 - 2.3.0 - feat(cli)
Add '--version' option and warn against global tstest usage in the tstest project

- Introduced a new '--version' CLI flag that prints the version from package.json
- Added logic in ts/index.ts to detect if tstest is run globally within its own project and issue a warning
- Added .claude/settings.local.json to configure allowed permissions for various commands

## 2025-05-26 - 2.2.6 - fix(tstest)
Improve timeout warning timer management and summary output formatting in the test runner.

- Removed the global timeoutWarningTimer and replaced it with local warning timers in runInNode and runInChrome methods.
- Added warnings when test files run for over one minute if no timeout is specified.
- Ensured proper clearing of warning timers on successful completion or timeout.
- Enhanced quiet mode summary output to clearly display passed and failed test counts.

## 2025-05-26 - 2.2.5 - fix(protocol)
Fix inline timing metadata parsing and enhance test coverage for performance metrics and timing edge cases

- Updated the protocol parser to correctly parse inline key:value pairs while excluding prefixed formats (META:, SKIP:, TODO:, EVENT:)
- Added new tests for performance metrics, timing edge cases, and protocol timing to verify accurate timing capture and retry handling
- Expanded documentation in readme.hints.md to detail the updated timing implementation and parser fixes

## 2025-05-26 - 2.2.4 - fix(logging)
Improve performance metrics reporting and add local permissions configuration

- Add .claude/settings.local.json to configure allowed permissions for various commands
- Update tstest logging: compute average test duration from actual durations and adjust slowest test display formatting

## 2025-05-26 - 2.2.3 - fix(readme/ts/tstest.plugins)
Update npm package scope and documentation to use '@git.zone' instead of '@gitzone', and add local settings configuration.

- Changed npm package links and source repository URLs in readme from '@gitzone/tstest' to '@git.zone/tstest'.
- Updated comments in ts/tstest.plugins.ts to reflect the correct '@git.zone' scope.
- Added .claude/settings.local.json file with local permission settings.

## 2025-05-26 - 2.2.2 - fix(config)
Cleanup project configuration by adding local CLAUDE settings and removing redundant license files

- Added .claude/settings.local.json with updated permissions for CLI and build tasks
- Removed license and license.md files to streamline repository content

## 2025-05-26 - 2.2.1 - fix(repo configuration)
Update repository metadata to use 'git.zone' naming and add local permission settings

- Changed githost from 'gitlab.com' to 'code.foss.global' and gitscope from 'gitzone' to 'git.zone' in npmextra.json
- Updated npm package name from '@gitzone/tstest' to '@git.zone/tstest' in npmextra.json and readme.md
- Added .claude/settings.local.json with new permission configuration

## 2025-05-26 - 2.2.0 - feat(watch mode)
Add watch mode support with CLI options and enhanced documentation

- Introduce '--watch' (or '-w') and '--watch-ignore' CLI flags for automatic test re-runs
- Integrate @push.rocks/smartchok for file watching with 300ms debouncing
- Update readme.md and readme.hints.md with detailed instructions and examples for watch mode
- Add a demo test file (test/watch-demo/test.demo.ts) to illustrate the new feature
- Add smartchok dependency in package.json

## 2025-05-26 - 2.1.0 - feat(core)
Implement Protocol V2 with enhanced settings and lifecycle hooks

- Migrated to Protocol V2 using Unicode markers and structured metadata with new ts_tapbundle_protocol module
- Refactored TAP parser/emitter to support improved protocol parsing and error reporting
- Integrated global settings via tap.settings() and lifecycle hooks (beforeAll/afterAll, beforeEach/afterEach)
- Enhanced expect wrapper with diff generation for clearer assertion failures
- Updated test loader to automatically run 00init.ts for proper test configuration
- Revised documentation (readme.hints.md, readme.plan.md) to reflect current implementation status and remaining work

## 2025-05-25 - 2.0.0 - BREAKING CHANGE(protocol)
Introduce protocol v2 implementation and update build configuration with revised build order, new tspublish files, and enhanced documentation

- Added ts_tapbundle_protocol directory with isomorphic implementation for protocol v2
- Updated readme.hints.md and readme.plan.md to explain the complete replacement of the v1 protocol and new build process
- Revised build order in tspublish.json files across ts, ts_tapbundle, ts_tapbundle_node, and ts_tapbundle_protocol
- Introduced .claude/settings.local.json with updated permission settings for CLI and build tools

## 2025-05-24 - 1.11.5 - fix(tstest)
Fix timeout handling to correctly evaluate TAP results after killing the test process.

- Added call to evaluateFinalResult() after killing the process in runInNode to ensure final TAP output is processed.

## 2025-05-24 - 1.11.4 - fix(logging)
Improve warning logging and add permission settings file

- Replace multiple logger.error calls with logger.warning for tests running over 1 minute
- Add warning method in tstest logger to display warning messages consistently
- Introduce .claude/settings.local.json to configure allowed permissions

## 2025-05-24 - 1.11.3 - fix(tstest)
Add timeout warning for long-running tests and introduce local settings configuration

- Add .claude/settings.local.json with permission configuration for local development
- Implement a timeout warning timer that notifies when tests run longer than 1 minute without an explicit timeout
- Clear the timeout warning timer upon test completion
- Remove unused import of logPrefixes in tstest.classes.tstest.ts

## 2025-05-24 - 1.11.2 - fix(tstest)
Improve timeout and error handling in test execution along with TAP parser timeout logic improvements.

- In the TAP parser, ensure that expected tests are properly set when no tests are defined to avoid false negatives on timeout.
- Use smartshell's terminate method and fallback kill to properly stop the entire process tree on timeout.
- Clean up browser, server, and WebSocket instances reliably even when a timeout occurs.
- Minor improvements in log file filtering and error logging for better clarity.

## 2025-05-24 - 1.11.1 - fix(tstest)
Clear timeout identifiers after successful test execution and add local CLAUDE settings

- Ensure timeout IDs are cleared when tests complete to prevent lingering timeouts
- Add .claude/settings.local.json with updated permission settings for CLI commands

## 2025-05-24 - 1.11.0 - feat(cli)
Add new timeout and file range options with enhanced logfile diff logging

- Introduce --timeout <seconds> option to safeguard tests from running too long
- Add --startFrom and --stopAt options to control the range of test files executed
- Enhance logfile organization by automatically moving previous logs and generating diff reports for failed or changed test outputs
- Update CLI argument parsing and internal timeout handling for both Node.js and browser tests

## 2025-05-24 - 1.10.2 - fix(tstest-logging)
Improve log file handling with log rotation and diff reporting

- Add .claude/settings.local.json to configure allowed shell and web operations
- Introduce movePreviousLogFiles function to archive previous log files when --logfile is used
- Enhance logging to generate error copies and diff reports between current and previous logs
- Add type annotations for console overrides in browser evaluations for improved stability

## 2025-05-23 - 1.10.1 - fix(tstest)
Improve file range filtering and summary logging by skipping test files outside the specified range and reporting them in the final summary.

- Introduce runSingleTestOrSkip to check file index against startFrom/stopAt values.
- Log skipped files with appropriate messages and add them to the summary.
- Update the logger to include total skipped files in the test summary.
- Add permission settings in .claude/settings.local.json to support new operations.

## 2025-05-23 - 1.10.0 - feat(cli)
Add --startFrom and --stopAt options to filter test files by range

- Introduced CLI options --startFrom and --stopAt in ts/index.ts for selective test execution
- Added validation to ensure provided range values are positive and startFrom is not greater than stopAt
- Propagated file range filtering into test grouping in tstest.classes.tstest.ts, applying the range filter across serial and parallel groups
- Updated usage messages to include the new options

## 2025-05-23 - 1.9.4 - fix(docs)
Update documentation and configuration for legal notices and CI permissions. This commit adds a new local settings file for tool permissions, refines the legal and trademark sections in the readme, and improves glob test files with clearer log messages.

- Added .claude/settings.local.json to configure permissions for various CLI commands
- Revised legal and trademark documentation in the readme to clarify company ownership and usage guidelines
- Updated glob test files with improved console log messages for better clarity during test discovery

## 2025-05-23 - 1.9.3 - fix(tstest)
Fix test timing display issue and update TAP protocol documentation

- Changed TAP parser regex to non-greedy pattern to correctly separate test timing metadata
- Enhanced readme.hints.md with detailed explanation of test timing fix and planned protocol upgrades
- Updated readme.md with improved usage examples for tapbundle and comprehensive test framework documentation
- Added new protocol design document (readme.protocol.md) and improvement plan (readme.plan.md) outlining future changes
- Introduced .claude/settings.local.json update for npm and CLI permissions
- Exported protocol utilities and added tapbundle protocol implementation for future enhancements

## 2025-05-23 - 1.9.2 - fix(logging)
Fix log file naming to prevent collisions and update logging system documentation.

- Enhance safe filename generation in tstest logging to preserve directory structure using double underscores.
- Update readme.hints.md to include detailed logging system documentation and behavior.
- Add .claude/settings.local.json with updated permissions for build tools.

## 2025-05-23 - 1.9.1 - fix(dependencies)
Update dependency versions and add local configuration files

- Bump @git.zone/tsbuild from ^2.5.1 to ^2.6.3
- Bump @types/node from ^22.15.18 to ^22.15.21
- Bump @push.rocks/smartexpect from ^2.4.2 to ^2.5.0
- Bump @push.rocks/smartfile from ^11.2.0 to ^11.2.3
- Bump @push.rocks/smartlog from ^3.1.1 to ^3.1.8
- Add .npmrc with npm registry configuration
- Add .claude/settings.local.json for local permissions

## 2025-05-16 - 1.9.0 - feat(docs)
Update documentation to embed tapbundle and clarify module exports for browser compatibility; also add CI permission settings.

- Embed tapbundle directly into tstest to simplify usage and ensure browser support.
- Update import paths in examples from '@push.rocks/tapbundle' to '@git.zone/tstest/tapbundle'.
- Revise the changelog to reflect version 1.8.0 improvements including enhanced test lifecycle hooks and parallel execution fixes.
- Add .claude/settings.local.json to configure CI-related permissions and tool operations.

## 2025-05-16 - 1.8.0 - feat(documentation)
Enhance README with detailed test features and update local settings for build permissions.

- Expanded the documentation to include tag filtering, parallel test execution groups, lifecycle hooks, snapshot testing, timeout control, retry logic, and test fixtures
- Updated .claude/settings.local.json to allow additional permissions for various build and test commands

## 2025-05-16 - 1.7.0 - feat(tstest)
Enhance tstest with fluent API, suite grouping, tag filtering, fixture & snapshot testing, and parallel execution improvements

- Updated npm scripts to run tests in verbose mode and support glob patterns with quotes
- Introduced tag filtering support (--tags) in the CLI to run tests by specified tags
- Implemented fluent syntax methods (tags, priority, retry, timeout) for defining tests and applying settings
- Added test suite grouping with describe(), along with beforeEach and afterEach lifecycle hooks
- Integrated a fixture system and snapshot testing via TapTools with base64 snapshot communication
- Enhanced TAP parser regex, error collection, and snapshot handling for improved debugging
- Improved parallel test execution by grouping files with a 'para__' pattern and running them concurrently

## 2025-05-15 - 1.6.0 - feat(package)
Revamp package exports and update permissions with an extensive improvement plan for test runner enhancements.

- Replaced 'main' and 'typings' in package.json with explicit exports for improved module resolution.
- Added .claude/settings.local.json to configure permissions for bash commands and web fetches.
- Updated readme.plan.md with a comprehensive roadmap covering enhanced error reporting, rich test metadata, nested test suites, and advanced test features.

## 2025-05-15 - 1.5.0 - feat(cli)
Improve test runner configuration: update test scripts, reorganize test directories, update dependencies and add local settings for command permissions.

- Updated package.json scripts to use pnpm and separate commands for tapbundle and tstest.
- Reorganized tests into dedicated directories (test/tapbundle and test/tstest) and removed deprecated test files.
- Refactored import paths and bumped dependency versions in tapbundle, tstest, and associated node utilities.
- Added .claude/settings.local.json to configure local permissions for bash and web fetch commands.
- Introduced ts/tspublish.json to define publish order.

## 2025-05-15 - 1.4.0 - feat(logging)
Display failed test console logs in default mode

- Introduce log buffering in TsTestLogger to capture console output for failed tests
- Enhance TapParser to collect and display error details when tests fail
- Update README and project plan to document log improvements for debugging

## 2025-05-15 - 1.3.1 - fix(settings)
Add local permissions configuration and remove obsolete test output log

- Added .claude/settings.local.json to configure allowed permissions for web fetch and bash commands
- Removed test-output.log to eliminate accidental commit of test artifacts

## 2025-05-15 - 1.3.0 - feat(logger)
Improve logging output and add --logfile support for persistent logs

- Add new .claude/settings.local.json with logging permissions configuration
- Remove obsolete readme.plan.md
- Introduce test/test.console.ts to capture and display console outputs during tests
- Update CLI in ts/index.ts to replace '--log-file' with '--logfile' flag
- Enhance TsTestLogger to support file logging, clean ANSI sequences, and improved JSON output
- Forward TAP protocol logs to testConsoleOutput in TapParser for better console distinction

## 2025-05-15 - 1.2.0 - feat(logging)
Improve logging output, CLI option parsing, and test report formatting.

- Added a centralized TsTestLogger with support for multiple verbosity levels, JSON output, and file logging (TODO).
- Integrated new logger into CLI parsing, TapParser, TapCombinator, and TsTest classes to ensure consistent and structured output.
- Introduced new CLI options (--quiet, --verbose, --no-color, --json, --log-file) for enhanced user control.
- Enhanced visual design with progress indicators, detailed error aggregation, and performance summaries.
- Updated documentation and logging code to align with improved CI/CD behavior, including skipping non-CI tests.

## 2025-05-15 - 1.1.0 - feat(cli)
Enhance test discovery with support for single file and glob pattern execution using improved CLI argument detection

- Detect execution mode (file, glob, directory) based on CLI input in ts/index.ts
- Refactor TestDirectory to load test files using SmartFile for single file and glob patterns
- Update TsTest to pass execution mode and adjust test discovery accordingly
- Bump dependency versions for typedserver, tsbundle, tapbundle, and others
- Add .claude/settings.local.json for updated permissions configuration

## 2025-01-23 - 1.0.96 - fix(TsTest)
Fixed improper type-check for promise-like testModule defaults

- Corrected the type-check for promise-like default exports in test modules
- Removed unnecessary setTimeout used for async execution

## 2025-01-23 - 1.0.95 - fix(core)
Fix delay handling in Chrome test execution

- Replaced smartdelay.delayFor with native Promise-based delay mechanism in runInChrome method.

## 2025-01-23 - 1.0.94 - fix(TsTest)
Fix test module execution by ensuring promise resolution delay

- Added a delay to ensure promise resolution when dynamically importing test modules in the runInChrome method.

## 2025-01-23 - 1.0.93 - fix(tstest)
Handle globalThis.tapPromise in browser runtime evaluation

- Added support for using globalThis.tapPromise in the browser evaluation logic.
- Added log messages to indicate the usage of globalThis.tapPromise.

## 2025-01-23 - 1.0.92 - fix(core)
Improve error logging for test modules without default promise

- Added logging to display the exported test module content when it does not export a default promise.

## 2025-01-23 - 1.0.91 - fix(core)
Refactored tstest class to enhance promise handling for test modules.

- Removed .gitlab-ci.yml configuration file.
- Updated package.json dependency versions.
- Added a condition to handle promiselike objects in tests.

## 2024-04-18 - 1.0.89 to 1.0.90 - Enhancements and Bug Fixes
Multiple updates and fixes have been made.

- Updated core components to enhance stability and performance.

## 2024-03-07 - 1.0.86 to 1.0.88 - Core Updates
Continued improvements and updates in the core module.

- Applied critical fixes to enhance core stability.

## 2024-01-19 - 1.0.85 to 1.0.89 - Bug Fixes
Series of core updates have been implemented.

- Addressed known bugs and improved overall system functionality.

## 2023-11-09 - 1.0.81 to 1.0.84 - Maintenance Updates
Maintenance updates focusing on core reliability.

- Improved core module through systematic updates.
- Strengthened system robustness.

## 2023-08-26 - 1.0.77 to 1.0.80 - Critical Fixes
Critical fixes implemented in core functionality.

- Enhanced core processing to fix existing issues.

## 2023-07-13 - 1.0.75 to 1.0.76 - Stability Improvements
Stability enhancements and minor improvements.

- Focused on ensuring a stable operational core.

## 2022-11-08 - 1.0.73 to 1.0.74 - Routine Fixes
Routine core fixes to address reported issues.

- Addressed minor issues in the core module.

## 2022-08-03 - 1.0.71 to 1.0.72 - Core Enhancements
Enhancements applied to core systems.

- Tweaked core components for enhanced reliability.

## 2022-05-04 - 1.0.69 to 1.0.70 - System Reliability Fixes
Fixes targeting the reliability of the core systems.

- Improved system reliability through targeted core updates.

## 2022-03-17 - 1.0.65 to 1.0.68 - Major Core Updates
Major updates and bug fixes delivered for core components.

- Enhanced central operations through key updates.

## 2022-02-15 - 1.0.60 to 1.0.64 - Core Stability Improvements
Focused updates on core stability and performance.

- Reinforced stability through systematic core changes.

## 2021-11-07 - 1.0.54 to 1.0.59 - Core Fixes and Improvements
Multiple core updates aimed at fixing and improving the system.

- Addressed outstanding bugs and improved performance in the core.

## 2021-08-20 - 1.0.50 to 1.0.53 - Core Functionality Updates
Continued updates to improve core functionality and user experience.

- Implemented essential core fixes to enhance user experience.

## 2020-10-01 - 1.0.44 to 1.0.49 - Core System Enhancements
Critical enhancements to core systems.

- Improved core operations and tackled existing issues.

## 2020-09-29 - 1.0.40 to 1.0.43 - Essential Fixes
Series of essential fixes for the core system.

- Rectified known issues and bolstered core functionalities.

## 2020-07-10 - 1.0.35 to 1.0.39 - Core Function Fixes
Focused improvements and fixes for critical components.

- Addressed critical core functions to boost system performance.

## 2020-06-01 - 1.0.31 to 1.0.34 - Core Updates
Updates to maintain core functionality efficacy.

- Fixed inefficiencies and updated essential components.

## 2019-10-02 - 1.0.26 to 1.0.29 - Core Maintenance
Regular maintenance and updates for core reliability.

- Addressed multiple core issues and enhanced system stability.

## 2019-05-28 - 1.0.20 to 1.0.25 - Core Improvements
General improvements targeting core functionalities.

- Made systematic improvements to core processes.

## 2019-04-08 - 1.0.16 to 1.0.19 - Bug Squashing
Resolved numerous issues within core operations.

- Fixed and optimized core functionalities for better performance.

## 2018-12-06 - 1.0.15 - Dependency Updates
Updates aimed at improving dependency management.

- Ensured dependencies are up-to-date for optimal performance.

## 2018-08-14 - 1.0.14 - Test Improvement
Major improvements in testing mechanisms and logging.

- Improved test results handling for accuracy and reliability.
- Enhanced logging features for increased clarity.

## 2018-08-04 - 1.0.1 to 1.0.13 - Initial Implementation and Fixes
Initial release and critical updates focusing on core stability and functionality.

- Implemented core components and established initial system structure.
- Addressed key bugs and enhanced initial functionality.
**license.md** (new file, 19 lines)

Copyright (c) 2014 Task Venture Capital GmbH (hello@task.vc)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
**npmextra.json**

```diff
@@ -6,11 +6,11 @@
   "gitzone": {
     "projectType": "npm",
     "module": {
-      "githost": "gitlab.com",
-      "gitscope": "gitzone",
+      "githost": "code.foss.global",
+      "gitscope": "git.zone",
       "gitrepo": "tstest",
       "description": "a test utility to run tests that match test/**/*.ts",
-      "npmPackagename": "@gitzone/tstest",
+      "npmPackagename": "@git.zone/tstest",
       "license": "MIT"
     }
   }
```
**package.json** (63 lines changed)

```diff
@@ -1,10 +1,13 @@
 {
   "name": "@git.zone/tstest",
-  "version": "1.0.85",
+  "version": "2.3.0",
   "private": false,
   "description": "a test utility to run tests that match test/**/*.ts",
-  "main": "dist_ts/index.js",
-  "typings": "dist_ts/index.d.ts",
+  "exports": {
+    ".": "./dist_ts/index.js",
+    "./tapbundle": "./dist_ts_tapbundle/index.js",
+    "./tapbundle_node": "./dist_ts_tapbundle_node/index.js"
+  },
   "type": "module",
   "author": "Lossless GmbH",
   "license": "MIT",
@@ -12,32 +15,43 @@
     "tstest": "./cli.js"
   },
   "scripts": {
-    "test": "(npm run cleanUp && npm run prepareTest && npm run tstest)",
-    "prepareTest": "git clone https://gitlab.com/sandboxzone/sandbox-npmts.git .nogit/sandbox-npmts && cd .nogit/sandbox-npmts && npm install",
-    "tstest": "cd .nogit/sandbox-npmts && node ../../cli.ts.js test/ --web",
-    "cleanUp": "rm -rf .nogit/sandbox-npmts",
-    "build": "(tsbuild --web --allowimplicitany --skiplibcheck)",
+    "test": "pnpm run build && pnpm run test:tapbundle:verbose && pnpm run test:tstest:verbose",
+    "test:tapbundle": "tsx ./cli.child.ts \"test/tapbundle/**/*.ts\"",
+    "test:tapbundle:verbose": "tsx ./cli.child.ts \"test/tapbundle/**/*.ts\" --verbose",
+    "test:tstest": "tsx ./cli.child.ts \"test/tstest/**/*.ts\"",
+    "test:tstest:verbose": "tsx ./cli.child.ts \"test/tstest/**/*.ts\" --verbose",
+    "build": "(tsbuild tsfolders)",
     "buildDocs": "tsdoc"
   },
   "devDependencies": {
-    "@git.zone/tsbuild": "^2.1.70",
-    "@types/node": "^20.9.0"
+    "@git.zone/tsbuild": "^2.6.3",
+    "@types/node": "^22.15.21"
   },
   "dependencies": {
-    "@api.global/typedserver": "^3.0.9",
-    "@git.zone/tsbundle": "^2.0.10",
-    "@git.zone/tsrun": "^1.2.46",
-    "@push.rocks/consolecolor": "^2.0.1",
-    "@push.rocks/smartbrowser": "^2.0.6",
+    "@api.global/typedserver": "^3.0.74",
+    "@git.zone/tsbundle": "^2.2.5",
+    "@git.zone/tsrun": "^1.3.3",
+    "@push.rocks/consolecolor": "^2.0.2",
+    "@push.rocks/qenv": "^6.1.0",
+    "@push.rocks/smartbrowser": "^2.0.8",
+    "@push.rocks/smartchok": "^1.0.34",
     "@push.rocks/smartcrypto": "^2.0.4",
     "@push.rocks/smartdelay": "^3.0.5",
-    "@push.rocks/smartfile": "^11.0.0",
-    "@push.rocks/smartlog": "^3.0.3",
-    "@push.rocks/smartpromise": "^4.0.3",
-    "@push.rocks/smartshell": "^3.0.3",
-    "@push.rocks/tapbundle": "^5.0.15",
-    "@types/ws": "^8.5.9",
-    "figures": "^6.0.1",
-    "ws": "^8.14.2"
+    "@push.rocks/smartenv": "^5.0.12",
+    "@push.rocks/smartexpect": "^2.5.0",
+    "@push.rocks/smartfile": "^11.2.3",
+    "@push.rocks/smartjson": "^5.0.20",
+    "@push.rocks/smartlog": "^3.1.8",
+    "@push.rocks/smartmongo": "^2.0.12",
+    "@push.rocks/smartpath": "^5.0.18",
+    "@push.rocks/smartpromise": "^4.2.3",
+    "@push.rocks/smartrequest": "^2.1.0",
+    "@push.rocks/smarts3": "^2.2.5",
+    "@push.rocks/smartshell": "^3.2.3",
+    "@push.rocks/smarttime": "^4.1.1",
+    "@types/ws": "^8.18.1",
+    "figures": "^6.1.0",
+    "ws": "^8.18.2"
   },
   "files": [
     "ts/**/*",
@@ -53,5 +67,6 @@
   ],
   "browserslist": [
     "last 1 chrome versions"
-  ]
+  ],
+  "packageManager": "pnpm@10.10.0+sha512.d615db246fe70f25dcfea6d8d73dee782ce23e2245e3c4f6f888249fb568149318637dca73c2c5c8ef2a4ca0d5657fb9567188bfab47f566d1ee6ce987815c39"
 }
```
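The new `exports` map is what lets consumers import the embedded tapbundle via subpaths instead of depending on `@push.rocks/tapbundle`. A minimal consumer test file would look roughly like this; the `tapbundle_node` subpath import and the closing `export default tap.start()` line follow conventions described elsewhere in this diff and are assumptions rather than copied code:

```typescript
// test/test.example.ts - resolved through the "exports" entries above
import { tap, expect } from '@git.zone/tstest/tapbundle';
import { tapNodeTools } from '@git.zone/tstest/tapbundle_node';

tap.test('addition works', async () => {
  expect(1 + 1).toEqual(2);
});

// tstest treats the default export as a promise-like test module
export default tap.start();
```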
**pnpm-lock.yaml** (generated, 11184 lines changed): diff suppressed because it is too large.
**readme.hints.md** (new file, 323 lines)

# Architecture Overview

## Project Structure

This project integrates tstest with tapbundle through a modular architecture:

1. **tstest** (`/ts/`) - The test runner that discovers and executes test files
2. **tapbundle** (`/ts_tapbundle/`) - The TAP testing framework for writing tests
3. **tapbundle_node** (`/ts_tapbundle_node/`) - Node.js-specific testing utilities

## How Components Work Together

### Test Execution Flow

1. **CLI Entry Point** (`cli.js` → `cli.ts.js` → `cli.child.ts`)
   - The CLI uses tsx to run TypeScript files directly
   - Accepts glob patterns to find test files
   - Supports options like `--verbose`, `--quiet`, `--web`

2. **Test Discovery**
   - tstest scans for test files matching the provided pattern
   - Defaults to `test/**/*.ts` when no pattern is specified
   - Supports both file and directory modes

3. **Test Runner**
   - Each test file imports `tap` and `expect` from tapbundle
   - Tests are written using `tap.test()` with async functions
   - Browser tests are compiled with esbuild and run in Chromium via Puppeteer

### Key Integration Points

1. **Import Structure**
   - Test files import from local tapbundle: `import { tap, expect } from '../../ts_tapbundle/index.js'`
   - Node-specific tests also import from tapbundle_node: `import { tapNodeTools } from '../../ts_tapbundle_node/index.js'`

2. **WebHelpers**
   - Browser tests can use webhelpers for DOM manipulation
   - `webhelpers.html` - Template literal for creating HTML strings
   - `webhelpers.fixture` - Creates DOM elements from HTML strings
   - Automatically detects browser environment and only enables in browser context

3. **Build System**
   - Uses `tsbuild tsfolders` to compile TypeScript (invoked by `pnpm build`)
   - Maintains separate output directories: `/dist_ts/`, `/dist_ts_tapbundle/`, `/dist_ts_tapbundle_node/`, `/dist_ts_tapbundle_protocol/`
   - Compilation order is resolved automatically based on dependencies in tspublish.json files
   - Protocol imports use compiled dist directories:
     ```typescript
     // In ts/tstest.classes.tap.parser.ts
     import { ProtocolParser } from '../dist_ts_tapbundle_protocol/index.js';

     // In ts_tapbundle/tapbundle.classes.tap.ts
     import { ProtocolEmitter } from '../dist_ts_tapbundle_protocol/index.js';
     ```

### Test Scripts

The package.json defines several test scripts:
- `test` - Builds and runs all tests (tapbundle and tstest)
- `test:tapbundle` - Runs tapbundle framework tests
- `test:tstest` - Runs tstest's own tests
- Both support `:verbose` variants for detailed output

### Environment Detection

The framework automatically detects the runtime environment:
- Node.js tests run directly via tsx
- Browser tests are compiled and served via a local server
- WebHelpers are only enabled in browser environment

This architecture allows for seamless testing across both Node.js and browser environments while maintaining a clean separation of concerns.

## Logging System

### Log File Naming (Fixed in v1.9.1)

When using the `--logfile` flag, tstest creates log files in `.nogit/testlogs/`. The log file naming was updated to preserve directory structure and prevent collisions:

- **Old behavior**: `test/tapbundle/test.ts` → `.nogit/testlogs/test.log`
- **New behavior**: `test/tapbundle/test.ts` → `.nogit/testlogs/test__tapbundle__test.log`

This fix ensures that test files with the same basename in different directories don't overwrite each other's logs. The implementation:
1. Takes the relative path from the current working directory
2. Replaces path separators (`/`) with double underscores (`__`)
3. Removes the `.ts` extension
4. Creates a flat filename that preserves the directory structure
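For illustration, the four steps amount to something like the following (a hypothetical helper, not the actual tstest source):

```typescript
import * as path from 'path';

// Maps a test file path to a collision-free log file name.
function toLogFileName(testFile: string, cwd: string = process.cwd()): string {
  const relativePath = path.relative(cwd, testFile);         // test/tapbundle/test.ts
  const flattened = relativePath.split(path.sep).join('__'); // test__tapbundle__test.ts
  const baseName = flattened.replace(/\.ts$/, '');           // test__tapbundle__test
  return path.join('.nogit', 'testlogs', `${baseName}.log`);
}

// toLogFileName('test/tapbundle/test.ts')
//   -> '.nogit/testlogs/test__tapbundle__test.log'
```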
### Test Timing Display (Fixed in v1.9.2)

Fixed an issue where test timing was displayed incorrectly with duplicate values like:
- Before: `✅ test name # time=133ms (0ms)`
- After: `✅ test name (133ms)`

The issue was in the TAP parser regex which was greedily capturing the entire line including the TAP timing comment. Changed the regex from `(.*)` to `(.*?)` to make it non-greedy, properly separating the test name from the timing metadata.
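A simplified illustration of the greedy versus non-greedy behaviour (not the actual parser regex):

```typescript
// With a greedy (.*) the optional timing group never gets a chance to match,
// so the captured name still contains '# time=133ms'.
const greedyPattern = /^ok (\d+) - (.*)(?: # time=(\d+)ms)?$/;
// With a non-greedy (.*?) the name stops before the timing comment.
const fixedPattern = /^ok (\d+) - (.*?)(?: # time=(\d+)ms)?$/;

const line = 'ok 1 - prepare test # time=133ms';
console.log(line.match(greedyPattern)?.[2]); // 'prepare test # time=133ms' (wrong)
console.log(line.match(fixedPattern)?.[2]);  // 'prepare test'
console.log(line.match(fixedPattern)?.[3]);  // '133'
```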
## Protocol Limitations and Improvements

### Current TAP Protocol Issues
The current implementation uses standard TAP format with metadata in comments:
```
ok 1 - test name # time=123ms
```

This has several limitations:
1. **Delimiter Conflict**: Test descriptions containing `#` can break parsing
2. **Regex Fragility**: Complex regex patterns that are hard to maintain
3. **Limited Metadata**: Difficult to add rich error information or custom data

### Planned Protocol V2
A new internal protocol is being designed that will:
- Use Unicode delimiters `⟦TSTEST:⟧` that won't conflict with test content
- Support structured JSON metadata
- Allow rich error reporting with stack traces and diffs
- Completely replace v1 protocol (no backwards compatibility)

### ts_tapbundle_protocol Directory
The protocol v2 implementation is contained in a separate `ts_tapbundle_protocol` directory:
- **Isomorphic Code**: All protocol code works in both browser and Node.js environments
- **No Platform Dependencies**: No Node.js-specific imports, ensuring true cross-platform compatibility
- **Clean Separation**: Protocol logic is isolated from platform-specific code in tstest and tapbundle
- **Shared Implementation**: Both tstest (parser) and tapbundle (emitter) use the same protocol classes
- **Build Process**:
  - Compiled by `pnpm build` via tsbuild to `dist_ts_tapbundle_protocol/`
  - Build order managed through tspublish.json files
  - Other modules import from the compiled dist directory, not source

This architectural decision ensures the protocol can be used in any JavaScript environment without modification and maintains proper build dependencies.

See `readme.protocol.md` for the full specification and `ts_tapbundle_protocol/` for the implementation.
## Protocol V2 Implementation Status

The Protocol V2 has been implemented to fix issues with TAP protocol parsing when test descriptions contain special characters like `#`, `###SNAPSHOT###`, or protocol markers like `⟦TSTEST:ERROR⟧`.

### Implementation Details:

1. **Protocol Components**:
   - `ProtocolEmitter` - Generates protocol v2 messages (used by tapbundle)
   - `ProtocolParser` - Parses protocol v2 messages (used by tstest)
   - Uses Unicode markers `⟦TSTEST:` and `⟧` to avoid conflicts with test content

2. **Current Status**:
   - ✅ Basic protocol emission and parsing works
   - ✅ Handles test descriptions with special characters correctly
   - ✅ Supports metadata for timing, tags, errors
   - ⚠️ Protocol messages sometimes appear in console output (parsing not catching all cases)

3. **Key Findings**:
   - `tap.skip.test()` doesn't create actual test objects, just logs and increments counter
   - `tap.todo()` method is not implemented (no `addTodo` method in Tap class)
   - Protocol parser's `isBlockStart` was fixed to only match exact block markers, not partial matches in test descriptions

4. **Import Paths**:
   - tstest imports from: `import { ProtocolParser } from '../dist_ts_tapbundle_protocol/index.js';`
   - tapbundle imports from: `import { ProtocolEmitter } from '../dist_ts_tapbundle_protocol/index.js';`
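As a rough sketch of what the marker format looks like on the wire (simplified; the real emitter and parser live in `ts_tapbundle_protocol/`):

```typescript
// Simplified view of Protocol V2 inline metadata, using the markers above.
const MARKER_START = '⟦TSTEST:';
const MARKER_END = '⟧';

function emitTestLine(index: number, description: string, timeMs: number): string {
  return `ok ${index} - ${description} ${MARKER_START}time:${timeMs}${MARKER_END}`;
}

function parseTestLine(line: string): { description: string; timeMs?: number } {
  const match = line.match(/^ok \d+ - (.*?)(?: ⟦TSTEST:time:(\d+)⟧)?$/);
  if (!match) throw new Error('not a TAP test line');
  return { description: match[1], timeMs: match[2] ? Number(match[2]) : undefined };
}

// Descriptions containing '#' no longer confuse the parser:
// emitTestLine(1, 'handles # in names', 123)
//   -> 'ok 1 - handles # in names ⟦TSTEST:time:123⟧'
```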
## Test Configuration System (Phase 2)

The Test Configuration System has been implemented to provide global settings and lifecycle hooks for tests.

### Key Features:

1. **00init.ts Discovery**:
   - Automatically detects `00init.ts` files in the same directory as test files
   - Creates a temporary loader file that imports both `00init.ts` and the test file
   - Loader files are cleaned up automatically after test execution

2. **Settings Inheritance**:
   - Global settings from `00init.ts` → File-level settings → Test-level settings
   - Settings include: timeout, retries, retryDelay, bail, concurrency
   - Lifecycle hooks: beforeAll, afterAll, beforeEach, afterEach

3. **Implementation Details**:
   - `SettingsManager` class handles settings inheritance and merging
   - `tap.settings()` API allows configuration at any level
   - Lifecycle hooks are integrated into test execution flow
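A `00init.ts` along these lines would exercise that flow; the option names come from the list above, while the exact shape of the settings object is an assumption:

```typescript
// test/tapbundle/00init.ts - hypothetical global configuration sketch
import { tap } from '../../ts_tapbundle/index.js';

tap.settings({
  timeout: 30000,   // per-test timeout (milliseconds assumed)
  retries: 2,       // retry failing tests up to twice
  retryDelay: 500,  // pause between retries
  bail: false,      // keep running after a failure
});
```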
### Important Development Notes:

1. **Local Development**: When developing tstest itself, use `node cli.js` instead of globally installed `tstest` to test changes

2. **Console Output Buffering**: Console output from tests is buffered and only displayed for failing tests. TAP-compliant comments (lines starting with `#`) are always shown.

3. **TypeScript Warnings**: Fixed async/await warnings in `movePreviousLogFiles()` by using sync versions of file operations
## Enhanced Communication Features (Phase 3)

The Enhanced Communication system has been implemented to provide rich, real-time feedback during test execution.

### Key Features:

1. **Event-Based Test Lifecycle Reporting**:
   - `test:queued` - Test is ready to run
   - `test:started` - Test execution begins
   - `test:completed` - Test finishes (with pass/fail status)
   - `suite:started` - Test suite/describe block begins
   - `suite:completed` - Test suite/describe block ends
   - `hook:started` - Lifecycle hook (beforeEach/afterEach) begins
   - `hook:completed` - Lifecycle hook finishes
   - `assertion:failed` - Assertion failure with detailed information

2. **Visual Diff Output for Assertion Failures**:
   - **String Diffs**: Character-by-character comparison with colored output
   - **Object/Array Diffs**: Deep property comparison showing added/removed/changed properties
   - **Primitive Diffs**: Clear display of expected vs actual values
   - **Colorized Output**: Green for expected, red for actual, yellow for differences
   - **Smart Formatting**: Multi-line strings and complex objects are formatted for readability

3. **Real-Time Test Progress API**:
   - Tests emit progress events as they execute
   - tstest parser processes events and updates display in real-time
   - Structured event format carries rich metadata (timing, errors, diffs)
   - Seamless integration with existing TAP protocol via Protocol V2

### Implementation Details:
- Events are transmitted via Protocol V2's `EVENT` block type
- Event data is JSON-encoded within protocol markers
- Parser handles events asynchronously for real-time updates
- Visual diffs are generated using custom diff algorithms for each data type
## Watch Mode (Phase 4)

tstest now supports watch mode for automatic test re-runs on file changes.

### Usage
```bash
tstest test/**/*.ts --watch
tstest test/specific.ts -w
```

### Features
- **Automatic Re-runs**: Tests re-run when any watched file changes
- **Debouncing**: Multiple rapid changes are batched (300ms delay)
- **Clear Output**: Console is cleared before each run for clean results
- **Status Updates**: Shows which files triggered the re-run
- **Graceful Exit**: Press Ctrl+C to stop watching

### Options
- `--watch` or `-w`: Enable watch mode
- `--watch-ignore`: Comma-separated patterns to ignore (e.g., `--watch-ignore node_modules,dist`)

### Implementation Details
- Uses `@push.rocks/smartchok` for cross-platform file watching
- Watches the entire project directory from where tests are run
- Ignores changes matching the ignore patterns
- Shows "Waiting for file changes..." between runs
## Fixed Issues

### tap.skip.test(), tap.todo(), and tap.only.test() (Fixed)

Previously reported issues with these methods have been resolved:

1. **tap.skip.test()** - Now properly creates test objects that are counted in test results
   - Tests marked with `skip.test()` appear in the test count
   - Shows as passed with skip directive in TAP output
   - `markAsSkipped()` method added to handle pre-test skip marking

2. **tap.todo.test()** - Fully implemented with test object creation
   - Supports both `tap.todo.test('description')` and `tap.todo.test('description', testFunc)`
   - Todo tests are counted and marked with todo directive
   - Both regular and parallel todo tests supported

3. **tap.only.test()** - Works correctly for focused testing
   - When `.only` tests exist, only those tests run
   - Other tests are not executed but still counted
   - Both regular and parallel only tests supported

These fixes ensure accurate test counts and proper TAP-compliant output for all test states.
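In a test file the three variants look roughly like this; the closing `export default tap.start()` line is assumed from tapbundle's usual convention:

```typescript
import { tap, expect } from '../../ts_tapbundle/index.js';

tap.skip.test('not ready yet', async () => {
  // counted in the totals, reported with a skip directive, never executed
});

tap.todo.test('spec still to be written'); // body is optional for todo tests

tap.only.test('focus on this one', async () => {
  expect(1 + 1).toEqual(2); // when .only tests exist, only they run
});

export default tap.start();
```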
## Test Timing Implementation

### Timing Architecture

Test timing is captured using `@push.rocks/smarttime`'s `HrtMeasurement` class, which provides high-resolution timing:

1. **Timing Capture**:
   - Each `TapTest` instance has its own `HrtMeasurement`
   - Timer starts immediately before test function execution
   - Timer stops after test completes (or fails/times out)
   - Millisecond precision is used for reporting

2. **Protocol Integration**:
   - Timing is embedded in TAP output using Protocol V2 markers
   - Inline format for simple timing: `ok 1 - test name ⟦TSTEST:time:123⟧`
   - Block format for complex metadata: `⟦TSTEST:META:{"time":456,"file":"test.ts"}⟧`

3. **Performance Metrics Calculation**:
   - Average is calculated from sum of individual test times, not total runtime
   - Slowest test detection prefers tests with >0ms duration
   - Failed tests still contribute their execution time to metrics
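Conceptually the capture wraps each test function like this (a sketch assuming `HrtMeasurement`'s `start()`/`stop()`/`milliSeconds` API):

```typescript
import { HrtMeasurement } from '@push.rocks/smarttime';

async function runWithTiming(testFunc: () => Promise<void>): Promise<number> {
  const measurement = new HrtMeasurement();
  measurement.start();             // immediately before the test function
  try {
    await testFunc();
  } finally {
    measurement.stop();            // also reached when the test fails or times out
  }
  return measurement.milliSeconds; // millisecond precision used for reporting
}
```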
### Edge Cases and Considerations

1. **Sub-millisecond Tests**:
   - Very fast tests may report 0ms due to millisecond rounding
   - Performance metrics handle this by showing "All tests completed in <1ms" when appropriate

2. **Special Test States**:
   - **Skipped tests**: Report 0ms (not executed)
   - **Todo tests**: Report 0ms (not executed)
   - **Failed tests**: Report actual execution time before failure
   - **Timeout tests**: Report time until timeout occurred

3. **Parallel Test Timing**:
   - Each parallel test tracks its own execution time independently
   - Parallel tests may have overlapping execution periods
   - Total suite time reflects wall-clock time, not sum of test times

4. **Hook Timing**:
   - `beforeEach`/`afterEach` hooks are not included in individual test times
   - Only the actual test function execution is measured

5. **Retry Timing**:
   - When tests retry, only the final attempt's duration is reported
   - Each retry attempt emits separate `test:started` events

### Parser Fix for Timing Metadata

The protocol parser was fixed to correctly handle inline timing metadata:
- Changed condition from `!simpleMatch[1].includes(':')` to check for simple key:value pairs
- Excludes prefixed formats (META:, SKIP:, TODO:, EVENT:) while parsing simple formats like `time:250`

This ensures timing metadata is correctly extracted and displayed in test results.
936
readme.md
936
readme.md
@ -1,61 +1,919 @@
|
||||
# @gitzone/tstest
|
||||
a test utility to run tests that match test/**/*.ts
|
||||
# @git.zone/tstest
|
||||
🧪 **A powerful, modern test runner for TypeScript** - making your test runs beautiful and informative!
|
||||
|
||||
## Availabililty and Links
|
||||
* [npmjs.org (npm package)](https://www.npmjs.com/package/@gitzone/tstest)
|
||||
* [gitlab.com (source)](https://gitlab.com/gitzone/tstest)
|
||||
* [github.com (source mirror)](https://github.com/gitzone/tstest)
|
||||
* [docs (typedoc)](https://gitzone.gitlab.io/tstest/)
|
||||
* [npmjs.org (npm package)](https://www.npmjs.com/package/@git.zone/tstest)
|
||||
* [code.foss.global (source)](https://code.foss.global/git.zone/tstest)
|
||||
|
||||
## Status for master
|
||||
## Why tstest?
|
||||
|
||||
Status Category | Status Badge
|
||||
-- | --
|
||||
GitLab Pipelines | [](https://lossless.cloud)
|
||||
GitLab Pipline Test Coverage | [](https://lossless.cloud)
|
||||
npm | [](https://lossless.cloud)
|
||||
Snyk | [](https://lossless.cloud)
|
||||
TypeScript Support | [](https://lossless.cloud)
|
||||
node Support | [](https://nodejs.org/dist/latest-v10.x/docs/api/)
|
||||
Code Style | [](https://lossless.cloud)
|
||||
PackagePhobia (total standalone install weight) | [](https://lossless.cloud)
|
||||
PackagePhobia (package size on registry) | [](https://lossless.cloud)
|
||||
BundlePhobia (total size when bundled) | [](https://lossless.cloud)
|
||||
Platform support | [](https://lossless.cloud) [](https://lossless.cloud)
|
||||
**tstest** is a TypeScript test runner that makes testing delightful. It's designed for modern development workflows with beautiful output, flexible test execution, and powerful features that make debugging a breeze.
|
||||
|
||||
### ✨ Key Features
|
||||
|
||||
- 🎯 **Smart Test Execution** - Run all tests, single files, or use glob patterns
|
||||
- 🎨 **Beautiful Output** - Color-coded results with emojis and clean formatting
|
||||
- 📊 **Multiple Output Modes** - Choose from normal, quiet, verbose, or JSON output
|
||||
- 🔍 **Automatic Discovery** - Finds all your test files automatically
|
||||
- 🌐 **Cross-Environment** - Supports Node.js and browser testing
|
||||
- 📝 **Detailed Logging** - Optional file logging for debugging
|
||||
- ⚡ **Performance Metrics** - See which tests are slow
|
||||
- 🤖 **CI/CD Ready** - JSON output mode for automation
|
||||
- 🏷️ **Tag-based Filtering** - Run only tests with specific tags
|
||||
- 🎯 **Parallel Test Execution** - Run tests in parallel groups
|
||||
- 🔧 **Test Lifecycle Hooks** - beforeEach/afterEach support
|
||||
- 📸 **Snapshot Testing** - Compare test outputs with saved snapshots
|
||||
- ⏳ **Timeout Control** - Set custom timeouts for tests
|
||||
- 🔁 **Retry Logic** - Automatically retry failing tests
|
||||
- 🛠️ **Test Fixtures** - Create reusable test data
|
||||
- 📦 **Browser-Compatible** - Full browser support with embedded tapbundle
|
||||
- 👀 **Watch Mode** - Automatically re-run tests on file changes
|
||||
- 📊 **Real-time Progress** - Live test execution progress updates
|
||||
- 🎨 **Visual Diffs** - Beautiful side-by-side diffs for failed assertions
|
||||
- 🔄 **Event-based Reporting** - Real-time test lifecycle events
|
||||
- ⚙️ **Test Configuration** - Flexible test settings with .tstest.json files
|
||||
- 🚀 **Protocol V2** - Enhanced TAP protocol with Unicode delimiters
|
||||
|
||||
## Installation
|
||||
|
||||
```bash
|
||||
npm install --save-dev @git.zone/tstest
|
||||
# or with pnpm
|
||||
pnpm add -D @git.zone/tstest
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
## cli usage
|
||||
### Basic Test Execution
|
||||
|
||||
lets assume we have a directory called test/ where all our tests arae defined. Simply type
|
||||
|
||||
```
|
||||
```bash
|
||||
# Run all tests in a directory
|
||||
tstest test/
|
||||
|
||||
# Run a specific test file
|
||||
tstest test/test.mycomponent.ts
|
||||
|
||||
# Use glob patterns
|
||||
tstest "test/**/*.spec.ts"
|
||||
tstest "test/unit/*.ts"
|
||||
```
|
||||
|
||||
to run all tests.
|
||||
### Execution Modes
|
||||
|
||||
## Syntax
|
||||
**tstest** intelligently detects how you want to run your tests:
|
||||
|
||||
tstest supports tap syntax. In other words your testfiles are run in a subprocess, and the console output contains trigger messages for tstest to determine test status. Inside your testfile you should use `@pushrocks/tapbundle` for the best results.
|
||||
1. **Directory mode** - Recursively finds all test files
|
||||
2. **File mode** - Runs a single test file
|
||||
3. **Glob mode** - Uses pattern matching for flexible test selection
|
||||
|
||||
## Environments
|
||||
### Command Line Options
|
||||
|
||||
tstest supports different environments:
|
||||
| Option | Description |
|--------|-------------|
| `--quiet`, `-q` | Minimal output - perfect for CI environments |
| `--verbose`, `-v` | Show all console output from tests |
| `--no-color` | Disable colored output |
| `--json` | Output results as JSON |
| `--logfile` | Save detailed logs with automatic error and diff tracking |
| `--tags <tags>` | Run only tests with specific tags (comma-separated) |
| `--timeout <seconds>` | Timeout test files after specified seconds |
| `--startFrom <n>` | Start running from test file number n |
| `--stopAt <n>` | Stop running at test file number n |
| `--watch`, `-w` | Watch for file changes and re-run tests |
| `--watch-ignore <patterns>` | Ignore patterns in watch mode (comma-separated) |
| `--only` | Run only tests marked with .only |

### Example Outputs

#### Normal Output (Default)

```
🔍 Test Discovery
Mode: directory
Pattern: test
Found: 4 test file(s)

▶️ test/test.ts (1/4)
Runtime: node.js
✅ prepare test (1ms)
Summary: 1/1 PASSED

📊 Test Summary
┌────────────────────────────────┐
│ Total Files: 4                 │
│ Total Tests: 4                 │
│ Passed: 4                      │
│ Failed: 0                      │
│ Duration: 542ms                │
└────────────────────────────────┘

ALL TESTS PASSED! 🎉
```

#### Quiet Mode

```
Found 4 tests
✅ test functionality works
✅ api calls return expected data
✅ error handling works correctly
✅ performance is within limits

Summary: 4/4 | 542ms | PASSED
```

#### Verbose Mode
|
||||
Shows all console output from your tests, making debugging easier:
|
||||
```
|
||||
▶️ test/api.test.ts (1/1)
|
||||
Runtime: node.js
|
||||
Making API call to /users...
|
||||
Response received: 200 OK
|
||||
Processing user data...
|
||||
✅ api calls return expected data (145ms)
|
||||
Summary: 1/1 PASSED
|
||||
```
|
||||
|
||||
#### JSON Mode
|
||||
Perfect for CI/CD pipelines:
|
||||
```json
|
||||
{"event":"discovery","count":4,"pattern":"test","executionMode":"directory"}
|
||||
{"event":"fileStart","filename":"test/test.ts","runtime":"node.js","index":1,"total":4}
|
||||
{"event":"testResult","testName":"prepare test","passed":true,"duration":1}
|
||||
{"event":"summary","summary":{"totalFiles":4,"totalTests":4,"totalPassed":4,"totalFailed":0,"totalDuration":542}}
|
||||
```
|
||||
|
||||
## Test File Naming Conventions
|
||||
|
||||
tstest supports different test environments through file naming:
|
||||
|
||||
| Pattern | Environment | Example |
|---------|-------------|---------|
| `*.ts` | Node.js (default) | `test.basic.ts` |
| `*.node.ts` | Node.js only | `test.api.node.ts` |
| `*.chrome.ts` | Chrome browser | `test.dom.chrome.ts` |
| `*.browser.ts` | Browser environment | `test.ui.browser.ts` |
| `*.both.ts` | Both Node.js and browser | `test.isomorphic.both.ts` |

> Note: there is alpha support for the Deno environment by naming a file `test-something.deno.ts`.

### Writing Tests with tapbundle
|
||||
|
||||
tstest includes tapbundle, a powerful TAP-based test framework. Import it from the embedded tapbundle:
|
||||
|
||||
```typescript
|
||||
import { expect, tap } from '@git.zone/tstest/tapbundle';
|
||||
|
||||
tap.test('my awesome test', async () => {
|
||||
const result = await myFunction();
|
||||
expect(result).toEqual('expected value');
|
||||
});
|
||||
|
||||
tap.start();
|
||||
```
|
||||
|
||||
**Module Exports**
|
||||
|
||||
tstest provides multiple exports for different use cases:
|
||||
|
||||
- `@git.zone/tstest` - Main CLI and test runner functionality
|
||||
- `@git.zone/tstest/tapbundle` - Browser-compatible test framework
|
||||
- `@git.zone/tstest/tapbundle_node` - Node.js-specific test utilities
|
||||
|
||||
## tapbundle Test Framework
|
||||
|
||||
### Basic Test Syntax
|
||||
|
||||
```typescript
|
||||
import { tap, expect } from '@git.zone/tstest/tapbundle';
|
||||
|
||||
// Basic test
|
||||
tap.test('should perform basic arithmetic', async () => {
|
||||
expect(2 + 2).toEqual(4);
|
||||
});
|
||||
|
||||
// Async test with tools
|
||||
tap.test('async operations', async (tools) => {
|
||||
await tools.delayFor(100); // delay for 100ms
|
||||
const result = await fetchData();
|
||||
expect(result).toBeDefined();
|
||||
});
|
||||
|
||||
// Start test execution
|
||||
tap.start();
|
||||
```
|
||||
|
||||
### Test Modifiers and Chaining
|
||||
|
||||
```typescript
|
||||
// Skip a test
|
||||
tap.skip.test('not ready yet', async () => {
|
||||
// This test will be skipped
|
||||
});
|
||||
|
||||
// Run only this test (exclusive)
|
||||
tap.only.test('focus on this', async () => {
|
||||
// Only this test will run
|
||||
});
|
||||
|
||||
// Todo test - creates actual test object marked as todo
|
||||
tap.todo.test('implement later', async () => {
|
||||
// This test will be counted but marked as todo
|
||||
});
|
||||
|
||||
// Chaining modifiers
|
||||
tap.timeout(5000)
|
||||
.retry(3)
|
||||
.tags('api', 'integration')
|
||||
.test('complex test', async (tools) => {
|
||||
// Test with 5s timeout, 3 retries, and tags
|
||||
});
|
||||
```
|
||||
|
||||
### Test Organization with describe()
|
||||
|
||||
```typescript
|
||||
tap.describe('User Management', () => {
|
||||
let testDatabase;
|
||||
|
||||
tap.beforeEach(async () => {
|
||||
testDatabase = await createTestDB();
|
||||
});
|
||||
|
||||
tap.afterEach(async () => {
|
||||
await testDatabase.cleanup();
|
||||
});
|
||||
|
||||
tap.test('should create user', async () => {
|
||||
const user = await testDatabase.createUser({ name: 'John' });
|
||||
expect(user.id).toBeDefined();
|
||||
});
|
||||
|
||||
tap.describe('User Permissions', () => {
|
||||
tap.test('should set admin role', async () => {
|
||||
// Nested describe blocks
|
||||
});
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
### Test Tools (Available in Test Function)
|
||||
|
||||
Every test function receives a `tools` parameter with utilities:
|
||||
|
||||
```typescript
|
||||
tap.test('using test tools', async (tools) => {
|
||||
// Delay utilities
|
||||
await tools.delayFor(1000); // delay for 1000ms
|
||||
await tools.delayForRandom(100, 500); // random delay between 100-500ms
|
||||
|
||||
// Skip test conditionally
|
||||
tools.skipIf(process.env.CI === 'true', 'Skipping in CI');
|
||||
|
||||
// Skip test unconditionally
|
||||
if (!apiKeyAvailable) {
|
||||
tools.skip('API key not available');
|
||||
}
|
||||
|
||||
// Mark as todo
|
||||
tools.todo('Needs implementation');
|
||||
|
||||
// Retry configuration
|
||||
tools.retry(3); // Set retry count
|
||||
|
||||
// Timeout configuration
|
||||
tools.timeout(10000); // Set timeout to 10s
|
||||
|
||||
// Context sharing between tests
|
||||
tools.context.set('userId', 12345);
|
||||
const userId = tools.context.get('userId');
|
||||
|
||||
// Deferred promises
|
||||
const deferred = tools.defer();
|
||||
setTimeout(() => deferred.resolve('done'), 100);
|
||||
await deferred.promise;
|
||||
|
||||
// Colored console output
|
||||
const coloredString = await tools.coloredString('Success!', 'green');
|
||||
console.log(coloredString);
|
||||
|
||||
// Error handling helper
|
||||
const error = await tools.returnError(async () => {
|
||||
throw new Error('Expected error');
|
||||
});
|
||||
expect(error).toBeInstanceOf(Error);
|
||||
});
|
||||
```
|
||||
|
||||
### Snapshot Testing
|
||||
|
||||
```typescript
|
||||
tap.test('snapshot test', async (tools) => {
|
||||
const output = generateComplexOutput();
|
||||
|
||||
// Compare with saved snapshot
|
||||
await tools.matchSnapshot(output);
|
||||
|
||||
// Named snapshots for multiple checks in one test
|
||||
await tools.matchSnapshot(output.header, 'header');
|
||||
await tools.matchSnapshot(output.body, 'body');
|
||||
});
|
||||
|
||||
// Update snapshots with: UPDATE_SNAPSHOTS=true tstest test/
|
||||
```
|
||||
|
||||
### Test Fixtures
|
||||
|
||||
```typescript
|
||||
// Define reusable fixtures
|
||||
tap.defineFixture('testUser', async (data) => ({
|
||||
id: Date.now(),
|
||||
name: data?.name || 'Test User',
|
||||
email: data?.email || 'test@example.com',
|
||||
created: new Date()
|
||||
}));
|
||||
|
||||
tap.defineFixture('testPost', async (data) => ({
|
||||
id: Date.now(),
|
||||
title: data?.title || 'Test Post',
|
||||
authorId: data?.authorId || 1
|
||||
}));
|
||||
|
||||
// Use fixtures in tests
|
||||
tap.test('fixture test', async (tools) => {
|
||||
const user = await tools.fixture('testUser', { name: 'John' });
|
||||
const post = await tools.fixture('testPost', { authorId: user.id });
|
||||
|
||||
expect(post.authorId).toEqual(user.id);
|
||||
|
||||
// Factory pattern for multiple instances
|
||||
const users = await tools.factory('testUser').createMany(5);
|
||||
expect(users).toHaveLength(5);
|
||||
});
|
||||
```
|
||||
|
||||
### Parallel Test Execution
|
||||
|
||||
```typescript
|
||||
// Parallel tests within a file
|
||||
tap.testParallel('parallel test 1', async () => {
|
||||
await heavyOperation();
|
||||
});
|
||||
|
||||
tap.testParallel('parallel test 2', async () => {
|
||||
await anotherHeavyOperation();
|
||||
});
|
||||
|
||||
// File naming for parallel groups
|
||||
// test.api.para__1.ts - runs in parallel with other para__1 files
|
||||
// test.db.para__1.ts - runs in parallel with other para__1 files
|
||||
// test.auth.para__2.ts - runs after para__1 group completes
|
||||
```
|
||||
|
||||
### Assertions with expect()
|
||||
|
||||
tapbundle uses @push.rocks/smartexpect for assertions:
|
||||
|
||||
```typescript
|
||||
// Basic assertions
|
||||
expect(value).toEqual(5);
|
||||
expect(value).not.toEqual(10);
|
||||
expect(obj).toDeepEqual({ a: 1, b: 2 });
|
||||
|
||||
// Type assertions
|
||||
expect('hello').toBeTypeofString();
|
||||
expect(42).toBeTypeofNumber();
|
||||
expect(true).toBeTypeofBoolean();
|
||||
expect([]).toBeArray();
|
||||
expect({}).toBeTypeOf('object');
|
||||
|
||||
// Comparison assertions
|
||||
expect(5).toBeGreaterThan(3);
|
||||
expect(3).toBeLessThan(5);
|
||||
expect(5).toBeGreaterThanOrEqual(5);
|
||||
expect(5).toBeLessThanOrEqual(5);
|
||||
expect(0.1 + 0.2).toBeCloseTo(0.3, 10);
|
||||
|
||||
// Truthiness
|
||||
expect(true).toBeTrue();
|
||||
expect(false).toBeFalse();
|
||||
expect('text').toBeTruthy();
|
||||
expect(0).toBeFalsy();
|
||||
expect(null).toBeNull();
|
||||
expect(undefined).toBeUndefined();
|
||||
expect(null).toBeNullOrUndefined();
|
||||
|
||||
// String assertions
|
||||
expect('hello world').toStartWith('hello');
|
||||
expect('hello world').toEndWith('world');
|
||||
expect('hello world').toInclude('lo wo');
|
||||
expect('hello world').toMatch(/^hello/);
|
||||
expect('option').toBeOneOf(['choice', 'option', 'alternative']);
|
||||
|
||||
// Array assertions
|
||||
expect([1, 2, 3]).toContain(2);
|
||||
expect([1, 2, 3]).toContainAll([1, 3]);
|
||||
expect([1, 2, 3]).toExclude(4);
|
||||
expect([1, 2, 3]).toHaveLength(3);
|
||||
expect([]).toBeEmptyArray();
|
||||
expect([{ id: 1 }]).toContainEqual({ id: 1 });
|
||||
|
||||
// Object assertions
|
||||
expect(obj).toHaveProperty('name');
|
||||
expect(obj).toHaveProperty('user.email', 'test@example.com');
|
||||
expect(obj).toHaveDeepProperty(['level1', 'level2']);
|
||||
expect(obj).toMatchObject({ name: 'John' });
|
||||
|
||||
// Function assertions
|
||||
expect(() => { throw new Error('test'); }).toThrow();
|
||||
expect(() => { throw new Error('test'); }).toThrow(Error);
|
||||
expect(() => { throw new Error('test error'); }).toThrowErrorMatching(/test/);
|
||||
expect(myFunction).not.toThrow();
|
||||
|
||||
// Promise assertions
|
||||
await expect(Promise.resolve('value')).resolves.toEqual('value');
|
||||
await expect(Promise.reject(new Error('fail'))).rejects.toThrow();
|
||||
|
||||
// Custom assertions
|
||||
expect(7).customAssertion(
|
||||
value => value % 2 === 1,
|
||||
'Value is not odd'
|
||||
);
|
||||
```
|
||||
|
||||
### Pre-tasks
|
||||
|
||||
Run setup tasks before tests start:
|
||||
|
||||
```typescript
|
||||
tap.preTask('setup database', async () => {
|
||||
await initializeTestDatabase();
|
||||
console.log('Database initialized');
|
||||
});
|
||||
|
||||
tap.preTask('load environment', async () => {
|
||||
await loadTestEnvironment();
|
||||
});
|
||||
|
||||
// Pre-tasks run in order before any tests
|
||||
```
|
||||
|
||||
### Tag-based Test Filtering
|
||||
|
||||
```typescript
|
||||
// Tag individual tests
|
||||
tap.tags('unit', 'api')
|
||||
.test('api unit test', async () => {
|
||||
// Test code
|
||||
});
|
||||
|
||||
tap.tags('integration', 'slow')
|
||||
.test('database integration', async () => {
|
||||
// Test code
|
||||
});
|
||||
|
||||
// Run only tests with specific tags
|
||||
// tstest test/ --tags unit,api
|
||||
```
|
||||
|
||||
### Context Sharing
|
||||
|
||||
Share data between tests:
|
||||
|
||||
```typescript
|
||||
tap.test('first test', async (tools) => {
|
||||
const sessionId = await createSession();
|
||||
tools.context.set('sessionId', sessionId);
|
||||
});
|
||||
|
||||
tap.test('second test', async (tools) => {
|
||||
const sessionId = tools.context.get('sessionId');
|
||||
expect(sessionId).toBeDefined();
|
||||
|
||||
// Cleanup
|
||||
tools.context.delete('sessionId');
|
||||
});
|
||||
```
|
||||
|
||||
### Browser Testing with webhelpers
|
||||
|
||||
For browser-specific tests:
|
||||
|
||||
```typescript
|
||||
import { tap, webhelpers } from '@git.zone/tstest/tapbundle';
|
||||
|
||||
tap.test('DOM manipulation', async () => {
|
||||
// Create DOM elements from HTML strings
|
||||
const element = await webhelpers.fixture(webhelpers.html`
|
||||
<div class="test-container">
|
||||
<h1>Test Title</h1>
|
||||
<button id="test-btn">Click Me</button>
|
||||
</div>
|
||||
`);
|
||||
|
||||
expect(element.querySelector('h1').textContent).toEqual('Test Title');
|
||||
|
||||
// Simulate interactions
|
||||
const button = element.querySelector('#test-btn');
|
||||
button.click();
|
||||
});
|
||||
|
||||
tap.test('CSS testing', async () => {
|
||||
const styles = webhelpers.css`
|
||||
.test-class {
|
||||
color: red;
|
||||
font-size: 16px;
|
||||
}
|
||||
`;
|
||||
|
||||
// styles is a string that can be injected into the page
|
||||
expect(styles).toInclude('color: red');
|
||||
});
|
||||
```
|
||||
|
||||
### Advanced Error Handling
|
||||
|
||||
```typescript
|
||||
tap.test('error handling', async (tools) => {
|
||||
// Capture errors without failing the test
|
||||
const error = await tools.returnError(async () => {
|
||||
await functionThatThrows();
|
||||
});
|
||||
|
||||
expect(error).toBeInstanceOf(Error);
|
||||
expect(error.message).toEqual('Expected error message');
|
||||
});
|
||||
```
|
||||
|
||||
### Test Wrap
|
||||
|
||||
Create wrapped test environments:
|
||||
|
||||
```typescript
|
||||
import { TapWrap } from '@git.zone/tstest/tapbundle';
|
||||
|
||||
const tapWrap = new TapWrap({
|
||||
before: async () => {
|
||||
console.log('Before all tests');
|
||||
await globalSetup();
|
||||
},
|
||||
after: async () => {
|
||||
console.log('After all tests');
|
||||
await globalCleanup();
|
||||
}
|
||||
});
|
||||
|
||||
// Tests registered here will have the wrap lifecycle
|
||||
tapWrap.tap.test('wrapped test', async () => {
|
||||
// This test runs with the wrap setup/teardown
|
||||
});
|
||||
```
|
||||
|
||||
## Advanced Features
|
||||
|
||||
### Watch Mode
|
||||
|
||||
Automatically re-run tests when files change:
|
||||
|
||||
```bash
|
||||
# Watch all files in the project
|
||||
tstest test/ --watch
|
||||
|
||||
# Watch with custom ignore patterns
|
||||
tstest test/ --watch --watch-ignore "dist/**,coverage/**"
|
||||
|
||||
# Short form
|
||||
tstest test/ -w
|
||||
```
|
||||
|
||||
**Features:**
|
||||
- 👀 Shows which files triggered the re-run
|
||||
- ⏱️ 300ms debouncing to batch rapid changes
|
||||
- 🔄 Clears console between runs for clean output
|
||||
- 📁 Intelligently ignores common non-source files
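
The 300ms debounce mentioned above can be pictured like this; a minimal sketch of the idea, not tstest's actual watcher implementation:

```typescript
// Collect file-change events for 300ms and trigger a single re-run per batch.
const changedFiles = new Set<string>();
let debounceTimer: ReturnType<typeof setTimeout> | undefined;

function onFileChanged(file: string, rerun: (files: string[]) => void): void {
  changedFiles.add(file);
  clearTimeout(debounceTimer);
  debounceTimer = setTimeout(() => {
    rerun([...changedFiles]); // one re-run for the whole batch of changes
    changedFiles.clear();
  }, 300);
}

// example: two rapid saves result in a single re-run
onFileChanged('ts/index.ts', (files) => console.log('re-running tests for', files));
onFileChanged('ts/helpers.ts', (files) => console.log('re-running tests for', files));
```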
|
||||
|
||||
### Real-time Test Progress
|
||||
|
||||
tstest provides real-time updates during test execution:
|
||||
|
||||
```
|
||||
▶️ test/api.test.ts (1/4)
|
||||
Runtime: node.js
|
||||
⏳ Running: api endpoint validation...
|
||||
✅ api endpoint validation (145ms)
|
||||
⏳ Running: error handling...
|
||||
✅ error handling (23ms)
|
||||
Summary: 2/2 PASSED
|
||||
```
|
||||
|
||||
### Visual Diffs for Failed Assertions
|
||||
|
||||
When assertions fail, tstest shows beautiful side-by-side diffs:
|
||||
|
||||
```
|
||||
❌ should return correct user data
|
||||
|
||||
String Diff:
|
||||
- Expected
|
||||
+ Received
|
||||
|
||||
- Hello World
|
||||
+ Hello Universe
|
||||
|
||||
Object Diff:
|
||||
{
|
||||
name: "John",
|
||||
- age: 30,
|
||||
+ age: 31,
|
||||
email: "john@example.com"
|
||||
}
|
||||
```
|
||||
|
||||
### Test Configuration (.tstest.json)
|
||||
|
||||
Configure test behavior with `.tstest.json` files:
|
||||
|
||||
```json
|
||||
{
|
||||
"timeout": 30000,
|
||||
"retries": 2,
|
||||
"bail": false,
|
||||
"parallel": true,
|
||||
"tags": ["unit", "fast"],
|
||||
"env": {
|
||||
"NODE_ENV": "test"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Configuration files are discovered in:
|
||||
1. Test file directory
|
||||
2. Parent directories (up to project root)
|
||||
3. Project root
|
||||
4. Home directory (`~/.tstest.json`)
|
||||
|
||||
Settings cascade and merge, with closer files taking precedence.
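
As a rough mental model of that cascade (an assumption drawn from the inheritance rules in `readme.plan.md`, not the exact tstest internals), the merge could be resolved like this:

```typescript
interface TstestConfig {
  timeout?: number;
  retries?: number;
  bail?: boolean;
  parallel?: boolean;
  tags?: string[];
  env?: Record<string, string>;
}

// Merge from the most general location to the most specific one, so closer
// files take precedence; arrays/objects are combined, primitives are replaced.
const mergeConfigs = (...configs: TstestConfig[]): TstestConfig =>
  configs.reduce<TstestConfig>((merged, next) => ({
    ...merged,
    ...next,
    tags: [...new Set([...(merged.tags ?? []), ...(next.tags ?? [])])],
    env: { ...merged.env, ...next.env },
  }), {});

const homeConfig: TstestConfig = { timeout: 60000, env: { NODE_ENV: 'test' } };
const projectConfig: TstestConfig = { timeout: 30000, tags: ['unit'] };
const testDirConfig: TstestConfig = { retries: 2, tags: ['fast'] };

// home directory -> project root -> test file directory (closest wins)
console.log(mergeConfigs(homeConfig, projectConfig, testDirConfig));
// => { timeout: 30000, env: { NODE_ENV: 'test' }, tags: ['unit', 'fast'], retries: 2 }
```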
|
||||
|
||||
### Event-based Test Reporting
|
||||
|
||||
tstest emits detailed events during test execution for integration with CI/CD tools:
|
||||
|
||||
```json
|
||||
{"event":"suite:started","file":"test/api.test.ts","timestamp":"2025-05-26T10:30:00.000Z"}
|
||||
{"event":"test:started","name":"api endpoint validation","timestamp":"2025-05-26T10:30:00.100Z"}
|
||||
{"event":"test:progress","name":"api endpoint validation","message":"Validating response schema"}
|
||||
{"event":"test:completed","name":"api endpoint validation","passed":true,"duration":145}
|
||||
{"event":"suite:completed","file":"test/api.test.ts","passed":true,"total":2,"failed":0}
|
||||
```
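
A CI script can consume this stream line by line. The following is a sketch that assumes the event names look exactly as shown above; spawning via `npx` is just one way to invoke the CLI:

```typescript
import { spawn } from 'node:child_process';
import { createInterface } from 'node:readline';

const child = spawn('npx', ['tstest', 'test/', '--json'], {
  stdio: ['ignore', 'pipe', 'inherit'],
});
const lines = createInterface({ input: child.stdout! });

lines.on('line', (line) => {
  let event: { event?: string; name?: string; passed?: boolean; duration?: number };
  try {
    event = JSON.parse(line);
  } catch {
    return; // ignore any non-JSON output
  }
  if (event.event === 'test:completed' && event.passed === false) {
    console.error(`FAILED: ${event.name} (${event.duration}ms)`);
  }
});

child.on('exit', (code) => process.exit(code ?? 1));
```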
|
||||
|
||||
### Enhanced TAP Protocol (Protocol V2)
|
||||
|
||||
tstest uses an enhanced TAP protocol with Unicode delimiters for better parsing:
|
||||
|
||||
```
|
||||
⟦TSTEST:EVENT:test:started⟧{"name":"my test","timestamp":"2025-05-26T10:30:00.000Z"}
|
||||
ok 1 my test
|
||||
⟦TSTEST:EVENT:test:completed⟧{"name":"my test","passed":true,"duration":145}
|
||||
```
|
||||
|
||||
This prevents conflicts with test output that might contain TAP-like formatting.
|
||||
|
||||
## Advanced Features
|
||||
|
||||
### Glob Pattern Support
|
||||
|
||||
Run specific test patterns:
|
||||
```bash
|
||||
# Run all unit tests
|
||||
tstest "test/unit/**/*.ts"
|
||||
|
||||
# Run all integration tests
|
||||
tstest "test/integration/*.test.ts"
|
||||
|
||||
# Run multiple patterns
|
||||
tstest "test/**/*.spec.ts" "test/**/*.test.ts"
|
||||
```
|
||||
|
||||
**Important**: Always quote glob patterns to prevent shell expansion. Without quotes, the shell will expand the pattern and only pass the first matching file to tstest.
|
||||
|
||||
### Enhanced Test Logging
|
||||
|
||||
The `--logfile` option provides intelligent test logging with automatic organization:
|
||||
|
||||
```bash
|
||||
tstest test/ --logfile
|
||||
```
|
||||
|
||||
**Log Organization:**
|
||||
- **Current Run**: `.nogit/testlogs/[testname].log`
|
||||
- **Previous Run**: `.nogit/testlogs/previous/[testname].log`
|
||||
- **Failed Tests**: `.nogit/testlogs/00err/[testname].log`
|
||||
- **Changed Output**: `.nogit/testlogs/00diff/[testname].log`
|
||||
|
||||
**Features:**
|
||||
- Previous logs are automatically moved to the `previous/` folder
|
||||
- Failed tests create copies in `00err/` for quick identification
|
||||
- Tests with changed output create diff reports in `00diff/`
|
||||
- The `00err/` and `00diff/` folders are cleared on each run
|
||||
|
||||
**Example Diff Report:**
|
||||
```
|
||||
DIFF REPORT: test__api__integration.log
|
||||
Generated: 2025-05-24T01:29:13.847Z
|
||||
================================================================================
|
||||
|
||||
- [Line 8] ✅ api test passes (150ms)
|
||||
+ [Line 8] ✅ api test passes (165ms)
|
||||
|
||||
================================================================================
|
||||
Previous version had 40 lines
|
||||
Current version has 40 lines
|
||||
```
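
Since failed runs are copied into `.nogit/testlogs/00err/`, a small helper script can list them after a run; a sketch, assuming the folder layout described above:

```typescript
import { readdir } from 'node:fs/promises';

// List the tests that failed in the last run by reading the error-log folder.
async function listFailedTests(): Promise<string[]> {
  try {
    const files = await readdir('.nogit/testlogs/00err');
    return files.map((file) => file.replace(/\.log$/, ''));
  } catch {
    return []; // folder is missing or empty when nothing failed
  }
}

listFailedTests().then((names) =>
  console.log(names.length ? `Failed: ${names.join(', ')}` : 'No failed tests'),
);
```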
|
||||
|
||||
### Test Timeout Protection
|
||||
|
||||
Prevent runaway tests with the `--timeout` option:
|
||||
|
||||
```bash
|
||||
# Timeout any test file that runs longer than 60 seconds
|
||||
tstest test/ --timeout 60
|
||||
|
||||
# Shorter timeout for unit tests
|
||||
tstest test/unit/ --timeout 10
|
||||
```
|
||||
|
||||
When a test exceeds the timeout:
|
||||
- The test process is terminated (SIGTERM)
|
||||
- The test is marked as failed
|
||||
- An error log is created in `.nogit/testlogs/00err/`
|
||||
- Clear error message shows the timeout duration
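
Conceptually, the enforcement works roughly like the following; an illustrative sketch only (the `tsx` runner and the `test/test.slow.ts` filename are assumptions), not tstest's actual implementation:

```typescript
import { spawn } from 'node:child_process';

function runWithTimeout(testFile: string, timeoutSeconds: number): void {
  const child = spawn('tsx', [testFile], { stdio: 'inherit' });
  const timer = setTimeout(() => {
    console.error(`⏰ Timeout: ${testFile} exceeded ${timeoutSeconds}s, sending SIGTERM`);
    child.kill('SIGTERM'); // terminate the runaway test process
  }, timeoutSeconds * 1000);
  child.on('exit', (code) => {
    clearTimeout(timer);
    console.log(`${testFile} exited with code ${code}`);
  });
}

runWithTimeout('test/test.slow.ts', 60);
```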
|
||||
|
||||
### Test File Range Control
|
||||
|
||||
Run specific ranges of test files using `--startFrom` and `--stopAt`:
|
||||
|
||||
```bash
|
||||
# Run tests starting from the 5th file
|
||||
tstest test/ --startFrom 5
|
||||
|
||||
# Run only files 5 through 10
|
||||
tstest test/ --startFrom 5 --stopAt 10
|
||||
|
||||
# Run only the first 3 test files
|
||||
tstest test/ --stopAt 3
|
||||
```
|
||||
|
||||
This is particularly useful for:
|
||||
- Debugging specific test failures in large test suites
|
||||
- Running tests in chunks on different CI runners
|
||||
- Quickly testing changes to specific test files
|
||||
|
||||
The output shows which files are skipped:
|
||||
```
|
||||
⏭️ test/auth.test.ts (1/10)
|
||||
Skipped: before start range (5)
|
||||
⏭️ test/user.test.ts (2/10)
|
||||
Skipped: before start range (5)
|
||||
▶️ test/api.test.ts (5/10)
|
||||
Runtime: node.js
|
||||
✅ api endpoints work (145ms)
|
||||
```
|
||||
|
||||
### Performance Analysis
|
||||
|
||||
In verbose mode, see performance metrics:
|
||||
```
|
||||
⏱️ Performance Metrics:
|
||||
Average per test: 135ms
|
||||
Slowest test: api integration test (486ms)
|
||||
```
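
These numbers are straightforward to derive from per-test durations; a tiny illustration with made-up sample data:

```typescript
const results = [
  { name: 'api integration test', durationMs: 486 },
  { name: 'prepare test', durationMs: 1 },
  { name: 'error handling works correctly', durationMs: 23 },
];

const average = results.reduce((sum, test) => sum + test.durationMs, 0) / results.length;
const slowest = results.reduce((max, test) => (test.durationMs > max.durationMs ? test : max));

console.log(`Average per test: ${Math.round(average)}ms`);
console.log(`Slowest test: ${slowest.name} (${slowest.durationMs}ms)`);
```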
|
||||
|
||||
### Parallel Test Groups
|
||||
|
||||
Tests can be organized into parallel groups for concurrent execution:
|
||||
|
||||
```
|
||||
━━━ Parallel Group: para__1 ━━━
|
||||
▶️ test/auth.para__1.ts
|
||||
▶️ test/user.para__1.ts
|
||||
... tests run concurrently ...
|
||||
──────────────────────────────────
|
||||
|
||||
━━━ Parallel Group: para__2 ━━━
|
||||
▶️ test/db.para__2.ts
|
||||
▶️ test/api.para__2.ts
|
||||
... tests run concurrently ...
|
||||
──────────────────────────────────
|
||||
```
|
||||
|
||||
Files with the same parallel group suffix (e.g., `para__1`) run simultaneously, while different groups run sequentially.
|
||||
|
||||
### CI/CD Integration
|
||||
|
||||
For continuous integration, combine quiet and JSON modes:
|
||||
```bash
|
||||
# GitHub Actions example
|
||||
tstest test/ --json > test-results.json
|
||||
|
||||
# Or minimal output
|
||||
tstest test/ --quiet
|
||||
```
|
||||
|
||||
**Advanced CI Example:**
|
||||
```bash
|
||||
# Run tests with comprehensive logging and safety features
|
||||
tstest test/ \
|
||||
--timeout 300 \
|
||||
--logfile \
|
||||
--json > test-results.json
|
||||
|
||||
# Run specific test chunks in parallel CI jobs
|
||||
tstest test/ --startFrom 1 --stopAt 10 # Job 1
|
||||
tstest test/ --startFrom 11 --stopAt 20 # Job 2
|
||||
tstest test/ --startFrom 21 # Job 3
|
||||
```
|
||||
|
||||
### Debugging Failed Tests
|
||||
|
||||
When tests fail, use the enhanced logging features:
|
||||
|
||||
```bash
|
||||
# Run with logging to capture detailed output
|
||||
tstest test/ --logfile --verbose
|
||||
|
||||
# Check error logs
|
||||
ls .nogit/testlogs/00err/
|
||||
|
||||
# Review diffs for flaky tests
|
||||
cat .nogit/testlogs/00diff/test__api__endpoints.log
|
||||
|
||||
# Re-run specific failed tests
|
||||
tstest test/api/endpoints.test.ts --verbose --timeout 60
|
||||
```
|
||||
|
||||
## Changelog
|
||||
|
||||
### Version 1.11.0
|
||||
- 👀 Added Watch Mode with `--watch`/`-w` flag for automatic test re-runs
|
||||
- 📊 Implemented real-time test progress updates with event streaming
|
||||
- 🎨 Added visual diffs for failed assertions with side-by-side comparison
|
||||
- 🔄 Enhanced event-based test lifecycle reporting
|
||||
- ⚙️ Added test configuration system with `.tstest.json` files
|
||||
- 🚀 Implemented Protocol V2 with Unicode delimiters for better TAP parsing
|
||||
- 🐛 Fixed `tap.todo()` to create proper test objects
|
||||
- 🐛 Fixed `tap.skip.test()` to correctly create and count test objects
|
||||
- 🐛 Fixed `tap.only.test()` implementation with `--only` flag support
|
||||
- 📁 Added settings inheritance for cascading test configuration
|
||||
- ⏱️ Added debouncing for file change events in watch mode
|
||||
|
||||
### Version 1.10.0
|
||||
- ⏱️ Added `--timeout <seconds>` option for test file timeout protection
|
||||
- 🎯 Added `--startFrom <n>` and `--stopAt <n>` options for test file range control
|
||||
- 📁 Enhanced `--logfile` with intelligent log organization:
|
||||
- Previous logs moved to `previous/` folder
|
||||
- Failed tests copied to `00err/` folder
|
||||
- Changed tests create diff reports in `00diff/` folder
|
||||
- 🔍 Improved test discovery to show skipped files with clear reasons
|
||||
- 🐛 Fixed TypeScript compilation warnings and unused variables
|
||||
- 📊 Test summaries now include skipped file counts
|
||||
|
||||
### Version 1.9.2
|
||||
- 🐛 Fixed test timing display issue (removed duplicate timing in output)
|
||||
- 📝 Improved internal protocol design documentation
|
||||
- 🔧 Added protocol v2 utilities for future improvements
|
||||
|
||||
### Version 1.9.1
|
||||
- 🐛 Fixed log file naming to preserve directory structure
|
||||
- 📁 Log files now prevent collisions: `test__dir__file.log`
|
||||
|
||||
### Version 1.9.0
|
||||
- 📚 Comprehensive documentation update
|
||||
- 🏗️ Embedded tapbundle for better integration
|
||||
- 🌐 Full browser compatibility
|
||||
|
||||
### Version 1.8.0
|
||||
- 📦 Embedded tapbundle directly into tstest project
|
||||
- 🌐 Made tapbundle fully browser-compatible
|
||||
- 📸 Added snapshot testing with base64-encoded communication protocol
|
||||
- 🏷️ Introduced tag-based test filtering
|
||||
- 🔧 Enhanced test lifecycle hooks (beforeEach/afterEach)
|
||||
- 🎯 Fixed parallel test execution and grouping
|
||||
- ⏳ Improved timeout and retry mechanisms
|
||||
- 🛠️ Added test fixtures for reusable test data
|
||||
- 📊 Enhanced TAP parser for better test reporting
|

## Contribution

We are always happy for code contributions. If you are not the code contributing type that is ok. Still, maintaining Open Source repositories takes considerable time and thought. If you like the quality of what we do and our modules are useful to you we would appreciate a little monthly contribution: You can [contribute one time](https://lossless.link/contribute-onetime) or [contribute monthly](https://lossless.link/contribute). :)

|
||||
|
||||
## License and Legal Information
|
||||
|
||||
This repository contains open-source code that is licensed under the MIT License. A copy of the MIT License can be found in the [license](license.md) file within this repository.
|
||||
|
||||
**Please note:** The MIT License does not grant permission to use the trade names, trademarks, service marks, or product names of the project, except as required for reasonable and customary use in describing the origin of the work and reproducing the content of the NOTICE file.
|
||||
|
||||
### Trademarks
|
||||
|
||||
This project is owned and maintained by Task Venture Capital GmbH. The names and logos associated with Task Venture Capital GmbH and any related products or services are trademarks of Task Venture Capital GmbH and are not included within the scope of the MIT license granted herein. Use of these trademarks must comply with Task Venture Capital GmbH's Trademark Guidelines, and any usage must be approved in writing by Task Venture Capital GmbH.
|
||||
|
||||
### Company Information
|
||||
|
||||
Task Venture Capital GmbH
|
||||
Registered at District Court Bremen, HRB 35230 HB, Germany
|
||||
|
||||
For any legal inquiries or if you require further information, please contact us via email at hello@task.vc.
|
||||
|
||||
By using this repository, you acknowledge that you have read this section, agree to comply with its terms, and understand that the licensing of the code does not imply endorsement by Task Venture Capital GmbH of any derivative works.
|
321
readme.plan.md
Normal file
@ -0,0 +1,321 @@
|
||||
# Improvement Plan for tstest and tapbundle
|
||||
|
||||
!! FIRST: Reread /home/philkunz/.claude/CLAUDE.md to ensure following all guidelines !!
|
||||
|
||||
## Improved Internal Protocol (NEW - Critical) ✅ COMPLETED
|
||||
|
||||
### Current Issues ✅ RESOLVED
|
||||
- ✅ TAP protocol uses `#` for metadata which conflicts with test descriptions containing `#`
|
||||
- ✅ Fragile regex parsing that breaks with special characters
|
||||
- ✅ Limited extensibility for new metadata types
|
||||
|
||||
### Proposed Solution: Protocol V2 ✅ IMPLEMENTED
|
||||
- ✅ Use Unicode delimiters `⟦TSTEST:META:{}⟧` that won't appear in test names
|
||||
- ✅ Structured JSON metadata format
|
||||
- ✅ Separate protocol blocks for complex data (errors, snapshots)
|
||||
- ✅ Complete replacement of v1 (no backwards compatibility needed)
|
||||
|
||||
### Implementation ✅ COMPLETED
|
||||
- ✅ Phase 1: Create protocol v2 implementation in ts_tapbundle_protocol
|
||||
- ✅ Phase 2: Replace all v1 code in both tstest and tapbundle with v2
|
||||
- ✅ Phase 3: Delete all v1 parsing and generation code
|
||||
|
||||
#### ts_tapbundle_protocol Directory
|
||||
The protocol v2 implementation will be contained in the `ts_tapbundle_protocol` directory as isomorphic TypeScript code:
|
||||
- **Isomorphic Design**: All code must work in both browser and Node.js environments
|
||||
- **No Node.js Imports**: No Node.js-specific modules allowed (no fs, path, child_process, etc.)
|
||||
- **Protocol Classes**: Contains classes implementing all sides of the protocol:
|
||||
- ✅ `ProtocolEmitter`: For generating protocol v2 messages (used by tapbundle)
|
||||
- ✅ `ProtocolParser`: For parsing protocol v2 messages (used by tstest)
|
||||
- ✅ `ProtocolMessage`: Base classes for different message types
|
||||
- ✅ `ProtocolTypes`: TypeScript interfaces and types for protocol structures
|
||||
- **Pure TypeScript**: Only browser-compatible APIs and pure TypeScript/JavaScript code
|
||||
- **Build Integration**:
|
||||
- Compiled by `pnpm build` (via tsbuild) to `dist_ts_tapbundle_protocol/`
|
||||
- Build order defined in tspublish.json files
|
||||
- Imported by ts and ts_tapbundle modules from the compiled dist directory
|
||||
|
||||
See `readme.protocol.md` for detailed specification.
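
To make the division of labour concrete, here is a trimmed-down sketch of what the emitter side could look like; class and method names are illustrative, the real classes live in `ts_tapbundle_protocol`:

```typescript
// Isomorphic by construction: no Node-specific imports, only plain TypeScript.
class ProtocolEmitter {
  emitTestResult(
    index: number,
    name: string,
    passed: boolean,
    meta: Record<string, unknown>,
  ): string[] {
    const status = passed ? 'ok' : 'not ok';
    return [
      `${status} ${index} - ${name}`,
      `⟦TSTEST:META:${JSON.stringify(meta)}⟧`, // Unicode-delimited metadata line
    ];
  }
}

const emitter = new ProtocolEmitter();
console.log(emitter.emitTestResult(1, 'my test', true, { time: 123 }).join('\n'));
```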
|
||||
|
||||
## Test Configuration System (NEW)
|
||||
|
||||
### Global Test Configuration via 00init.ts
|
||||
- **Discovery**: Check for `test/00init.ts` before running tests
|
||||
- **Execution**: Import and execute before any test files if found
|
||||
- **Purpose**: Define project-wide default test settings
|
||||
|
||||
### tap.settings() API
|
||||
```typescript
|
||||
interface TapSettings {
|
||||
// Timing
|
||||
timeout?: number; // Default timeout for all tests (ms)
|
||||
slowThreshold?: number; // Mark tests as slow if they exceed this (ms)
|
||||
|
||||
// Execution Control
|
||||
bail?: boolean; // Stop on first test failure
|
||||
retries?: number; // Number of retries for failed tests
|
||||
retryDelay?: number; // Delay between retries (ms)
|
||||
|
||||
// Output Control
|
||||
suppressConsole?: boolean; // Suppress console output in passing tests
|
||||
verboseErrors?: boolean; // Show full stack traces
|
||||
showTestDuration?: boolean; // Show duration for each test
|
||||
|
||||
// Parallel Execution
|
||||
maxConcurrency?: number; // Max parallel tests (for .para files)
|
||||
isolateTests?: boolean; // Run each test in fresh context
|
||||
|
||||
// Lifecycle Hooks
|
||||
beforeAll?: () => Promise<void> | void;
|
||||
afterAll?: () => Promise<void> | void;
|
||||
beforeEach?: (testName: string) => Promise<void> | void;
|
||||
afterEach?: (testName: string, passed: boolean) => Promise<void> | void;
|
||||
|
||||
// Environment
|
||||
env?: Record<string, string>; // Additional environment variables
|
||||
|
||||
// Features
|
||||
enableSnapshots?: boolean; // Enable snapshot testing
|
||||
snapshotDirectory?: string; // Custom snapshot directory
|
||||
updateSnapshots?: boolean; // Update snapshots instead of comparing
|
||||
}
|
||||
```
|
||||
|
||||
### Settings Inheritance
|
||||
- Global (00init.ts) → File level → Test level
|
||||
- More specific settings override less specific ones
|
||||
- Arrays/objects are merged, primitives are replaced
|
||||
|
||||
### Implementation Phases
|
||||
1. **Core Infrastructure**: Settings storage and merge logic
|
||||
2. **Discovery**: 00init.ts loading mechanism
|
||||
3. **Application**: Apply settings to test execution
|
||||
4. **Advanced**: Parallel execution and snapshot configuration
|
||||
|
||||
## 1. Enhanced Communication Between tapbundle and tstest ✅ COMPLETED
|
||||
|
||||
### 1.1 Real-time Test Progress API ✅ COMPLETED
|
||||
- ✅ Create a bidirectional communication channel between tapbundle and tstest
|
||||
- ✅ Emit events for test lifecycle stages (start, progress, completion)
|
||||
- ✅ Allow tstest to subscribe to tapbundle events for better progress reporting
|
||||
- ✅ Implement a standardized message format for test metadata
|
||||
|
||||
### 1.2 Rich Error Reporting ✅ COMPLETED
|
||||
- ✅ Pass structured error objects from tapbundle to tstest
|
||||
- ✅ Include stack traces, code snippets, and contextual information
|
||||
- ✅ Support for error categorization (assertion failures, timeouts, uncaught exceptions)
|
||||
- ✅ Visual diff output for failed assertions
|
||||
|
||||
## 2. Enhanced toolsArg Functionality
|
||||
|
||||
### 2.3 Test Data and Context Sharing (Partial)
|
||||
```typescript
|
||||
tap.test('data-driven test', async (toolsArg) => {
|
||||
// Parameterized test data (not yet implemented)
|
||||
const testData = toolsArg.data<TestInput>();
|
||||
expect(processData(testData)).toEqual(expected);
|
||||
});
|
||||
```
|
||||
|
||||
## 3. Nested Tests and Test Suites
|
||||
|
||||
### 3.2 Hierarchical Test Organization (Not yet implemented)
|
||||
- Support for multiple levels of nesting
|
||||
- Inherited context and configuration from parent suites
|
||||
- Aggregated reporting for test suites
|
||||
- Suite-level lifecycle hooks
|
||||
|
||||
## 4. Advanced Test Features
|
||||
|
||||
### 4.1 Snapshot Testing ✅ (Basic implementation complete)
|
||||
|
||||
### 4.2 Performance Benchmarking
|
||||
```typescript
|
||||
tap.test('performance test', async (toolsArg) => {
|
||||
const benchmark = toolsArg.benchmark();
|
||||
|
||||
// Run operation
|
||||
await expensiveOperation();
|
||||
|
||||
// Assert performance constraints
|
||||
benchmark.expect({
|
||||
maxDuration: 1000,
|
||||
maxMemory: '100MB'
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
|
||||
## 5. Test Execution Improvements
|
||||
|
||||
|
||||
### 5.2 Watch Mode ✅ COMPLETED
|
||||
- Automatically re-run tests on file changes
|
||||
- Debounced file change detection (300ms)
|
||||
- Clear console output between runs
|
||||
- Shows which files triggered re-runs
|
||||
- Graceful exit with Ctrl+C
|
||||
- `--watch-ignore` option for excluding patterns
|
||||
|
||||
### 5.3 Advanced Test Filtering (Partial) ⚠️
|
||||
```typescript
|
||||
// Exclude tests by pattern (not yet implemented)
|
||||
tstest --exclude "**/slow/**"
|
||||
|
||||
// Run only failed tests from last run (not yet implemented)
|
||||
tstest --failed
|
||||
|
||||
// Run tests modified in git (not yet implemented)
|
||||
tstest --changed
|
||||
```
|
||||
|
||||
## 6. Reporting and Analytics
|
||||
|
||||
### 6.1 Custom Reporters
|
||||
- Plugin architecture for custom reporters
|
||||
- Built-in reporters: JSON, JUnit, HTML, Markdown
|
||||
- Real-time streaming reporters
|
||||
- Aggregated test metrics and trends
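
Since this is still planning-stage, the plugin surface is open; one possible shape is sketched below (purely hypothetical, no such API exists yet):

```typescript
interface TestReporter {
  onSuiteStarted?(file: string): void;
  onTestCompleted?(name: string, passed: boolean, durationMs: number): void;
  onRunCompleted?(summary: { totalTests: number; totalFailed: number }): void;
}

// Example: a minimal Markdown reporter built against that hypothetical interface.
const markdownReporter: TestReporter = {
  onTestCompleted(name, passed, durationMs) {
    console.log(`| ${passed ? '✅' : '❌'} | ${name} | ${durationMs}ms |`);
  },
  onRunCompleted(summary) {
    console.log(`\n**${summary.totalTests - summary.totalFailed}/${summary.totalTests} passed**`);
  },
};

markdownReporter.onTestCompleted?.('api endpoint validation', true, 145);
```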
|
||||
|
||||
### 6.2 Coverage Integration
|
||||
- Built-in code coverage collection
|
||||
- Coverage thresholds and enforcement
|
||||
- Coverage trending over time
|
||||
- Integration with CI/CD pipelines
|
||||
|
||||
### 6.3 Test Analytics Dashboard
|
||||
- Web-based dashboard for test results
|
||||
- Historical test performance data
|
||||
- Flaky test detection
|
||||
- Test impact analysis
|
||||
|
||||
## 7. Developer Experience
|
||||
|
||||
### 7.1 Better Error Messages
|
||||
- Clear, actionable error messages
|
||||
- Suggestions for common issues
|
||||
- Links to documentation
|
||||
- Code examples in error output
|
||||
|
||||
## Implementation Phases
|
||||
|
||||
### Phase 1: Improved Internal Protocol (Priority: Critical) ✅ COMPLETED
|
||||
1. ✅ Create ts_tapbundle_protocol directory with isomorphic protocol v2 implementation
|
||||
- ✅ Implement ProtocolEmitter class for message generation
|
||||
- ✅ Implement ProtocolParser class for message parsing
|
||||
- ✅ Define ProtocolMessage types and interfaces
|
||||
- ✅ Ensure all code is browser and Node.js compatible
|
||||
- ✅ Add tspublish.json to configure build order
|
||||
2. ✅ Update build configuration to compile ts_tapbundle_protocol first
|
||||
3. ✅ Replace TAP parser in tstest with Protocol V2 parser importing from dist_ts_tapbundle_protocol
|
||||
4. ✅ Replace TAP generation in tapbundle with Protocol V2 emitter importing from dist_ts_tapbundle_protocol
|
||||
5. ✅ Delete all v1 TAP parsing code from tstest
|
||||
6. ✅ Delete all v1 TAP generation code from tapbundle
|
||||
7. ✅ Test with real-world test suites containing special characters
|
||||
|
||||
### Phase 2: Test Configuration System (Priority: High) ✅ COMPLETED
|
||||
1. ✅ Implement tap.settings() API with TypeScript interfaces
|
||||
2. ✅ Add 00init.ts discovery and loading mechanism
|
||||
3. ✅ Implement settings inheritance and merge logic
|
||||
4. ✅ Apply settings to test execution (timeouts, retries, etc.)
|
||||
|
||||
### Phase 3: Enhanced Communication (Priority: High) ✅ COMPLETED
|
||||
1. ✅ Build on Protocol V2 for richer communication
|
||||
2. ✅ Implement real-time test progress API
|
||||
3. ✅ Add structured error reporting with diffs and traces
|
||||
|
||||
### Phase 4: Developer Experience (Priority: Medium) ❌ NOT STARTED
|
||||
1. Add watch mode
|
||||
2. Implement custom reporters
|
||||
3. Complete advanced test filtering options
|
||||
4. Add performance benchmarking API
|
||||
|
||||
### Phase 5: Analytics and Performance (Priority: Low) ❌ NOT STARTED
|
||||
1. Build test analytics dashboard
|
||||
2. Implement coverage integration
|
||||
3. Create trend analysis tools
|
||||
4. Add test impact analysis
|
||||
|
||||
## Technical Considerations
|
||||
|
||||
### API Design Principles
|
||||
- Clean, modern API design without legacy constraints
|
||||
- Progressive enhancement approach
|
||||
- Well-documented features and APIs
|
||||
- Clear, simple interfaces
|
||||
|
||||
### Performance Goals
|
||||
- Minimal overhead for test execution
|
||||
- Efficient parallel execution
|
||||
- Fast test discovery
|
||||
- Optimized browser test bundling
|
||||
|
||||
### Integration Points
|
||||
- Clean interfaces between tstest and tapbundle
|
||||
- Extensible plugin architecture
|
||||
- Standard test result format
|
||||
- Compatible with existing CI/CD tools
|
||||
|
||||
## Summary of Remaining Work
|
||||
|
||||
### ✅ Completed
|
||||
- **Protocol V2**: Full implementation with Unicode delimiters, structured metadata, and special character handling
|
||||
- **Test Configuration System**: tap.settings() API, 00init.ts discovery, settings inheritance, lifecycle hooks
|
||||
- **Enhanced Communication**: Event-based test lifecycle reporting, visual diff output for assertion failures, real-time test progress API
|
||||
- **Rich Error Reporting**: Stack traces, error metadata, and visual diffs through protocol
|
||||
- **Tags Filtering**: `--tags` option for running specific tagged tests
|
||||
|
||||
### ✅ Existing Features (Not in Plan)
|
||||
- **Timeout Support**: `--timeout` option and per-test timeouts
|
||||
- **Test Retries**: `tap.retry()` for flaky test handling
|
||||
- **Parallel Tests**: `.testParallel()` for concurrent execution
|
||||
- **Snapshot Testing**: Basic implementation with `toMatchSnapshot()`
|
||||
- **Test Lifecycle**: `describe()` blocks with `beforeEach`/`afterEach`
|
||||
- **Skip Tests**: `tap.skip.test()` (though it doesn't create test objects)
|
||||
- **Log Files**: `--logfile` option saves output to `.nogit/testlogs/`
|
||||
- **Test Range**: `--startFrom` and `--stopAt` for partial runs
|
||||
|
||||
### ⚠️ Partially Completed
|
||||
- **Advanced Test Filtering**: Have `--tags` but missing `--exclude`, `--failed`, `--changed`
|
||||
|
||||
### ❌ Not Started
|
||||
|
||||
#### High Priority
|
||||
|
||||
#### Medium Priority
|
||||
2. **Developer Experience**
|
||||
- Watch mode for file changes
|
||||
- Custom reporters (JSON, JUnit, HTML, Markdown)
|
||||
- Performance benchmarking API
|
||||
- Better error messages with suggestions
|
||||
|
||||
3. **Enhanced toolsArg**
|
||||
- Test data injection
|
||||
- Context sharing between tests
|
||||
- Parameterized tests
|
||||
|
||||
4. **Test Organization**
|
||||
- Hierarchical test suites
|
||||
- Nested describe blocks
|
||||
- Suite-level lifecycle hooks
|
||||
|
||||
#### Low Priority
|
||||
5. **Analytics and Performance**
|
||||
- Test analytics dashboard
|
||||
- Code coverage integration
|
||||
- Trend analysis
|
||||
- Flaky test detection
|
||||
|
||||
### Recently Fixed Issues ✅
|
||||
- **tap.todo()**: Now fully implemented with test object creation
|
||||
- **tap.skip.test()**: Now creates test objects and maintains accurate test count
|
||||
- **tap.only.test()**: Works correctly - when .only tests exist, only those run
|
||||
|
||||
### Remaining Minor Issues
|
||||
- **Protocol Output**: Some protocol messages still appear in console output
|
||||
|
||||
### Next Recommended Steps
|
||||
1. Add Watch Mode (Phase 4) - high developer value for fast feedback
|
||||
2. Implement Custom Reporters - important for CI/CD integration
|
||||
3. Implement performance benchmarking API
|
||||
4. Add better error messages with suggestions
|
287
readme.protocol.md
Normal file
@ -0,0 +1,287 @@
|
||||
# Improved Internal Protocol Design
|
||||
|
||||
## Current Issues with TAP Protocol
|
||||
|
||||
1. **Delimiter Conflict**: Using `#` for metadata conflicts with test descriptions containing `#`
|
||||
2. **Ambiguous Parsing**: No clear boundary between test name and metadata
|
||||
3. **Limited Extensibility**: Adding new metadata requires regex changes
|
||||
4. **Mixed Concerns**: Protocol data mixed with human-readable output
|
||||
|
||||
## Proposed Internal Protocol v2
|
||||
|
||||
### Design Principles
|
||||
|
||||
1. **Clear Separation**: Protocol data must be unambiguously separated from user content
|
||||
2. **Extensibility**: Easy to add new metadata without breaking parsers
|
||||
3. **Backwards Compatible**: Can coexist with standard TAP for gradual migration
|
||||
4. **Machine Readable**: Structured format for reliable parsing
|
||||
5. **Human Friendly**: Still readable in raw form
|
||||
|
||||
### Protocol Options
|
||||
|
||||
#### Option 1: Special Delimiters
|
||||
```
|
||||
ok 1 - test description ::TSTEST:: {"time":123,"retry":0}
|
||||
not ok 2 - another test ::TSTEST:: {"time":45,"error":"timeout"}
|
||||
ok 3 - skipped test ::TSTEST:: {"time":0,"skip":"not ready"}
|
||||
```
|
||||
|
||||
**Pros**:
|
||||
- Simple to implement
|
||||
- Backwards compatible with TAP parsers (they ignore the suffix)
|
||||
- Easy to parse with split()
|
||||
|
||||
**Cons**:
|
||||
- Still could conflict if test name contains `::TSTEST::`
|
||||
- Not standard TAP
|
||||
|
||||
#### Option 2: Separate Metadata Lines
|
||||
```
|
||||
ok 1 - test description
|
||||
::METADATA:: {"test":1,"time":123,"retry":0}
|
||||
not ok 2 - another test
|
||||
::METADATA:: {"test":2,"time":45,"error":"timeout"}
|
||||
```
|
||||
|
||||
**Pros**:
|
||||
- Complete separation of concerns
|
||||
- No chance of conflicts
|
||||
- Can include arbitrary metadata
|
||||
|
||||
**Cons**:
|
||||
- Requires correlation between lines
|
||||
- More complex parsing
|
||||
|
||||
#### Option 3: YAML Blocks (TAP 13 Compatible)
|
||||
```
|
||||
ok 1 - test description
|
||||
---
|
||||
time: 123
|
||||
retry: 0
|
||||
...
|
||||
not ok 2 - another test
|
||||
---
|
||||
time: 45
|
||||
error: timeout
|
||||
stack: |
|
||||
Error: timeout
|
||||
at Test.run (test.js:10:5)
|
||||
...
|
||||
```
|
||||
|
||||
**Pros**:
|
||||
- Standard TAP 13 feature
|
||||
- Structured data format
|
||||
- Human readable
|
||||
- Extensible
|
||||
|
||||
**Cons**:
|
||||
- More verbose
|
||||
- YAML parsing overhead
|
||||
|
||||
#### Option 4: Binary Protocol Markers (Recommended)
|
||||
```
|
||||
ok 1 - test description
|
||||
␛[TSTEST:eyJ0aW1lIjoxMjMsInJldHJ5IjowfQ==]␛
|
||||
not ok 2 - another test
|
||||
␛[TSTEST:eyJ0aW1lIjo0NSwiZXJyb3IiOiJ0aW1lb3V0In0=]␛
|
||||
```
|
||||
|
||||
Using ASCII escape character (␛ = \x1B) with base64 encoded JSON.
|
||||
|
||||
**Pros**:
|
||||
- Zero chance of accidental conflicts
|
||||
- Compact
|
||||
- Fast to parse
|
||||
- Invisible in most terminals
|
||||
|
||||
**Cons**:
|
||||
- Not human readable in raw form
|
||||
- Requires base64 encoding/decoding
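
For completeness, a minimal sketch of how Option 4 could be encoded and decoded (using Node's `Buffer` for brevity, so this particular sketch is not isomorphic):

```typescript
const ESC = '\x1B';

const encodeMarker = (meta: Record<string, unknown>): string =>
  `${ESC}[TSTEST:${Buffer.from(JSON.stringify(meta)).toString('base64')}]${ESC}`;

const decodeMarker = (line: string): Record<string, unknown> | null => {
  const match = line.match(/\x1B\[TSTEST:([A-Za-z0-9+\/=]+)\]\x1B/);
  return match ? JSON.parse(Buffer.from(match[1], 'base64').toString('utf8')) : null;
};

console.log(encodeMarker({ time: 123, retry: 0 }));
console.log(decodeMarker(`ok 1 - test description ${encodeMarker({ time: 45, error: 'timeout' })}`));
```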
|
||||
|
||||
### Recommended Implementation: Hybrid Approach
|
||||
|
||||
Use multiple strategies based on context:
|
||||
|
||||
1. **For timing and basic metadata**: Use structured delimiters
|
||||
```
|
||||
ok 1 - test name ⟦time:123,retry:0⟧
|
||||
```
|
||||
|
||||
2. **For complex data (errors, snapshots)**: Use separate protocol lines
|
||||
```
|
||||
ok 1 - test failed
|
||||
⟦TSTEST:ERROR⟧
|
||||
{"message":"Assertion failed","stack":"...","diff":"..."}
|
||||
⟦/TSTEST:ERROR⟧
|
||||
```
|
||||
|
||||
3. **For human-readable output**: Keep standard TAP comments
|
||||
```
|
||||
# Test suite: User Authentication
|
||||
ok 1 - should login
|
||||
```
|
||||
|
||||
### Implementation Plan
|
||||
|
||||
#### Phase 1: Parser Enhancement
|
||||
1. Add new protocol parser alongside existing TAP parser
|
||||
2. Support both old and new formats during transition
|
||||
3. Add protocol version negotiation
|
||||
|
||||
#### Phase 2: Metadata Structure
|
||||
```typescript
|
||||
interface TestMetadata {
|
||||
// Timing
|
||||
time: number; // milliseconds
|
||||
startTime?: number; // Unix timestamp
|
||||
endTime?: number; // Unix timestamp
|
||||
|
||||
// Status
|
||||
skip?: string; // skip reason
|
||||
todo?: string; // todo reason
|
||||
retry?: number; // retry attempt
|
||||
maxRetries?: number; // max retries allowed
|
||||
|
||||
// Error details
|
||||
error?: {
|
||||
message: string;
|
||||
stack?: string;
|
||||
diff?: string;
|
||||
actual?: any;
|
||||
expected?: any;
|
||||
};
|
||||
|
||||
// Test context
|
||||
file?: string; // source file
|
||||
line?: number; // line number
|
||||
column?: number; // column number
|
||||
|
||||
// Custom data
|
||||
tags?: string[]; // test tags
|
||||
custom?: Record<string, any>;
|
||||
}
|
||||
```
|
||||
|
||||
#### Phase 3: Protocol Messages
|
||||
|
||||
##### Success Message
|
||||
```
|
||||
ok 1 - user authentication works
|
||||
⟦TSTEST:META:{"time":123,"tags":["auth","unit"]}⟧
|
||||
```
|
||||
|
||||
##### Failure Message
|
||||
```
|
||||
not ok 2 - login fails with invalid password
|
||||
⟦TSTEST:META:{"time":45,"retry":1,"maxRetries":3}⟧
|
||||
⟦TSTEST:ERROR⟧
|
||||
{
|
||||
"message": "Expected 401 but got 500",
|
||||
"stack": "Error: Expected 401 but got 500\n at Test.run (auth.test.ts:25:10)",
|
||||
"actual": 500,
|
||||
"expected": 401
|
||||
}
|
||||
⟦/TSTEST:ERROR⟧
|
||||
```
|
||||
|
||||
##### Skip Message
|
||||
```
|
||||
ok 3 - database integration test ⟦TSTEST:SKIP:No database connection⟧
|
||||
```
|
||||
|
||||
##### Snapshot Communication
|
||||
```
|
||||
⟦TSTEST:SNAPSHOT:user-profile⟧
|
||||
{
|
||||
"name": "John Doe",
|
||||
"email": "john@example.com",
|
||||
"roles": ["user", "admin"]
|
||||
}
|
||||
⟦/TSTEST:SNAPSHOT⟧
|
||||
```
|
||||
|
||||
### Migration Strategy
|
||||
|
||||
1. **Version Detection**: First line indicates protocol version
|
||||
```
|
||||
⟦TSTEST:PROTOCOL:2.0⟧
|
||||
TAP version 13
|
||||
```
|
||||
|
||||
2. **Gradual Rollout**:
|
||||
- v1.10: Add protocol v2 parser, keep v1 generator
|
||||
- v1.11: Generate v2 by default, v1 with --legacy flag
|
||||
- v2.0: Remove v1 support
|
||||
|
||||
3. **Feature Flags**:
|
||||
```typescript
|
||||
tap.settings({
|
||||
protocol: 'v2', // or 'v1', 'auto'
|
||||
protocolFeatures: {
|
||||
structuredErrors: true,
|
||||
enhancedTiming: true,
|
||||
binaryMarkers: false
|
||||
}
|
||||
});
|
||||
```
|
||||
|
||||
### Benefits of New Protocol
|
||||
|
||||
1. **Reliability**: No more regex fragility or description conflicts
|
||||
2. **Performance**: Faster parsing with clear boundaries
|
||||
3. **Extensibility**: Easy to add new metadata fields
|
||||
4. **Debugging**: Rich error information with stack traces and diffs
|
||||
5. **Integration**: Better IDE and CI/CD tool integration
|
||||
6. **Forward Compatible**: Room for future enhancements
|
||||
|
||||
### Example Parser Implementation
|
||||
|
||||
```typescript
|
||||
class ProtocolV2Parser {
|
||||
private readonly MARKER_START = '⟦TSTEST:';
|
||||
private readonly MARKER_END = '⟧';
|
||||
|
||||
parseMetadata(line: string): TestMetadata | null {
|
||||
const start = line.lastIndexOf(this.MARKER_START);
|
||||
if (start === -1) return null;
|
||||
|
||||
const end = line.indexOf(this.MARKER_END, start);
|
||||
if (end === -1) return null;
|
||||
|
||||
const content = line.substring(start + this.MARKER_START.length, end);
|
||||
const [type, data] = content.split(':', 2);
|
||||
|
||||
switch (type) {
|
||||
case 'META':
|
||||
return JSON.parse(data);
|
||||
case 'SKIP':
|
||||
return { skip: data };
|
||||
case 'TODO':
|
||||
return { todo: data };
|
||||
default:
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
parseTestLine(line: string): ParsedTest {
|
||||
// First extract any metadata
|
||||
const metadata = this.parseMetadata(line);
|
||||
|
||||
// Then parse the TAP part (without metadata)
|
||||
const cleanLine = this.removeMetadata(line);
|
||||
const tapResult = this.parseTAP(cleanLine);
|
||||
|
||||
return { ...tapResult, metadata };
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Next Steps
|
||||
|
||||
1. Implement proof of concept with basic metadata support
|
||||
2. Test with real-world test suites for edge cases
|
||||
3. Benchmark parsing performance
|
||||
4. Get feedback from users
|
||||
5. Finalize protocol specification
|
||||
6. Implement in both tapbundle and tstest
|
41
test/config-test/00init.ts
Normal file
@ -0,0 +1,41 @@
|
||||
import { tap } from '../../ts_tapbundle/index.js';
|
||||
|
||||
// TAP-compliant comment output
|
||||
console.log('# 🚀 00init.ts: LOADED AND EXECUTING');
|
||||
console.log('# 🚀 00init.ts: Setting up global test configuration');
|
||||
|
||||
// Add a global variable to verify 00init.ts was loaded
|
||||
(global as any).__00INIT_LOADED = true;
|
||||
|
||||
// Configure global test settings
|
||||
tap.settings({
|
||||
// Set a default timeout of 5 seconds for all tests
|
||||
timeout: 5000,
|
||||
|
||||
// Enable retries for flaky tests
|
||||
retries: 2,
|
||||
retryDelay: 1000,
|
||||
|
||||
// Show test duration
|
||||
showTestDuration: true,
|
||||
|
||||
// Global lifecycle hooks
|
||||
beforeAll: async () => {
|
||||
console.log('Global beforeAll: Initializing test environment');
|
||||
},
|
||||
|
||||
afterAll: async () => {
|
||||
console.log('Global afterAll: Cleaning up test environment');
|
||||
},
|
||||
|
||||
beforeEach: async (testName: string) => {
|
||||
console.log(`Global beforeEach: Starting test "${testName}"`);
|
||||
},
|
||||
|
||||
afterEach: async (testName: string, passed: boolean) => {
|
||||
console.log(`Global afterEach: Test "${testName}" ${passed ? 'passed' : 'failed'}`);
|
||||
}
|
||||
});
|
||||
|
||||
console.log('# 🚀 00init.ts: Configuration COMPLETE');
|
||||
console.log('# 🚀 00init.ts: tap.settings() called successfully');
|
44
test/config-test/test.config.ts
Normal file
@ -0,0 +1,44 @@
|
||||
import { tap, expect } from '../../ts_tapbundle/index.js';
|
||||
|
||||
// TAP-compliant comment output
|
||||
console.log('# 🔍 TEST FILE LOADED - test.config.ts');
|
||||
|
||||
// Check if 00init.ts was loaded
|
||||
const initLoaded = (global as any).__00INIT_LOADED;
|
||||
console.log(`# 🔍 00init.ts loaded: ${initLoaded === true}`);
|
||||
|
||||
// Test that uses the global timeout setting
|
||||
tap.test('Test with global timeout', async (toolsArg) => {
|
||||
// This test should complete within the 5 second timeout set in 00init.ts
|
||||
await toolsArg.delayFor(2000); // 2 seconds
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
// Test that demonstrates retries
|
||||
tap.test('Test with retries', async () => {
|
||||
// This test will use the global retry setting (2 retries)
|
||||
console.log('Running test that might be flaky');
|
||||
|
||||
// Simulate a flaky test that passes on second try
|
||||
const randomValue = Math.random();
|
||||
console.log(`Random value: ${randomValue}`);
|
||||
|
||||
// Always pass for demonstration
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
// Test with custom timeout that overrides global
|
||||
tap.timeout(1000).test('Test with custom timeout', async (toolsArg) => {
|
||||
// This test has a 1 second timeout, overriding the global 5 seconds
|
||||
await toolsArg.delayFor(500); // 500ms - should pass
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
// Test to verify lifecycle hooks are working
|
||||
tap.test('Test lifecycle hooks', async () => {
|
||||
console.log('Inside test: lifecycle hooks should have run');
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
// Start the test suite
|
||||
tap.start();
|
22
test/config-test/test.file-settings.ts
Normal file
@ -0,0 +1,22 @@
|
||||
import { tap, expect } from '../../ts_tapbundle/index.js';
|
||||
|
||||
// Override global settings for this file
|
||||
tap.settings({
|
||||
timeout: 2000, // Override global timeout to 2 seconds
|
||||
retries: 0, // Disable retries for this file
|
||||
});
|
||||
|
||||
tap.test('Test with file-level timeout', async (toolsArg) => {
|
||||
// This should use the file-level timeout of 2 seconds
|
||||
console.log('Running with file-level timeout of 2 seconds');
|
||||
await toolsArg.delayFor(1000); // 1 second - should pass
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.test('Test without retries', async () => {
|
||||
// This test should not retry even if it fails
|
||||
console.log('This test has no retries (file-level setting)');
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.start();
|
3
test/debug.js
Normal file
@ -0,0 +1,3 @@
|
||||
// Direct run to see TAP output
|
||||
const { execSync } = require('child_process');
|
||||
console.log(execSync('tsx test/tapbundle/test.debug.ts', { cwd: '/mnt/data/lossless/git.zone/tstest' }).toString());
|
8
test/glob-test/another.spec.ts
Normal file
@ -0,0 +1,8 @@
|
||||
import { tap } from '../../ts_tapbundle/index.js';
|
||||
|
||||
tap.test('spec file test', async () => {
|
||||
console.log('This is a .spec.ts file that should be found by glob');
|
||||
return true;
|
||||
});
|
||||
|
||||
tap.start();
|
8
test/glob-test/nested/test.nested-glob.ts
Normal file
@ -0,0 +1,8 @@
|
||||
import { tap } from '../../../ts_tapbundle/index.js';
|
||||
|
||||
tap.test('nested glob pattern test', async () => {
|
||||
console.log('This test file is in a nested directory');
|
||||
return true;
|
||||
});
|
||||
|
||||
tap.start();
|
8
test/glob-test/test.glob-test.ts
Normal file
@ -0,0 +1,8 @@
|
||||
import { tap } from '../../ts_tapbundle/index.js';
|
||||
|
||||
tap.test('glob pattern test', async () => {
|
||||
console.log('This test file should be found by glob patterns');
|
||||
return true;
|
||||
});
|
||||
|
||||
tap.start();
|
55
test/tapbundle/test.browser.nonci.ts
Normal file
@ -0,0 +1,55 @@
|
||||
import { tap, expect, webhelpers } from '../../ts_tapbundle/index.js';
|
||||
|
||||
tap.preTask('custompretask', async () => {
|
||||
console.log('this is a pretask');
|
||||
});
|
||||
|
||||
tap.test('should have access to webhelpers', async () => {
|
||||
const myElement = await webhelpers.fixture(webhelpers.html`<div></div>`);
|
||||
expect(myElement).toBeInstanceOf(HTMLElement);
|
||||
console.log(myElement);
|
||||
});
|
||||
|
||||
const test1 = tap.test('my first test -> expect true to be true', async () => {
|
||||
return expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
const test2 = tap.test('my second test', async (tools) => {
|
||||
await tools.delayFor(50);
|
||||
});
|
||||
|
||||
const test3 = tap.test(
|
||||
'my third test -> test2 should take longer than test1 and endure at least 10ms',
|
||||
async () => {
|
||||
expect(
|
||||
(await test1.testPromise).hrtMeasurement.milliSeconds <
|
||||
(await test2.testPromise).hrtMeasurement.milliSeconds,
|
||||
).toBeTrue();
|
||||
expect((await test2.testPromise).hrtMeasurement.milliSeconds > 10).toBeTrue();
|
||||
},
|
||||
);
|
||||
|
||||
const test4 = tap.skip.test('my 4th test -> should fail', async (tools) => {
|
||||
tools.allowFailure();
|
||||
expect(false).toBeTrue();
|
||||
});
|
||||
|
||||
const test5 = tap.test('my 5th test -> should pass in about 500ms', async (tools) => {
|
||||
tools.timeout(1000);
|
||||
await tools.delayFor(500);
|
||||
});
|
||||
|
||||
const test6 = tap.skip.test('my 6th test -> should fail after 1000ms', async (tools) => {
|
||||
tools.allowFailure();
|
||||
tools.timeout(1000);
|
||||
await tools.delayFor(100);
|
||||
});
|
||||
|
||||
const testPromise = tap.start();
|
||||
|
||||
// Export promise for browser compatibility
|
||||
if (typeof globalThis !== 'undefined') {
|
||||
(globalThis as any).tapPromise = testPromise;
|
||||
}
|
||||
|
||||
export default testPromise;
|
19
test/tapbundle/test.debug.ts
Normal file
@ -0,0 +1,19 @@
|
||||
import { tap, expect } from '../../ts_tapbundle/index.js';
|
||||
|
||||
// Simple test to debug TAP output
|
||||
tap.test('test 1', async () => {
|
||||
console.log('Test 1 running');
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.test('test 2 - skip', async (toolsArg) => {
|
||||
toolsArg.skip('Skipping test 2');
|
||||
expect(false).toBeTrue();
|
||||
});
|
||||
|
||||
tap.test('test 3', async () => {
|
||||
console.log('Test 3 running');
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.start();
|
101
test/tapbundle/test.describe.ts
Normal file
@ -0,0 +1,101 @@
|
||||
import { tap, expect } from '../../ts_tapbundle/index.js';
|
||||
|
||||
// Global state for testing lifecycle hooks
|
||||
const lifecycleOrder: string[] = [];
|
||||
|
||||
tap.describe('Test Suite A', () => {
|
||||
tap.beforeEach(async (toolsArg) => {
|
||||
lifecycleOrder.push('Suite A - beforeEach');
|
||||
});
|
||||
|
||||
tap.afterEach(async (toolsArg) => {
|
||||
lifecycleOrder.push('Suite A - afterEach');
|
||||
});
|
||||
|
||||
tap.test('test 1 in suite A', async (toolsArg) => {
|
||||
lifecycleOrder.push('Test 1');
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.test('test 2 in suite A', async (toolsArg) => {
|
||||
lifecycleOrder.push('Test 2');
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.describe('Nested Suite B', () => {
|
||||
tap.beforeEach(async (toolsArg) => {
|
||||
lifecycleOrder.push('Suite B - beforeEach');
|
||||
});
|
||||
|
||||
tap.afterEach(async (toolsArg) => {
|
||||
lifecycleOrder.push('Suite B - afterEach');
|
||||
});
|
||||
|
||||
tap.test('test 1 in nested suite B', async (toolsArg) => {
|
||||
lifecycleOrder.push('Nested Test 1');
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
// Test outside any suite
|
||||
tap.test('test outside suites', async (toolsArg) => {
|
||||
lifecycleOrder.push('Outside Test');
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.describe('Test Suite with errors', () => {
|
||||
tap.beforeEach(async (toolsArg) => {
|
||||
// Setup that might fail
|
||||
const data = await Promise.resolve({ value: 42 });
|
||||
toolsArg.testData = data;
|
||||
});
|
||||
|
||||
tap.test('test with error', async (toolsArg) => {
|
||||
// Verify that data from beforeEach is available
|
||||
expect(toolsArg.testData).toBeDefined();
|
||||
expect(toolsArg.testData.value).toEqual(42);
|
||||
|
||||
// Test that error handling works by catching an error
|
||||
try {
|
||||
throw new Error('Intentional error');
|
||||
} catch (error) {
|
||||
expect(error.message).toEqual('Intentional error');
|
||||
}
|
||||
});
|
||||
|
||||
tap.test('test with skip in suite', async (toolsArg) => {
|
||||
toolsArg.skip('Skipping this test in a suite');
|
||||
expect(false).toBeTrue();
|
||||
});
|
||||
});
|
||||
|
||||
// Verify lifecycle order - this test runs last to check if all hooks were called properly
|
||||
tap.test('verify lifecycle hook order', async (toolsArg) => {
|
||||
// Wait a bit to ensure all tests have completed
|
||||
await new Promise(resolve => setTimeout(resolve, 50));
|
||||
console.log('Lifecycle order:', lifecycleOrder);
|
||||
|
||||
// Check that the tests we expect to have run actually did
|
||||
expect(lifecycleOrder).toContain('Test 1');
|
||||
expect(lifecycleOrder).toContain('Test 2');
|
||||
expect(lifecycleOrder).toContain('Nested Test 1');
|
||||
|
||||
// Check that beforeEach was called before each test in Suite A
|
||||
const test1Index = lifecycleOrder.indexOf('Test 1');
|
||||
expect(test1Index).toBeGreaterThan(-1);
|
||||
const beforeTest1 = lifecycleOrder.slice(0, test1Index);
|
||||
expect(beforeTest1).toContain('Suite A - beforeEach');
|
||||
|
||||
// Check that afterEach was called after test 1
|
||||
const afterTest1 = lifecycleOrder.slice(test1Index + 1);
|
||||
expect(afterTest1).toContain('Suite A - afterEach');
|
||||
|
||||
// Check nested suite lifecycle
|
||||
const nestedTest1Index = lifecycleOrder.indexOf('Nested Test 1');
|
||||
expect(nestedTest1Index).toBeGreaterThan(-1);
|
||||
const beforeNestedTest1 = lifecycleOrder.slice(0, nestedTest1Index);
|
||||
expect(beforeNestedTest1).toContain('Suite B - beforeEach');
|
||||
});
|
||||
|
||||
tap.start();
|
120
test/tapbundle/test.fixtures.ts
Normal file
@ -0,0 +1,120 @@
|
||||
import { tap, TapTools } from '../../ts_tapbundle/index.js';
|
||||
import { expect } from '@push.rocks/smartexpect';
|
||||
|
||||
// Define fixture factories
|
||||
interface User {
|
||||
id: number;
|
||||
name: string;
|
||||
email: string;
|
||||
role: string;
|
||||
}
|
||||
|
||||
interface Post {
|
||||
id: number;
|
||||
title: string;
|
||||
content: string;
|
||||
authorId: number;
|
||||
tags: string[];
|
||||
}
|
||||
|
||||
// Define user fixture factory
|
||||
TapTools.defineFixture<User>('user', (data) => {
|
||||
const id = data?.id || Math.floor(Math.random() * 10000);
|
||||
return {
|
||||
id,
|
||||
name: data?.name || `Test User ${id}`,
|
||||
email: data?.email || `user${id}@test.com`,
|
||||
role: data?.role || 'user'
|
||||
};
|
||||
});
|
||||
|
||||
// Define post fixture factory
|
||||
TapTools.defineFixture<Post>('post', async (data) => {
|
||||
const id = data?.id || Math.floor(Math.random() * 10000);
|
||||
return {
|
||||
id,
|
||||
title: data?.title || `Post ${id}`,
|
||||
content: data?.content || `Content for post ${id}`,
|
||||
authorId: data?.authorId || 1,
|
||||
tags: data?.tags || ['test', 'sample']
|
||||
};
|
||||
});
|
||||
|
||||
tap.describe('Fixture System', () => {
|
||||
tap.afterEach(async () => {
|
||||
// Clean up fixtures after each test
|
||||
await TapTools.cleanupFixtures();
|
||||
});
|
||||
|
||||
tap.tags('unit', 'fixtures')
|
||||
.test('should create a simple fixture', async (toolsArg) => {
|
||||
const user = await toolsArg.fixture<User>('user');
|
||||
|
||||
expect(user).toHaveProperty('id');
|
||||
expect(user).toHaveProperty('name');
|
||||
expect(user).toHaveProperty('email');
|
||||
expect(user.role).toEqual('user');
|
||||
});
|
||||
|
||||
tap.tags('unit', 'fixtures')
|
||||
.test('should create fixture with custom data', async (toolsArg) => {
|
||||
const admin = await toolsArg.fixture<User>('user', {
|
||||
name: 'Admin User',
|
||||
role: 'admin'
|
||||
});
|
||||
|
||||
expect(admin.name).toEqual('Admin User');
|
||||
expect(admin.role).toEqual('admin');
|
||||
expect(admin.email).toContain('@test.com');
|
||||
});
|
||||
|
||||
tap.tags('unit', 'fixtures')
|
||||
.test('should create multiple fixtures with factory', async (toolsArg) => {
|
||||
const userFactory = toolsArg.factory<User>('user');
|
||||
const users = await userFactory.createMany(3);
|
||||
|
||||
// Try different approach
|
||||
expect(users.length).toEqual(3);
|
||||
expect(users[0].id).not.toEqual(users[1].id);
|
||||
expect(users[0].email).not.toEqual(users[1].email);
|
||||
});
|
||||
|
||||
tap.tags('unit', 'fixtures')
|
||||
.test('should create fixtures with custom data per instance', async (toolsArg) => {
|
||||
const postFactory = toolsArg.factory<Post>('post');
|
||||
const posts = await postFactory.createMany(3, (index) => ({
|
||||
title: `Post ${index + 1}`,
|
||||
tags: [`tag${index + 1}`]
|
||||
}));
|
||||
|
||||
expect(posts[0].title).toEqual('Post 1');
|
||||
expect(posts[1].title).toEqual('Post 2');
|
||||
expect(posts[2].title).toEqual('Post 3');
|
||||
|
||||
expect(posts[0].tags).toContain('tag1');
|
||||
expect(posts[1].tags).toContain('tag2');
|
||||
});
|
||||
|
||||
tap.tags('unit', 'fixtures')
|
||||
.test('should handle related fixtures', async (toolsArg) => {
|
||||
const user = await toolsArg.fixture<User>('user', { name: 'Author' });
|
||||
const post = await toolsArg.fixture<Post>('post', {
|
||||
title: 'My Article',
|
||||
authorId: user.id
|
||||
});
|
||||
|
||||
expect(post.authorId).toEqual(user.id);
|
||||
});
|
||||
|
||||
tap.tags('unit', 'fixtures', 'error')
|
||||
.test('should throw error for undefined fixture', async (toolsArg) => {
|
||||
try {
|
||||
await toolsArg.fixture('nonexistent');
|
||||
expect(true).toBeFalse(); // Should not reach here
|
||||
} catch (error: any) {
|
||||
expect(error.message).toContain('Fixture \'nonexistent\' not found');
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
tap.start();
|
32
test/tapbundle/test.fluent-syntax.ts
Normal file
@ -0,0 +1,32 @@
|
||||
import { tap, expect } from '../../ts_tapbundle/index.js';
|
||||
|
||||
// Test with fluent syntax
|
||||
tap.tags('unit', 'fluent')
|
||||
.priority('high')
|
||||
.test('test with fluent syntax', async (toolsArg) => {
|
||||
expect(true).toBeTrue();
|
||||
toolsArg.context.set('fluentTest', 'works');
|
||||
});
|
||||
|
||||
// Chain multiple settings
|
||||
tap.tags('integration')
|
||||
.priority('low')
|
||||
.retry(3)
|
||||
.timeout(5000)
|
||||
.test('test with multiple settings', async (toolsArg) => {
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
// Test context access from fluent test
|
||||
tap.tags('unit')
|
||||
.test('verify fluent context', async (toolsArg) => {
|
||||
const fluentValue = toolsArg.context.get('fluentTest');
|
||||
expect(fluentValue).toEqual('works');
|
||||
});
|
||||
|
||||
// Test without tags - should show all tests run without filtering
|
||||
tap.test('regular test without tags', async (toolsArg) => {
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.start();
|
28
test/tapbundle/test.node.ts
Normal file
@ -0,0 +1,28 @@
|
||||
import { tap, expect } from '../../ts_tapbundle/index.js';
|
||||
|
||||
import { tapNodeTools } from '../../ts_tapbundle_node/index.js';
|
||||
|
||||
tap.test('should execute a command', async () => {
|
||||
const result = await tapNodeTools.runCommand('ls -la');
|
||||
expect(result.exitCode).toEqual(0);
|
||||
});
|
||||
|
||||
tap.test('should create a https cert', async () => {
|
||||
const { key, cert } = await tapNodeTools.createHttpsCert('localhost');
|
||||
console.log(key);
|
||||
console.log(cert);
|
||||
expect(key).toInclude('-----BEGIN RSA PRIVATE KEY-----');
|
||||
expect(cert).toInclude('-----BEGIN CERTIFICATE-----');
|
||||
});
|
||||
|
||||
tap.test('should create a smartmongo instance', async () => {
|
||||
const smartmongo = await tapNodeTools.createSmartmongo();
|
||||
await smartmongo.stop();
|
||||
});
|
||||
|
||||
tap.test('should create a smarts3 instance', async () => {
|
||||
const smarts3 = await tapNodeTools.createSmarts3();
|
||||
await smarts3.stop();
|
||||
});
|
||||
|
||||
tap.start();
|
167
test/tapbundle/test.performance-metrics.ts
Normal file
@ -0,0 +1,167 @@
|
||||
import { tap, expect } from '../../ts_tapbundle/index.js';
|
||||
|
||||
// Create tests with known, distinct timing patterns to verify metrics calculation
|
||||
tap.test('metric test 1 - 10ms baseline', async (tools) => {
|
||||
await tools.delayFor(10);
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.test('metric test 2 - 20ms double baseline', async (tools) => {
|
||||
await tools.delayFor(20);
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.test('metric test 3 - 30ms triple baseline', async (tools) => {
|
||||
await tools.delayFor(30);
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.test('metric test 4 - 40ms quadruple baseline', async (tools) => {
|
||||
await tools.delayFor(40);
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.test('metric test 5 - 50ms quintuple baseline', async (tools) => {
|
||||
await tools.delayFor(50);
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
// Test that should be the slowest
|
||||
tap.test('metric test slowest - 200ms intentionally slow', async (tools) => {
|
||||
await tools.delayFor(200);
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
// Tests to verify edge cases in average calculation
|
||||
tap.test('metric test fast 1 - minimal work', async () => {
|
||||
expect(1).toEqual(1);
|
||||
});
|
||||
|
||||
tap.test('metric test fast 2 - minimal work', async () => {
|
||||
expect(2).toEqual(2);
|
||||
});
|
||||
|
||||
tap.test('metric test fast 3 - minimal work', async () => {
|
||||
expect(3).toEqual(3);
|
||||
});
|
||||
|
||||
// Test to verify that failed tests still contribute to timing metrics
|
||||
tap.test('metric test that fails - 60ms before failure', async (tools) => {
|
||||
await tools.delayFor(60);
|
||||
expect(true).toBeFalse(); // This will fail
|
||||
});
|
||||
|
||||
// Describe block with timing to test aggregation
|
||||
tap.describe('performance metrics in describe block', () => {
|
||||
tap.test('described test 1 - 15ms', async (tools) => {
|
||||
await tools.delayFor(15);
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.test('described test 2 - 25ms', async (tools) => {
|
||||
await tools.delayFor(25);
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.test('described test 3 - 35ms', async (tools) => {
|
||||
await tools.delayFor(35);
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
});
|
||||
|
||||
// Test timing with hooks
|
||||
tap.describe('performance with hooks', () => {
|
||||
let hookTime = 0;
|
||||
|
||||
tap.beforeEach(async () => {
|
||||
// Hooks shouldn't count toward test time
|
||||
await new Promise(resolve => setTimeout(resolve, 10));
|
||||
hookTime += 10;
|
||||
});
|
||||
|
||||
tap.afterEach(async () => {
|
||||
// Hooks shouldn't count toward test time
|
||||
await new Promise(resolve => setTimeout(resolve, 10));
|
||||
hookTime += 10;
|
||||
});
|
||||
|
||||
tap.test('test with hooks 1 - should only count test time', async (tools) => {
|
||||
await tools.delayFor(30);
|
||||
expect(true).toBeTrue();
|
||||
// Test time should be ~30ms, not 50ms (including hooks)
|
||||
});
|
||||
|
||||
tap.test('test with hooks 2 - should only count test time', async (tools) => {
|
||||
await tools.delayFor(40);
|
||||
expect(true).toBeTrue();
|
||||
// Test time should be ~40ms, not 60ms (including hooks)
|
||||
});
|
||||
});
|
||||
|
||||
// Parallel tests to verify timing is captured correctly
|
||||
tap.describe('parallel timing verification', () => {
|
||||
const startTimes: Map<string, number> = new Map();
|
||||
const endTimes: Map<string, number> = new Map();
|
||||
|
||||
tap.testParallel('parallel metric 1 - 80ms', async (tools) => {
|
||||
startTimes.set('p1', Date.now());
|
||||
await tools.delayFor(80);
|
||||
endTimes.set('p1', Date.now());
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.testParallel('parallel metric 2 - 90ms', async (tools) => {
|
||||
startTimes.set('p2', Date.now());
|
||||
await tools.delayFor(90);
|
||||
endTimes.set('p2', Date.now());
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.testParallel('parallel metric 3 - 100ms', async (tools) => {
|
||||
startTimes.set('p3', Date.now());
|
||||
await tools.delayFor(100);
|
||||
endTimes.set('p3', Date.now());
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.test('verify parallel execution', async () => {
|
||||
// This test runs after parallel tests
|
||||
// Verify they actually ran in parallel by checking overlapping times
|
||||
if (startTimes.size === 3 && endTimes.size === 3) {
|
||||
const p1Start = startTimes.get('p1')!;
|
||||
const p2Start = startTimes.get('p2')!;
|
||||
const p3Start = startTimes.get('p3')!;
|
||||
const p1End = endTimes.get('p1')!;
|
||||
const p2End = endTimes.get('p2')!;
|
||||
const p3End = endTimes.get('p3')!;
|
||||
|
||||
// Start times should be very close (within 50ms)
|
||||
expect(Math.abs(p1Start - p2Start)).toBeLessThan(50);
|
||||
expect(Math.abs(p2Start - p3Start)).toBeLessThan(50);
|
||||
|
||||
// There should be overlap in execution
|
||||
const p1Overlaps = p1Start < p2End && p1End > p2Start;
|
||||
const p2Overlaps = p2Start < p3End && p2End > p3Start;
|
||||
|
||||
expect(p1Overlaps || p2Overlaps).toBeTrue();
|
||||
} else {
|
||||
// Skip verification if parallel tests didn't run yet
|
||||
expect(true).toBeTrue();
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
// Test to ensure average calculation handles mixed timing correctly
|
||||
tap.test('final metrics test - 5ms minimal', async (tools) => {
|
||||
await tools.delayFor(5);
|
||||
expect(true).toBeTrue();
|
||||
|
||||
console.log('\n📊 Expected Performance Metrics Summary:');
|
||||
console.log('- Tests include a mix of durations from <1ms to 200ms');
|
||||
console.log('- Slowest test should be "metric test slowest" at ~200ms');
|
||||
console.log('- Average should be calculated from individual test times');
|
||||
console.log('- Failed test should still contribute its 60ms to timing');
|
||||
console.log('- Parallel tests should show their individual times (80ms, 90ms, 100ms)');
|
||||
});
|
||||
|
||||
tap.start();
|
52
test/tapbundle/test.snapshot.ts
Normal file
@ -0,0 +1,52 @@
|
||||
import { tap, expect } from '../../ts_tapbundle/index.js';
|
||||
|
||||
// Test basic snapshot functionality
|
||||
tap.tags('unit', 'snapshot')
|
||||
.test('should match string snapshot', async (toolsArg) => {
|
||||
const testString = 'Hello, World!';
|
||||
await toolsArg.matchSnapshot(testString);
|
||||
});
|
||||
|
||||
// Test object snapshot
|
||||
tap.tags('unit', 'snapshot')
|
||||
.test('should match object snapshot', async (toolsArg) => {
|
||||
const testObject = {
|
||||
name: 'Test User',
|
||||
age: 30,
|
||||
hobbies: ['reading', 'coding', 'gaming'],
|
||||
metadata: {
|
||||
created: '2024-01-01',
|
||||
updated: '2024-01-15'
|
||||
}
|
||||
};
|
||||
await toolsArg.matchSnapshot(testObject);
|
||||
});
|
||||
|
||||
// Test named snapshots
|
||||
tap.tags('unit', 'snapshot')
|
||||
.test('should handle multiple named snapshots', async (toolsArg) => {
|
||||
const config1 = { version: '1.0.0', features: ['a', 'b'] };
|
||||
const config2 = { version: '2.0.0', features: ['a', 'b', 'c'] };
|
||||
|
||||
await toolsArg.matchSnapshot(config1, 'config_v1');
|
||||
await toolsArg.matchSnapshot(config2, 'config_v2');
|
||||
});
|
||||
|
||||
// Test dynamic content with snapshot
|
||||
tap.tags('unit', 'snapshot')
|
||||
.test('should handle template snapshot', async (toolsArg) => {
|
||||
const template = `
|
||||
<div class="container">
|
||||
<h1>Welcome</h1>
|
||||
<p>This is a test template</p>
|
||||
<ul>
|
||||
<li>Item 1</li>
|
||||
<li>Item 2</li>
|
||||
</ul>
|
||||
</div>
|
||||
`.trim();
|
||||
|
||||
await toolsArg.matchSnapshot(template, 'html_template');
|
||||
});
|
||||
|
||||
tap.start();
|
49
test/tapbundle/test.tags-context.ts
Normal file
@ -0,0 +1,49 @@
|
||||
import { tap, expect } from '../../ts_tapbundle/index.js';
|
||||
|
||||
// First test sets some data and has tags
|
||||
tap.tags('unit', 'context')
|
||||
.priority('high')
|
||||
.test('test with tags and context setting', async (toolsArg) => {
|
||||
// Set some data in context
|
||||
toolsArg.context.set('testData', { value: 42 });
|
||||
toolsArg.context.set('users', ['alice', 'bob']);
|
||||
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
// Second test reads the context data
|
||||
tap.tags('unit', 'context')
|
||||
.test('test reading context', async (toolsArg) => {
|
||||
// Read data from context
|
||||
const testData = toolsArg.context.get('testData');
|
||||
const users = toolsArg.context.get('users');
|
||||
|
||||
expect(testData).toEqual({ value: 42 });
|
||||
expect(users).toContain('alice');
|
||||
expect(users).toContain('bob');
|
||||
});
|
||||
|
||||
// Test without tags - should be skipped when filtering by tags
|
||||
tap.test('test without tags', async (toolsArg) => {
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
// Test with different tags
|
||||
tap.tags('integration')
|
||||
.priority('low')
|
||||
.test('integration test', async (toolsArg) => {
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
// Test context cleanup
|
||||
tap.tags('unit')
|
||||
.test('test context operations', async (toolsArg) => {
|
||||
// Set and delete
|
||||
toolsArg.context.set('temp', 'value');
|
||||
expect(toolsArg.context.get('temp')).toEqual('value');
|
||||
|
||||
toolsArg.context.delete('temp');
|
||||
expect(toolsArg.context.get('temp')).toBeUndefined();
|
||||
});
|
||||
|
||||
tap.start();
|
5
test/tapbundle/test.tapwrap.ts
Normal file
@ -0,0 +1,5 @@
|
||||
import { tap, expect, TapWrap } from '../../ts_tapbundle/index.js';
|
||||
|
||||
tap.test('should run a test', async () => {});
|
||||
|
||||
tap.start();
|
214
test/tapbundle/test.timing-edge-cases.ts
Normal file
@ -0,0 +1,214 @@
|
||||
import { tap, expect } from '../../ts_tapbundle/index.js';
|
||||
|
||||
tap.test('ultra-fast test - should capture sub-millisecond timing', async () => {
|
||||
// This test does almost nothing, should complete in < 1ms
|
||||
const x = 1 + 1;
|
||||
expect(x).toEqual(2);
|
||||
});
|
||||
|
||||
tap.test('test with exact 1ms delay', async (tools) => {
|
||||
const start = Date.now();
|
||||
await tools.delayFor(1);
|
||||
const elapsed = Date.now() - start;
|
||||
// Should be at least 1ms but could be more due to event loop
|
||||
expect(elapsed).toBeGreaterThanOrEqual(1);
|
||||
});
|
||||
|
||||
tap.test('test with 10ms delay', async (tools) => {
|
||||
await tools.delayFor(10);
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.test('test with 100ms delay', async (tools) => {
|
||||
await tools.delayFor(100);
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.test('test with 250ms delay', async (tools) => {
|
||||
await tools.delayFor(250);
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.test('test with 500ms delay', async (tools) => {
|
||||
await tools.delayFor(500);
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.test('test with variable processing time', async (tools) => {
|
||||
// Simulate variable processing
|
||||
const iterations = 1000000;
|
||||
let sum = 0;
|
||||
for (let i = 0; i < iterations; i++) {
|
||||
sum += Math.sqrt(i);
|
||||
}
|
||||
expect(sum).toBeGreaterThan(0);
|
||||
|
||||
// Add a small delay to ensure measurable time
|
||||
await tools.delayFor(5);
|
||||
});
|
||||
|
||||
tap.test('test with multiple async operations', async () => {
|
||||
// Multiple promises in parallel
|
||||
const results = await Promise.all([
|
||||
new Promise(resolve => setTimeout(() => resolve(1), 10)),
|
||||
new Promise(resolve => setTimeout(() => resolve(2), 20)),
|
||||
new Promise(resolve => setTimeout(() => resolve(3), 30))
|
||||
]);
|
||||
|
||||
expect(results).toEqual([1, 2, 3]);
|
||||
// This should take at least 30ms (the longest delay)
|
||||
});
|
||||
|
||||
tap.test('test with synchronous heavy computation', async () => {
|
||||
// Heavy synchronous computation
|
||||
const fibonacci = (n: number): number => {
|
||||
if (n <= 1) return n;
|
||||
return fibonacci(n - 1) + fibonacci(n - 2);
|
||||
};
|
||||
|
||||
// Calculate fibonacci(30) - should take measurable time
|
||||
const result = fibonacci(30);
|
||||
expect(result).toEqual(832040);
|
||||
});
|
||||
|
||||
// Test with retry to see if timing accumulates correctly
|
||||
tap.retry(2).test('test with retry - fails first then passes', async (tools) => {
|
||||
// Get or initialize retry count
|
||||
const retryCount = tools.context.get('retryCount') || 0;
|
||||
tools.context.set('retryCount', retryCount + 1);
|
||||
|
||||
await tools.delayFor(50);
|
||||
|
||||
if (retryCount === 0) {
|
||||
throw new Error('First attempt fails');
|
||||
}
|
||||
|
||||
expect(retryCount).toEqual(1);
|
||||
});
|
||||
|
||||
// Test timeout handling
|
||||
tap.timeout(100).test('test with timeout - should complete just in time', async (tools) => {
|
||||
await tools.delayFor(80); // Just under the timeout
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
// Skip test - should show 0ms
|
||||
tap.skip.test('skipped test - should report 0ms', async (tools) => {
|
||||
await tools.delayFor(1000); // This won't execute
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
// Todo test - should show 0ms
|
||||
tap.todo.test('todo test - should report 0ms', async (tools) => {
|
||||
await tools.delayFor(1000); // This won't execute
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
// Test with skip inside
|
||||
tap.test('test that skips conditionally - should show time until skip', async (tools) => {
|
||||
await tools.delayFor(25);
|
||||
|
||||
const shouldSkip = true;
|
||||
if (shouldSkip) {
|
||||
tools.skip('Skipping after 25ms');
|
||||
}
|
||||
|
||||
// This won't execute
|
||||
await tools.delayFor(1000);
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
// Test with very precise timing
|
||||
tap.test('test with precise timing measurements', async (tools) => {
|
||||
const measurements: number[] = [];
|
||||
|
||||
for (let i = 0; i < 5; i++) {
|
||||
const start = process.hrtime.bigint();
|
||||
await tools.delayFor(10);
|
||||
const end = process.hrtime.bigint();
|
||||
const durationMs = Number(end - start) / 1_000_000;
|
||||
measurements.push(durationMs);
|
||||
}
|
||||
|
||||
// All measurements should be at least 10ms
|
||||
measurements.forEach(m => {
|
||||
expect(m).toBeGreaterThanOrEqual(10);
|
||||
});
|
||||
|
||||
// But not too much more (accounting for timer precision)
|
||||
measurements.forEach(m => {
|
||||
expect(m).toBeLessThan(20);
|
||||
});
|
||||
});
|
||||
|
||||
// Test that intentionally has 0 actual work
|
||||
tap.test('empty test - absolute minimum execution time', async () => {
|
||||
// Literally nothing
|
||||
});
|
||||
|
||||
// Test with promise that resolves immediately
|
||||
tap.test('test with immediate promise resolution', async () => {
|
||||
await Promise.resolve();
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
// Test with microtask queue
|
||||
tap.test('test with microtask queue processing', async () => {
|
||||
let value = 0;
|
||||
|
||||
await Promise.resolve().then(() => {
|
||||
value = 1;
|
||||
return Promise.resolve();
|
||||
}).then(() => {
|
||||
value = 2;
|
||||
return Promise.resolve();
|
||||
}).then(() => {
|
||||
value = 3;
|
||||
});
|
||||
|
||||
expect(value).toEqual(3);
|
||||
});
|
||||
|
||||
// Test to verify timing accumulation in describe blocks
|
||||
tap.describe('timing in describe blocks', () => {
|
||||
let startTime: number;
|
||||
|
||||
tap.beforeEach(async () => {
|
||||
startTime = Date.now();
|
||||
await new Promise(resolve => setTimeout(resolve, 5));
|
||||
});
|
||||
|
||||
tap.afterEach(async () => {
|
||||
await new Promise(resolve => setTimeout(resolve, 5));
|
||||
});
|
||||
|
||||
tap.test('first test in describe', async (tools) => {
|
||||
await tools.delayFor(10);
|
||||
const elapsed = Date.now() - startTime;
|
||||
expect(elapsed).toBeGreaterThanOrEqual(10);
|
||||
});
|
||||
|
||||
tap.test('second test in describe', async (tools) => {
|
||||
await tools.delayFor(20);
|
||||
const elapsed = Date.now() - startTime;
|
||||
expect(elapsed).toBeGreaterThanOrEqual(20);
|
||||
});
|
||||
});
|
||||
|
||||
// Parallel tests to see timing differences
|
||||
tap.testParallel('parallel test 1 - 100ms', async (tools) => {
|
||||
await tools.delayFor(100);
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.testParallel('parallel test 2 - 50ms', async (tools) => {
|
||||
await tools.delayFor(50);
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.testParallel('parallel test 3 - 150ms', async (tools) => {
|
||||
await tools.delayFor(150);
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.start();
|
204
test/tapbundle/test.timing-protocol.ts
Normal file
@ -0,0 +1,204 @@
|
||||
import { tap, expect } from '../../ts_tapbundle/index.js';
|
||||
import { ProtocolParser, ProtocolEmitter } from '../../ts_tapbundle_protocol/index.js';
|
||||
|
||||
// Test the protocol's ability to emit and parse timing metadata
|
||||
tap.test('protocol should correctly emit timing metadata', async () => {
|
||||
const emitter = new ProtocolEmitter();
|
||||
|
||||
const testResult = {
|
||||
ok: true,
|
||||
testNumber: 1,
|
||||
description: 'test with timing',
|
||||
metadata: {
|
||||
time: 123
|
||||
}
|
||||
};
|
||||
|
||||
const lines = emitter.emitTest(testResult);
|
||||
|
||||
// Should have inline timing metadata
|
||||
expect(lines.length).toEqual(1);
|
||||
expect(lines[0]).toInclude('⟦TSTEST:time:123⟧');
|
||||
});
|
||||
|
||||
tap.test('protocol should correctly parse timing metadata', async () => {
|
||||
const parser = new ProtocolParser();
|
||||
|
||||
const line = 'ok 1 - test with timing ⟦TSTEST:time:456⟧';
|
||||
const messages = parser.parseLine(line);
|
||||
|
||||
expect(messages.length).toEqual(1);
|
||||
expect(messages[0].type).toEqual('test');
|
||||
|
||||
const content = messages[0].content as any;
|
||||
expect(content.metadata).toBeDefined();
|
||||
expect(content.metadata.time).toEqual(456);
|
||||
});
|
||||
|
||||
tap.test('protocol should handle 0ms timing', async () => {
|
||||
const parser = new ProtocolParser();
|
||||
|
||||
const line = 'ok 1 - ultra fast test ⟦TSTEST:time:0⟧';
|
||||
const messages = parser.parseLine(line);
|
||||
|
||||
const content = messages[0].content as any;
|
||||
expect(content.metadata.time).toEqual(0);
|
||||
});
|
||||
|
||||
tap.test('protocol should handle large timing values', async () => {
|
||||
const parser = new ProtocolParser();
|
||||
|
||||
const line = 'ok 1 - slow test ⟦TSTEST:time:999999⟧';
|
||||
const messages = parser.parseLine(line);
|
||||
|
||||
const content = messages[0].content as any;
|
||||
expect(content.metadata.time).toEqual(999999);
|
||||
});
|
||||
|
||||
tap.test('protocol should handle timing with other metadata', async () => {
|
||||
const emitter = new ProtocolEmitter();
|
||||
|
||||
const testResult = {
|
||||
ok: true,
|
||||
testNumber: 1,
|
||||
description: 'complex test',
|
||||
metadata: {
|
||||
time: 789,
|
||||
file: 'test.ts',
|
||||
tags: ['slow', 'integration']
|
||||
}
|
||||
};
|
||||
|
||||
const lines = emitter.emitTest(testResult);
|
||||
|
||||
// Should use block metadata format for complex metadata
|
||||
expect(lines.length).toBeGreaterThan(1);
|
||||
expect(lines[1]).toInclude('META:');
|
||||
expect(lines[1]).toInclude('"time":789');
|
||||
});
|
||||
|
||||
tap.test('protocol should parse timing from block metadata', async () => {
|
||||
const parser = new ProtocolParser();
|
||||
|
||||
const lines = [
|
||||
'ok 1 - complex test',
|
||||
'⟦TSTEST:META:{"time":321,"file":"test.ts"}⟧'
|
||||
];
|
||||
|
||||
let testResult: any;
|
||||
|
||||
for (const line of lines) {
|
||||
const messages = parser.parseLine(line);
|
||||
if (messages.length > 0 && messages[0].type === 'test') {
|
||||
testResult = messages[0].content;
|
||||
}
|
||||
}
|
||||
|
||||
expect(testResult).toBeDefined();
|
||||
expect(testResult.metadata).toBeUndefined(); // Metadata comes separately in block format
|
||||
});
|
||||
|
||||
tap.test('timing for skipped tests should be 0 or missing', async () => {
|
||||
const emitter = new ProtocolEmitter();
|
||||
|
||||
const testResult = {
|
||||
ok: true,
|
||||
testNumber: 1,
|
||||
description: 'skipped test',
|
||||
directive: {
|
||||
type: 'skip' as const,
|
||||
reason: 'Not ready'
|
||||
},
|
||||
metadata: {
|
||||
time: 0
|
||||
}
|
||||
};
|
||||
|
||||
const lines = emitter.emitTest(testResult);
|
||||
expect(lines[0]).toInclude('# SKIP');
|
||||
|
||||
// If time is 0, it might be included or omitted
|
||||
if (lines[0].includes('⟦TSTEST:')) {
|
||||
expect(lines[0]).toInclude('time:0');
|
||||
}
|
||||
});
|
||||
|
||||
tap.test('protocol should handle fractional milliseconds', async () => {
|
||||
const emitter = new ProtocolEmitter();
|
||||
|
||||
// Even though we use integers, test that protocol handles them correctly
|
||||
const testResult = {
|
||||
ok: true,
|
||||
testNumber: 1,
|
||||
description: 'precise test',
|
||||
metadata: {
|
||||
time: 123 // Protocol uses integers for milliseconds
|
||||
}
|
||||
};
|
||||
|
||||
const lines = emitter.emitTest(testResult);
|
||||
expect(lines[0]).toInclude('time:123');
|
||||
});
|
||||
|
||||
tap.test('protocol should handle timing in retry scenarios', async () => {
|
||||
const emitter = new ProtocolEmitter();
|
||||
|
||||
const testResult = {
|
||||
ok: true,
|
||||
testNumber: 1,
|
||||
description: 'retry test',
|
||||
metadata: {
|
||||
time: 200,
|
||||
retry: 2
|
||||
}
|
||||
};
|
||||
|
||||
const lines = emitter.emitTest(testResult);
|
||||
// Should include both time and retry
|
||||
expect(lines[0]).toMatch(/time:200.*retry:2|retry:2.*time:200/);
|
||||
});
|
||||
|
||||
// Test actual timing capture
|
||||
tap.test('HrtMeasurement should capture accurate timing', async (tools) => {
|
||||
// Import HrtMeasurement
|
||||
const { HrtMeasurement } = await import('@push.rocks/smarttime');
|
||||
|
||||
const measurement = new HrtMeasurement();
|
||||
measurement.start();
|
||||
|
||||
await tools.delayFor(50);
|
||||
|
||||
measurement.stop();
|
||||
|
||||
// Should be at least 50ms
|
||||
expect(measurement.milliSeconds).toBeGreaterThanOrEqual(50);
|
||||
// But not too much more (allow for some overhead)
|
||||
expect(measurement.milliSeconds).toBeLessThan(100);
|
||||
});
|
||||
|
||||
tap.test('multiple timing measurements should be independent', async (tools) => {
|
||||
const { HrtMeasurement } = await import('@push.rocks/smarttime');
|
||||
|
||||
const measurement1 = new HrtMeasurement();
|
||||
const measurement2 = new HrtMeasurement();
|
||||
|
||||
measurement1.start();
|
||||
await tools.delayFor(25);
|
||||
|
||||
measurement2.start();
|
||||
await tools.delayFor(25);
|
||||
|
||||
measurement1.stop();
|
||||
await tools.delayFor(25);
|
||||
measurement2.stop();
|
||||
|
||||
// measurement1 should be ~50ms (25ms + 25ms)
|
||||
expect(measurement1.milliSeconds).toBeGreaterThanOrEqual(50);
|
||||
expect(measurement1.milliSeconds).toBeLessThan(70);
|
||||
|
||||
// measurement2 should be ~50ms (25ms + 25ms)
|
||||
expect(measurement2.milliSeconds).toBeGreaterThanOrEqual(50);
|
||||
expect(measurement2.milliSeconds).toBeLessThan(70);
|
||||
});
|
||||
|
||||
tap.start();
|
85
test/tapbundle/test.toolsarg.ts
Normal file
@ -0,0 +1,85 @@
|
||||
import { tap, expect } from '../../ts_tapbundle/index.js';
|
||||
|
||||
// Test skip functionality
|
||||
tap.test('should skip a test with skip()', async (toolsArg) => {
|
||||
toolsArg.skip('This test is skipped');
|
||||
// This code should not run
|
||||
expect(false).toBeTrue();
|
||||
});
|
||||
|
||||
tap.test('should conditionally skip with skipIf()', async (toolsArg) => {
|
||||
const shouldSkip = true;
|
||||
toolsArg.skipIf(shouldSkip, 'Condition met, skipping');
|
||||
// This code should not run
|
||||
expect(false).toBeTrue();
|
||||
});
|
||||
|
||||
tap.test('should not skip when skipIf condition is false', async (toolsArg) => {
|
||||
const shouldSkip = false;
|
||||
toolsArg.skipIf(shouldSkip, 'Should not skip');
|
||||
// This code should run
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
// Test todo functionality
|
||||
tap.test('should mark test as todo', async (toolsArg) => {
|
||||
toolsArg.todo('Not implemented yet');
|
||||
// Test code that would be implemented later
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
// Test timeout functionality
|
||||
tap.test('should set custom timeout', async (toolsArg) => {
|
||||
toolsArg.timeout(5000);
|
||||
// Simulate a task that takes 100ms
|
||||
await toolsArg.delayFor(100);
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
// This test is expected to fail due to timeout
|
||||
tap.test('should timeout when exceeding limit', async (toolsArg) => {
|
||||
toolsArg.timeout(100);
|
||||
// This test will timeout and be marked as failed by the test runner
|
||||
await toolsArg.delayFor(2000);
|
||||
// This line should not be reached due to timeout
|
||||
});
|
||||
|
||||
tap.test('timeout should work properly', async (toolsArg) => {
|
||||
toolsArg.timeout(200);
|
||||
// This test should complete successfully within the timeout
|
||||
await toolsArg.delayFor(50);
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
// Test retry functionality
|
||||
tap.retry(3)
|
||||
.test('should retry on failure', async (toolsArg) => {
|
||||
// Use retry count to determine success
|
||||
const currentRetry = toolsArg.retryCount;
|
||||
|
||||
// Fail on first two attempts (0 and 1), succeed on third (2)
|
||||
if (currentRetry < 2) {
|
||||
throw new Error(`Attempt ${currentRetry + 1} failed`);
|
||||
}
|
||||
|
||||
expect(currentRetry).toEqual(2);
|
||||
});
|
||||
|
||||
tap.test('should expose retry count', async (toolsArg) => {
|
||||
toolsArg.retry(2);
|
||||
|
||||
// The retry count should be available
|
||||
expect(toolsArg.retryCount).toBeLessThanOrEqual(2);
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
// Test allowFailure
|
||||
tap.test('should allow failure', async (toolsArg) => {
|
||||
// Just verify that allowFailure() can be called without throwing
|
||||
toolsArg.allowFailure();
|
||||
expect(true).toBeTrue();
|
||||
// Note: In a real implementation, we would see "please note: failure allowed!"
|
||||
// in the output when this test fails, but the test itself will still be marked as failed
|
||||
});
|
||||
|
||||
tap.start();
|
49
test/tapbundle/test.ts
Normal file
@ -0,0 +1,49 @@
|
||||
import { tap, expect } from '../../ts_tapbundle/index.js';
|
||||
|
||||
tap.preTask('hi there', async () => {
|
||||
console.log('this is a pretask');
|
||||
});
|
||||
|
||||
const test1 = tap.test('my first test -> expect true to be true', async () => {
|
||||
return expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
const test2 = tap.test('my second test', async (tools) => {
|
||||
await tools.delayFor(1000);
|
||||
});
|
||||
|
||||
const test3 = tap.test(
|
||||
'my third test -> test2 should take longer than test1 and endure at least 1000ms',
|
||||
async () => {
|
||||
expect(
|
||||
(await test1.testPromise).hrtMeasurement.milliSeconds <
|
||||
(await test2.testPromise).hrtMeasurement.milliSeconds,
|
||||
).toBeTrue();
|
||||
expect((await test2.testPromise).hrtMeasurement.milliSeconds >= 1000).toBeTrue();
|
||||
},
|
||||
);
|
||||
|
||||
const test4 = tap.test('my 4th test -> should fail', async (tools) => {
|
||||
tools.allowFailure();
|
||||
expect(false).toBeFalse();
|
||||
return 'hello';
|
||||
});
|
||||
|
||||
const test5 = tap.test('my 5th test -> should pass in about 500ms', async (tools) => {
|
||||
const test4Result = await test4.testResultPromise;
|
||||
tools.timeout(1000);
|
||||
await tools.delayFor(500);
|
||||
});
|
||||
|
||||
const test6 = tap.skip.test('my 6th test -> should fail after 1000ms', async (tools) => {
|
||||
tools.allowFailure();
|
||||
tools.timeout(1000);
|
||||
await tools.delayFor(2000);
|
||||
});
|
||||
|
||||
const test7 = tap.test('my 7th test -> should print a colored string', async (tools) => {
|
||||
const cs = await tools.coloredString('hello', 'red', 'cyan');
|
||||
console.log(cs);
|
||||
});
|
||||
|
||||
tap.start();
|
@ -1,6 +0,0 @@
|
||||
import { expect, tap } from '@push.rocks/tapbundle';
|
||||
import * as tstest from '../ts/index.js';
|
||||
|
||||
tap.test('prepare test', async () => {});
|
||||
|
||||
tap.start();
|
8
test/tstest/subdir/test.sub.ts
Normal file
@ -0,0 +1,8 @@
|
||||
import { expect, tap } from '../../../ts_tapbundle/index.js';
|
||||
|
||||
tap.test('subdirectory test execution', async () => {
|
||||
console.log('This test verifies subdirectory test discovery works');
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.start();
|
16
test/tstest/test-parallel-demo.ts
Normal file
@ -0,0 +1,16 @@
|
||||
import { tap, expect } from '../../ts_tapbundle/index.js';
|
||||
import * as fs from 'fs';
|
||||
|
||||
// Test to demonstrate parallel execution timing - run with glob pattern
|
||||
// This will give us a clear view of execution order with timestamps
|
||||
|
||||
const timestamp = () => new Date().toISOString().substr(11, 12);
|
||||
|
||||
tap.test('demo test in main file', async (toolsArg) => {
|
||||
console.log(`[${timestamp()}] Test parallel demo started`);
|
||||
await toolsArg.delayFor(1000);
|
||||
console.log(`[${timestamp()}] Test parallel demo completed`);
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.start();
|
11
test/tstest/test.api.para__2.ts
Normal file
@ -0,0 +1,11 @@
|
||||
import { tap, expect } from '../../ts_tapbundle/index.js';
|
||||
|
||||
// This test runs in parallel group 2
|
||||
tap.test('api test in parallel group 2', async (toolsArg) => {
|
||||
console.log('API test started');
|
||||
await toolsArg.delayFor(800);
|
||||
console.log('API test completed');
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.start();
|
13
test/tstest/test.auth.para__1.ts
Normal file
@ -0,0 +1,13 @@
|
||||
import { tap, expect } from '../../ts_tapbundle/index.js';
|
||||
|
||||
// This test runs in parallel group 1
|
||||
const timestamp = () => new Date().toISOString().substr(11, 12);
|
||||
|
||||
tap.test('auth test in parallel group 1', async (toolsArg) => {
|
||||
console.log(`[${timestamp()}] Auth test started`);
|
||||
await toolsArg.delayFor(1000);
|
||||
console.log(`[${timestamp()}] Auth test completed`);
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.start();
|
11
test/tstest/test.console.ts
Normal file
@ -0,0 +1,11 @@
|
||||
import { expect, tap } from '../../ts_tapbundle/index.js';
|
||||
|
||||
tap.test('Test with console output', async () => {
|
||||
console.log('Log message 1 from test');
|
||||
console.log('Log message 2 from test');
|
||||
console.error('Error message from test');
|
||||
console.warn('Warning message from test');
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.start();
|
11
test/tstest/test.db.para__2.ts
Normal file
@ -0,0 +1,11 @@
|
||||
import { tap, expect } from '../../ts_tapbundle/index.js';
|
||||
|
||||
// This test runs in parallel group 2
|
||||
tap.test('db test in parallel group 2', async (toolsArg) => {
|
||||
console.log('DB test started');
|
||||
await toolsArg.delayFor(800);
|
||||
console.log('DB test completed');
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.start();
|
13
test/tstest/test.fail.ts
Normal file
@ -0,0 +1,13 @@
|
||||
import { expect, tap } from '../../ts_tapbundle/index.js';
|
||||
|
||||
tap.test('This test should fail', async () => {
|
||||
console.log('This test will fail on purpose');
|
||||
expect(true).toBeFalse();
|
||||
});
|
||||
|
||||
tap.test('This test should pass', async () => {
|
||||
console.log('This test will pass');
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.start();
|
23
test/tstest/test.failing-with-logs.ts
Normal file
@ -0,0 +1,23 @@
|
||||
import { expect, tap } from '../../ts_tapbundle/index.js';
|
||||
|
||||
tap.test('Test that will fail with console logs', async () => {
|
||||
console.log('Starting the test...');
|
||||
console.log('Doing some setup work');
|
||||
console.log('About to check assertion');
|
||||
|
||||
const value = 42;
|
||||
console.log(`The value is: ${value}`);
|
||||
|
||||
// This will fail
|
||||
expect(value).toEqual(100);
|
||||
|
||||
console.log('This log will not be reached');
|
||||
});
|
||||
|
||||
tap.test('Test that passes', async () => {
|
||||
console.log('This test passes');
|
||||
console.log('So these logs should not show in default mode');
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.start();
|
8
test/tstest/test.glob.ts
Normal file
@ -0,0 +1,8 @@
|
||||
import { expect, tap } from '../../ts_tapbundle/index.js';
|
||||
|
||||
tap.test('glob pattern test execution', async () => {
|
||||
console.log('This test verifies glob pattern execution works');
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.start();
|
10
test/tstest/test.serial1.ts
Normal file
@ -0,0 +1,10 @@
|
||||
import { tap, expect } from '../../ts_tapbundle/index.js';
|
||||
|
||||
// This test runs serially (no para__ in filename)
|
||||
tap.test('serial test 1', async (toolsArg) => {
|
||||
await toolsArg.delayFor(500);
|
||||
console.log('Serial test 1 completed');
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.start();
|
10
test/tstest/test.serial2.ts
Normal file
@ -0,0 +1,10 @@
|
||||
import { tap, expect } from '../../ts_tapbundle/index.js';
|
||||
|
||||
// This test runs serially (no para__ in filename)
|
||||
tap.test('serial test 2', async (toolsArg) => {
|
||||
await toolsArg.delayFor(500);
|
||||
console.log('Serial test 2 completed');
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.start();
|
8
test/tstest/test.single.ts
Normal file
@ -0,0 +1,8 @@
|
||||
import { expect, tap } from '../../ts_tapbundle/index.js';
|
||||
|
||||
tap.test('single file test execution', async () => {
|
||||
console.log('This test verifies single file execution works');
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.start();
|
6
test/tstest/test.ts
Normal file
@ -0,0 +1,6 @@
|
||||
import { expect, tap } from '../../ts_tapbundle/index.js';
|
||||
import * as tstest from '../../ts/index.js';
|
||||
|
||||
tap.test('prepare test', async () => {});
|
||||
|
||||
tap.start();
|
13
test/tstest/test.user.para__1.ts
Normal file
@ -0,0 +1,13 @@
|
||||
import { tap, expect } from '../../ts_tapbundle/index.js';
|
||||
|
||||
// This test runs in parallel group 1
|
||||
const timestamp = () => new Date().toISOString().substr(11, 12);
|
||||
|
||||
tap.test('user test in parallel group 1', async (toolsArg) => {
|
||||
console.log(`[${timestamp()}] User test started`);
|
||||
await toolsArg.delayFor(1000);
|
||||
console.log(`[${timestamp()}] User test completed`);
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.start();
|
17
test/watch-demo/test.demo.ts
Normal file
@ -0,0 +1,17 @@
|
||||
import { tap, expect } from '../../ts_tapbundle/index.js';
|
||||
|
||||
// This test file demonstrates watch mode
|
||||
// Try modifying this file while running: tstest test/watch-demo --watch
|
||||
|
||||
let counter = 1;
|
||||
|
||||
tap.test('demo test that changes', async () => {
|
||||
expect(counter).toEqual(1);
|
||||
console.log(`Test run at: ${new Date().toISOString()}`);
|
||||
});
|
||||
|
||||
tap.test('another test', async () => {
|
||||
expect('hello').toEqual('hello');
|
||||
});
|
||||
|
||||
tap.start();
|
@ -1,8 +1,8 @@
|
||||
/**
|
||||
* autocreated commitinfo by @pushrocks/commitinfo
|
||||
* autocreated commitinfo by @push.rocks/commitinfo
|
||||
*/
|
||||
export const commitinfo = {
|
||||
name: '@git.zone/tstest',
|
||||
version: '1.0.85',
|
||||
version: '2.3.0',
|
||||
description: 'a test utility to run tests that match test/**/*.ts'
|
||||
}
|
||||
|
200
ts/index.ts
@ -1,10 +1,202 @@
|
||||
import { TsTest } from './tstest.classes.tstest.js';
|
||||
import type { LogOptions } from './tstest.logging.js';
|
||||
|
||||
export enum TestExecutionMode {
|
||||
DIRECTORY = 'directory',
|
||||
FILE = 'file',
|
||||
GLOB = 'glob'
|
||||
}
|
||||
|
||||
export const runCli = async () => {
|
||||
if (!process.argv[2]) {
|
||||
console.error('You must specify a test directory as argument. Please try again.');
|
||||
// Check if we're using global tstest in the tstest project itself
|
||||
try {
|
||||
const packageJsonPath = `${process.cwd()}/package.json`;
|
||||
const fs = await import('fs');
|
||||
if (fs.existsSync(packageJsonPath)) {
|
||||
const packageJson = JSON.parse(fs.readFileSync(packageJsonPath, 'utf8'));
|
||||
if (packageJson.name === '@git.zone/tstest') {
|
||||
// Check if we're running from a global installation
|
||||
const execPath = process.argv[1];
|
||||
// Debug: log the paths (uncomment for debugging)
|
||||
// console.log('DEBUG: Checking global tstest usage...');
|
||||
// console.log('execPath:', execPath);
|
||||
// console.log('cwd:', process.cwd());
|
||||
// console.log('process.argv:', process.argv);
|
||||
|
||||
// Check if this is running from global installation
|
||||
const isLocalCli = execPath.includes(process.cwd());
|
||||
const isGlobalPnpm = process.argv.some(arg => arg.includes('.pnpm') && !arg.includes(process.cwd()));
|
||||
const isGlobalNpm = process.argv.some(arg => arg.includes('npm/node_modules') && !arg.includes(process.cwd()));
|
||||
|
||||
if (!isLocalCli && (isGlobalPnpm || isGlobalNpm || !execPath.includes('node_modules'))) {
|
||||
console.error('\n⚠️ WARNING: You are using a globally installed tstest in the tstest project itself!');
|
||||
console.error(' This means you are NOT testing your local changes.');
|
||||
console.error(' Please use one of these commands instead:');
|
||||
console.error(' • node cli.js <test-path>');
|
||||
console.error(' • pnpm test <test-path>');
|
||||
console.error(' • ./cli.js <test-path> (if executable)\n');
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
// Silently ignore any errors in this check
|
||||
}
|
||||
|
||||
// Parse command line arguments
|
||||
const args = process.argv.slice(2);
|
||||
const logOptions: LogOptions = {};
|
||||
let testPath: string | null = null;
|
||||
let tags: string[] = [];
|
||||
let startFromFile: number | null = null;
|
||||
let stopAtFile: number | null = null;
|
||||
let timeoutSeconds: number | null = null;
|
||||
let watchMode: boolean = false;
|
||||
let watchIgnorePatterns: string[] = [];
|
||||
|
||||
// Parse options
|
||||
for (let i = 0; i < args.length; i++) {
|
||||
const arg = args[i];
|
||||
|
||||
switch (arg) {
|
||||
case '--version':
|
||||
// Get version from package.json
|
||||
try {
|
||||
const fs = await import('fs');
|
||||
const packagePath = new URL('../package.json', import.meta.url).pathname;
|
||||
const packageData = JSON.parse(await fs.promises.readFile(packagePath, 'utf8'));
|
||||
console.log(`tstest version ${packageData.version}`);
|
||||
} catch (error) {
|
||||
console.log('tstest version unknown');
|
||||
}
|
||||
process.exit(0);
|
||||
break;
|
||||
case '--quiet':
|
||||
case '-q':
|
||||
logOptions.quiet = true;
|
||||
break;
|
||||
case '--verbose':
|
||||
case '-v':
|
||||
logOptions.verbose = true;
|
||||
break;
|
||||
case '--no-color':
|
||||
logOptions.noColor = true;
|
||||
break;
|
||||
case '--json':
|
||||
logOptions.json = true;
|
||||
break;
|
||||
case '--log-file':
|
||||
case '--logfile':
|
||||
logOptions.logFile = true; // Set this as a flag, not a value
|
||||
break;
|
||||
case '--tags':
|
||||
if (i + 1 < args.length) {
|
||||
tags = args[++i].split(',');
|
||||
}
|
||||
break;
|
||||
case '--startFrom':
|
||||
if (i + 1 < args.length) {
|
||||
const value = parseInt(args[++i], 10);
|
||||
if (isNaN(value) || value < 1) {
|
||||
console.error('Error: --startFrom must be a positive integer');
|
||||
process.exit(1);
|
||||
}
|
||||
startFromFile = value;
|
||||
} else {
|
||||
console.error('Error: --startFrom requires a number argument');
|
||||
process.exit(1);
|
||||
}
|
||||
break;
|
||||
case '--stopAt':
|
||||
if (i + 1 < args.length) {
|
||||
const value = parseInt(args[++i], 10);
|
||||
if (isNaN(value) || value < 1) {
|
||||
console.error('Error: --stopAt must be a positive integer');
|
||||
process.exit(1);
|
||||
}
|
||||
stopAtFile = value;
|
||||
} else {
|
||||
console.error('Error: --stopAt requires a number argument');
|
||||
process.exit(1);
|
||||
}
|
||||
break;
|
||||
case '--timeout':
|
||||
if (i + 1 < args.length) {
|
||||
const value = parseInt(args[++i], 10);
|
||||
if (isNaN(value) || value < 1) {
|
||||
console.error('Error: --timeout must be a positive integer (seconds)');
|
||||
process.exit(1);
|
||||
}
|
||||
timeoutSeconds = value;
|
||||
} else {
|
||||
console.error('Error: --timeout requires a number argument (seconds)');
|
||||
process.exit(1);
|
||||
}
|
||||
break;
|
||||
case '--watch':
|
||||
case '-w':
|
||||
watchMode = true;
|
||||
break;
|
||||
case '--watch-ignore':
|
||||
if (i + 1 < args.length) {
|
||||
watchIgnorePatterns = args[++i].split(',');
|
||||
} else {
|
||||
console.error('Error: --watch-ignore requires a comma-separated list of patterns');
|
||||
process.exit(1);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
if (!arg.startsWith('-')) {
|
||||
testPath = arg;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Validate test file range options
|
||||
if (startFromFile !== null && stopAtFile !== null && startFromFile > stopAtFile) {
|
||||
console.error('Error: --startFrom cannot be greater than --stopAt');
|
||||
process.exit(1);
|
||||
}
|
||||
const tsTestInstance = new TsTest(process.cwd(), process.argv[2]);
|
||||
await tsTestInstance.run();
|
||||
|
||||
if (!testPath) {
|
||||
console.error('You must specify a test directory/file/pattern as argument. Please try again.');
|
||||
console.error('\nUsage: tstest <path> [options]');
|
||||
console.error('\nOptions:');
|
||||
console.error(' --version Show version information');
|
||||
console.error(' --quiet, -q Minimal output');
|
||||
console.error(' --verbose, -v Verbose output');
|
||||
console.error(' --no-color Disable colored output');
|
||||
console.error(' --json Output results as JSON');
|
||||
console.error(' --logfile Write logs to .nogit/testlogs/[testfile].log');
|
||||
console.error(' --tags <tags> Run only tests with specified tags (comma-separated)');
|
||||
console.error(' --startFrom <n> Start running from test file number n');
|
||||
console.error(' --stopAt <n> Stop running at test file number n');
|
||||
console.error(' --timeout <s> Timeout test files after s seconds');
|
||||
console.error(' --watch, -w Watch for file changes and re-run tests');
|
||||
console.error(' --watch-ignore Patterns to ignore in watch mode (comma-separated)');
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
let executionMode: TestExecutionMode;
|
||||
|
||||
// Detect execution mode based on the argument
|
||||
if (testPath.includes('*') || testPath.includes('?') || testPath.includes('[') || testPath.includes('{')) {
|
||||
executionMode = TestExecutionMode.GLOB;
|
||||
} else if (testPath.endsWith('.ts')) {
|
||||
executionMode = TestExecutionMode.FILE;
|
||||
} else {
|
||||
executionMode = TestExecutionMode.DIRECTORY;
|
||||
}
|
||||
|
||||
const tsTestInstance = new TsTest(process.cwd(), testPath, executionMode, logOptions, tags, startFromFile, stopAtFile, timeoutSeconds);
|
||||
|
||||
if (watchMode) {
|
||||
await tsTestInstance.runWatch(watchIgnorePatterns);
|
||||
} else {
|
||||
await tsTestInstance.run();
|
||||
}
|
||||
};
|
||||
|
||||
// Execute CLI when this file is run directly
|
||||
if (import.meta.url === `file://${process.argv[1]}`) {
|
||||
runCli();
|
||||
}
|
||||
|
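For orientation only (not part of the diff): a minimal sketch of what a CLI call such as `tstest test/ --verbose --tags unit,fast --startFrom 2 --stopAt 5 --timeout 120` resolves to after the argument parsing above. The path, tag names, and numbers are made-up examples; the constructor signature is the one introduced in ts/tstest.classes.tstest.ts further down.

```typescript
// Illustrative values only; mirrors the mode detection and construction logic above.
const testPath = 'test/';                          // no glob chars, no .ts suffix
const executionMode = TestExecutionMode.DIRECTORY; // hence directory mode
const tsTestInstance = new TsTest(
  process.cwd(),
  testPath,
  executionMode,
  { verbose: true },   // logOptions
  ['unit', 'fast'],    // tags
  2,                   // startFromFile
  5,                   // stopAtFile
  120,                 // timeoutSeconds
);
await tsTestInstance.run();
```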
3
ts/tspublish.json
Normal file
@ -0,0 +1,3 @@
|
||||
{
|
||||
"order": 4
|
||||
}
|
@ -6,59 +6,42 @@ import { coloredString as cs } from '@push.rocks/consolecolor';
|
||||
|
||||
import { TapParser } from './tstest.classes.tap.parser.js';
|
||||
import * as logPrefixes from './tstest.logprefixes.js';
|
||||
import { TsTestLogger } from './tstest.logging.js';
|
||||
|
||||
export class TapCombinator {
|
||||
tapParserStore: TapParser[] = [];
|
||||
skippedFiles: string[] = [];
|
||||
private logger: TsTestLogger;
|
||||
|
||||
constructor(logger: TsTestLogger) {
|
||||
this.logger = logger;
|
||||
}
|
||||
|
||||
addTapParser(tapParserArg: TapParser) {
|
||||
this.tapParserStore.push(tapParserArg);
|
||||
}
|
||||
|
||||
addSkippedFile(filename: string) {
|
||||
this.skippedFiles.push(filename);
|
||||
}
|
||||
|
||||
evaluate() {
|
||||
console.log(
|
||||
`${logPrefixes.TsTestPrefix} RESULTS FOR ${this.tapParserStore.length} TESTFILE(S):`
|
||||
);
|
||||
|
||||
let failGlobal = false; // determine whether tstest should fail
|
||||
// Call the logger's summary method with skipped files
|
||||
this.logger.summary(this.skippedFiles);
|
||||
|
||||
// Check for failures
|
||||
let failGlobal = false;
|
||||
for (const tapParser of this.tapParserStore) {
|
||||
if (!tapParser.expectedTests) {
|
||||
if (!tapParser.expectedTests ||
|
||||
tapParser.expectedTests !== tapParser.receivedTests ||
|
||||
tapParser.getErrorTests().length > 0) {
|
||||
failGlobal = true;
|
||||
let overviewString =
|
||||
logPrefixes.TsTestPrefix +
|
||||
cs(` ${tapParser.fileName} ${plugins.figures.cross}`, 'red') +
|
||||
` ${plugins.figures.pointer} ` +
|
||||
`does not specify tests!`;
|
||||
console.log(overviewString);
|
||||
} else if (tapParser.expectedTests !== tapParser.receivedTests) {
|
||||
failGlobal = true;
|
||||
let overviewString =
|
||||
logPrefixes.TsTestPrefix +
|
||||
cs(` ${tapParser.fileName} ${plugins.figures.cross}`, 'red') +
|
||||
` ${plugins.figures.pointer} ` +
|
||||
tapParser.getTestOverviewAsString() +
|
||||
`did not execute all specified tests!`;
|
||||
console.log(overviewString);
|
||||
} else if (tapParser.getErrorTests().length === 0) {
|
||||
let overviewString =
|
||||
logPrefixes.TsTestPrefix +
|
||||
cs(` ${tapParser.fileName} ${plugins.figures.tick}`, 'green') +
|
||||
` ${plugins.figures.pointer} ` +
|
||||
tapParser.getTestOverviewAsString();
|
||||
console.log(overviewString);
|
||||
} else {
|
||||
failGlobal = true;
|
||||
let overviewString =
|
||||
logPrefixes.TsTestPrefix +
|
||||
cs(` ${tapParser.fileName} ${plugins.figures.cross}`, 'red') +
|
||||
` ${plugins.figures.pointer} ` +
|
||||
tapParser.getTestOverviewAsString();
|
||||
console.log(overviewString);
|
||||
break;
|
||||
}
|
||||
}
|
||||
console.log(cs(plugins.figures.hamburger.repeat(48), 'cyan'));
|
||||
if (!failGlobal) {
|
||||
console.log(cs('FINAL RESULT: SUCCESS!', 'green'));
|
||||
} else {
|
||||
console.log(cs('FINAL RESULT: FAIL!', 'red'));
|
||||
|
||||
// Exit with error code if tests failed
|
||||
if (failGlobal) {
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
|
@ -7,23 +7,59 @@ import { coloredString as cs } from '@push.rocks/consolecolor';
|
||||
import * as plugins from './tstest.plugins.js';
|
||||
import { TapTestResult } from './tstest.classes.tap.testresult.js';
|
||||
import * as logPrefixes from './tstest.logprefixes.js';
|
||||
import { TsTestLogger } from './tstest.logging.js';
|
||||
import { ProtocolParser } from '../dist_ts_tapbundle_protocol/index.js';
|
||||
import type { IProtocolMessage, ITestResult, IPlanLine, IErrorBlock, ITestEvent } from '../dist_ts_tapbundle_protocol/index.js';
|
||||
|
||||
export class TapParser {
|
||||
testStore: TapTestResult[] = [];
|
||||
|
||||
expectedTestsRegex = /([0-9]*)\.\.([0-9]*)$/;
|
||||
expectedTests: number;
|
||||
receivedTests: number;
|
||||
expectedTests: number = 0;
|
||||
receivedTests: number = 0;
|
||||
|
||||
testStatusRegex = /(ok|not\sok)\s([0-9]+)\s-\s(.*)\s#\stime=(.*)ms$/;
|
||||
activeTapTestResult: TapTestResult;
|
||||
|
||||
pretaskRegex = /^::__PRETASK:(.*)$/;
|
||||
|
||||
private logger: TsTestLogger;
|
||||
private protocolParser: ProtocolParser;
|
||||
private protocolVersion: string | null = null;
|
||||
|
||||
/**
|
||||
* the constructor for TapParser
|
||||
*/
|
||||
constructor(public fileName: string) {}
|
||||
constructor(public fileName: string, logger?: TsTestLogger) {
|
||||
this.logger = logger;
|
||||
this.protocolParser = new ProtocolParser();
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle test file timeout
|
||||
*/
|
||||
public handleTimeout(timeoutSeconds: number) {
|
||||
// If no tests have been defined yet, set expected to 1
|
||||
if (this.expectedTests === 0) {
|
||||
this.expectedTests = 1;
|
||||
}
|
||||
|
||||
// Create a fake failing test result for timeout
|
||||
this._getNewTapTestResult();
|
||||
this.activeTapTestResult.testOk = false;
|
||||
this.activeTapTestResult.testSettled = true;
|
||||
this.testStore.push(this.activeTapTestResult);
|
||||
|
||||
// Log the timeout error
|
||||
if (this.logger) {
|
||||
// First log the test result
|
||||
this.logger.testResult(
|
||||
`Test file timeout`,
|
||||
false,
|
||||
timeoutSeconds * 1000,
|
||||
`Error: Test file exceeded timeout of ${timeoutSeconds} seconds`
|
||||
);
|
||||
this.logger.testErrorDetails(`Test execution was terminated after ${timeoutSeconds} seconds`);
|
||||
}
|
||||
|
||||
// Don't call evaluateFinalResult here, let the caller handle it
|
||||
}
|
||||
|
||||
private _getNewTapTestResult() {
|
||||
this.activeTapTestResult = new TapTestResult(this.testStore.length + 1);
|
||||
@ -38,77 +74,299 @@ export class TapParser {
|
||||
logLineArray.pop();
|
||||
}
|
||||
|
||||
// lets parse the log information
|
||||
// Process each line through the protocol parser
|
||||
for (const logLine of logLineArray) {
|
||||
let logLineIsTapProtocol = false;
|
||||
if (!this.expectedTests && this.expectedTestsRegex.test(logLine)) {
|
||||
logLineIsTapProtocol = true;
|
||||
const regexResult = this.expectedTestsRegex.exec(logLine);
|
||||
this.expectedTests = parseInt(regexResult[2]);
|
||||
console.log(
|
||||
`${logPrefixes.TapPrefix} ${cs(`Expecting ${this.expectedTests} tests!`, 'blue')}`
|
||||
);
|
||||
|
||||
// initiating first TapResult
|
||||
this._getNewTapTestResult();
|
||||
} else if (this.pretaskRegex.test(logLine)) {
|
||||
logLineIsTapProtocol = true;
|
||||
const pretaskContentMatch = this.pretaskRegex.exec(logLine);
|
||||
if (pretaskContentMatch && pretaskContentMatch[1]) {
|
||||
console.log(`${logPrefixes.TapPretaskPrefix} Pretask ->${pretaskContentMatch[1]}: Success.`);
|
||||
const messages = this.protocolParser.parseLine(logLine);
|
||||
|
||||
if (messages.length > 0) {
|
||||
// Handle protocol messages
|
||||
for (const message of messages) {
|
||||
this._handleProtocolMessage(message, logLine);
|
||||
}
|
||||
} else if (this.testStatusRegex.test(logLine)) {
|
||||
logLineIsTapProtocol = true;
|
||||
const regexResult = this.testStatusRegex.exec(logLine);
|
||||
const testId = parseInt(regexResult[2]);
|
||||
const testOk = (() => {
|
||||
if (regexResult[1] === 'ok') {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
})();
|
||||
|
||||
const testSubject = regexResult[3];
|
||||
const testDuration = parseInt(regexResult[4]);
|
||||
|
||||
// test for protocol error
|
||||
if (testId !== this.activeTapTestResult.id) {
|
||||
console.log(
|
||||
`${logPrefixes.TapErrorPrefix} Something is strange! Test Ids are not equal!`
|
||||
);
|
||||
}
|
||||
this.activeTapTestResult.setTestResult(testOk);
|
||||
|
||||
if (testOk) {
|
||||
console.log(
|
||||
logPrefixes.TapPrefix,
|
||||
`${cs(`T${testId} ${plugins.figures.tick}`, 'green')} ${plugins.figures.arrowRight} ` +
|
||||
cs(testSubject, 'blue') +
|
||||
` | ${cs(`${testDuration} ms`, 'orange')}`
|
||||
);
|
||||
} else {
|
||||
console.log(
|
||||
logPrefixes.TapPrefix,
|
||||
`${cs(`T${testId} ${plugins.figures.cross}`, 'red')} ${plugins.figures.arrowRight} ` +
|
||||
cs(testSubject, 'blue') +
|
||||
` | ${cs(`${testDuration} ms`, 'orange')}`
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
if (!logLineIsTapProtocol) {
|
||||
} else {
|
||||
// Not a protocol message, handle as console output
|
||||
if (this.activeTapTestResult) {
|
||||
this.activeTapTestResult.addLogLine(logLine);
|
||||
}
|
||||
console.log(logLine);
|
||||
|
||||
// Check for snapshot communication (legacy)
|
||||
const snapshotMatch = logLine.match(/###SNAPSHOT###(.+)###SNAPSHOT###/);
|
||||
if (snapshotMatch) {
|
||||
const base64Data = snapshotMatch[1];
|
||||
try {
|
||||
const snapshotData = JSON.parse(Buffer.from(base64Data, 'base64').toString());
|
||||
this.handleSnapshot(snapshotData);
|
||||
} catch (error: any) {
|
||||
if (this.logger) {
|
||||
this.logger.testConsoleOutput(`Error parsing snapshot data: ${error.message}`);
|
||||
}
|
||||
}
|
||||
} else if (this.logger) {
|
||||
// This is console output from the test file
|
||||
this.logger.testConsoleOutput(logLine);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (this.activeTapTestResult && this.activeTapTestResult.testSettled) {
|
||||
private _handleProtocolMessage(message: IProtocolMessage, originalLine: string) {
|
||||
switch (message.type) {
|
||||
case 'protocol':
|
||||
this.protocolVersion = message.content.version;
|
||||
if (this.logger) {
|
||||
this.logger.tapOutput(`Protocol version: ${this.protocolVersion}`);
|
||||
}
|
||||
break;
|
||||
|
||||
case 'version':
|
||||
// TAP version, we can ignore this
|
||||
break;
|
||||
|
||||
case 'plan':
|
||||
const plan = message.content as IPlanLine;
|
||||
this.expectedTests = plan.end - plan.start + 1;
|
||||
if (plan.skipAll) {
|
||||
if (this.logger) {
|
||||
this.logger.tapOutput(`Skipping all tests: ${plan.skipAll}`);
|
||||
}
|
||||
} else {
|
||||
if (this.logger) {
|
||||
this.logger.tapOutput(`Expecting ${this.expectedTests} tests!`);
|
||||
}
|
||||
}
|
||||
// Initialize first TapResult
|
||||
this._getNewTapTestResult();
|
||||
break;
|
||||
|
||||
case 'test':
|
||||
const testResult = message.content as ITestResult;
|
||||
|
||||
// Update active test result
|
||||
this.activeTapTestResult.setTestResult(testResult.ok);
|
||||
|
||||
// Extract test duration from metadata
|
||||
let testDuration = 0;
|
||||
if (testResult.metadata?.time) {
|
||||
testDuration = testResult.metadata.time;
|
||||
}
|
||||
|
||||
// Log test result
|
||||
if (this.logger) {
|
||||
if (testResult.ok) {
|
||||
this.logger.testResult(testResult.description, true, testDuration);
|
||||
} else {
|
||||
this.logger.testResult(testResult.description, false, testDuration);
|
||||
|
||||
// If there's error metadata, show it
|
||||
if (testResult.metadata?.error) {
|
||||
const error = testResult.metadata.error;
|
||||
let errorDetails = error.message;
|
||||
if (error.stack) {
|
||||
errorDetails = error.stack;
|
||||
}
|
||||
this.logger.testErrorDetails(errorDetails);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Handle directives (skip/todo)
|
||||
if (testResult.directive) {
|
||||
if (this.logger) {
|
||||
if (testResult.directive.type === 'skip') {
|
||||
this.logger.testConsoleOutput(`Test skipped: ${testResult.directive.reason || 'No reason given'}`);
|
||||
} else if (testResult.directive.type === 'todo') {
|
||||
this.logger.testConsoleOutput(`Test todo: ${testResult.directive.reason || 'No reason given'}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Mark test as settled and move to next
|
||||
this.activeTapTestResult.testSettled = true;
|
||||
this.testStore.push(this.activeTapTestResult);
|
||||
this._getNewTapTestResult();
|
||||
break;
|
||||
|
||||
case 'comment':
|
||||
if (this.logger) {
|
||||
// Check if it's a pretask comment
|
||||
const pretaskMatch = message.content.match(/^Pretask -> (.+): Success\.$/);
|
||||
if (pretaskMatch) {
|
||||
this.logger.tapOutput(message.content);
|
||||
} else {
|
||||
this.logger.testConsoleOutput(message.content);
|
||||
}
|
||||
}
|
||||
break;
|
||||
|
||||
case 'bailout':
|
||||
if (this.logger) {
|
||||
this.logger.error(`Bail out! ${message.content}`);
|
||||
}
|
||||
break;
|
||||
|
||||
case 'error':
|
||||
const errorBlock = message.content as IErrorBlock;
|
||||
if (this.logger && errorBlock.error) {
|
||||
let errorDetails = errorBlock.error.message;
|
||||
if (errorBlock.error.stack) {
|
||||
errorDetails = errorBlock.error.stack;
|
||||
}
|
||||
this.logger.testErrorDetails(errorDetails);
|
||||
}
|
||||
break;
|
||||
|
||||
case 'snapshot':
|
||||
// Handle new protocol snapshot format
|
||||
const snapshot = message.content;
|
||||
this.handleSnapshot({
|
||||
path: snapshot.name,
|
||||
content: typeof snapshot.content === 'string' ? snapshot.content : JSON.stringify(snapshot.content),
|
||||
action: 'compare' // Default action
|
||||
});
|
||||
break;
|
||||
|
||||
case 'event':
|
||||
const event = message.content as ITestEvent;
|
||||
this._handleTestEvent(event);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
private _handleTestEvent(event: ITestEvent) {
|
||||
if (!this.logger) return;
|
||||
|
||||
switch (event.eventType) {
|
||||
case 'test:queued':
|
||||
// We can track queued tests if needed
|
||||
break;
|
||||
|
||||
case 'test:started':
|
||||
this.logger.testConsoleOutput(cs(`Test starting: ${event.data.description}`, 'cyan'));
|
||||
if (event.data.retry) {
|
||||
this.logger.testConsoleOutput(cs(` Retry attempt ${event.data.retry}`, 'orange'));
|
||||
}
|
||||
break;
|
||||
|
||||
case 'test:progress':
|
||||
if (event.data.progress !== undefined) {
|
||||
this.logger.testConsoleOutput(cs(` Progress: ${event.data.progress}%`, 'cyan'));
|
||||
}
|
||||
break;
|
||||
|
||||
case 'test:completed':
|
||||
// Test completion is already handled by the test result
|
||||
// This event provides additional timing info if needed
|
||||
break;
|
||||
|
||||
case 'suite:started':
|
||||
this.logger.testConsoleOutput(cs(`\nSuite: ${event.data.suiteName}`, 'blue'));
|
||||
break;
|
||||
|
||||
case 'suite:completed':
|
||||
this.logger.testConsoleOutput(cs(`Suite completed: ${event.data.suiteName}\n`, 'blue'));
|
||||
break;
|
||||
|
||||
case 'hook:started':
|
||||
this.logger.testConsoleOutput(cs(` Hook: ${event.data.hookName}`, 'cyan'));
|
||||
break;
|
||||
|
||||
case 'hook:completed':
|
||||
// Silent unless there's an error
|
||||
if (event.data.error) {
|
||||
this.logger.testConsoleOutput(cs(` Hook failed: ${event.data.hookName}`, 'red'));
|
||||
}
|
||||
break;
|
||||
|
||||
case 'assertion:failed':
|
||||
// Enhanced assertion failure with diff
|
||||
if (event.data.error) {
|
||||
this._displayAssertionError(event.data.error);
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
private _displayAssertionError(error: any) {
|
||||
if (!this.logger) return;
|
||||
|
||||
// Display error message
|
||||
if (error.message) {
|
||||
this.logger.testErrorDetails(error.message);
|
||||
}
|
||||
|
||||
// Display visual diff if available
|
||||
if (error.diff) {
|
||||
this._displayDiff(error.diff, error.expected, error.actual);
|
||||
}
|
||||
}
|
||||
|
||||
private _displayDiff(diff: any, expected: any, actual: any) {
|
||||
if (!this.logger) return;
|
||||
|
||||
this.logger.testConsoleOutput(cs('\n Diff:', 'cyan'));
|
||||
|
||||
switch (diff.type) {
|
||||
case 'string':
|
||||
this._displayStringDiff(diff.changes);
|
||||
break;
|
||||
|
||||
case 'object':
|
||||
this._displayObjectDiff(diff.changes, expected, actual);
|
||||
break;
|
||||
|
||||
case 'array':
|
||||
this._displayArrayDiff(diff.changes, expected, actual);
|
||||
break;
|
||||
|
||||
case 'primitive':
|
||||
this._displayPrimitiveDiff(diff.changes);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
private _displayStringDiff(changes: any[]) {
|
||||
for (const change of changes) {
|
||||
const linePrefix = ` Line ${change.line + 1}: `;
|
||||
if (change.type === 'add') {
|
||||
this.logger.testConsoleOutput(cs(`${linePrefix}+ ${change.content}`, 'green'));
|
||||
} else if (change.type === 'remove') {
|
||||
this.logger.testConsoleOutput(cs(`${linePrefix}- ${change.content}`, 'red'));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private _displayObjectDiff(changes: any[], expected: any, actual: any) {
|
||||
this.logger.testConsoleOutput(cs(' Expected:', 'red'));
|
||||
this.logger.testConsoleOutput(` ${JSON.stringify(expected, null, 2)}`);
|
||||
this.logger.testConsoleOutput(cs(' Actual:', 'green'));
|
||||
this.logger.testConsoleOutput(` ${JSON.stringify(actual, null, 2)}`);
|
||||
|
||||
this.logger.testConsoleOutput(cs('\n Changes:', 'cyan'));
|
||||
for (const change of changes) {
|
||||
const path = change.path.join('.');
|
||||
if (change.type === 'add') {
|
||||
this.logger.testConsoleOutput(cs(` + ${path}: ${JSON.stringify(change.newValue)}`, 'green'));
|
||||
} else if (change.type === 'remove') {
|
||||
this.logger.testConsoleOutput(cs(` - ${path}: ${JSON.stringify(change.oldValue)}`, 'red'));
|
||||
} else if (change.type === 'modify') {
|
||||
this.logger.testConsoleOutput(cs(` ~ ${path}:`, 'cyan'));
|
||||
this.logger.testConsoleOutput(cs(` - ${JSON.stringify(change.oldValue)}`, 'red'));
|
||||
this.logger.testConsoleOutput(cs(` + ${JSON.stringify(change.newValue)}`, 'green'));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private _displayArrayDiff(changes: any[], expected: any[], actual: any[]) {
|
||||
this._displayObjectDiff(changes, expected, actual);
|
||||
}
|
||||
|
||||
private _displayPrimitiveDiff(changes: any[]) {
|
||||
const change = changes[0];
|
||||
if (change) {
|
||||
this.logger.testConsoleOutput(cs(` Expected: ${JSON.stringify(change.oldValue)}`, 'red'));
|
||||
this.logger.testConsoleOutput(cs(` Actual: ${JSON.stringify(change.newValue)}`, 'green'));
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* returns all tests that are not completed
|
||||
@ -166,44 +424,94 @@ export class TapParser {
|
||||
public async handleTapLog(tapLog: string) {
|
||||
this._processLog(tapLog);
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle snapshot data from the test
|
||||
*/
|
||||
private async handleSnapshot(snapshotData: { path: string; content: string; action: string }) {
|
||||
try {
|
||||
const smartfile = await import('@push.rocks/smartfile');
|
||||
|
||||
if (snapshotData.action === 'compare') {
|
||||
// Try to read existing snapshot
|
||||
try {
|
||||
const existingSnapshot = await smartfile.fs.toStringSync(snapshotData.path);
|
||||
if (existingSnapshot !== snapshotData.content) {
|
||||
// Snapshot mismatch
|
||||
if (this.logger) {
|
||||
this.logger.testConsoleOutput(`Snapshot mismatch: ${snapshotData.path}`);
|
||||
this.logger.testConsoleOutput(`Expected:\n${existingSnapshot}`);
|
||||
this.logger.testConsoleOutput(`Received:\n${snapshotData.content}`);
|
||||
}
|
||||
// TODO: Communicate failure back to the test
|
||||
} else {
|
||||
if (this.logger) {
|
||||
this.logger.testConsoleOutput(`Snapshot matched: ${snapshotData.path}`);
|
||||
}
|
||||
}
|
||||
} catch (error: any) {
|
||||
if (error.code === 'ENOENT') {
|
||||
// Snapshot doesn't exist, create it
|
||||
const dirPath = snapshotData.path.substring(0, snapshotData.path.lastIndexOf('/'));
|
||||
await smartfile.fs.ensureDir(dirPath);
|
||||
await smartfile.memory.toFs(snapshotData.content, snapshotData.path);
|
||||
if (this.logger) {
|
||||
this.logger.testConsoleOutput(`Snapshot created: ${snapshotData.path}`);
|
||||
}
|
||||
} else {
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
} else if (snapshotData.action === 'update') {
|
||||
// Update snapshot
|
||||
const dirPath = snapshotData.path.substring(0, snapshotData.path.lastIndexOf('/'));
|
||||
await smartfile.fs.ensureDir(dirPath);
|
||||
await smartfile.memory.toFs(snapshotData.content, snapshotData.path);
|
||||
if (this.logger) {
|
||||
this.logger.testConsoleOutput(`Snapshot updated: ${snapshotData.path}`);
|
||||
}
|
||||
}
|
||||
} catch (error: any) {
|
||||
if (this.logger) {
|
||||
this.logger.testConsoleOutput(`Error handling snapshot: ${error.message}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public async evaluateFinalResult() {
|
||||
this.receivedTests = this.testStore.length;
|
||||
|
||||
// check whether all tests ran
|
||||
if (this.expectedTests === this.receivedTests) {
|
||||
console.log(
|
||||
`${logPrefixes.TapPrefix} ${cs(
|
||||
`${this.receivedTests} out of ${this.expectedTests} Tests completed!`,
|
||||
'green'
|
||||
)}`
|
||||
);
|
||||
if (this.logger) {
|
||||
this.logger.tapOutput(`${this.receivedTests} out of ${this.expectedTests} Tests completed!`);
|
||||
}
|
||||
} else {
|
||||
console.log(
|
||||
`${logPrefixes.TapErrorPrefix} ${cs(
|
||||
`Only ${this.receivedTests} out of ${this.expectedTests} completed!`,
|
||||
'red'
|
||||
)}`
|
||||
);
|
||||
if (this.logger) {
|
||||
this.logger.error(`Only ${this.receivedTests} out of ${this.expectedTests} completed!`);
|
||||
}
|
||||
}
|
||||
if (!this.expectedTests) {
|
||||
console.log(cs('Error: No tests were defined. Therefore the testfile failed!', 'red'));
|
||||
if (!this.expectedTests && this.receivedTests === 0) {
|
||||
if (this.logger) {
|
||||
this.logger.error('No tests were defined. Therefore the testfile failed!');
|
||||
this.logger.testFileEnd(0, 1, 0); // Count as 1 failure
|
||||
}
|
||||
} else if (this.expectedTests !== this.receivedTests) {
|
||||
console.log(
|
||||
cs(
|
||||
'Error: The number of received tests and expectedTests is unequal! Therefore the testfile failed',
|
||||
'red'
|
||||
)
|
||||
);
|
||||
if (this.logger) {
|
||||
this.logger.error('The number of received tests and expectedTests is unequal! Therefore the testfile failed');
|
||||
const errorCount = this.getErrorTests().length || 1; // At least 1 error
|
||||
this.logger.testFileEnd(this.receivedTests - errorCount, errorCount, 0);
|
||||
}
|
||||
} else if (this.getErrorTests().length === 0) {
|
||||
console.log(`${logPrefixes.TapPrefix} ${cs(`All tests are successful!!!`, 'green')}`);
|
||||
if (this.logger) {
|
||||
this.logger.tapOutput('All tests are successful!!!');
|
||||
this.logger.testFileEnd(this.receivedTests, 0, 0);
|
||||
}
|
||||
} else {
|
||||
console.log(
|
||||
`${logPrefixes.TapPrefix} ${cs(
|
||||
`${this.getErrorTests().length} tests threw an error!!!`,
|
||||
'red'
|
||||
)}`
|
||||
);
|
||||
if (this.logger) {
|
||||
this.logger.tapOutput(`${this.getErrorTests().length} tests threw an error!!!`, true);
|
||||
this.logger.testFileEnd(this.receivedTests - this.getErrorTests().length, this.getErrorTests().length, 0);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
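A quick sketch (not part of the diff) of the kind of message the new `_handleProtocolMessage` switch consumes for a failing test. The field names below are inferred solely from how the handler reads them; the authoritative `IProtocolMessage`/`ITestResult` shapes live in `dist_ts_tapbundle_protocol`.

```typescript
// Assumed shape, derived from the property reads in _handleProtocolMessage above.
const exampleMessage = {
  type: 'test' as const,
  content: {
    ok: false,
    description: 'parses the config file',
    metadata: {
      time: 42, // ms, consumed as testResult.metadata.time
      error: { message: 'expected "a" to equal "b"', stack: 'Error: ...' },
    },
  },
};
// The 'test' branch marks the active TapTestResult as failed, logs the error
// details via the logger, pushes the result to testStore, and prepares the
// next TapTestResult.
```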
@ -1,6 +1,7 @@
|
||||
import * as plugins from './tstest.plugins.js';
|
||||
import * as paths from './tstest.paths.js';
|
||||
import { SmartFile } from '@push.rocks/smartfile';
|
||||
import { TestExecutionMode } from './index.js';
|
||||
|
||||
// tap related stuff
|
||||
import { TapCombinator } from './tstest.classes.tap.combinator.js';
|
||||
@ -14,14 +15,14 @@ export class TestDirectory {
|
||||
cwd: string;
|
||||
|
||||
/**
|
||||
* the relative location of the test dir
|
||||
* the test path or pattern
|
||||
*/
|
||||
relativePath: string;
|
||||
testPath: string;
|
||||
|
||||
/**
|
||||
* the absolute path of the test dir
|
||||
* the execution mode
|
||||
*/
|
||||
absolutePath: string;
|
||||
executionMode: TestExecutionMode;
|
||||
|
||||
/**
|
||||
* an array of Smartfiles
|
||||
@ -30,28 +31,111 @@ export class TestDirectory {
|
||||
|
||||
/**
|
||||
* the constructor for TestDirectory
|
||||
* tell it the path
|
||||
* @param pathToTestDirectory
|
||||
* @param cwdArg - the current working directory
|
||||
* @param testPathArg - the test path/pattern
|
||||
* @param executionModeArg - the execution mode
|
||||
*/
|
||||
constructor(cwdArg: string, relativePathToTestDirectory: string) {
|
||||
constructor(cwdArg: string, testPathArg: string, executionModeArg: TestExecutionMode) {
|
||||
this.cwd = cwdArg;
|
||||
this.relativePath = relativePathToTestDirectory;
|
||||
this.testPath = testPathArg;
|
||||
this.executionMode = executionModeArg;
|
||||
}
|
||||
|
||||
private async _init() {
|
||||
this.testfileArray = await plugins.smartfile.fs.fileTreeToObject(
|
||||
plugins.path.join(this.cwd, this.relativePath),
|
||||
'test*.ts'
|
||||
);
|
||||
switch (this.executionMode) {
|
||||
case TestExecutionMode.FILE:
|
||||
// Single file mode
|
||||
const filePath = plugins.path.isAbsolute(this.testPath)
|
||||
? this.testPath
|
||||
: plugins.path.join(this.cwd, this.testPath);
|
||||
|
||||
if (await plugins.smartfile.fs.fileExists(filePath)) {
|
||||
this.testfileArray = [await plugins.smartfile.SmartFile.fromFilePath(filePath)];
|
||||
} else {
|
||||
throw new Error(`Test file not found: ${filePath}`);
|
||||
}
|
||||
break;
|
||||
|
||||
case TestExecutionMode.GLOB:
|
||||
// Glob pattern mode - use listFileTree which supports glob patterns
|
||||
const globPattern = this.testPath;
|
||||
const matchedFiles = await plugins.smartfile.fs.listFileTree(this.cwd, globPattern);
|
||||
|
||||
this.testfileArray = await Promise.all(
|
||||
matchedFiles.map(async (filePath) => {
|
||||
const absolutePath = plugins.path.isAbsolute(filePath)
|
||||
? filePath
|
||||
: plugins.path.join(this.cwd, filePath);
|
||||
return await plugins.smartfile.SmartFile.fromFilePath(absolutePath);
|
||||
})
|
||||
);
|
||||
break;
|
||||
|
||||
case TestExecutionMode.DIRECTORY:
|
||||
// Directory mode - now recursive with ** pattern
|
||||
const dirPath = plugins.path.join(this.cwd, this.testPath);
|
||||
const testPattern = '**/test*.ts';
|
||||
|
||||
const testFiles = await plugins.smartfile.fs.listFileTree(dirPath, testPattern);
|
||||
|
||||
this.testfileArray = await Promise.all(
|
||||
testFiles.map(async (filePath) => {
|
||||
const absolutePath = plugins.path.isAbsolute(filePath)
|
||||
? filePath
|
||||
: plugins.path.join(dirPath, filePath);
|
||||
return await plugins.smartfile.SmartFile.fromFilePath(absolutePath);
|
||||
})
|
||||
);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
async getTestFilePathArray() {
|
||||
await this._init();
|
||||
const testFilePaths: string[] = [];
|
||||
for (const testFile of this.testfileArray) {
|
||||
const filePath = plugins.path.join(this.relativePath, testFile.path);
|
||||
testFilePaths.push(filePath);
|
||||
// Use the path directly from the SmartFile
|
||||
testFilePaths.push(testFile.path);
|
||||
}
|
||||
return testFilePaths;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get test files organized by parallel execution groups
|
||||
* @returns An object with grouped tests
|
||||
*/
|
||||
async getTestFileGroups(): Promise<{
|
||||
serial: string[];
|
||||
parallelGroups: { [groupName: string]: string[] };
|
||||
}> {
|
||||
await this._init();
|
||||
|
||||
const result = {
|
||||
serial: [] as string[],
|
||||
parallelGroups: {} as { [groupName: string]: string[] }
|
||||
};
|
||||
|
||||
for (const testFile of this.testfileArray) {
|
||||
const filePath = testFile.path;
|
||||
const fileName = plugins.path.basename(filePath);
|
||||
|
||||
// Check if file has parallel group pattern
|
||||
const parallelMatch = fileName.match(/\.para__(\d+)\./);
|
||||
|
||||
if (parallelMatch) {
|
||||
const groupNumber = parallelMatch[1];
|
||||
const groupName = `para__${groupNumber}`;
|
||||
|
||||
if (!result.parallelGroups[groupName]) {
|
||||
result.parallelGroups[groupName] = [];
|
||||
}
|
||||
result.parallelGroups[groupName].push(filePath);
|
||||
} else {
|
||||
// File runs serially
|
||||
result.serial.push(filePath);
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
}
|
||||
|
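As an illustration (not part of the diff), here is how the `.para__<n>.` filename convention used by `getTestFileGroups()` would group a handful of hypothetical test files:

```typescript
// Hypothetical file names; grouping follows the /\.para__(\d+)\./ pattern above.
const grouped = {
  serial: ['test/test.db.ts'],
  parallelGroups: {
    para__1: ['test/test.api.para__1.ts', 'test/test.auth.para__1.ts'],
    para__2: ['test/test.e2e.para__2.ts'],
  },
};
// run() executes the serial files first, then each para__N group via
// Promise.all, with the groups themselves running one after another.
```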
@ -1,15 +1,23 @@
|
||||
import * as plugins from './tstest.plugins.js';
|
||||
import * as paths from './tstest.paths.js';
|
||||
import * as logPrefixes from './tstest.logprefixes.js';
|
||||
|
||||
import { coloredString as cs } from '@push.rocks/consolecolor';
|
||||
|
||||
import { TestDirectory } from './tstest.classes.testdirectory.js';
|
||||
import { TapCombinator } from './tstest.classes.tap.combinator.js';
|
||||
import { TapParser } from './tstest.classes.tap.parser.js';
|
||||
import { TestExecutionMode } from './index.js';
|
||||
import { TsTestLogger } from './tstest.logging.js';
|
||||
import type { LogOptions } from './tstest.logging.js';
|
||||
|
||||
export class TsTest {
|
||||
public testDir: TestDirectory;
|
||||
public executionMode: TestExecutionMode;
|
||||
public logger: TsTestLogger;
|
||||
public filterTags: string[];
|
||||
public startFromFile: number | null;
|
||||
public stopAtFile: number | null;
|
||||
public timeoutSeconds: number | null;
|
||||
|
||||
public smartshellInstance = new plugins.smartshell.Smartshell({
|
||||
executor: 'bash',
|
||||
@ -20,78 +28,296 @@ export class TsTest {
|
||||
|
||||
public tsbundleInstance = new plugins.tsbundle.TsBundle();
|
||||
|
||||
constructor(cwdArg: string, relativePathToTestDirectory: string) {
|
||||
this.testDir = new TestDirectory(cwdArg, relativePathToTestDirectory);
|
||||
constructor(cwdArg: string, testPathArg: string, executionModeArg: TestExecutionMode, logOptions: LogOptions = {}, tags: string[] = [], startFromFile: number | null = null, stopAtFile: number | null = null, timeoutSeconds: number | null = null) {
|
||||
this.executionMode = executionModeArg;
|
||||
this.testDir = new TestDirectory(cwdArg, testPathArg, executionModeArg);
|
||||
this.logger = new TsTestLogger(logOptions);
|
||||
this.filterTags = tags;
|
||||
this.startFromFile = startFromFile;
|
||||
this.stopAtFile = stopAtFile;
|
||||
this.timeoutSeconds = timeoutSeconds;
|
||||
}
|
||||
|
||||
async run() {
|
||||
const fileNamesToRun: string[] = await this.testDir.getTestFilePathArray();
|
||||
console.log(cs(plugins.figures.hamburger.repeat(80), 'cyan'));
|
||||
console.log('');
|
||||
console.log(`${logPrefixes.TsTestPrefix} FOUND ${fileNamesToRun.length} TESTFILE(S):`);
|
||||
for (const fileName of fileNamesToRun) {
|
||||
console.log(`${logPrefixes.TsTestPrefix} ${cs(fileName, 'orange')}`);
|
||||
// Move previous log files if --logfile option is used
|
||||
if (this.logger.options.logFile) {
|
||||
await this.movePreviousLogFiles();
|
||||
}
|
||||
console.log('-'.repeat(48));
|
||||
console.log(''); // force new line
|
||||
|
||||
const testGroups = await this.testDir.getTestFileGroups();
|
||||
const allFiles = [...testGroups.serial, ...Object.values(testGroups.parallelGroups).flat()];
|
||||
|
||||
// Log test discovery - always show full count
|
||||
this.logger.testDiscovery(
|
||||
allFiles.length,
|
||||
this.testDir.testPath,
|
||||
this.executionMode
|
||||
);
|
||||
|
||||
const tapCombinator = new TapCombinator(); // lets create the TapCombinator
|
||||
for (const fileNameArg of fileNamesToRun) {
|
||||
switch (true) {
|
||||
case process.env.CI && fileNameArg.includes('.nonci.'):
|
||||
console.log('!!!!!!!!!!!');
|
||||
console.log(
|
||||
`not running testfile ${fileNameArg}, since we are CI and file name includes '.nonci.' tag`
|
||||
);
|
||||
console.log('!!!!!!!!!!!');
|
||||
break;
|
||||
case fileNameArg.endsWith('.browser.ts') || fileNameArg.endsWith('.browser.nonci.ts'):
|
||||
const tapParserBrowser = await this.runInChrome(fileNameArg);
|
||||
tapCombinator.addTapParser(tapParserBrowser);
|
||||
break;
|
||||
case fileNameArg.endsWith('.both.ts') || fileNameArg.endsWith('.both.nonci.ts'):
|
||||
console.log('>>>>>>> TEST PART 1: chrome');
|
||||
const tapParserBothBrowser = await this.runInChrome(fileNameArg);
|
||||
tapCombinator.addTapParser(tapParserBothBrowser);
|
||||
console.log(cs(`|`.repeat(16), 'cyan'));
|
||||
console.log(''); // force new line
|
||||
console.log('>>>>>>> TEST PART 2: node');
|
||||
const tapParserBothNode = await this.runInNode(fileNameArg);
|
||||
tapCombinator.addTapParser(tapParserBothNode);
|
||||
break;
|
||||
default:
|
||||
const tapParserNode = await this.runInNode(fileNameArg);
|
||||
tapCombinator.addTapParser(tapParserNode);
|
||||
break;
|
||||
const tapCombinator = new TapCombinator(this.logger); // lets create the TapCombinator
|
||||
let fileIndex = 0;
|
||||
|
||||
// Execute serial tests first
|
||||
for (const fileNameArg of testGroups.serial) {
|
||||
fileIndex++;
|
||||
await this.runSingleTestOrSkip(fileNameArg, fileIndex, allFiles.length, tapCombinator);
|
||||
}
|
||||
|
||||
// Execute parallel groups sequentially
|
||||
const groupNames = Object.keys(testGroups.parallelGroups).sort();
|
||||
for (const groupName of groupNames) {
|
||||
const groupFiles = testGroups.parallelGroups[groupName];
|
||||
|
||||
if (groupFiles.length > 0) {
|
||||
this.logger.sectionStart(`Parallel Group: ${groupName}`);
|
||||
|
||||
// Run all tests in this group in parallel
|
||||
const parallelPromises = groupFiles.map(async (fileNameArg) => {
|
||||
fileIndex++;
|
||||
return this.runSingleTestOrSkip(fileNameArg, fileIndex, allFiles.length, tapCombinator);
|
||||
});
|
||||
|
||||
await Promise.all(parallelPromises);
|
||||
this.logger.sectionEnd();
|
||||
}
|
||||
|
||||
console.log(cs(`^`.repeat(16), 'cyan'));
|
||||
console.log(''); // force new line
|
||||
}
|
||||
|
||||
tapCombinator.evaluate();
|
||||
}
|
||||
|
||||
public async runWatch(ignorePatterns: string[] = []) {
|
||||
const smartchokInstance = new plugins.smartchok.Smartchok([this.testDir.cwd]);
|
||||
|
||||
console.clear();
|
||||
this.logger.watchModeStart();
|
||||
|
||||
// Initial run
|
||||
await this.run();
|
||||
|
||||
// Set up file watcher
|
||||
const fileChanges = new Map<string, NodeJS.Timeout>();
|
||||
const debounceTime = 300; // 300ms debounce
|
||||
|
||||
const runTestsAfterChange = async () => {
|
||||
console.clear();
|
||||
const changedFiles = Array.from(fileChanges.keys());
|
||||
fileChanges.clear();
|
||||
|
||||
this.logger.watchModeRerun(changedFiles);
|
||||
await this.run();
|
||||
this.logger.watchModeWaiting();
|
||||
};
|
||||
|
||||
// Start watching before subscribing to events
|
||||
await smartchokInstance.start();
|
||||
|
||||
// Subscribe to file change events
|
||||
const changeObservable = await smartchokInstance.getObservableFor('change');
|
||||
const addObservable = await smartchokInstance.getObservableFor('add');
|
||||
const unlinkObservable = await smartchokInstance.getObservableFor('unlink');
|
||||
|
||||
const handleFileChange = (changedPath: string) => {
|
||||
// Skip if path matches ignore patterns
|
||||
if (ignorePatterns.some(pattern => changedPath.includes(pattern))) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Clear existing timeout for this file if any
|
||||
if (fileChanges.has(changedPath)) {
|
||||
clearTimeout(fileChanges.get(changedPath));
|
||||
}
|
||||
|
||||
// Set new timeout for this file
|
||||
const timeout = setTimeout(() => {
|
||||
fileChanges.delete(changedPath);
|
||||
if (fileChanges.size === 0) {
|
||||
runTestsAfterChange();
|
||||
}
|
||||
}, debounceTime);
|
||||
|
||||
fileChanges.set(changedPath, timeout);
|
||||
};
|
||||
|
||||
// Subscribe to all relevant events
|
||||
changeObservable.subscribe(([path]) => handleFileChange(path));
|
||||
addObservable.subscribe(([path]) => handleFileChange(path));
|
||||
unlinkObservable.subscribe(([path]) => handleFileChange(path));
|
||||
|
||||
this.logger.watchModeWaiting();
|
||||
|
||||
// Handle Ctrl+C to exit gracefully
|
||||
process.on('SIGINT', async () => {
|
||||
this.logger.watchModeStop();
|
||||
await smartchokInstance.stop();
|
||||
process.exit(0);
|
||||
});
|
||||
|
||||
// Keep the process running
|
||||
await new Promise(() => {}); // This promise never resolves
|
||||
}
|
||||
|
||||
private async runSingleTestOrSkip(fileNameArg: string, fileIndex: number, totalFiles: number, tapCombinator: TapCombinator) {
|
||||
// Check if this file should be skipped based on range
|
||||
if (this.startFromFile !== null && fileIndex < this.startFromFile) {
|
||||
this.logger.testFileSkipped(fileNameArg, fileIndex, totalFiles, `before start range (${this.startFromFile})`);
|
||||
tapCombinator.addSkippedFile(fileNameArg);
|
||||
return;
|
||||
}
|
||||
|
||||
if (this.stopAtFile !== null && fileIndex > this.stopAtFile) {
|
||||
this.logger.testFileSkipped(fileNameArg, fileIndex, totalFiles, `after stop range (${this.stopAtFile})`);
|
||||
tapCombinator.addSkippedFile(fileNameArg);
|
||||
return;
|
||||
}
|
||||
|
||||
// File is in range, run it
|
||||
await this.runSingleTest(fileNameArg, fileIndex, totalFiles, tapCombinator);
|
||||
}
|
||||
|
||||
private async runSingleTest(fileNameArg: string, fileIndex: number, totalFiles: number, tapCombinator: TapCombinator) {
|
||||
switch (true) {
|
||||
case process.env.CI && fileNameArg.includes('.nonci.'):
|
||||
this.logger.tapOutput(`Skipping ${fileNameArg} - marked as non-CI`);
|
||||
break;
|
||||
case fileNameArg.endsWith('.browser.ts') || fileNameArg.endsWith('.browser.nonci.ts'):
|
||||
const tapParserBrowser = await this.runInChrome(fileNameArg, fileIndex, totalFiles);
|
||||
tapCombinator.addTapParser(tapParserBrowser);
|
||||
break;
|
||||
case fileNameArg.endsWith('.both.ts') || fileNameArg.endsWith('.both.nonci.ts'):
|
||||
this.logger.sectionStart('Part 1: Chrome');
|
||||
const tapParserBothBrowser = await this.runInChrome(fileNameArg, fileIndex, totalFiles);
|
||||
tapCombinator.addTapParser(tapParserBothBrowser);
|
||||
this.logger.sectionEnd();
|
||||
|
||||
this.logger.sectionStart('Part 2: Node');
|
||||
const tapParserBothNode = await this.runInNode(fileNameArg, fileIndex, totalFiles);
|
||||
tapCombinator.addTapParser(tapParserBothNode);
|
||||
this.logger.sectionEnd();
|
||||
break;
|
||||
default:
|
||||
const tapParserNode = await this.runInNode(fileNameArg, fileIndex, totalFiles);
|
||||
tapCombinator.addTapParser(tapParserNode);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
public async runInNode(fileNameArg: string): Promise<TapParser> {
|
||||
console.log(`${cs('=> ', 'blue')} Running ${cs(fileNameArg, 'orange')} in node.js runtime.`);
|
||||
console.log(`${cs(`= `.repeat(32), 'cyan')}`);
|
||||
const tapParser = new TapParser(fileNameArg + ':node');
|
||||
public async runInNode(fileNameArg: string, index: number, total: number): Promise<TapParser> {
|
||||
this.logger.testFileStart(fileNameArg, 'node.js', index, total);
|
||||
const tapParser = new TapParser(fileNameArg + ':node', this.logger);
|
||||
|
||||
// tsrun options
|
||||
let tsrunOptions = '';
|
||||
if (process.argv.includes('--web')) {
|
||||
tsrunOptions += ' --web';
|
||||
}
|
||||
|
||||
// Set filter tags as environment variable
|
||||
if (this.filterTags.length > 0) {
|
||||
process.env.TSTEST_FILTER_TAGS = this.filterTags.join(',');
|
||||
}
|
||||
|
||||
const execResultStreaming = await this.smartshellInstance.execStreamingSilent(
|
||||
`tsrun ${fileNameArg}${tsrunOptions}`
|
||||
);
|
||||
await tapParser.handleTapProcess(execResultStreaming.childProcess);
|
||||
// Check for 00init.ts file in test directory
|
||||
const testDir = plugins.path.dirname(fileNameArg);
|
||||
const initFile = plugins.path.join(testDir, '00init.ts');
|
||||
let runCommand = `tsrun ${fileNameArg}${tsrunOptions}`;
|
||||
|
||||
const initFileExists = await plugins.smartfile.fs.fileExists(initFile);
|
||||
|
||||
// If 00init.ts exists, run it first
|
||||
if (initFileExists) {
|
||||
// Create a temporary loader file that imports both 00init.ts and the test file
|
||||
const absoluteInitFile = plugins.path.resolve(initFile);
|
||||
const absoluteTestFile = plugins.path.resolve(fileNameArg);
|
||||
const loaderContent = `
|
||||
import '${absoluteInitFile.replace(/\\/g, '/')}';
|
||||
import '${absoluteTestFile.replace(/\\/g, '/')}';
|
||||
`;
|
||||
const loaderPath = plugins.path.join(testDir, `.loader_${plugins.path.basename(fileNameArg)}`);
|
||||
await plugins.smartfile.memory.toFs(loaderContent, loaderPath);
|
||||
runCommand = `tsrun ${loaderPath}${tsrunOptions}`;
|
||||
}
|
||||
|
||||
const execResultStreaming = await this.smartshellInstance.execStreamingSilent(runCommand);
|
||||
|
||||
// If we created a loader file, clean it up after test execution
|
||||
if (initFileExists) {
|
||||
const loaderPath = plugins.path.join(testDir, `.loader_${plugins.path.basename(fileNameArg)}`);
|
||||
const cleanup = () => {
|
||||
try {
|
||||
if (plugins.smartfile.fs.fileExistsSync(loaderPath)) {
|
||||
plugins.smartfile.fs.removeSync(loaderPath);
|
||||
}
|
||||
} catch (e) {
|
||||
// Ignore cleanup errors
|
||||
}
|
||||
};
|
||||
|
||||
execResultStreaming.childProcess.on('exit', cleanup);
|
||||
execResultStreaming.childProcess.on('error', cleanup);
|
||||
}
|
||||
|
||||
// Start warning timer if no timeout was specified
|
||||
let warningTimer: NodeJS.Timeout | null = null;
|
||||
if (this.timeoutSeconds === null) {
|
||||
warningTimer = setTimeout(() => {
|
||||
console.error('');
|
||||
console.error(cs('⚠️ WARNING: Test file is running for more than 1 minute', 'orange'));
|
||||
console.error(cs(` File: ${fileNameArg}`, 'orange'));
|
||||
console.error(cs(' Consider using --timeout option to set a timeout for test files.', 'orange'));
|
||||
console.error(cs(' Example: tstest test --timeout=300 (for 5 minutes)', 'orange'));
|
||||
console.error('');
|
||||
}, 60000); // 1 minute
|
||||
}
|
||||
|
||||
// Handle timeout if specified
|
||||
if (this.timeoutSeconds !== null) {
|
||||
const timeoutMs = this.timeoutSeconds * 1000;
|
||||
let timeoutId: NodeJS.Timeout;
|
||||
|
||||
const timeoutPromise = new Promise<void>((_resolve, reject) => {
|
||||
timeoutId = setTimeout(async () => {
|
||||
// Use smartshell's terminate() to kill entire process tree
|
||||
await execResultStreaming.terminate();
|
||||
reject(new Error(`Test file timed out after ${this.timeoutSeconds} seconds`));
|
||||
}, timeoutMs);
|
||||
});
|
||||
|
||||
try {
|
||||
await Promise.race([
|
||||
tapParser.handleTapProcess(execResultStreaming.childProcess),
|
||||
timeoutPromise
|
||||
]);
|
||||
// Clear timeout if test completed successfully
|
||||
clearTimeout(timeoutId);
|
||||
} catch (error) {
|
||||
// Clear warning timer if it was set
|
||||
if (warningTimer) {
|
||||
clearTimeout(warningTimer);
|
||||
}
|
||||
// Handle timeout error
|
||||
tapParser.handleTimeout(this.timeoutSeconds);
|
||||
// Ensure entire process tree is killed if still running
|
||||
try {
|
||||
await execResultStreaming.kill(); // This kills the entire process tree with SIGKILL
|
||||
} catch (killError) {
|
||||
// Process tree might already be dead
|
||||
}
|
||||
await tapParser.evaluateFinalResult();
|
||||
}
|
||||
} else {
|
||||
await tapParser.handleTapProcess(execResultStreaming.childProcess);
|
||||
}
|
||||
|
||||
// Clear warning timer if it was set
|
||||
if (warningTimer) {
|
||||
clearTimeout(warningTimer);
|
||||
}
|
||||
|
||||
return tapParser;
|
||||
}
|
||||
|
||||
public async runInChrome(fileNameArg: string): Promise<TapParser> {
|
||||
console.log(`${cs('=> ', 'blue')} Running ${cs(fileNameArg, 'orange')} in chromium runtime.`);
|
||||
console.log(`${cs(`= `.repeat(32), 'cyan')}`);
|
||||
public async runInChrome(fileNameArg: string, index: number, total: number): Promise<TapParser> {
|
||||
this.logger.testFileStart(fileNameArg, 'chromium', index, total);
|
||||
|
||||
// lets get all our paths sorted
|
||||
const tsbundleCacheDirPath = plugins.path.join(paths.cwd, './.nogit/tstest_cache');
|
||||
@ -111,7 +337,7 @@ export class TsTest {
|
||||
});
|
||||
server.addRoute(
|
||||
'/test',
|
||||
new plugins.typedserver.servertools.Handler('GET', async (req, res) => {
|
||||
new plugins.typedserver.servertools.Handler('GET', async (_req, res) => {
|
||||
res.type('.html');
|
||||
res.write(`
|
||||
<html>
|
||||
@ -130,17 +356,24 @@ export class TsTest {
|
||||
await server.start();
|
||||
|
||||
// lets handle realtime comms
|
||||
const tapParser = new TapParser(fileNameArg + ':chrome');
|
||||
const tapParser = new TapParser(fileNameArg + ':chrome', this.logger);
|
||||
const wss = new plugins.ws.WebSocketServer({ port: 8080 });
|
||||
wss.on('connection', (ws) => {
|
||||
ws.on('message', (message) => {
|
||||
tapParser.handleTapLog(message.toString());
|
||||
const messageStr = message.toString();
|
||||
if (messageStr.startsWith('console:')) {
|
||||
const [, level, ...messageParts] = messageStr.split(':');
|
||||
this.logger.browserConsole(messageParts.join(':'), level);
|
||||
} else {
|
||||
tapParser.handleTapLog(messageStr);
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
// lets do the browser bit
|
||||
// lets do the browser bit with timeout handling
|
||||
await this.smartbrowserInstance.start();
|
||||
const evaluation = await this.smartbrowserInstance.evaluateOnPage(
|
||||
|
||||
const evaluatePromise = this.smartbrowserInstance.evaluateOnPage(
|
||||
`http://localhost:3007/test?bundleName=${bundleFileName}`,
|
||||
async () => {
|
||||
// lets enable real time comms
|
||||
@ -153,12 +386,12 @@ export class TsTest {
|
||||
const originalError = console.error;
|
||||
|
||||
// Override console methods to capture the logs
|
||||
console.log = (...args) => {
|
||||
console.log = (...args: any[]) => {
|
||||
logStore.push(args.join(' '));
|
||||
ws.send(args.join(' '));
|
||||
originalLog(...args);
|
||||
};
|
||||
console.error = (...args) => {
|
||||
console.error = (...args: any[]) => {
|
||||
logStore.push(args.join(' '));
|
||||
ws.send(args.join(' '));
|
||||
originalError(...args);
|
||||
@ -170,29 +403,154 @@ export class TsTest {
|
||||
try {
|
||||
// Dynamically import the test module
|
||||
const testModule = await import(`/${bundleName}`);
|
||||
if (testModule && testModule.runTest) {
|
||||
if (testModule && testModule.default && testModule.default instanceof Promise) {
|
||||
// Execute the exported test function
|
||||
await testModule.runTest();
|
||||
await testModule.default;
|
||||
} else if (testModule && testModule.default && typeof testModule.default.then === 'function') {
|
||||
console.log('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!');
|
||||
console.log('Test module default export is just promiselike: Something might be messing with your Promise implementation.');
|
||||
console.log('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!');
|
||||
await testModule.default;
|
||||
} else if (globalThis.tapPromise && typeof globalThis.tapPromise.then === 'function') {
|
||||
console.log('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!');
|
||||
console.log('Using globalThis.tapPromise');
|
||||
console.log('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!');
|
||||
await testModule.default;
|
||||
} else {
|
||||
originalError('Test module does not export runTest function.');
|
||||
console.error('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!');
|
||||
console.error('Test module does not export a default promise.');
|
||||
console.error('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!');
|
||||
console.log(`We got: ${JSON.stringify(testModule)}`);
|
||||
|
||||
}
|
||||
} catch (err) {
|
||||
originalError(err);
|
||||
console.error(err);
|
||||
}
|
||||
|
||||
return logStore.join('\n');
|
||||
}
|
||||
);
|
||||
await this.smartbrowserInstance.stop();
|
||||
await server.stop();
|
||||
wss.close();
|
||||
|
||||
// Start warning timer if no timeout was specified
|
||||
let warningTimer: NodeJS.Timeout | null = null;
|
||||
if (this.timeoutSeconds === null) {
|
||||
warningTimer = setTimeout(() => {
|
||||
console.error('');
|
||||
console.error(cs('⚠️ WARNING: Test file is running for more than 1 minute', 'orange'));
|
||||
console.error(cs(` File: ${fileNameArg}`, 'orange'));
|
||||
console.error(cs(' Consider using --timeout option to set a timeout for test files.', 'orange'));
|
||||
console.error(cs(' Example: tstest test --timeout=300 (for 5 minutes)', 'orange'));
|
||||
console.error('');
|
||||
}, 60000); // 1 minute
|
||||
}
|
||||
|
||||
// Handle timeout if specified
|
||||
if (this.timeoutSeconds !== null) {
|
||||
const timeoutMs = this.timeoutSeconds * 1000;
|
||||
let timeoutId: NodeJS.Timeout;
|
||||
|
||||
const timeoutPromise = new Promise<void>((_resolve, reject) => {
|
||||
timeoutId = setTimeout(() => {
|
||||
reject(new Error(`Test file timed out after ${this.timeoutSeconds} seconds`));
|
||||
}, timeoutMs);
|
||||
});
|
||||
|
||||
try {
|
||||
await Promise.race([
|
||||
evaluatePromise,
|
||||
timeoutPromise
|
||||
]);
|
||||
// Clear timeout if test completed successfully
|
||||
clearTimeout(timeoutId);
|
||||
} catch (error) {
|
||||
// Clear warning timer if it was set
|
||||
if (warningTimer) {
|
||||
clearTimeout(warningTimer);
|
||||
}
|
||||
// Handle timeout error
|
||||
tapParser.handleTimeout(this.timeoutSeconds);
|
||||
}
|
||||
} else {
|
||||
await evaluatePromise;
|
||||
}
|
||||
|
||||
// Clear warning timer if it was set
|
||||
if (warningTimer) {
|
||||
clearTimeout(warningTimer);
|
||||
}
|
||||
|
||||
// Always clean up resources, even on timeout
|
||||
try {
|
||||
await this.smartbrowserInstance.stop();
|
||||
} catch (error) {
|
||||
// Browser might already be stopped
|
||||
}
|
||||
|
||||
try {
|
||||
await server.stop();
|
||||
} catch (error) {
|
||||
// Server might already be stopped
|
||||
}
|
||||
|
||||
try {
|
||||
wss.close();
|
||||
} catch (error) {
|
||||
// WebSocket server might already be closed
|
||||
}
|
||||
|
||||
console.log(
|
||||
`${cs('=> ', 'blue')} Stopped ${cs(fileNameArg, 'orange')} chromium instance and server.`
|
||||
);
|
||||
// lets create the tap parser
|
||||
// Always evaluate final result (handleTimeout just sets up the test state)
|
||||
await tapParser.evaluateFinalResult();
|
||||
return tapParser;
|
||||
}
|
||||
|
||||
public async runInDeno() {}
|
||||
|
||||
private async movePreviousLogFiles() {
|
||||
const logDir = plugins.path.join('.nogit', 'testlogs');
|
||||
const previousDir = plugins.path.join('.nogit', 'testlogs', 'previous');
|
||||
const errDir = plugins.path.join('.nogit', 'testlogs', '00err');
|
||||
const diffDir = plugins.path.join('.nogit', 'testlogs', '00diff');
|
||||
|
||||
try {
|
||||
// Delete 00err and 00diff directories if they exist
|
||||
if (plugins.smartfile.fs.isDirectorySync(errDir)) {
|
||||
plugins.smartfile.fs.removeSync(errDir);
|
||||
}
|
||||
if (plugins.smartfile.fs.isDirectorySync(diffDir)) {
|
||||
plugins.smartfile.fs.removeSync(diffDir);
|
||||
}
|
||||
|
||||
// Get all .log files in log directory (not in subdirectories)
|
||||
const files = await plugins.smartfile.fs.listFileTree(logDir, '*.log');
|
||||
const logFiles = files.filter((file: string) => !file.includes('/'));
|
||||
|
||||
if (logFiles.length === 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Ensure previous directory exists
|
||||
await plugins.smartfile.fs.ensureDir(previousDir);
|
||||
|
||||
// Move each log file to previous directory
|
||||
for (const file of logFiles) {
|
||||
const filename = plugins.path.basename(file);
|
||||
const sourcePath = plugins.path.join(logDir, filename);
|
||||
const destPath = plugins.path.join(previousDir, filename);
|
||||
|
||||
try {
|
||||
// Copy file to new location and remove original
|
||||
await plugins.smartfile.fs.copy(sourcePath, destPath);
|
||||
await plugins.smartfile.fs.remove(sourcePath);
|
||||
} catch (error) {
|
||||
// Silently continue if a file can't be moved
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
// Directory might not exist, which is fine
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
580
ts/tstest.logging.ts
Normal file
@ -0,0 +1,580 @@
|
||||
import { coloredString as cs } from '@push.rocks/consolecolor';
|
||||
import * as plugins from './tstest.plugins.js';
|
||||
import * as fs from 'fs';
|
||||
import * as path from 'path';
|
||||
|
||||
export interface LogOptions {
|
||||
quiet?: boolean;
|
||||
verbose?: boolean;
|
||||
noColor?: boolean;
|
||||
json?: boolean;
|
||||
logFile?: boolean;
|
||||
}
|
||||
|
||||
export interface TestFileResult {
|
||||
file: string;
|
||||
passed: number;
|
||||
failed: number;
|
||||
total: number;
|
||||
duration: number;
|
||||
tests: Array<{
|
||||
name: string;
|
||||
passed: boolean;
|
||||
duration: number;
|
||||
error?: string;
|
||||
}>;
|
||||
}
|
||||
|
||||
export interface TestSummary {
|
||||
totalFiles: number;
|
||||
totalTests: number;
|
||||
totalPassed: number;
|
||||
totalFailed: number;
|
||||
totalSkipped: number;
|
||||
totalDuration: number;
|
||||
fileResults: TestFileResult[];
|
||||
skippedFiles: string[];
|
||||
}
|
||||
|
||||
export class TsTestLogger {
|
||||
public readonly options: LogOptions;
|
||||
private startTime: number;
|
||||
private fileResults: TestFileResult[] = [];
|
||||
private currentFileResult: TestFileResult | null = null;
|
||||
private currentTestLogFile: string | null = null;
|
||||
private currentTestLogs: string[] = []; // Buffer for current test logs
|
||||
private currentTestFailed: boolean = false;
|
||||
|
||||
constructor(options: LogOptions = {}) {
|
||||
this.options = options;
|
||||
this.startTime = Date.now();
|
||||
}
|
||||
|
||||
private format(text: string, color?: string): string {
|
||||
if (this.options.noColor || !color) {
|
||||
return text;
|
||||
}
|
||||
return cs(text, color as any);
|
||||
}
|
||||
|
||||
private log(message: string) {
|
||||
if (this.options.json) {
|
||||
// For JSON mode, skip console output
|
||||
// JSON output is handled by logJson method
|
||||
return;
|
||||
}
|
||||
|
||||
console.log(message);
|
||||
|
||||
// Log to the current test file log if we're in a test and --logfile is specified
|
||||
if (this.currentTestLogFile) {
|
||||
this.logToTestFile(message);
|
||||
}
|
||||
}
|
||||
|
||||
private logToFile(message: string) {
|
||||
// This method is no longer used since we use logToTestFile for individual test logs
|
||||
// Keeping it for potential future use with a global log file
|
||||
}
|
||||
|
||||
private logToTestFile(message: string) {
|
||||
try {
|
||||
// Remove ANSI color codes for file logging
|
||||
const cleanMessage = message.replace(/\u001b\[[0-9;]*m/g, '');
|
||||
|
||||
// Append to test log file
|
||||
fs.appendFileSync(this.currentTestLogFile, cleanMessage + '\n');
|
||||
} catch (error) {
|
||||
// Silently fail to avoid disrupting the test run
|
||||
}
|
||||
}
|
||||
|
||||
private logJson(data: any) {
|
||||
const jsonString = JSON.stringify(data);
|
||||
console.log(jsonString);
|
||||
|
||||
// Also log to test file if --logfile is specified
|
||||
if (this.currentTestLogFile) {
|
||||
this.logToTestFile(jsonString);
|
||||
}
|
||||
}
|
||||
|
||||
// Section separators
|
||||
sectionStart(title: string) {
|
||||
if (this.options.quiet || this.options.json) return;
|
||||
this.log(this.format(`\n━━━ ${title} ━━━`, 'cyan'));
|
||||
}
|
||||
|
||||
sectionEnd() {
|
||||
if (this.options.quiet || this.options.json) return;
|
||||
this.log(this.format('─'.repeat(50), 'dim'));
|
||||
}
|
||||
|
||||
// Progress indication
|
||||
progress(current: number, total: number, message: string) {
|
||||
if (this.options.quiet || this.options.json) return;
|
||||
const percentage = Math.round((current / total) * 100);
|
||||
const filled = Math.round((current / total) * 20);
|
||||
const empty = 20 - filled;
|
||||
|
||||
this.log(this.format(`\n📊 Progress: ${current}/${total} (${percentage}%)`, 'cyan'));
|
||||
this.log(this.format(`[${'█'.repeat(filled)}${'░'.repeat(empty)}] ${message}`, 'dim'));
|
||||
}
|
||||
|
||||
// Test discovery
|
||||
testDiscovery(count: number, pattern: string, executionMode: string) {
|
||||
if (this.options.json) {
|
||||
this.logJson({ event: 'discovery', count, pattern, executionMode });
|
||||
return;
|
||||
}
|
||||
|
||||
if (this.options.quiet) {
|
||||
this.log(`Found ${count} tests`);
|
||||
} else {
|
||||
this.log(this.format(`\n🔍 Test Discovery`, 'bold'));
|
||||
this.log(this.format(` Mode: ${executionMode}`, 'dim'));
|
||||
this.log(this.format(` Pattern: ${pattern}`, 'dim'));
|
||||
this.log(this.format(` Found: ${count} test file(s)`, 'green'));
|
||||
}
|
||||
}
|
||||
|
||||
// Test execution
|
||||
testFileStart(filename: string, runtime: string, index: number, total: number) {
|
||||
this.currentFileResult = {
|
||||
file: filename,
|
||||
passed: 0,
|
||||
failed: 0,
|
||||
total: 0,
|
||||
duration: 0,
|
||||
tests: []
|
||||
};
|
||||
|
||||
// Reset test-specific state
|
||||
this.currentTestLogs = [];
|
||||
this.currentTestFailed = false;
|
||||
|
||||
// Only set up test log file if --logfile option is specified
|
||||
if (this.options.logFile) {
|
||||
// Create a safe filename that preserves directory structure
|
||||
// Convert relative path to a flat filename by replacing separators with __
|
||||
const relativeFilename = path.relative(process.cwd(), filename);
|
||||
const safeFilename = relativeFilename
|
||||
.replace(/\\/g, '/') // Normalize Windows paths
|
||||
.replace(/\//g, '__') // Replace path separators with double underscores
|
||||
.replace(/\.ts$/, '') // Remove .ts extension
|
||||
.replace(/^\.\.__|^\.__|^__/, ''); // Clean up leading separators from relative paths
|
||||
|
||||
this.currentTestLogFile = path.join('.nogit', 'testlogs', `${safeFilename}.log`);
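// Illustrative: following the replacement rules above, a test file at 'test/tap/test.logger.ts'
// ends up logging to '.nogit/testlogs/test__tap__test.logger.log'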
|
||||
|
||||
// Ensure the directory exists
|
||||
const logDir = path.dirname(this.currentTestLogFile);
|
||||
if (!fs.existsSync(logDir)) {
|
||||
fs.mkdirSync(logDir, { recursive: true });
|
||||
}
|
||||
|
||||
// Clear the log file for this test
|
||||
fs.writeFileSync(this.currentTestLogFile, '');
|
||||
}
|
||||
|
||||
if (this.options.json) {
|
||||
this.logJson({ event: 'fileStart', filename, runtime, index, total });
|
||||
return;
|
||||
}
|
||||
|
||||
if (this.options.quiet) return;
|
||||
|
||||
this.log(this.format(`\n▶️ ${filename} (${index}/${total})`, 'blue'));
|
||||
this.log(this.format(` Runtime: ${runtime}`, 'dim'));
|
||||
}
|
||||
|
||||
testResult(testName: string, passed: boolean, duration: number, error?: string) {
|
||||
if (this.currentFileResult) {
|
||||
this.currentFileResult.tests.push({ name: testName, passed, duration, error });
|
||||
this.currentFileResult.total++;
|
||||
if (passed) {
|
||||
this.currentFileResult.passed++;
|
||||
} else {
|
||||
this.currentFileResult.failed++;
|
||||
this.currentTestFailed = true;
|
||||
}
|
||||
this.currentFileResult.duration += duration;
|
||||
}
|
||||
|
||||
if (this.options.json) {
|
||||
this.logJson({ event: 'testResult', testName, passed, duration, error });
|
||||
return;
|
||||
}
|
||||
|
||||
// If test failed and we have buffered logs, show them now
|
||||
if (!passed && this.currentTestLogs.length > 0 && !this.options.verbose) {
|
||||
this.log(this.format(' 📋 Console output from failed test:', 'yellow'));
|
||||
this.currentTestLogs.forEach(logMessage => {
|
||||
this.log(this.format(` ${logMessage}`, 'dim'));
|
||||
});
|
||||
}
|
||||
|
||||
const icon = passed ? '✅' : '❌';
|
||||
const color = passed ? 'green' : 'red';
|
||||
|
||||
if (this.options.quiet) {
|
||||
this.log(`${icon} ${testName}`);
|
||||
} else {
|
||||
this.log(this.format(` ${icon} ${testName} (${duration}ms)`, color));
|
||||
if (error && !passed) {
|
||||
this.log(this.format(` ${error}`, 'red'));
|
||||
}
|
||||
}
|
||||
|
||||
// Clear logs after each test
|
||||
this.currentTestLogs = [];
|
||||
}
|
||||
|
||||
testFileEnd(passed: number, failed: number, duration: number) {
|
||||
if (this.currentFileResult) {
|
||||
this.fileResults.push(this.currentFileResult);
|
||||
this.currentFileResult = null;
|
||||
}
|
||||
|
||||
if (this.options.json) {
|
||||
this.logJson({ event: 'fileEnd', passed, failed, duration });
|
||||
return;
|
||||
}
|
||||
|
||||
if (!this.options.quiet) {
|
||||
const total = passed + failed;
|
||||
if (failed === 0) {
|
||||
this.log(this.format(` Summary: ${passed}/${total} PASSED`, 'green'));
|
||||
} else {
|
||||
this.log(this.format(` Summary: ${passed} passed, ${failed} failed of ${total} tests`, 'red'));
|
||||
}
|
||||
}
|
||||
|
||||
// If using --logfile, handle error copy and diff detection
|
||||
if (this.options.logFile && this.currentTestLogFile) {
|
||||
try {
|
||||
const logContent = fs.readFileSync(this.currentTestLogFile, 'utf-8');
|
||||
const logDir = path.dirname(this.currentTestLogFile);
|
||||
const logBasename = path.basename(this.currentTestLogFile);
|
||||
|
||||
// Create error copy if there were failures
|
||||
if (failed > 0) {
|
||||
const errorDir = path.join(logDir, '00err');
|
||||
if (!fs.existsSync(errorDir)) {
|
||||
fs.mkdirSync(errorDir, { recursive: true });
|
||||
}
|
||||
const errorLogPath = path.join(errorDir, logBasename);
|
||||
fs.writeFileSync(errorLogPath, logContent);
|
||||
}
|
||||
|
||||
// Check for previous version and create diff if changed
|
||||
const previousLogPath = path.join(logDir, 'previous', logBasename);
|
||||
if (fs.existsSync(previousLogPath)) {
|
||||
const previousContent = fs.readFileSync(previousLogPath, 'utf-8');
|
||||
|
||||
// Simple check if content differs
|
||||
if (previousContent !== logContent) {
|
||||
const diffDir = path.join(logDir, '00diff');
|
||||
if (!fs.existsSync(diffDir)) {
|
||||
fs.mkdirSync(diffDir, { recursive: true });
|
||||
}
|
||||
const diffLogPath = path.join(diffDir, logBasename);
|
||||
const diffContent = this.createDiff(previousContent, logContent, logBasename);
|
||||
fs.writeFileSync(diffLogPath, diffContent);
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
// Silently fail to avoid disrupting the test run
|
||||
}
|
||||
}
|
||||
|
||||
// Clear the current test log file reference only if using --logfile
|
||||
if (this.options.logFile) {
|
||||
this.currentTestLogFile = null;
|
||||
}
|
||||
}
|
||||
|
||||
// TAP output forwarding (for TAP protocol messages)
|
||||
tapOutput(message: string, _isError: boolean = false) {
|
||||
if (this.options.json) return;
|
||||
|
||||
// Never show raw TAP protocol messages in console
|
||||
// They are already processed by TapParser and shown in our format
|
||||
|
||||
// Always log to test file if --logfile is specified
|
||||
if (this.currentTestLogFile) {
|
||||
this.logToTestFile(` ${message}`);
|
||||
}
|
||||
}
|
||||
|
||||
// Console output from test files (non-TAP output)
|
||||
testConsoleOutput(message: string) {
|
||||
if (this.options.json) return;
|
||||
|
||||
// In verbose mode, show console output immediately
|
||||
if (this.options.verbose) {
|
||||
this.log(this.format(` ${message}`, 'dim'));
|
||||
} else {
|
||||
// In non-verbose mode, buffer the logs
|
||||
this.currentTestLogs.push(message);
|
||||
}
|
||||
|
||||
// Always log to test file if --logfile is specified
|
||||
if (this.currentTestLogFile) {
|
||||
this.logToTestFile(` ${message}`);
|
||||
}
|
||||
}
|
||||
|
||||
// Skipped test file
|
||||
testFileSkipped(filename: string, index: number, total: number, reason: string) {
|
||||
if (this.options.json) {
|
||||
this.logJson({ event: 'fileSkipped', filename, index, total, reason });
|
||||
return;
|
||||
}
|
||||
|
||||
if (this.options.quiet) return;
|
||||
|
||||
this.log(this.format(`\n⏭️ ${filename} (${index}/${total})`, 'yellow'));
|
||||
this.log(this.format(` Skipped: ${reason}`, 'dim'));
|
||||
}
|
||||
|
||||
// Browser console
|
||||
browserConsole(message: string, level: string = 'log') {
|
||||
if (this.options.json) {
|
||||
this.logJson({ event: 'browserConsole', message, level });
|
||||
return;
|
||||
}
|
||||
|
||||
if (!this.options.quiet) {
|
||||
const prefix = level === 'error' ? '🌐❌' : '🌐';
|
||||
const color = level === 'error' ? 'red' : 'magenta';
|
||||
this.log(this.format(` ${prefix} ${message}`, color));
|
||||
}
|
||||
}
|
||||
|
||||
// Test error details display
|
||||
testErrorDetails(errorMessage: string) {
|
||||
if (this.options.json) {
|
||||
this.logJson({ event: 'testError', error: errorMessage });
|
||||
return;
|
||||
}
|
||||
|
||||
if (!this.options.quiet) {
|
||||
this.log(this.format(' Error details:', 'red'));
|
||||
errorMessage.split('\n').forEach(line => {
|
||||
this.log(this.format(` ${line}`, 'red'));
|
||||
});
|
||||
}
|
||||
|
||||
// Always log to test file if --logfile is specified
|
||||
if (this.currentTestLogFile) {
|
||||
this.logToTestFile(` Error: ${errorMessage}`);
|
||||
}
|
||||
}
|
||||
|
||||
// Final summary
|
||||
summary(skippedFiles: string[] = []) {
|
||||
const totalDuration = Date.now() - this.startTime;
|
||||
const summary: TestSummary = {
|
||||
totalFiles: this.fileResults.length + skippedFiles.length,
|
||||
totalTests: this.fileResults.reduce((sum, r) => sum + r.total, 0),
|
||||
totalPassed: this.fileResults.reduce((sum, r) => sum + r.passed, 0),
|
||||
totalFailed: this.fileResults.reduce((sum, r) => sum + r.failed, 0),
|
||||
totalSkipped: skippedFiles.length,
|
||||
totalDuration,
|
||||
fileResults: this.fileResults,
|
||||
skippedFiles
|
||||
};
|
||||
|
||||
if (this.options.json) {
|
||||
this.logJson({ event: 'summary', summary });
|
||||
return;
|
||||
}
|
||||
|
||||
if (this.options.quiet) {
|
||||
const status = summary.totalFailed === 0 ? 'PASSED' : 'FAILED';
|
||||
if (summary.totalFailed === 0) {
|
||||
this.log(`\nSummary: ${summary.totalPassed}/${summary.totalTests} | ${totalDuration}ms | ${status}`);
|
||||
} else {
|
||||
this.log(`\nSummary: ${summary.totalPassed} passed, ${summary.totalFailed} failed of ${summary.totalTests} tests | ${totalDuration}ms | ${status}`);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
// Detailed summary
|
||||
this.log(this.format('\n📊 Test Summary', 'bold'));
|
||||
this.log(this.format('┌────────────────────────────────┐', 'dim'));
|
||||
this.log(this.format(`│ Total Files: ${summary.totalFiles.toString().padStart(14)} │`, 'white'));
|
||||
this.log(this.format(`│ Total Tests: ${summary.totalTests.toString().padStart(14)} │`, 'white'));
|
||||
this.log(this.format(`│ Passed: ${summary.totalPassed.toString().padStart(14)} │`, 'green'));
|
||||
this.log(this.format(`│ Failed: ${summary.totalFailed.toString().padStart(14)} │`, summary.totalFailed > 0 ? 'red' : 'green'));
|
||||
if (summary.totalSkipped > 0) {
|
||||
this.log(this.format(`│ Skipped: ${summary.totalSkipped.toString().padStart(14)} │`, 'yellow'));
|
||||
}
|
||||
this.log(this.format(`│ Duration: ${totalDuration.toString().padStart(14)}ms │`, 'white'));
|
||||
this.log(this.format('└────────────────────────────────┘', 'dim'));
|
||||
|
||||
// File results
|
||||
if (summary.totalFailed > 0) {
|
||||
this.log(this.format('\n❌ Failed Tests:', 'red'));
|
||||
this.fileResults.forEach(fileResult => {
|
||||
if (fileResult.failed > 0) {
|
||||
this.log(this.format(`\n ${fileResult.file}`, 'yellow'));
|
||||
fileResult.tests.filter(t => !t.passed).forEach(test => {
|
||||
this.log(this.format(` ❌ ${test.name}`, 'red'));
|
||||
if (test.error) {
|
||||
this.log(this.format(` ${test.error}`, 'dim'));
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// Performance metrics
|
||||
if (this.options.verbose) {
|
||||
// Calculate metrics based on actual test durations
|
||||
const allTests = this.fileResults.flatMap(r => r.tests);
|
||||
const testDurations = allTests.map(t => t.duration);
|
||||
const sumOfTestDurations = testDurations.reduce((sum, d) => sum + d, 0);
|
||||
const avgTestDuration = allTests.length > 0 ? Math.round(sumOfTestDurations / allTests.length) : 0;
|
||||
|
||||
// Find slowest test (exclude 0ms durations unless all are 0)
|
||||
const nonZeroDurations = allTests.filter(t => t.duration > 0);
|
||||
const testsToSort = nonZeroDurations.length > 0 ? nonZeroDurations : allTests;
|
||||
const slowestTest = testsToSort.sort((a, b) => b.duration - a.duration)[0];
|
||||
|
||||
this.log(this.format('\n⏱️ Performance Metrics:', 'cyan'));
|
||||
this.log(this.format(` Average per test: ${avgTestDuration}ms`, 'white'));
|
||||
if (slowestTest && slowestTest.duration > 0) {
|
||||
this.log(this.format(` Slowest test: ${slowestTest.name} (${slowestTest.duration}ms)`, 'orange'));
|
||||
} else if (allTests.length > 0) {
|
||||
this.log(this.format(` All tests completed in <1ms`, 'dim'));
|
||||
}
|
||||
}
|
||||
|
||||
// Final status
|
||||
const status = summary.totalFailed === 0 ? 'ALL TESTS PASSED! 🎉' : 'SOME TESTS FAILED! ❌';
|
||||
const statusColor = summary.totalFailed === 0 ? 'green' : 'red';
|
||||
this.log(this.format(`\n${status}`, statusColor));
|
||||
}
|
||||
|
||||
// Warning display
|
||||
warning(message: string) {
|
||||
if (this.options.json) {
|
||||
this.logJson({ event: 'warning', message });
|
||||
return;
|
||||
}
|
||||
|
||||
if (this.options.quiet) {
|
||||
console.log(`WARNING: ${message}`);
|
||||
} else {
|
||||
this.log(this.format(` ⚠️ ${message}`, 'orange'));
|
||||
}
|
||||
}
|
||||
|
||||
// Error display
|
||||
error(message: string, file?: string, stack?: string) {
|
||||
if (this.options.json) {
|
||||
this.logJson({ event: 'error', message, file, stack });
|
||||
return;
|
||||
}
|
||||
|
||||
if (this.options.quiet) {
|
||||
console.error(`ERROR: ${message}`);
|
||||
} else {
|
||||
this.log(this.format('\n⚠️ Error', 'red'));
|
||||
if (file) this.log(this.format(` File: ${file}`, 'yellow'));
|
||||
this.log(this.format(` ${message}`, 'red'));
|
||||
if (stack && this.options.verbose) {
|
||||
this.log(this.format(` Stack:`, 'dim'));
|
||||
this.log(this.format(stack.split('\n').map(line => ` ${line}`).join('\n'), 'dim'));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Create a diff between two log contents
|
||||
private createDiff(previousContent: string, currentContent: string, filename: string): string {
|
||||
const previousLines = previousContent.split('\n');
|
||||
const currentLines = currentContent.split('\n');
|
||||
|
||||
let diff = `DIFF REPORT: ${filename}\n`;
|
||||
diff += `Generated: ${new Date().toISOString()}\n`;
|
||||
diff += '='.repeat(80) + '\n\n';
|
||||
|
||||
// Simple line-by-line comparison
|
||||
const maxLines = Math.max(previousLines.length, currentLines.length);
|
||||
let hasChanges = false;
|
||||
|
||||
for (let i = 0; i < maxLines; i++) {
|
||||
const prevLine = previousLines[i] || '';
|
||||
const currLine = currentLines[i] || '';
|
||||
|
||||
if (prevLine !== currLine) {
|
||||
hasChanges = true;
|
||||
if (i < previousLines.length && i >= currentLines.length) {
|
||||
// Line was removed
|
||||
diff += `- [Line ${i + 1}] ${prevLine}\n`;
|
||||
} else if (i >= previousLines.length && i < currentLines.length) {
|
||||
// Line was added
|
||||
diff += `+ [Line ${i + 1}] ${currLine}\n`;
|
||||
} else {
|
||||
// Line was modified
|
||||
diff += `- [Line ${i + 1}] ${prevLine}\n`;
|
||||
diff += `+ [Line ${i + 1}] ${currLine}\n`;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!hasChanges) {
|
||||
diff += 'No changes detected.\n';
|
||||
}
|
||||
|
||||
diff += '\n' + '='.repeat(80) + '\n';
|
||||
diff += `Previous version had ${previousLines.length} lines\n`;
|
||||
diff += `Current version has ${currentLines.length} lines\n`;
|
||||
|
||||
return diff;
|
||||
}
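// Illustrative output shape from createDiff (separator rows are 80 '=' characters; the values are examples):
// DIFF REPORT: test__tap__test.logger.log
// Generated: 2024-01-01T00:00:00.000Z
// - [Line 3] old log line
// + [Line 3] new log line
// Previous version had 42 lines
// Current version has 43 lines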
|
||||
|
||||
// Watch mode methods
|
||||
watchModeStart() {
|
||||
if (this.options.json) {
|
||||
this.logJson({ event: 'watchModeStart' });
|
||||
return;
|
||||
}
|
||||
|
||||
this.log(this.format('\n👀 Watch Mode', 'cyan'));
|
||||
this.log(this.format(' Running tests in watch mode...', 'dim'));
|
||||
this.log(this.format(' Press Ctrl+C to exit\n', 'dim'));
|
||||
}
|
||||
|
||||
watchModeWaiting() {
|
||||
if (this.options.json) {
|
||||
this.logJson({ event: 'watchModeWaiting' });
|
||||
return;
|
||||
}
|
||||
|
||||
this.log(this.format('\n Waiting for file changes...', 'dim'));
|
||||
}
|
||||
|
||||
watchModeRerun(changedFiles: string[]) {
|
||||
if (this.options.json) {
|
||||
this.logJson({ event: 'watchModeRerun', changedFiles });
|
||||
return;
|
||||
}
|
||||
|
||||
this.log(this.format('\n🔄 File changes detected:', 'cyan'));
|
||||
changedFiles.forEach(file => {
|
||||
this.log(this.format(` • ${file}`, 'yellow'));
|
||||
});
|
||||
this.log(this.format('\n Re-running tests...\n', 'dim'));
|
||||
}
|
||||
|
||||
watchModeStop() {
|
||||
if (this.options.json) {
|
||||
this.logJson({ event: 'watchModeStop' });
|
||||
return;
|
||||
}
|
||||
|
||||
this.log(this.format('\n\n👋 Stopping watch mode...', 'cyan'));
|
||||
}
|
||||
}
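// Usage sketch (illustrative; the LogOptions field types are assumed from how they are read above):
// const logger = new TsTestLogger({ verbose: true, logFile: true });
// logger.testDiscovery(2, 'test/**/*.ts', 'directory');
// logger.testFileStart('test/test.logger.ts', 'node', 1, 2);
// logger.testResult('formats output', true, 12);
// logger.testFileEnd(1, 0, 12);
// logger.summary();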
|
@ -13,16 +13,18 @@ export {
// @push.rocks scope
import * as consolecolor from '@push.rocks/consolecolor';
import * as smartbrowser from '@push.rocks/smartbrowser';
import * as smartchok from '@push.rocks/smartchok';
import * as smartdelay from '@push.rocks/smartdelay';
import * as smartfile from '@push.rocks/smartfile';
import * as smartlog from '@push.rocks/smartlog';
import * as smartpromise from '@push.rocks/smartpromise';
import * as smartshell from '@push.rocks/smartshell';
import * as tapbundle from '@push.rocks/tapbundle';
import * as tapbundle from '../dist_ts_tapbundle/index.js';

export {
  consolecolor,
  smartbrowser,
  smartchok,
  smartdelay,
  smartfile,
  smartlog,
@ -31,7 +33,7 @@ export {
  tapbundle,
};

// @gitzone scope
// @git.zone scope
import * as tsbundle from '@git.zone/tsbundle';

export { tsbundle };
8
ts_tapbundle/00_commitinfo_data.ts
Normal file
@ -0,0 +1,8 @@
/**
 * autocreated commitinfo by @push.rocks/commitinfo
 */
export const commitinfo = {
  name: '@push.rocks/tapbundle',
  version: '6.0.3',
  description: 'A comprehensive testing automation library that provides a wide range of utilities and tools for TAP (Test Anything Protocol) based testing, especially suitable for projects using tapbuffer.'
}
7
ts_tapbundle/index.ts
Normal file
@ -0,0 +1,7 @@
export { tap } from './tapbundle.classes.tap.js';
export { TapWrap } from './tapbundle.classes.tapwrap.js';
export { webhelpers } from './webhelpers.js';
export { TapTools } from './tapbundle.classes.taptools.js';

// Export enhanced expect with diff generation
export { expect, setProtocolEmitter } from './tapbundle.expect.wrapper.js';
21
ts_tapbundle/tapbundle.classes.pretask.ts
Normal file
@ -0,0 +1,21 @@
import * as plugins from './tapbundle.plugins.js';
import { TapTools } from './tapbundle.classes.taptools.js';

export interface IPreTaskFunction {
  (tapTools?: TapTools): Promise<any>;
}

export class PreTask {
  public description: string;
  public preTaskFunction: IPreTaskFunction;

  constructor(descriptionArg: string, preTaskFunctionArg: IPreTaskFunction) {
    this.description = descriptionArg;
    this.preTaskFunction = preTaskFunctionArg;
  }

  public async run() {
    console.log(`::__PRETASK: ${this.description}`);
    await this.preTaskFunction(new TapTools(null));
  }
}
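// Usage sketch (illustrative):
// tap.preTask('start database container', async () => {
//   // runs once before any test when tap.start() is invoked
// });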
117
ts_tapbundle/tapbundle.classes.settingsmanager.ts
Normal file
@ -0,0 +1,117 @@
|
||||
import type { ITapSettings, ISettingsManager } from './tapbundle.interfaces.js';
|
||||
|
||||
export class SettingsManager implements ISettingsManager {
|
||||
private globalSettings: ITapSettings = {};
|
||||
private fileSettings: ITapSettings = {};
|
||||
private testSettings: Map<string, ITapSettings> = new Map();
|
||||
|
||||
// Default settings
|
||||
private defaultSettings: ITapSettings = {
|
||||
timeout: undefined, // No timeout by default
|
||||
slowThreshold: 1000, // 1 second
|
||||
bail: false,
|
||||
retries: 0,
|
||||
retryDelay: 0,
|
||||
suppressConsole: false,
|
||||
verboseErrors: true,
|
||||
showTestDuration: true,
|
||||
maxConcurrency: 5,
|
||||
isolateTests: false,
|
||||
enableSnapshots: true,
|
||||
snapshotDirectory: '.snapshots',
|
||||
updateSnapshots: false,
|
||||
};
|
||||
|
||||
/**
|
||||
* Get merged settings for current context
|
||||
*/
|
||||
public getSettings(): ITapSettings {
|
||||
return this.mergeSettings(
|
||||
this.defaultSettings,
|
||||
this.globalSettings,
|
||||
this.fileSettings
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Set global settings (from 00init.ts or tap.settings())
|
||||
*/
|
||||
public setGlobalSettings(settings: ITapSettings): void {
|
||||
this.globalSettings = { ...this.globalSettings, ...settings };
|
||||
}
|
||||
|
||||
/**
|
||||
* Set file-level settings
|
||||
*/
|
||||
public setFileSettings(settings: ITapSettings): void {
|
||||
this.fileSettings = { ...this.fileSettings, ...settings };
|
||||
}
|
||||
|
||||
/**
|
||||
* Set test-specific settings
|
||||
*/
|
||||
public setTestSettings(testId: string, settings: ITapSettings): void {
|
||||
const existingSettings = this.testSettings.get(testId) || {};
|
||||
this.testSettings.set(testId, { ...existingSettings, ...settings });
|
||||
}
|
||||
|
||||
/**
|
||||
* Get settings for specific test
|
||||
*/
|
||||
public getTestSettings(testId: string): ITapSettings {
|
||||
const testSpecificSettings = this.testSettings.get(testId) || {};
|
||||
return this.mergeSettings(
|
||||
this.defaultSettings,
|
||||
this.globalSettings,
|
||||
this.fileSettings,
|
||||
testSpecificSettings
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Merge settings with proper inheritance
|
||||
* Later settings override earlier ones
|
||||
*/
|
||||
private mergeSettings(...settingsArray: ITapSettings[]): ITapSettings {
|
||||
const result: ITapSettings = {};
|
||||
|
||||
for (const settings of settingsArray) {
|
||||
// Simple properties - later values override
|
||||
if (settings.timeout !== undefined) result.timeout = settings.timeout;
|
||||
if (settings.slowThreshold !== undefined) result.slowThreshold = settings.slowThreshold;
|
||||
if (settings.bail !== undefined) result.bail = settings.bail;
|
||||
if (settings.retries !== undefined) result.retries = settings.retries;
|
||||
if (settings.retryDelay !== undefined) result.retryDelay = settings.retryDelay;
|
||||
if (settings.suppressConsole !== undefined) result.suppressConsole = settings.suppressConsole;
|
||||
if (settings.verboseErrors !== undefined) result.verboseErrors = settings.verboseErrors;
|
||||
if (settings.showTestDuration !== undefined) result.showTestDuration = settings.showTestDuration;
|
||||
if (settings.maxConcurrency !== undefined) result.maxConcurrency = settings.maxConcurrency;
|
||||
if (settings.isolateTests !== undefined) result.isolateTests = settings.isolateTests;
|
||||
if (settings.enableSnapshots !== undefined) result.enableSnapshots = settings.enableSnapshots;
|
||||
if (settings.snapshotDirectory !== undefined) result.snapshotDirectory = settings.snapshotDirectory;
|
||||
if (settings.updateSnapshots !== undefined) result.updateSnapshots = settings.updateSnapshots;
|
||||
|
||||
// Lifecycle hooks - later ones override
|
||||
if (settings.beforeAll !== undefined) result.beforeAll = settings.beforeAll;
|
||||
if (settings.afterAll !== undefined) result.afterAll = settings.afterAll;
|
||||
if (settings.beforeEach !== undefined) result.beforeEach = settings.beforeEach;
|
||||
if (settings.afterEach !== undefined) result.afterEach = settings.afterEach;
|
||||
|
||||
// Environment variables - merge
|
||||
if (settings.env) {
|
||||
result.env = { ...result.env, ...settings.env };
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* Clear all settings (useful for testing)
|
||||
*/
|
||||
public clearSettings(): void {
|
||||
this.globalSettings = {};
|
||||
this.fileSettings = {};
|
||||
this.testSettings.clear();
|
||||
}
|
||||
}
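// Usage sketch (illustrative):
// const manager = new SettingsManager();
// manager.setGlobalSettings({ timeout: 5000 });
// manager.setFileSettings({ timeout: 10000, retries: 2 });
// manager.getSettings();
// // -> file-level values override global ones, which override the defaults:
// //    { ...defaults, timeout: 10000, retries: 2 }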
|
710
ts_tapbundle/tapbundle.classes.tap.ts
Normal file
@ -0,0 +1,710 @@
|
||||
import * as plugins from './tapbundle.plugins.js';
|
||||
|
||||
import { type IPreTaskFunction, PreTask } from './tapbundle.classes.pretask.js';
|
||||
import { TapTest, type ITestFunction } from './tapbundle.classes.taptest.js';
|
||||
import { ProtocolEmitter, type ITestEvent } from '../dist_ts_tapbundle_protocol/index.js';
|
||||
import type { ITapSettings } from './tapbundle.interfaces.js';
|
||||
import { SettingsManager } from './tapbundle.classes.settingsmanager.js';
|
||||
|
||||
export interface ITestSuite {
|
||||
description: string;
|
||||
tests: TapTest<any>[];
|
||||
beforeEach?: ITestFunction<any>;
|
||||
afterEach?: ITestFunction<any>;
|
||||
parent?: ITestSuite;
|
||||
children: ITestSuite[];
|
||||
}
|
||||
|
||||
class TestBuilder<T> {
|
||||
private _tap: Tap<T>;
|
||||
private _tags: string[] = [];
|
||||
private _priority: 'high' | 'medium' | 'low' = 'medium';
|
||||
private _retryCount?: number;
|
||||
private _timeoutMs?: number;
|
||||
|
||||
constructor(tap: Tap<T>) {
|
||||
this._tap = tap;
|
||||
}
|
||||
|
||||
tags(...tags: string[]) {
|
||||
this._tags = tags;
|
||||
return this;
|
||||
}
|
||||
|
||||
priority(level: 'high' | 'medium' | 'low') {
|
||||
this._priority = level;
|
||||
return this;
|
||||
}
|
||||
|
||||
retry(count: number) {
|
||||
this._retryCount = count;
|
||||
return this;
|
||||
}
|
||||
|
||||
timeout(ms: number) {
|
||||
this._timeoutMs = ms;
|
||||
return this;
|
||||
}
|
||||
|
||||
test(description: string, testFunction: ITestFunction<T>) {
|
||||
const test = this._tap.test(description, testFunction, 'normal');
|
||||
|
||||
// Apply settings to the test
|
||||
if (this._tags.length > 0) {
|
||||
test.tags = this._tags;
|
||||
}
|
||||
test.priority = this._priority;
|
||||
|
||||
if (this._retryCount !== undefined) {
|
||||
test.tapTools.retry(this._retryCount);
|
||||
}
|
||||
if (this._timeoutMs !== undefined) {
|
||||
test.timeoutMs = this._timeoutMs;
|
||||
}
|
||||
|
||||
return test;
|
||||
}
|
||||
|
||||
testOnly(description: string, testFunction: ITestFunction<T>) {
|
||||
const test = this._tap.test(description, testFunction, 'only');
|
||||
|
||||
// Apply settings to the test
|
||||
if (this._tags.length > 0) {
|
||||
test.tags = this._tags;
|
||||
}
|
||||
test.priority = this._priority;
|
||||
|
||||
if (this._retryCount !== undefined) {
|
||||
test.tapTools.retry(this._retryCount);
|
||||
}
|
||||
if (this._timeoutMs !== undefined) {
|
||||
test.timeoutMs = this._timeoutMs;
|
||||
}
|
||||
|
||||
return test;
|
||||
}
|
||||
|
||||
testSkip(description: string, testFunction: ITestFunction<T>) {
|
||||
const test = this._tap.test(description, testFunction, 'skip');
|
||||
|
||||
// Apply settings to the test
|
||||
if (this._tags.length > 0) {
|
||||
test.tags = this._tags;
|
||||
}
|
||||
test.priority = this._priority;
|
||||
|
||||
if (this._retryCount !== undefined) {
|
||||
test.tapTools.retry(this._retryCount);
|
||||
}
|
||||
if (this._timeoutMs !== undefined) {
|
||||
test.timeoutMs = this._timeoutMs;
|
||||
}
|
||||
|
||||
return test;
|
||||
}
|
||||
}
|
||||
|
||||
export class Tap<T> {
|
||||
private protocolEmitter = new ProtocolEmitter();
|
||||
private settingsManager = new SettingsManager();
|
||||
private _skipCount = 0;
|
||||
private _filterTags: string[] = [];
|
||||
|
||||
constructor() {
|
||||
// Get filter tags from environment
|
||||
if (typeof process !== 'undefined' && process.env && process.env.TSTEST_FILTER_TAGS) {
|
||||
this._filterTags = process.env.TSTEST_FILTER_TAGS.split(',');
|
||||
}
|
||||
}
|
||||
|
||||
// Fluent test builder
|
||||
public tags(...tags: string[]) {
|
||||
const builder = new TestBuilder<T>(this);
|
||||
return builder.tags(...tags);
|
||||
}
|
||||
|
||||
public priority(level: 'high' | 'medium' | 'low') {
|
||||
const builder = new TestBuilder<T>(this);
|
||||
return builder.priority(level);
|
||||
}
|
||||
|
||||
public retry(count: number) {
|
||||
const builder = new TestBuilder<T>(this);
|
||||
return builder.retry(count);
|
||||
}
|
||||
|
||||
public timeout(ms: number) {
|
||||
const builder = new TestBuilder<T>(this);
|
||||
return builder.timeout(ms);
|
||||
}
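// Fluent usage sketch (illustrative):
// tap.tags('unit', 'fast')
//   .priority('high')
//   .retry(2)
//   .timeout(5000)
//   .test('parses the config', async () => { /* ... */ });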
|
||||
|
||||
/**
|
||||
* skips a test
|
||||
* tests marked with tap.skip.test() are never executed
|
||||
*/
|
||||
public skip = {
|
||||
test: (descriptionArg: string, functionArg: ITestFunction<T>) => {
|
||||
const skippedTest = this.test(descriptionArg, functionArg, 'skip');
|
||||
return skippedTest;
|
||||
},
|
||||
testParallel: (descriptionArg: string, functionArg: ITestFunction<T>) => {
|
||||
const skippedTest = new TapTest<T>({
|
||||
description: descriptionArg,
|
||||
testFunction: functionArg,
|
||||
parallel: true,
|
||||
});
|
||||
|
||||
// Mark as skip mode
|
||||
skippedTest.tapTools.markAsSkipped('Marked as skip');
|
||||
|
||||
// Add to appropriate test list
|
||||
if (this._currentSuite) {
|
||||
this._currentSuite.tests.push(skippedTest);
|
||||
} else {
|
||||
this._tapTests.push(skippedTest);
|
||||
}
|
||||
|
||||
return skippedTest;
|
||||
},
|
||||
};
|
||||
|
||||
/**
|
||||
* only executes tests marked as ONLY
|
||||
*/
|
||||
public only = {
|
||||
test: (descriptionArg: string, testFunctionArg: ITestFunction<T>) => {
|
||||
return this.test(descriptionArg, testFunctionArg, 'only');
|
||||
},
|
||||
testParallel: (descriptionArg: string, testFunctionArg: ITestFunction<T>) => {
|
||||
const onlyTest = new TapTest<T>({
|
||||
description: descriptionArg,
|
||||
testFunction: testFunctionArg,
|
||||
parallel: true,
|
||||
});
|
||||
|
||||
// Add to only tests list
|
||||
this._tapTestsOnly.push(onlyTest);
|
||||
|
||||
return onlyTest;
|
||||
},
|
||||
};
|
||||
|
||||
/**
|
||||
* mark a test as todo (not yet implemented)
|
||||
*/
|
||||
public todo = {
|
||||
test: (descriptionArg: string, functionArg?: ITestFunction<T>) => {
|
||||
const defaultFunc = (async () => {}) as ITestFunction<T>;
|
||||
const todoTest = new TapTest<T>({
|
||||
description: descriptionArg,
|
||||
testFunction: functionArg || defaultFunc,
|
||||
parallel: false,
|
||||
});
|
||||
|
||||
// Mark as todo
|
||||
todoTest.tapTools.todo('Marked as todo');
|
||||
|
||||
// Add to appropriate test list
|
||||
if (this._currentSuite) {
|
||||
this._currentSuite.tests.push(todoTest);
|
||||
} else {
|
||||
this._tapTests.push(todoTest);
|
||||
}
|
||||
|
||||
return todoTest;
|
||||
},
|
||||
testParallel: (descriptionArg: string, functionArg?: ITestFunction<T>) => {
|
||||
const defaultFunc = (async () => {}) as ITestFunction<T>;
|
||||
const todoTest = new TapTest<T>({
|
||||
description: descriptionArg,
|
||||
testFunction: functionArg || defaultFunc,
|
||||
parallel: true,
|
||||
});
|
||||
|
||||
// Mark as todo
|
||||
todoTest.tapTools.todo('Marked as todo');
|
||||
|
||||
// Add to appropriate test list
|
||||
if (this._currentSuite) {
|
||||
this._currentSuite.tests.push(todoTest);
|
||||
} else {
|
||||
this._tapTests.push(todoTest);
|
||||
}
|
||||
|
||||
return todoTest;
|
||||
},
|
||||
};
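// Usage sketch (illustrative):
// tap.skip.test('not ready yet', async () => { /* never executed, reported as skipped */ });
// tap.only.test('focus here', async () => { /* when any ONLY test exists, only those run */ });
// tap.todo.test('cover the edge cases later');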
|
||||
|
||||
private _tapPreTasks: PreTask[] = [];
|
||||
private _tapTests: TapTest<any>[] = [];
|
||||
private _tapTestsOnly: TapTest<any>[] = [];
|
||||
private _currentSuite: ITestSuite | null = null;
|
||||
private _rootSuites: ITestSuite[] = [];
|
||||
|
||||
/**
|
||||
* Configure global test settings
|
||||
*/
|
||||
public settings(settings: ITapSettings): this {
|
||||
this.settingsManager.setGlobalSettings(settings);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get current test settings
|
||||
*/
|
||||
public getSettings(): ITapSettings {
|
||||
return this.settingsManager.getSettings();
|
||||
}
|
||||
|
||||
/**
|
||||
* Normal test function, will run one by one
|
||||
* @param testDescription - A description of what the test does
|
||||
* @param testFunction - A Function that returns a Promise and resolves or rejects
|
||||
*/
|
||||
public test(
|
||||
testDescription: string,
|
||||
testFunction: ITestFunction<T>,
|
||||
modeArg: 'normal' | 'only' | 'skip' = 'normal'
|
||||
): TapTest<T> {
|
||||
const localTest = new TapTest<T>({
|
||||
description: testDescription,
|
||||
testFunction,
|
||||
parallel: false,
|
||||
});
|
||||
|
||||
// Apply default settings from settings manager
|
||||
const settings = this.settingsManager.getSettings();
|
||||
if (settings.timeout !== undefined) {
|
||||
localTest.timeoutMs = settings.timeout;
|
||||
}
|
||||
if (settings.retries !== undefined) {
|
||||
localTest.tapTools.retry(settings.retries);
|
||||
}
|
||||
|
||||
// Handle skip mode
|
||||
if (modeArg === 'skip') {
|
||||
localTest.tapTools.markAsSkipped('Marked as skip');
|
||||
}
|
||||
|
||||
// If we're in a suite, add test to the suite
|
||||
if (this._currentSuite) {
|
||||
this._currentSuite.tests.push(localTest);
|
||||
} else {
|
||||
// Otherwise add to global test list
|
||||
if (modeArg === 'normal' || modeArg === 'skip') {
|
||||
this._tapTests.push(localTest);
|
||||
} else if (modeArg === 'only') {
|
||||
this._tapTestsOnly.push(localTest);
|
||||
}
|
||||
}
|
||||
return localTest;
|
||||
}
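// Usage sketch (illustrative):
// tap.test('adds numbers', async () => {
//   if (1 + 1 !== 2) throw new Error('math is broken');
// });
// await tap.start();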
|
||||
|
||||
public preTask(descriptionArg: string, functionArg: IPreTaskFunction) {
|
||||
this._tapPreTasks.push(new PreTask(descriptionArg, functionArg));
|
||||
}
|
||||
|
||||
/**
|
||||
* A parallel test that will not be waited for before the next starts.
|
||||
* @param testDescription - A description of what the test does
|
||||
* @param testFunction - A Function that returns a Promise and resolves or rejects
|
||||
*/
|
||||
public testParallel(testDescription: string, testFunction: ITestFunction<T>) {
|
||||
const localTest = new TapTest({
|
||||
description: testDescription,
|
||||
testFunction,
|
||||
parallel: true,
|
||||
});
|
||||
|
||||
// Apply default settings from settings manager
|
||||
const settings = this.settingsManager.getSettings();
|
||||
if (settings.timeout !== undefined) {
|
||||
localTest.timeoutMs = settings.timeout;
|
||||
}
|
||||
if (settings.retries !== undefined) {
|
||||
localTest.tapTools.retry(settings.retries);
|
||||
}
|
||||
|
||||
if (this._currentSuite) {
|
||||
this._currentSuite.tests.push(localTest);
|
||||
} else {
|
||||
this._tapTests.push(localTest);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a test suite for grouping related tests
|
||||
*/
|
||||
public describe(description: string, suiteFunction: () => void) {
|
||||
const suite: ITestSuite = {
|
||||
description,
|
||||
tests: [],
|
||||
children: [],
|
||||
parent: this._currentSuite,
|
||||
};
|
||||
|
||||
// Add to parent or root
|
||||
if (this._currentSuite) {
|
||||
this._currentSuite.children.push(suite);
|
||||
} else {
|
||||
this._rootSuites.push(suite);
|
||||
}
|
||||
|
||||
// Execute suite function in context
|
||||
const previousSuite = this._currentSuite;
|
||||
this._currentSuite = suite;
|
||||
try {
|
||||
suiteFunction();
|
||||
} finally {
|
||||
this._currentSuite = previousSuite;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Set up a function to run before each test in the current suite
|
||||
*/
|
||||
public beforeEach(setupFunction: ITestFunction<any>) {
|
||||
if (this._currentSuite) {
|
||||
this._currentSuite.beforeEach = setupFunction;
|
||||
} else {
|
||||
throw new Error('beforeEach can only be used inside a describe block');
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Set up a function to run after each test in the current suite
|
||||
*/
|
||||
public afterEach(teardownFunction: ITestFunction<any>) {
|
||||
if (this._currentSuite) {
|
||||
this._currentSuite.afterEach = teardownFunction;
|
||||
} else {
|
||||
throw new Error('afterEach can only be used inside a describe block');
|
||||
}
|
||||
}
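// Usage sketch (illustrative):
// tap.describe('UserService', () => {
//   tap.beforeEach(async () => { /* prepare fixtures */ });
//   tap.afterEach(async () => { /* clean up */ });
//   tap.test('creates a user', async () => { /* ... */ });
// });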
|
||||
|
||||
/**
|
||||
* collect all tests from suites
|
||||
*/
|
||||
private _collectTests(suite: ITestSuite, tests: TapTest<any>[] = []): TapTest<any>[] {
|
||||
tests.push(...suite.tests);
|
||||
for (const childSuite of suite.children) {
|
||||
this._collectTests(childSuite, tests);
|
||||
}
|
||||
return tests;
|
||||
}
|
||||
|
||||
/**
|
||||
* starts the test evaluation
|
||||
*/
|
||||
public async start(optionsArg?: { throwOnError: boolean }) {
|
||||
// let's set the tapbundle promise
|
||||
const smartenvInstance = new plugins.smartenv.Smartenv();
|
||||
const globalPromise = plugins.smartpromise.defer();
|
||||
smartenvInstance.isBrowser
|
||||
? ((globalThis as any).tapbundleDeferred = globalPromise)
|
||||
: null;
|
||||
// Also set tapPromise for backwards compatibility
|
||||
smartenvInstance.isBrowser
|
||||
? ((globalThis as any).tapPromise = globalPromise.promise)
|
||||
: null;
|
||||
|
||||
// Path helpers will be initialized by the Node.js environment if available
|
||||
|
||||
// let's continue with running the tests
|
||||
const promiseArray: Array<Promise<any>> = [];
|
||||
|
||||
// Collect all tests including those in suites
|
||||
let allTests: TapTest<any>[] = [...this._tapTests];
|
||||
for (const suite of this._rootSuites) {
|
||||
this._collectTests(suite, allTests);
|
||||
}
|
||||
|
||||
// safeguard against empty test array
|
||||
if (allTests.length === 0 && this._tapTestsOnly.length === 0) {
|
||||
console.log('no tests specified. Ending here!');
|
||||
return;
|
||||
}
|
||||
|
||||
// determine which tests to run
|
||||
let concerningTests: TapTest[];
|
||||
if (this._tapTestsOnly.length > 0) {
|
||||
concerningTests = this._tapTestsOnly;
|
||||
} else {
|
||||
concerningTests = allTests;
|
||||
}
|
||||
|
||||
// Filter tests by tags if specified
|
||||
if (this._filterTags.length > 0) {
|
||||
concerningTests = concerningTests.filter(test => {
|
||||
// Skip tests without tags when filtering is active
|
||||
if (!test.tags || test.tags.length === 0) {
|
||||
return false;
|
||||
}
|
||||
// Check if test has any of the filter tags
|
||||
return test.tags.some(tag => this._filterTags.includes(tag));
|
||||
});
|
||||
}
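// Illustrative: running with TSTEST_FILTER_TAGS=unit,integration keeps only tests tagged
// 'unit' or 'integration'; untagged tests are filtered out while the filter is active.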
|
||||
|
||||
// let's run the pretasks
|
||||
for (const preTask of this._tapPreTasks) {
|
||||
await preTask.run();
|
||||
}
|
||||
|
||||
// Emit protocol header and TAP version
|
||||
console.log(this.protocolEmitter.emitProtocolHeader());
|
||||
console.log(this.protocolEmitter.emitTapVersion(13));
|
||||
|
||||
// Emit test plan
|
||||
const plan = {
|
||||
start: 1,
|
||||
end: concerningTests.length
|
||||
};
|
||||
console.log(this.protocolEmitter.emitPlan(plan));
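// Illustrative console output at this point (the exact header text comes from ProtocolEmitter):
//   TAP version 13
//   1..12   <- plan line when 12 tests were collected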
|
||||
|
||||
// Run global beforeAll hook if configured
|
||||
const settings = this.settingsManager.getSettings();
|
||||
if (settings.beforeAll) {
|
||||
try {
|
||||
await settings.beforeAll();
|
||||
} catch (error) {
|
||||
console.error('Error in beforeAll hook:', error);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
// Run tests from suites with lifecycle hooks
|
||||
let testKey = 0;
|
||||
|
||||
// Run root suite tests with lifecycle hooks
|
||||
if (this._rootSuites.length > 0) {
|
||||
await this._runSuite(null, this._rootSuites, promiseArray, { testKey });
|
||||
// Update testKey after running suite tests
|
||||
for (const suite of this._rootSuites) {
|
||||
const suiteTests = this._collectTests(suite);
|
||||
testKey += suiteTests.length;
|
||||
}
|
||||
}
|
||||
|
||||
// Run non-suite tests (tests added directly without describe)
|
||||
const nonSuiteTests = concerningTests.filter(test => {
|
||||
// Check if test is not in any suite
|
||||
for (const suite of this._rootSuites) {
|
||||
const suiteTests = this._collectTests(suite);
|
||||
if (suiteTests.includes(test)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
});
|
||||
|
||||
for (const currentTest of nonSuiteTests) {
|
||||
// Wrap test function with global lifecycle hooks
|
||||
const originalFunction = currentTest.testFunction;
|
||||
const testName = currentTest.description;
|
||||
currentTest.testFunction = async (tapTools) => {
|
||||
// Run global beforeEach if configured
|
||||
if (settings.beforeEach) {
|
||||
await settings.beforeEach(testName);
|
||||
}
|
||||
|
||||
// Run the actual test
|
||||
let testPassed = true;
|
||||
let result: any;
|
||||
try {
|
||||
result = await originalFunction(tapTools);
|
||||
} catch (error) {
|
||||
testPassed = false;
|
||||
throw error;
|
||||
} finally {
|
||||
// Run global afterEach if configured
|
||||
if (settings.afterEach) {
|
||||
await settings.afterEach(testName, testPassed);
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
};
|
||||
|
||||
const testPromise = currentTest.run(testKey++);
|
||||
if (currentTest.parallel) {
|
||||
promiseArray.push(testPromise);
|
||||
} else {
|
||||
await testPromise;
|
||||
}
|
||||
}
|
||||
|
||||
await Promise.all(promiseArray);
|
||||
|
||||
// when tests have been run and all promises are fulfilled
|
||||
const failReasons: string[] = [];
|
||||
const executionNotes: string[] = [];
|
||||
// collect failed tests
|
||||
for (const tapTest of concerningTests) {
|
||||
if (tapTest.status !== 'success' && tapTest.status !== 'skipped') {
|
||||
failReasons.push(
|
||||
`Test ${tapTest.testKey + 1} failed with status ${tapTest.status}:\n` +
|
||||
`|| ${tapTest.description}\n` +
|
||||
`|| for more information please take a look at the logs above`,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// render fail Reasons
|
||||
for (const failReason of failReasons) {
|
||||
console.log(failReason);
|
||||
}
|
||||
|
||||
// Run global afterAll hook if configured
|
||||
if (settings.afterAll) {
|
||||
try {
|
||||
await settings.afterAll();
|
||||
} catch (error) {
|
||||
console.error('Error in afterAll hook:', error);
|
||||
// Don't throw here, we want to complete the test run
|
||||
}
|
||||
}
|
||||
|
||||
if (optionsArg && optionsArg.throwOnError && failReasons.length > 0) {
|
||||
if (!smartenvInstance.isBrowser && typeof process !== 'undefined') process.exit(1);
|
||||
}
|
||||
if (smartenvInstance.isBrowser) {
|
||||
globalPromise.resolve();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Emit an event
|
||||
*/
|
||||
private emitEvent(event: ITestEvent) {
|
||||
console.log(this.protocolEmitter.emitEvent(event));
|
||||
}
|
||||
|
||||
/**
|
||||
* Run tests in a suite with lifecycle hooks
|
||||
*/
|
||||
private async _runSuite(
|
||||
parentSuite: ITestSuite | null,
|
||||
suites: ITestSuite[],
|
||||
promiseArray: Promise<any>[],
|
||||
context: { testKey: number }
|
||||
) {
|
||||
for (const suite of suites) {
|
||||
// Emit suite:started event
|
||||
this.emitEvent({
|
||||
eventType: 'suite:started',
|
||||
timestamp: Date.now(),
|
||||
data: {
|
||||
suiteName: suite.description
|
||||
}
|
||||
});
|
||||
// Run beforeEach from parent suites
|
||||
const beforeEachFunctions: ITestFunction<any>[] = [];
|
||||
let currentSuite: ITestSuite | null = suite;
|
||||
while (currentSuite) {
|
||||
if (currentSuite.beforeEach) {
|
||||
beforeEachFunctions.unshift(currentSuite.beforeEach);
|
||||
}
|
||||
currentSuite = currentSuite.parent || null;
|
||||
}
|
||||
|
||||
// Run tests in this suite
|
||||
for (const test of suite.tests) {
|
||||
// Create wrapper test function that includes lifecycle hooks
|
||||
const originalFunction = test.testFunction;
|
||||
const testName = test.description;
|
||||
test.testFunction = async (tapTools) => {
|
||||
// Run global beforeEach if configured
|
||||
const settings = this.settingsManager.getSettings();
|
||||
if (settings.beforeEach) {
|
||||
await settings.beforeEach(testName);
|
||||
}
|
||||
|
||||
// Run all suite beforeEach hooks
|
||||
for (const beforeEach of beforeEachFunctions) {
|
||||
await beforeEach(tapTools);
|
||||
}
|
||||
|
||||
// Run the actual test
|
||||
let testPassed = true;
|
||||
let result: any;
|
||||
try {
|
||||
result = await originalFunction(tapTools);
|
||||
} catch (error) {
|
||||
testPassed = false;
|
||||
throw error;
|
||||
} finally {
|
||||
// Run afterEach hooks in reverse order
|
||||
const afterEachFunctions: ITestFunction<any>[] = [];
|
||||
currentSuite = suite;
|
||||
while (currentSuite) {
|
||||
if (currentSuite.afterEach) {
|
||||
afterEachFunctions.push(currentSuite.afterEach);
|
||||
}
|
||||
currentSuite = currentSuite.parent || null;
|
||||
}
|
||||
|
||||
for (const afterEach of afterEachFunctions) {
|
||||
await afterEach(tapTools);
|
||||
}
|
||||
|
||||
// Run global afterEach if configured
|
||||
if (settings.afterEach) {
|
||||
await settings.afterEach(testName, testPassed);
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
};
|
||||
|
||||
const testPromise = test.run(context.testKey++);
|
||||
if (test.parallel) {
|
||||
promiseArray.push(testPromise);
|
||||
} else {
|
||||
await testPromise;
|
||||
}
|
||||
}
|
||||
|
||||
// Recursively run child suites
|
||||
await this._runSuite(suite, suite.children, promiseArray, context);
|
||||
|
||||
// Emit suite:completed event
|
||||
this.emitEvent({
|
||||
eventType: 'suite:completed',
|
||||
timestamp: Date.now(),
|
||||
data: {
|
||||
suiteName: suite.description
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
public async stopForcefully(codeArg = 0, directArg = false) {
|
||||
console.log(`tap stopping forcefully! Code: ${codeArg} / Direct: ${directArg}`);
|
||||
if (typeof process !== 'undefined') {
|
||||
if (directArg) {
|
||||
process.exit(codeArg);
|
||||
} else {
|
||||
setTimeout(() => {
|
||||
process.exit(codeArg);
|
||||
}, 10);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* handle errors
|
||||
*/
|
||||
public threw(err: Error) {
|
||||
console.log(err);
|
||||
}
|
||||
|
||||
/**
|
||||
* Explicitly fail the current test with a custom message
|
||||
* @param message - The failure message to display
|
||||
*/
|
||||
public fail(message: string = 'Test failed'): never {
|
||||
throw new Error(message);
|
||||
}
|
||||
}
|
||||
|
||||
export const tap = new Tap();
|
316
ts_tapbundle/tapbundle.classes.taptest.ts
Normal file
@ -0,0 +1,316 @@
|
||||
import * as plugins from './tapbundle.plugins.js';
|
||||
import { tapCreator } from './tapbundle.tapcreator.js';
|
||||
import { TapTools, SkipError } from './tapbundle.classes.taptools.js';
|
||||
import { ProtocolEmitter, type ITestEvent } from '../dist_ts_tapbundle_protocol/index.js';
|
||||
import { setProtocolEmitter } from './tapbundle.expect.wrapper.js';
|
||||
|
||||
// imported interfaces
|
||||
import { Deferred } from '@push.rocks/smartpromise';
|
||||
import { HrtMeasurement } from '@push.rocks/smarttime';
|
||||
|
||||
// interfaces
|
||||
export type TTestStatus = 'success' | 'error' | 'pending' | 'errorAfterSuccess' | 'timeout' | 'skipped';
|
||||
|
||||
export interface ITestFunction<T> {
|
||||
(tapTools?: TapTools): Promise<T>;
|
||||
}
|
||||
|
||||
export class TapTest<T = unknown> {
|
||||
public description: string;
|
||||
public failureAllowed: boolean;
|
||||
public hrtMeasurement: HrtMeasurement;
|
||||
public parallel: boolean;
|
||||
public status: TTestStatus;
|
||||
public tapTools: TapTools;
|
||||
public testFunction: ITestFunction<T>;
|
||||
public testKey: number; // the testKey is the position in the test queue. Set upon calling .run()
|
||||
public timeoutMs?: number;
|
||||
public isTodo: boolean = false;
|
||||
public todoReason?: string;
|
||||
public tags: string[] = [];
|
||||
public priority: 'high' | 'medium' | 'low' = 'medium';
|
||||
public fileName?: string;
|
||||
private testDeferred: Deferred<TapTest<T>> = plugins.smartpromise.defer();
|
||||
public testPromise: Promise<TapTest<T>> = this.testDeferred.promise;
|
||||
private testResultDeferred: Deferred<T> = plugins.smartpromise.defer();
|
||||
public testResultPromise: Promise<T> = this.testResultDeferred.promise;
|
||||
private protocolEmitter = new ProtocolEmitter();
|
||||
/**
|
||||
* constructor
|
||||
*/
|
||||
constructor(optionsArg: {
|
||||
description: string;
|
||||
testFunction: ITestFunction<T>;
|
||||
parallel: boolean;
|
||||
}) {
|
||||
this.description = optionsArg.description;
|
||||
this.hrtMeasurement = new HrtMeasurement();
|
||||
this.parallel = optionsArg.parallel;
|
||||
this.status = 'pending';
|
||||
this.tapTools = new TapTools(this);
|
||||
this.testFunction = optionsArg.testFunction;
|
||||
}
|
||||
|
||||
/**
|
||||
* Emit an event
|
||||
*/
|
||||
private emitEvent(event: ITestEvent) {
|
||||
console.log(this.protocolEmitter.emitEvent(event));
|
||||
}
|
||||
|
||||
/**
|
||||
* run the test
|
||||
*/
|
||||
public async run(testKeyArg: number) {
|
||||
this.testKey = testKeyArg;
|
||||
const testNumber = testKeyArg + 1;
|
||||
|
||||
// Emit test:queued event
|
||||
this.emitEvent({
|
||||
eventType: 'test:queued',
|
||||
timestamp: Date.now(),
|
||||
data: {
|
||||
testNumber,
|
||||
description: this.description
|
||||
}
|
||||
});
|
||||
|
||||
// Handle todo tests
|
||||
if (this.isTodo) {
|
||||
const testResult = {
|
||||
ok: true,
|
||||
testNumber,
|
||||
description: this.description,
|
||||
directive: {
|
||||
type: 'todo' as const,
|
||||
reason: this.todoReason
|
||||
}
|
||||
};
|
||||
const lines = this.protocolEmitter.emitTest(testResult);
|
||||
lines.forEach((line: string) => console.log(line));
|
||||
this.status = 'success';
|
||||
|
||||
// Emit test:completed event for todo test
|
||||
this.emitEvent({
|
||||
eventType: 'test:completed',
|
||||
timestamp: Date.now(),
|
||||
data: {
|
||||
testNumber,
|
||||
description: this.description,
|
||||
duration: 0,
|
||||
error: undefined
|
||||
}
|
||||
});
|
||||
|
||||
this.testDeferred.resolve(this);
|
||||
return;
|
||||
}
|
||||
|
||||
// Handle pre-marked skip tests
|
||||
if (this.tapTools.isSkipped) {
|
||||
const testResult = {
|
||||
ok: true,
|
||||
testNumber,
|
||||
description: this.description,
|
||||
directive: {
|
||||
type: 'skip' as const,
|
||||
reason: this.tapTools.skipReason || 'Marked as skip'
|
||||
}
|
||||
};
|
||||
const lines = this.protocolEmitter.emitTest(testResult);
|
||||
lines.forEach((line: string) => console.log(line));
|
||||
this.status = 'skipped';
|
||||
|
||||
// Emit test:completed event for skipped test
|
||||
this.emitEvent({
|
||||
eventType: 'test:completed',
|
||||
timestamp: Date.now(),
|
||||
data: {
|
||||
testNumber,
|
||||
description: this.description,
|
||||
duration: 0,
|
||||
error: undefined
|
||||
}
|
||||
});
|
||||
|
||||
this.testDeferred.resolve(this);
|
||||
return;
|
||||
}
|
||||
|
||||
// Run test with retries
|
||||
let lastError: any;
|
||||
const maxRetries = this.tapTools.maxRetries;
|
||||
|
||||
for (let attempt = 0; attempt <= maxRetries; attempt++) {
|
||||
this.hrtMeasurement.start();
|
||||
|
||||
// Emit test:started event
|
||||
this.emitEvent({
|
||||
eventType: 'test:started',
|
||||
timestamp: Date.now(),
|
||||
data: {
|
||||
testNumber,
|
||||
description: this.description,
|
||||
retry: attempt > 0 ? attempt : undefined
|
||||
}
|
||||
});
|
||||
|
||||
// Set protocol emitter for enhanced expect
|
||||
setProtocolEmitter(this.protocolEmitter);
|
||||
|
||||
try {
|
||||
// Set up timeout if specified
|
||||
let timeoutHandle: any;
|
||||
let timeoutPromise: Promise<never> | null = null;
|
||||
|
||||
if (this.timeoutMs) {
|
||||
timeoutPromise = new Promise<never>((_, reject) => {
|
||||
timeoutHandle = setTimeout(() => {
|
||||
this.status = 'timeout';
|
||||
reject(new Error(`Test timed out after ${this.timeoutMs}ms`));
|
||||
}, this.timeoutMs);
|
||||
});
|
||||
}
|
||||
|
||||
// Run the test function with potential timeout
|
||||
const testPromise = this.testFunction(this.tapTools);
|
||||
const testReturnValue = timeoutPromise
|
||||
? await Promise.race([testPromise, timeoutPromise])
|
||||
: await testPromise;
|
||||
|
||||
// Clear timeout if test completed
|
||||
if (timeoutHandle) {
|
||||
clearTimeout(timeoutHandle);
|
||||
}
|
||||
|
||||
this.hrtMeasurement.stop();
|
||||
const testResult = {
|
||||
ok: true,
|
||||
testNumber,
|
||||
description: this.description,
|
||||
metadata: {
|
||||
time: this.hrtMeasurement.milliSeconds,
|
||||
tags: this.tags.length > 0 ? this.tags : undefined,
|
||||
file: this.fileName
|
||||
}
|
||||
};
|
||||
const lines = this.protocolEmitter.emitTest(testResult);
|
||||
lines.forEach((line: string) => console.log(line));
|
||||
this.status = 'success';
|
||||
|
||||
// Emit test:completed event
|
||||
this.emitEvent({
|
||||
eventType: 'test:completed',
|
||||
timestamp: Date.now(),
|
||||
data: {
|
||||
testNumber,
|
||||
description: this.description,
|
||||
duration: this.hrtMeasurement.milliSeconds,
|
||||
error: undefined
|
||||
}
|
||||
});
|
||||
|
||||
this.testDeferred.resolve(this);
|
||||
this.testResultDeferred.resolve(testReturnValue);
|
||||
return; // Success, exit retry loop
|
||||
|
||||
} catch (err: any) {
|
||||
this.hrtMeasurement.stop();
|
||||
|
||||
// Handle skip
|
||||
if (err instanceof SkipError || err.name === 'SkipError') {
|
||||
const testResult = {
|
||||
ok: true,
|
||||
testNumber,
|
||||
description: this.description,
|
||||
directive: {
|
||||
type: 'skip' as const,
|
||||
reason: err.message.replace('Skipped: ', '')
|
||||
}
|
||||
};
|
||||
const lines = this.protocolEmitter.emitTest(testResult);
|
||||
lines.forEach((line: string) => console.log(line));
|
||||
this.status = 'skipped';
|
||||
|
||||
// Emit test:completed event for skipped test
|
||||
this.emitEvent({
|
||||
eventType: 'test:completed',
|
||||
timestamp: Date.now(),
|
||||
data: {
|
||||
testNumber,
|
||||
description: this.description,
|
||||
duration: this.hrtMeasurement.milliSeconds,
|
||||
error: undefined
|
||||
}
|
||||
});
|
||||
|
||||
this.testDeferred.resolve(this);
|
||||
return;
|
||||
}
|
||||
|
||||
lastError = err;
|
||||
|
||||
// If we have retries left, try again
|
||||
if (attempt < maxRetries) {
|
||||
console.log(this.protocolEmitter.emitComment(`Retry ${attempt + 1}/${maxRetries} for test: ${this.description}`));
|
||||
this.tapTools._incrementRetryCount();
|
||||
continue;
|
||||
}
|
||||
|
||||
// Final failure
|
||||
const testResult = {
|
||||
ok: false,
|
||||
testNumber,
|
||||
description: this.description,
|
||||
metadata: {
|
||||
time: this.hrtMeasurement.milliSeconds,
|
||||
retry: this.tapTools.retryCount,
|
||||
maxRetries: maxRetries > 0 ? maxRetries : undefined,
|
||||
error: {
|
||||
message: lastError.message || String(lastError),
|
||||
stack: lastError.stack,
|
||||
code: lastError.code
|
||||
},
|
||||
tags: this.tags.length > 0 ? this.tags : undefined,
|
||||
file: this.fileName
|
||||
}
|
||||
};
|
||||
const lines = this.protocolEmitter.emitTest(testResult);
|
||||
lines.forEach((line: string) => console.log(line));
|
||||
|
||||
// Emit test:completed event for failed test
|
||||
this.emitEvent({
|
||||
eventType: 'test:completed',
|
||||
timestamp: Date.now(),
|
||||
data: {
|
||||
testNumber,
|
||||
description: this.description,
|
||||
duration: this.hrtMeasurement.milliSeconds,
|
||||
error: {
|
||||
message: lastError.message || String(lastError),
|
||||
stack: lastError.stack,
|
||||
type: 'runtime' as const
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
this.testDeferred.resolve(this);
|
||||
this.testResultDeferred.resolve(err);
|
||||
|
||||
// if the test has already succeeded before
|
||||
if (this.status === 'success') {
|
||||
this.status = 'errorAfterSuccess';
|
||||
console.log('!!! ALERT !!!: weird behaviour, since the test has already been successful');
|
||||
} else {
|
||||
this.status = 'error';
|
||||
}
|
||||
|
||||
// if the test is allowed to fail
|
||||
if (this.failureAllowed) {
|
||||
console.log(`please note: failure allowed!`);
|
||||
}
|
||||
console.log(err);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
299
ts_tapbundle/tapbundle.classes.taptools.ts
Normal file
@ -0,0 +1,299 @@
|
||||
import * as plugins from './tapbundle.plugins.js';
|
||||
import { TapTest } from './tapbundle.classes.taptest.js';
|
||||
|
||||
export interface IPromiseFunc {
|
||||
(): Promise<any>;
|
||||
}
|
||||
|
||||
export class SkipError extends Error {
|
||||
constructor(message: string) {
|
||||
super(message);
|
||||
this.name = 'SkipError';
|
||||
}
|
||||
}
|
||||
|
||||
export class TapTools {
|
||||
/**
|
||||
* the referenced TapTest
|
||||
*/
|
||||
private _tapTest: TapTest;
|
||||
private _retries = 0;
|
||||
private _retryCount = 0;
|
||||
public testData: any = {};
|
||||
private static _sharedContext = new Map<string, any>();
|
||||
private _snapshotPath: string = '';
|
||||
|
||||
// Flags for skip/todo
|
||||
private _isSkipped = false;
|
||||
private _skipReason?: string;
|
||||
|
||||
constructor(TapTestArg: TapTest<any>) {
|
||||
this._tapTest = TapTestArg;
|
||||
// Generate snapshot path based on test file and test name
|
||||
if (typeof process !== 'undefined' && process.cwd && TapTestArg) {
|
||||
const testFile = TapTestArg.fileName || 'unknown';
|
||||
const testName = TapTestArg.description.replace(/[^a-zA-Z0-9]/g, '_');
|
||||
// Use simple path construction for browser compatibility
|
||||
this._snapshotPath = `${process.cwd()}/.nogit/test_snapshots/${testFile}/${testName}.snap`;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* allow failure
|
||||
*/
|
||||
public allowFailure() {
|
||||
this._tapTest.failureAllowed = true;
|
||||
}
|
||||
|
||||
/**
|
||||
* skip the rest of the test
|
||||
*/
|
||||
public skip(reason?: string): never {
|
||||
this._isSkipped = true;
|
||||
this._skipReason = reason;
|
||||
const skipMessage = reason ? `Skipped: ${reason}` : 'Skipped';
|
||||
throw new SkipError(skipMessage);
|
||||
}
|
||||
|
||||
/**
|
||||
* Mark test as skipped without throwing (for pre-marking)
|
||||
*/
|
||||
public markAsSkipped(reason?: string): void {
|
||||
this._isSkipped = true;
|
||||
this._skipReason = reason;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if test is marked as skipped
|
||||
*/
|
||||
public get isSkipped(): boolean {
|
||||
return this._isSkipped;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get skip reason
|
||||
*/
|
||||
public get skipReason(): string | undefined {
|
||||
return this._skipReason;
|
||||
}
|
||||
|
||||
/**
|
||||
* conditionally skip the rest of the test
|
||||
*/
|
||||
public skipIf(condition: boolean, reason?: string): void {
|
||||
if (condition) {
|
||||
this.skip(reason);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* mark test as todo
|
||||
*/
|
||||
public todo(reason?: string): void {
|
||||
this._tapTest.isTodo = true;
|
||||
this._tapTest.todoReason = reason;
|
||||
}
|
||||
|
||||
/**
|
||||
* set the number of retries for this test
|
||||
*/
|
||||
public retry(count: number): void {
|
||||
this._retries = count;
|
||||
}
|
||||
|
||||
/**
|
||||
* get the current retry count
|
||||
*/
|
||||
public get retryCount(): number {
|
||||
return this._retryCount;
|
||||
}
|
||||
|
||||
/**
|
||||
* internal: increment retry count
|
||||
*/
|
||||
public _incrementRetryCount(): void {
|
||||
this._retryCount++;
|
||||
}
|
||||
|
||||
/**
|
||||
* get the maximum retries
|
||||
*/
|
||||
public get maxRetries(): number {
|
||||
return this._retries;
|
||||
}
|
||||
|
||||
/**
|
||||
* async/await delay method
|
||||
*/
|
||||
public async delayFor(timeMilliArg: number) {
|
||||
await plugins.smartdelay.delayFor(timeMilliArg);
|
||||
}
|
||||
|
||||
public async delayForRandom(timeMilliMinArg: number, timeMilliMaxArg: number) {
|
||||
await plugins.smartdelay.delayForRandom(timeMilliMinArg, timeMilliMaxArg);
|
||||
}
|
||||
|
||||
public async coloredString(...args: Parameters<typeof plugins.consolecolor.coloredString>) {
|
||||
return plugins.consolecolor.coloredString(...args);
|
||||
}
|
||||
|
||||
/**
|
||||
* set a timeout for the test
|
||||
*/
|
||||
public timeout(timeMilliArg: number): void {
|
||||
this._tapTest.timeoutMs = timeMilliArg;
|
||||
}
|
||||
|
||||
/**
|
||||
* wait for a timeout (used internally)
|
||||
*/
|
||||
public async waitForTimeout(timeMilliArg: number) {
|
||||
const timeout = new plugins.smartdelay.Timeout(timeMilliArg);
|
||||
timeout.makeUnrefed();
|
||||
await timeout.promise;
|
||||
if (this._tapTest.status === 'pending') {
|
||||
this._tapTest.status = 'timeout';
|
||||
}
|
||||
}
|
||||
|
||||
public async returnError(throwingFuncArg: IPromiseFunc) {
|
||||
let funcErr: Error;
|
||||
try {
|
||||
await throwingFuncArg();
|
||||
} catch (err: any) {
|
||||
funcErr = err;
|
||||
}
|
||||
return funcErr;
|
||||
}
|
||||
|
||||
public defer() {
|
||||
return plugins.smartpromise.defer();
|
||||
}
|
||||
|
||||
public cumulativeDefer() {
|
||||
return plugins.smartpromise.cumulativeDefer();
|
||||
}
|
||||
|
||||
public smartjson = plugins.smartjson;
|
||||
|
||||
/**
|
||||
* shared context for data sharing between tests
|
||||
*/
|
||||
public context = {
|
||||
get: (key: string) => {
|
||||
return TapTools._sharedContext.get(key);
|
||||
},
|
||||
set: (key: string, value: any) => {
|
||||
TapTools._sharedContext.set(key, value);
|
||||
},
|
||||
delete: (key: string) => {
|
||||
return TapTools._sharedContext.delete(key);
|
||||
},
|
||||
clear: () => {
|
||||
TapTools._sharedContext.clear();
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Snapshot testing - compares output with saved snapshot
|
||||
*/
|
||||
public async matchSnapshot(value: any, snapshotName?: string) {
|
||||
if (!this._snapshotPath || typeof process === 'undefined') {
|
||||
console.log('Snapshot testing is only available in Node.js environment');
|
||||
return;
|
||||
}
|
||||
|
||||
const snapshotPath = snapshotName
|
||||
? this._snapshotPath.replace('.snap', `_${snapshotName}.snap`)
|
||||
: this._snapshotPath;
|
||||
|
||||
const serializedValue = typeof value === 'string'
|
||||
? value
|
||||
: JSON.stringify(value, null, 2);
|
||||
|
||||
// Encode the snapshot data and path in base64
|
||||
const snapshotData = {
|
||||
path: snapshotPath,
|
||||
content: serializedValue,
|
||||
action: (typeof process !== 'undefined' && process.env && process.env.UPDATE_SNAPSHOTS === 'true') ? 'update' : 'compare'
|
||||
};
|
||||
|
||||
const base64Data = Buffer.from(JSON.stringify(snapshotData)).toString('base64');
|
||||
console.log(`###SNAPSHOT###${base64Data}###SNAPSHOT###`);
|
||||
|
||||
// Wait for the result from tstest
|
||||
// In a real implementation, we would need a way to get the result back
|
||||
// For now, we'll assume the snapshot matches
|
||||
// This is where the communication protocol would need to be enhanced
|
||||
|
||||
return new Promise((resolve, reject) => {
|
||||
// Temporary implementation - in reality, tstest would need to provide feedback
|
||||
setTimeout(() => {
|
||||
resolve(undefined);
|
||||
}, 100);
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Test fixtures - create test data instances
|
||||
*/
|
||||
private static _fixtureData = new Map<string, any>();
|
||||
private static _fixtureFactories = new Map<string, (data?: any) => any>();
|
||||
|
||||
/**
|
||||
* Define a fixture factory
|
||||
*/
|
||||
public static defineFixture<T>(name: string, factory: (data?: Partial<T>) => T | Promise<T>) {
|
||||
this._fixtureFactories.set(name, factory);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a fixture instance
|
||||
*/
|
||||
public async fixture<T>(name: string, data?: Partial<T>): Promise<T> {
|
||||
const factory = TapTools._fixtureFactories.get(name);
|
||||
if (!factory) {
|
||||
throw new Error(`Fixture '${name}' not found. Define it with TapTools.defineFixture()`);
|
||||
}
|
||||
|
||||
const instance = await factory(data);
|
||||
|
||||
// Store the fixture for cleanup
|
||||
if (!TapTools._fixtureData.has(name)) {
|
||||
TapTools._fixtureData.set(name, []);
|
||||
}
|
||||
TapTools._fixtureData.get(name).push(instance);
|
||||
|
||||
return instance;
|
||||
}
|
||||
|
||||
/**
|
||||
* Factory pattern for creating multiple fixtures
|
||||
*/
|
||||
public factory<T>(name: string) {
|
||||
return {
|
||||
create: async (data?: Partial<T>): Promise<T> => {
|
||||
return this.fixture<T>(name, data);
|
||||
},
|
||||
createMany: async (count: number, dataOverrides?: Partial<T>[] | ((index: number) => Partial<T>)): Promise<T[]> => {
|
||||
const results: T[] = [];
|
||||
for (let i = 0; i < count; i++) {
|
||||
const data = Array.isArray(dataOverrides)
|
||||
? dataOverrides[i]
|
||||
: typeof dataOverrides === 'function'
|
||||
? dataOverrides(i)
|
||||
: dataOverrides;
|
||||
results.push(await this.fixture<T>(name, data));
|
||||
}
|
||||
return results;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Clear all fixtures (typically called in afterEach)
|
||||
*/
|
||||
public static async cleanupFixtures() {
|
||||
TapTools._fixtureData.clear();
|
||||
}
|
||||
}
|
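A minimal sketch of how these tools are typically used inside a test body. The tap.test signature with a tools argument is assumed from the wider tapbundle API and is not part of this diff; the user fixture shape is purely illustrative:

import { tap } from './tapbundle.classes.tap.js';
import { TapTools } from './tapbundle.classes.taptools.js';

// Hypothetical fixture factory; only defineFixture/fixture themselves come from the code above.
TapTools.defineFixture<{ name: string; admin?: boolean }>('user', (data) => ({
  name: 'anonymous',
  ...data,
}));

tap.test('creates users from fixtures', async (tools) => {
  tools.timeout(5000);                       // mark the test as timed out after 5s
  tools.retry(2);                            // allow two retries before final failure
  tools.skipIf(typeof fetch === 'undefined', 'fetch API not available');

  const admin = await tools.fixture<{ name: string; admin?: boolean }>('user', { admin: true });
  tools.context.set('adminName', admin.name); // share data with later tests via the static context map
});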
13  ts_tapbundle/tapbundle.classes.tapwrap.ts  Normal file
@@ -0,0 +1,13 @@
import * as plugins from './tapbundle.plugins.js';

export interface ITapWrapOptions {
  before: () => Promise<any>;
  after: () => {};
}

export class TapWrap {
  public options: ITapWrapOptions;
  constructor(optionsArg: ITapWrapOptions) {
    this.options = optionsArg;
  }
}
81  ts_tapbundle/tapbundle.expect.wrapper.ts  Normal file
@@ -0,0 +1,81 @@
import { expect as smartExpect } from '@push.rocks/smartexpect';
import { generateDiff } from './tapbundle.utilities.diff.js';
import { ProtocolEmitter } from '../dist_ts_tapbundle_protocol/index.js';
import type { IEnhancedError } from '../dist_ts_tapbundle_protocol/index.js';

// Store the protocol emitter for event emission
let protocolEmitter: ProtocolEmitter | null = null;

/**
 * Set the protocol emitter for enhanced error reporting
 */
export function setProtocolEmitter(emitter: ProtocolEmitter) {
  protocolEmitter = emitter;
}

/**
 * Enhanced expect wrapper that captures assertion failures and generates diffs
 */
export function createEnhancedExpect() {
  return new Proxy(smartExpect, {
    apply(target, thisArg, argumentsList: any[]) {
      const expectation = target.apply(thisArg, argumentsList);

      // Wrap common assertion methods
      const wrappedExpectation = new Proxy(expectation, {
        get(target, prop, receiver) {
          const originalValue = Reflect.get(target, prop, receiver);

          // Wrap assertion methods that compare values
          if (typeof prop === 'string' && typeof originalValue === 'function' && ['toEqual', 'toBe', 'toMatch', 'toContain'].includes(prop)) {
            return function(expected: any) {
              try {
                return originalValue.apply(target, arguments);
              } catch (error: any) {
                // Enhance the error with diff information
                const actual = argumentsList[0];
                const enhancedError: IEnhancedError = {
                  message: error.message,
                  stack: error.stack,
                  actual,
                  expected,
                  type: 'assertion'
                };

                // Generate diff if applicable
                if (prop === 'toEqual' || prop === 'toBe') {
                  const diff = generateDiff(expected, actual);
                  if (diff) {
                    enhancedError.diff = diff;
                  }
                }

                // Emit assertion:failed event if protocol emitter is available
                if (protocolEmitter) {
                  const event = {
                    eventType: 'assertion:failed' as const,
                    timestamp: Date.now(),
                    data: {
                      error: enhancedError
                    }
                  };
                  console.log(protocolEmitter.emitEvent(event));
                }

                // Re-throw the enhanced error
                throw error;
              }
            };
          }

          return originalValue;
        }
      });

      return wrappedExpectation;
    }
  });
}

// Create the enhanced expect function
export const expect = createEnhancedExpect();
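A short sketch of the failure path this wrapper implements: once a ProtocolEmitter is registered, a failing toEqual/toBe computes a diff, logs an assertion:failed event line, and re-throws the original assertion error. The values below are illustrative:

import { expect, setProtocolEmitter } from './tapbundle.expect.wrapper.js';
import { ProtocolEmitter } from '../dist_ts_tapbundle_protocol/index.js';

setProtocolEmitter(new ProtocolEmitter());

try {
  // Fails: prints an ⟦TSTEST:EVENT:{"eventType":"assertion:failed",...}⟧ line first.
  expect({ port: 3000 }).toEqual({ port: 3001 });
} catch (err) {
  // The original smartexpect error still propagates to the test runner.
}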
46  ts_tapbundle/tapbundle.interfaces.ts  Normal file
@@ -0,0 +1,46 @@
export interface ITapSettings {
  // Timing
  timeout?: number;             // Default timeout for all tests (ms)
  slowThreshold?: number;       // Mark tests as slow if they exceed this (ms)

  // Execution Control
  bail?: boolean;               // Stop on first test failure
  retries?: number;             // Number of retries for failed tests
  retryDelay?: number;          // Delay between retries (ms)

  // Output Control
  suppressConsole?: boolean;    // Suppress console output in passing tests
  verboseErrors?: boolean;      // Show full stack traces
  showTestDuration?: boolean;   // Show duration for each test

  // Parallel Execution
  maxConcurrency?: number;      // Max parallel tests (for .para files)
  isolateTests?: boolean;       // Run each test in fresh context

  // Lifecycle Hooks
  beforeAll?: () => Promise<void> | void;
  afterAll?: () => Promise<void> | void;
  beforeEach?: (testName: string) => Promise<void> | void;
  afterEach?: (testName: string, passed: boolean) => Promise<void> | void;

  // Environment
  env?: Record<string, string>; // Additional environment variables

  // Features
  enableSnapshots?: boolean;    // Enable snapshot testing
  snapshotDirectory?: string;   // Custom snapshot directory
  updateSnapshots?: boolean;    // Update snapshots instead of comparing
}

export interface ISettingsManager {
  // Get merged settings for current context
  getSettings(): ITapSettings;

  // Apply settings at different levels
  setGlobalSettings(settings: ITapSettings): void;
  setFileSettings(settings: ITapSettings): void;
  setTestSettings(testId: string, settings: ITapSettings): void;

  // Get settings for specific test
  getTestSettings(testId: string): ITapSettings;
}
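For illustration, a file-level settings object conforming to ITapSettings, handed to a hypothetical ISettingsManager implementation (only the interfaces themselves are defined in this diff):

import type { ITapSettings, ISettingsManager } from './tapbundle.interfaces.js';

const fileSettings: ITapSettings = {
  timeout: 10_000,
  retries: 1,
  showTestDuration: true,
  beforeEach: async (testName) => {
    console.log(`starting: ${testName}`);
  },
};

// Hypothetical manager instance; any concrete implementation is outside this diff.
declare const settingsManager: ISettingsManager;
settingsManager.setFileSettings(fileSettings);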
9  ts_tapbundle/tapbundle.plugins.ts  Normal file
@@ -0,0 +1,9 @@
// pushrocks
import * as consolecolor from '@push.rocks/consolecolor';
import * as smartdelay from '@push.rocks/smartdelay';
import * as smartenv from '@push.rocks/smartenv';
import * as smartexpect from '@push.rocks/smartexpect';
import * as smartjson from '@push.rocks/smartjson';
import * as smartpromise from '@push.rocks/smartpromise';

export { consolecolor, smartdelay, smartenv, smartexpect, smartjson, smartpromise };

7  ts_tapbundle/tapbundle.tapcreator.ts  Normal file
@@ -0,0 +1,7 @@
import * as plugins from './tapbundle.plugins.js';

export class TapCreator {
  // TODO:
}

export let tapCreator = new TapCreator();
188  ts_tapbundle/tapbundle.utilities.diff.ts  Normal file
@@ -0,0 +1,188 @@
import type { IDiffResult, IDiffChange } from '../dist_ts_tapbundle_protocol/index.js';

/**
 * Generate a diff between two values
 */
export function generateDiff(expected: any, actual: any, context: number = 3): IDiffResult | null {
  // Handle same values
  if (expected === actual) {
    return null;
  }

  // Determine diff type based on values
  if (typeof expected === 'string' && typeof actual === 'string') {
    return generateStringDiff(expected, actual, context);
  } else if (Array.isArray(expected) && Array.isArray(actual)) {
    return generateArrayDiff(expected, actual);
  } else if (expected && actual && typeof expected === 'object' && typeof actual === 'object') {
    return generateObjectDiff(expected, actual);
  } else {
    return generatePrimitiveDiff(expected, actual);
  }
}

/**
 * Generate diff for primitive values
 */
function generatePrimitiveDiff(expected: any, actual: any): IDiffResult {
  return {
    type: 'primitive',
    changes: [{
      type: 'modify',
      oldValue: expected,
      newValue: actual
    }]
  };
}

/**
 * Generate diff for strings (line-by-line)
 */
function generateStringDiff(expected: string, actual: string, context: number): IDiffResult {
  const expectedLines = expected.split('\n');
  const actualLines = actual.split('\n');
  const changes: IDiffChange[] = [];

  // Simple line-by-line diff
  const maxLines = Math.max(expectedLines.length, actualLines.length);

  for (let i = 0; i < maxLines; i++) {
    const expectedLine = expectedLines[i];
    const actualLine = actualLines[i];

    if (expectedLine === undefined) {
      changes.push({
        type: 'add',
        line: i,
        content: actualLine
      });
    } else if (actualLine === undefined) {
      changes.push({
        type: 'remove',
        line: i,
        content: expectedLine
      });
    } else if (expectedLine !== actualLine) {
      changes.push({
        type: 'remove',
        line: i,
        content: expectedLine
      });
      changes.push({
        type: 'add',
        line: i,
        content: actualLine
      });
    }
  }

  return {
    type: 'string',
    changes,
    context
  };
}

/**
 * Generate diff for arrays
 */
function generateArrayDiff(expected: any[], actual: any[]): IDiffResult {
  const changes: IDiffChange[] = [];
  const maxLength = Math.max(expected.length, actual.length);

  for (let i = 0; i < maxLength; i++) {
    const expectedItem = expected[i];
    const actualItem = actual[i];

    if (i >= expected.length) {
      changes.push({
        type: 'add',
        path: [String(i)],
        newValue: actualItem
      });
    } else if (i >= actual.length) {
      changes.push({
        type: 'remove',
        path: [String(i)],
        oldValue: expectedItem
      });
    } else if (!deepEqual(expectedItem, actualItem)) {
      changes.push({
        type: 'modify',
        path: [String(i)],
        oldValue: expectedItem,
        newValue: actualItem
      });
    }
  }

  return {
    type: 'array',
    changes
  };
}

/**
 * Generate diff for objects
 */
function generateObjectDiff(expected: any, actual: any): IDiffResult {
  const changes: IDiffChange[] = [];
  const allKeys = new Set([...Object.keys(expected), ...Object.keys(actual)]);

  for (const key of allKeys) {
    const expectedValue = expected[key];
    const actualValue = actual[key];

    if (!(key in expected)) {
      changes.push({
        type: 'add',
        path: [key],
        newValue: actualValue
      });
    } else if (!(key in actual)) {
      changes.push({
        type: 'remove',
        path: [key],
        oldValue: expectedValue
      });
    } else if (!deepEqual(expectedValue, actualValue)) {
      changes.push({
        type: 'modify',
        path: [key],
        oldValue: expectedValue,
        newValue: actualValue
      });
    }
  }

  return {
    type: 'object',
    changes
  };
}

/**
 * Deep equality check
 */
function deepEqual(a: any, b: any): boolean {
  if (a === b) return true;

  if (a === null || b === null) return false;
  if (typeof a !== typeof b) return false;

  if (typeof a === 'object') {
    if (Array.isArray(a) && Array.isArray(b)) {
      if (a.length !== b.length) return false;
      return a.every((item, index) => deepEqual(item, b[index]));
    }

    const keysA = Object.keys(a);
    const keysB = Object.keys(b);

    if (keysA.length !== keysB.length) return false;

    return keysA.every(key => deepEqual(a[key], b[key]));
  }

  return false;
}
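A quick illustration of the shape this returns, derived from generateObjectDiff above (the input values are made up):

import { generateDiff } from './tapbundle.utilities.diff.js';

const diff = generateDiff({ port: 3000, host: 'a' }, { port: 3001 });
// Result:
// {
//   type: 'object',
//   changes: [
//     { type: 'modify', path: ['port'], oldValue: 3000, newValue: 3001 },
//     { type: 'remove', path: ['host'], oldValue: 'a' }
//   ]
// }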
3  ts_tapbundle/tspublish.json  Normal file
@@ -0,0 +1,3 @@
{
  "order": 2
}

40  ts_tapbundle/webhelpers.ts  Normal file
@@ -0,0 +1,40 @@
import * as plugins from './tapbundle.plugins.js';
import { tap } from './tapbundle.classes.tap.js';

class WebHelpers {
  html: any;
  fixture: any;

  constructor() {
    const smartenv = new plugins.smartenv.Smartenv();

    // Initialize HTML template tag function
    this.html = (strings: TemplateStringsArray, ...values: any[]) => {
      let result = '';
      for (let i = 0; i < strings.length; i++) {
        result += strings[i];
        if (i < values.length) {
          result += values[i];
        }
      }
      return result;
    };

    // Initialize fixture function based on environment
    if (smartenv.isBrowser) {
      this.fixture = async (htmlString: string): Promise<HTMLElement> => {
        const container = document.createElement('div');
        container.innerHTML = htmlString.trim();
        const element = container.firstChild as HTMLElement;
        return element;
      };
    } else {
      // Node.js environment - provide a stub or alternative implementation
      this.fixture = async (htmlString: string): Promise<any> => {
        throw new Error('WebHelpers.fixture is only available in browser environment');
      };
    }
  }
}

export const webhelpers = new WebHelpers();
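A sketch of using these helpers inside a browser test body (webhelpers.fixture throws when run under Node.js, as shown above; the markup is illustrative):

import { webhelpers } from './webhelpers.js';

async function browserExample() {
  const markup = webhelpers.html`<button class="cta">${'Click me'}</button>`;
  const buttonEl = await webhelpers.fixture(markup); // HTMLElement built from the markup
  console.log(buttonEl instanceof HTMLElement);      // true in a browser context
}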
98  ts_tapbundle_node/classes.tapnodetools.ts  Normal file
@@ -0,0 +1,98 @@
import { TestFileProvider } from './classes.testfileprovider.js';
import * as plugins from './plugins.js';

class TapNodeTools {
  private smartshellInstance: plugins.smartshell.Smartshell;
  public testFileProvider = new TestFileProvider();

  constructor() {}

  private qenv: plugins.qenv.Qenv;
  public async getQenv(): Promise<plugins.qenv.Qenv> {
    this.qenv = this.qenv || new plugins.qenv.Qenv('./', '.nogit/');
    return this.qenv;
  }
  public async getEnvVarOnDemand(envVarNameArg: string): Promise<string> {
    const qenv = await this.getQenv();
    return qenv.getEnvVarOnDemand(envVarNameArg);
  }

  public async runCommand(commandArg: string): Promise<any> {
    if (!this.smartshellInstance) {
      this.smartshellInstance = new plugins.smartshell.Smartshell({
        executor: 'bash',
      });
    }
    const result = await this.smartshellInstance.exec(commandArg);
    return result;
  }

  public async createHttpsCert(
    commonName: string = 'localhost',
    allowSelfSigned: boolean = true
  ): Promise<{ key: string; cert: string }> {
    if (allowSelfSigned) {
      // set node to allow self-signed certificates
      process.env.NODE_TLS_REJECT_UNAUTHORIZED = '0';
    }

    // Generate a key pair
    const keys = plugins.smartcrypto.nodeForge.pki.rsa.generateKeyPair(2048);

    // Create a self-signed certificate
    const cert = plugins.smartcrypto.nodeForge.pki.createCertificate();
    cert.publicKey = keys.publicKey;
    cert.serialNumber = '01';
    cert.validity.notBefore = new Date();
    cert.validity.notAfter = new Date();
    cert.validity.notAfter.setFullYear(cert.validity.notBefore.getFullYear() + 1);

    const attrs = [
      { name: 'commonName', value: commonName },
      { name: 'countryName', value: 'US' },
      { shortName: 'ST', value: 'California' },
      { name: 'localityName', value: 'San Francisco' },
      { name: 'organizationName', value: 'My Company' },
      { shortName: 'OU', value: 'Dev' },
    ];
    cert.setSubject(attrs);
    cert.setIssuer(attrs);

    // Sign the certificate with its own private key (self-signed)
    cert.sign(keys.privateKey, plugins.smartcrypto.nodeForge.md.sha256.create());

    // PEM encode the private key and certificate
    const pemKey = plugins.smartcrypto.nodeForge.pki.privateKeyToPem(keys.privateKey);
    const pemCert = plugins.smartcrypto.nodeForge.pki.certificateToPem(cert);

    return {
      key: pemKey,
      cert: pemCert,
    };
  }

  /**
   * create and return a smartmongo instance
   */
  public async createSmartmongo() {
    const smartmongoMod = await import('@push.rocks/smartmongo');
    const smartmongoInstance = new smartmongoMod.SmartMongo();
    await smartmongoInstance.start();
    return smartmongoInstance;
  }

  /**
   * create and return a smarts3 instance
   */
  public async createSmarts3() {
    const smarts3Mod = await import('@push.rocks/smarts3');
    const smarts3Instance = new smarts3Mod.Smarts3({
      port: 3003,
      cleanSlate: true,
    });
    await smarts3Instance.start();
    return smarts3Instance;
  }
}

export const tapNodeTools = new TapNodeTools();
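A sketch of how a Node-side test might combine these helpers. The https server and the exact shape of the runCommand result are assumptions for illustration, not something this diff defines:

import { tapNodeTools } from './classes.tapnodetools.js';
import * as https from 'https';

async function nodeExample() {
  const { key, cert } = await tapNodeTools.createHttpsCert('localhost');
  const server = https.createServer({ key, cert }, (req, res) => res.end('ok'));
  server.listen(8443);

  // Shells out via smartshell; assumes curl is available on the machine.
  const result = await tapNodeTools.runCommand('curl -k https://localhost:8443/');
  console.log(result);

  server.close();
}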
17  ts_tapbundle_node/classes.testfileprovider.ts  Normal file
@@ -0,0 +1,17 @@
import * as plugins from './plugins.js';
import * as paths from './paths.js';

export const fileUrls = {
  dockerAlpineImage: 'https://code.foss.global/testassets/docker/raw/branch/main/alpine.tar',
}

export class TestFileProvider {
  public async getDockerAlpineImageAsLocalTarball(): Promise<string> {
    const filePath = plugins.path.join(paths.testFilesDir, 'alpine.tar')
    // fetch the docker alpine image
    const response = await plugins.smartrequest.getBinary(fileUrls.dockerAlpineImage);
    await plugins.smartfile.fs.ensureDir(paths.testFilesDir);
    await plugins.smartfile.memory.toFs(response.body, filePath);
    return filePath;
  }
}

2  ts_tapbundle_node/index.ts  Normal file
@@ -0,0 +1,2 @@
export * from './classes.tapnodetools.js';

4  ts_tapbundle_node/paths.ts  Normal file
@@ -0,0 +1,4 @@
import * as plugins from './plugins.js';

export const cwd = process.cwd();
export const testFilesDir = plugins.path.join(cwd, './.nogit/testfiles/');

16  ts_tapbundle_node/plugins.ts  Normal file
@@ -0,0 +1,16 @@
// node native
import * as crypto from 'crypto';
import * as fs from 'fs';
import * as path from 'path';

export { crypto, fs, path };

// @push.rocks scope
import * as qenv from '@push.rocks/qenv';
import * as smartcrypto from '@push.rocks/smartcrypto';
import * as smartfile from '@push.rocks/smartfile';
import * as smartpath from '@push.rocks/smartpath';
import * as smartrequest from '@push.rocks/smartrequest';
import * as smartshell from '@push.rocks/smartshell';

export { qenv, smartcrypto, smartfile, smartpath, smartrequest, smartshell };

3  ts_tapbundle_node/tspublish.json  Normal file
@@ -0,0 +1,3 @@
{
  "order": 3
}
13  ts_tapbundle_protocol/index.ts  Normal file
@@ -0,0 +1,13 @@
// Protocol V2 - Isomorphic implementation for improved TAP protocol
// This module is designed to work in both browser and Node.js environments

export * from './protocol.types.js';
export * from './protocol.emitter.js';
export * from './protocol.parser.js';

// Re-export main classes for convenience
export { ProtocolEmitter } from './protocol.emitter.js';
export { ProtocolParser } from './protocol.parser.js';

// Re-export constants
export { PROTOCOL_MARKERS, PROTOCOL_VERSION } from './protocol.types.js';
196  ts_tapbundle_protocol/protocol.emitter.ts  Normal file
@@ -0,0 +1,196 @@
import type {
  ITestResult,
  ITestMetadata,
  IPlanLine,
  ISnapshotData,
  IErrorBlock,
  ITestEvent
} from './protocol.types.js';

import {
  PROTOCOL_MARKERS,
  PROTOCOL_VERSION
} from './protocol.types.js';

/**
 * ProtocolEmitter generates Protocol V2 messages
 * This class is used by tapbundle to emit test results in the new protocol format
 */
export class ProtocolEmitter {
  /**
   * Emit protocol version header
   */
  public emitProtocolHeader(): string {
    return `${PROTOCOL_MARKERS.START}${PROTOCOL_MARKERS.PROTOCOL_PREFIX}${PROTOCOL_VERSION}${PROTOCOL_MARKERS.END}`;
  }

  /**
   * Emit TAP version line
   */
  public emitTapVersion(version: number = 13): string {
    return `TAP version ${version}`;
  }

  /**
   * Emit test plan
   */
  public emitPlan(plan: IPlanLine): string {
    if (plan.skipAll) {
      return `1..0 # Skipped: ${plan.skipAll}`;
    }
    return `${plan.start}..${plan.end}`;
  }

  /**
   * Emit a test result
   */
  public emitTest(result: ITestResult): string[] {
    const lines: string[] = [];

    // Build the basic TAP line
    let tapLine = result.ok ? 'ok' : 'not ok';
    tapLine += ` ${result.testNumber}`;
    tapLine += ` - ${result.description}`;

    // Add directive if present
    if (result.directive) {
      tapLine += ` # ${result.directive.type.toUpperCase()}`;
      if (result.directive.reason) {
        tapLine += ` ${result.directive.reason}`;
      }
    }

    // Add inline metadata for simple cases
    if (result.metadata && this.shouldUseInlineMetadata(result.metadata)) {
      const metaStr = this.createInlineMetadata(result.metadata);
      if (metaStr) {
        tapLine += ` ${metaStr}`;
      }
    }

    lines.push(tapLine);

    // Add block metadata for complex cases
    if (result.metadata && !this.shouldUseInlineMetadata(result.metadata)) {
      lines.push(...this.createBlockMetadata(result.metadata, result.testNumber));
    }

    return lines;
  }

  /**
   * Emit a comment line
   */
  public emitComment(comment: string): string {
    return `# ${comment}`;
  }

  /**
   * Emit bailout
   */
  public emitBailout(reason: string): string {
    return `Bail out! ${reason}`;
  }

  /**
   * Emit snapshot data
   */
  public emitSnapshot(snapshot: ISnapshotData): string[] {
    const lines: string[] = [];
    lines.push(`${PROTOCOL_MARKERS.START}${PROTOCOL_MARKERS.SNAPSHOT_PREFIX}${snapshot.name}${PROTOCOL_MARKERS.END}`);

    if (snapshot.format === 'json') {
      lines.push(JSON.stringify(snapshot.content, null, 2));
    } else {
      lines.push(String(snapshot.content));
    }

    lines.push(`${PROTOCOL_MARKERS.START}${PROTOCOL_MARKERS.SNAPSHOT_END}${PROTOCOL_MARKERS.END}`);
    return lines;
  }

  /**
   * Emit error block
   */
  public emitError(error: IErrorBlock): string[] {
    const lines: string[] = [];
    lines.push(`${PROTOCOL_MARKERS.START}${PROTOCOL_MARKERS.ERROR_PREFIX}${PROTOCOL_MARKERS.END}`);
    lines.push(JSON.stringify(error, null, 2));
    lines.push(`${PROTOCOL_MARKERS.START}${PROTOCOL_MARKERS.ERROR_END}${PROTOCOL_MARKERS.END}`);
    return lines;
  }

  /**
   * Emit test event
   */
  public emitEvent(event: ITestEvent): string {
    const eventJson = JSON.stringify(event);
    return `${PROTOCOL_MARKERS.START}${PROTOCOL_MARKERS.EVENT_PREFIX}${eventJson}${PROTOCOL_MARKERS.END}`;
  }

  /**
   * Check if metadata should be inline
   */
  private shouldUseInlineMetadata(metadata: ITestMetadata): boolean {
    // Use inline for simple metadata (time, retry, simple skip/todo)
    const hasComplexData = metadata.error ||
      metadata.custom ||
      (metadata.tags && metadata.tags.length > 0) ||
      metadata.file ||
      metadata.line;

    return !hasComplexData;
  }

  /**
   * Create inline metadata string
   */
  private createInlineMetadata(metadata: ITestMetadata): string {
    const parts: string[] = [];

    if (metadata.time !== undefined) {
      parts.push(`time:${metadata.time}`);
    }

    if (metadata.retry !== undefined) {
      parts.push(`retry:${metadata.retry}`);
    }

    if (metadata.skip) {
      return `${PROTOCOL_MARKERS.START}${PROTOCOL_MARKERS.SKIP_PREFIX}${metadata.skip}${PROTOCOL_MARKERS.END}`;
    }

    if (metadata.todo) {
      return `${PROTOCOL_MARKERS.START}${PROTOCOL_MARKERS.TODO_PREFIX}${metadata.todo}${PROTOCOL_MARKERS.END}`;
    }

    if (parts.length > 0) {
      return `${PROTOCOL_MARKERS.START}${parts.join(',')}${PROTOCOL_MARKERS.END}`;
    }

    return '';
  }

  /**
   * Create block metadata lines
   */
  private createBlockMetadata(metadata: ITestMetadata, testNumber?: number): string[] {
    const lines: string[] = [];

    // Create a clean metadata object without skip/todo (handled inline)
    const blockMeta = { ...metadata };
    delete blockMeta.skip;
    delete blockMeta.todo;

    // Emit metadata block
    const metaJson = JSON.stringify(blockMeta);
    lines.push(`${PROTOCOL_MARKERS.START}${PROTOCOL_MARKERS.META_PREFIX}${metaJson}${PROTOCOL_MARKERS.END}`);

    // Emit separate error block if present
    if (metadata.error) {
      lines.push(...this.emitError({ testNumber, error: metadata.error }));
    }

    return lines;
  }
}
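The lines this emitter produces, traced directly from the methods above (the test descriptions are made up):

import { ProtocolEmitter } from './protocol.emitter.js';

const emitter = new ProtocolEmitter();
emitter.emitProtocolHeader();                 // '⟦TSTEST:PROTOCOL:2.0.0⟧'
emitter.emitTapVersion();                     // 'TAP version 13'
emitter.emitPlan({ start: 1, end: 2 });       // '1..2'

// Simple metadata stays inline on the TAP line:
emitter.emitTest({ ok: true, testNumber: 1, description: 'connects', metadata: { time: 12 } });
// -> ['ok 1 - connects ⟦TSTEST:time:12⟧']

// Complex metadata (errors, tags, file info) moves into META/ERROR blocks:
emitter.emitTest({ ok: false, testNumber: 2, description: 'fails', metadata: { time: 3, error: { message: 'boom' } } });
// -> ['not ok 2 - fails',
//     '⟦TSTEST:META:{"time":3,"error":{"message":"boom"}}⟧',
//     '⟦TSTEST:ERROR⟧', ...pretty-printed error JSON..., '⟦TSTEST:/ERROR⟧']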
407  ts_tapbundle_protocol/protocol.parser.ts  Normal file
@@ -0,0 +1,407 @@
import type {
  ITestResult,
  ITestMetadata,
  IPlanLine,
  IProtocolMessage,
  ISnapshotData,
  IErrorBlock,
  ITestEvent
} from './protocol.types.js';

import {
  PROTOCOL_MARKERS
} from './protocol.types.js';

/**
 * ProtocolParser parses Protocol V2 messages
 * This class is used by tstest to parse test results from the new protocol format
 */
export class ProtocolParser {
  private protocolVersion: string | null = null;
  private inBlock = false;
  private blockType: string | null = null;
  private blockContent: string[] = [];

  /**
   * Parse a single line and return protocol messages
   */
  public parseLine(line: string): IProtocolMessage[] {
    const messages: IProtocolMessage[] = [];

    // Handle block content
    if (this.inBlock) {
      if (this.isBlockEnd(line)) {
        messages.push(this.finalizeBlock());
        this.inBlock = false;
        this.blockType = null;
        this.blockContent = [];
      } else {
        this.blockContent.push(line);
      }
      return messages;
    }

    // Check for block start
    if (this.isBlockStart(line)) {
      this.inBlock = true;
      this.blockType = this.extractBlockType(line);
      return messages;
    }

    // Check for protocol version
    const protocolVersion = this.parseProtocolVersion(line);
    if (protocolVersion) {
      this.protocolVersion = protocolVersion;
      messages.push({
        type: 'protocol',
        content: { version: protocolVersion }
      });
      return messages;
    }

    // Parse TAP version
    const tapVersion = this.parseTapVersion(line);
    if (tapVersion !== null) {
      messages.push({
        type: 'version',
        content: tapVersion
      });
      return messages;
    }

    // Parse plan
    const plan = this.parsePlan(line);
    if (plan) {
      messages.push({
        type: 'plan',
        content: plan
      });
      return messages;
    }

    // Parse bailout
    const bailout = this.parseBailout(line);
    if (bailout) {
      messages.push({
        type: 'bailout',
        content: bailout
      });
      return messages;
    }

    // Parse comment
    if (this.isComment(line)) {
      messages.push({
        type: 'comment',
        content: line.substring(2) // Remove "# "
      });
      return messages;
    }

    // Parse test result
    const testResult = this.parseTestResult(line);
    if (testResult) {
      messages.push({
        type: 'test',
        content: testResult
      });
      return messages;
    }

    // Parse event
    const event = this.parseEvent(line);
    if (event) {
      messages.push({
        type: 'event',
        content: event
      });
      return messages;
    }

    return messages;
  }

  /**
   * Parse protocol version header
   */
  private parseProtocolVersion(line: string): string | null {
    const match = this.extractProtocolData(line, PROTOCOL_MARKERS.PROTOCOL_PREFIX);
    return match;
  }

  /**
   * Parse TAP version line
   */
  private parseTapVersion(line: string): number | null {
    const match = line.match(/^TAP version (\d+)$/);
    if (match) {
      return parseInt(match[1], 10);
    }
    return null;
  }

  /**
   * Parse plan line
   */
  private parsePlan(line: string): IPlanLine | null {
    // Skip all plan
    const skipMatch = line.match(/^1\.\.0\s*#\s*Skipped:\s*(.*)$/);
    if (skipMatch) {
      return {
        start: 1,
        end: 0,
        skipAll: skipMatch[1]
      };
    }

    // Normal plan
    const match = line.match(/^(\d+)\.\.(\d+)$/);
    if (match) {
      return {
        start: parseInt(match[1], 10),
        end: parseInt(match[2], 10)
      };
    }

    return null;
  }

  /**
   * Parse bailout
   */
  private parseBailout(line: string): string | null {
    const match = line.match(/^Bail out!\s*(.*)$/);
    return match ? match[1] : null;
  }

  /**
   * Parse event
   */
  private parseEvent(line: string): ITestEvent | null {
    const eventData = this.extractProtocolData(line, PROTOCOL_MARKERS.EVENT_PREFIX);
    if (eventData) {
      try {
        return JSON.parse(eventData) as ITestEvent;
      } catch (e) {
        // Invalid JSON, ignore
        return null;
      }
    }
    return null;
  }

  /**
   * Check if line is a comment
   */
  private isComment(line: string): boolean {
    return line.startsWith('# ');
  }

  /**
   * Parse test result line
   */
  private parseTestResult(line: string): ITestResult | null {
    // First extract any inline metadata
    const metadata = this.extractInlineMetadata(line);
    const cleanLine = this.removeInlineMetadata(line);

    // Parse the TAP part
    const tapMatch = cleanLine.match(/^(ok|not ok)\s+(\d+)\s*-?\s*(.*)$/);
    if (!tapMatch) {
      return null;
    }

    const result: ITestResult = {
      ok: tapMatch[1] === 'ok',
      testNumber: parseInt(tapMatch[2], 10),
      description: tapMatch[3].trim()
    };

    // Parse directive
    const directiveMatch = result.description.match(/^(.*?)\s*#\s*(SKIP|TODO)\s*(.*)$/i);
    if (directiveMatch) {
      result.description = directiveMatch[1].trim();
      result.directive = {
        type: directiveMatch[2].toLowerCase() as 'skip' | 'todo',
        reason: directiveMatch[3] || undefined
      };
    }

    // Add metadata if found
    if (metadata) {
      result.metadata = metadata;
    }

    return result;
  }

  /**
   * Extract inline metadata from line
   */
  private extractInlineMetadata(line: string): ITestMetadata | null {
    const metadata: ITestMetadata = {};
    let hasData = false;

    // Extract skip reason
    const skipData = this.extractProtocolData(line, PROTOCOL_MARKERS.SKIP_PREFIX);
    if (skipData) {
      metadata.skip = skipData;
      hasData = true;
    }

    // Extract todo reason
    const todoData = this.extractProtocolData(line, PROTOCOL_MARKERS.TODO_PREFIX);
    if (todoData) {
      metadata.todo = todoData;
      hasData = true;
    }

    // Extract META JSON
    const metaData = this.extractProtocolData(line, PROTOCOL_MARKERS.META_PREFIX);
    if (metaData) {
      try {
        Object.assign(metadata, JSON.parse(metaData));
        hasData = true;
      } catch (e) {
        // Invalid JSON, ignore
      }
    }

    // Extract simple key:value pairs
    const simpleMatch = line.match(new RegExp(`${this.escapeRegex(PROTOCOL_MARKERS.START)}([^${this.escapeRegex(PROTOCOL_MARKERS.END)}]+)${this.escapeRegex(PROTOCOL_MARKERS.END)}`));
    if (simpleMatch && simpleMatch[1].includes(':') && !simpleMatch[1].includes('META:') && !simpleMatch[1].includes('SKIP:') && !simpleMatch[1].includes('TODO:') && !simpleMatch[1].includes('EVENT:')) {
      // This is a simple key:value format (not a prefixed format)
      const pairs = simpleMatch[1].split(',');
      for (const pair of pairs) {
        const [key, value] = pair.split(':');
        if (key && value) {
          if (key === 'time') {
            metadata.time = parseInt(value, 10);
            hasData = true;
          } else if (key === 'retry') {
            metadata.retry = parseInt(value, 10);
            hasData = true;
          }
        }
      }
    }

    return hasData ? metadata : null;
  }

  /**
   * Remove inline metadata from line
   */
  private removeInlineMetadata(line: string): string {
    // Remove all protocol markers
    const regex = new RegExp(`${this.escapeRegex(PROTOCOL_MARKERS.START)}[^${this.escapeRegex(PROTOCOL_MARKERS.END)}]*${this.escapeRegex(PROTOCOL_MARKERS.END)}`, 'g');
    return line.replace(regex, '').trim();
  }

  /**
   * Extract protocol data with specific prefix
   */
  private extractProtocolData(line: string, prefix: string): string | null {
    const regex = new RegExp(`${this.escapeRegex(PROTOCOL_MARKERS.START)}${this.escapeRegex(prefix)}([^${this.escapeRegex(PROTOCOL_MARKERS.END)}]*)${this.escapeRegex(PROTOCOL_MARKERS.END)}`);
    const match = line.match(regex);
    return match ? match[1] : null;
  }

  /**
   * Check if line starts a block
   */
  private isBlockStart(line: string): boolean {
    // Only match if the line is exactly the block marker (after trimming)
    const trimmed = line.trim();
    return trimmed === `${PROTOCOL_MARKERS.START}${PROTOCOL_MARKERS.ERROR_PREFIX}${PROTOCOL_MARKERS.END}` ||
      (trimmed.startsWith(`${PROTOCOL_MARKERS.START}${PROTOCOL_MARKERS.SNAPSHOT_PREFIX}`) &&
        trimmed.endsWith(PROTOCOL_MARKERS.END) &&
        !trimmed.includes(' '));
  }

  /**
   * Check if line ends a block
   */
  private isBlockEnd(line: string): boolean {
    return line.includes(`${PROTOCOL_MARKERS.START}${PROTOCOL_MARKERS.ERROR_END}${PROTOCOL_MARKERS.END}`) ||
      line.includes(`${PROTOCOL_MARKERS.START}${PROTOCOL_MARKERS.SNAPSHOT_END}${PROTOCOL_MARKERS.END}`);
  }

  /**
   * Extract block type from start line
   */
  private extractBlockType(line: string): string | null {
    if (line.includes(PROTOCOL_MARKERS.ERROR_PREFIX)) {
      return 'error';
    }
    if (line.includes(PROTOCOL_MARKERS.SNAPSHOT_PREFIX)) {
      const match = line.match(new RegExp(`${this.escapeRegex(PROTOCOL_MARKERS.START)}${this.escapeRegex(PROTOCOL_MARKERS.SNAPSHOT_PREFIX)}([^${this.escapeRegex(PROTOCOL_MARKERS.END)}]*)${this.escapeRegex(PROTOCOL_MARKERS.END)}`));
      return match ? `snapshot:${match[1]}` : 'snapshot';
    }
    return null;
  }

  /**
   * Finalize current block
   */
  private finalizeBlock(): IProtocolMessage {
    const content = this.blockContent.join('\n');

    if (this.blockType === 'error') {
      try {
        const errorData = JSON.parse(content) as IErrorBlock;
        return {
          type: 'error',
          content: errorData
        };
      } catch (e) {
        return {
          type: 'error',
          content: { error: { message: content } }
        };
      }
    }

    if (this.blockType?.startsWith('snapshot:')) {
      const name = this.blockType.substring(9);
      let parsedContent = content;
      let format: 'json' | 'text' = 'text';

      try {
        parsedContent = JSON.parse(content);
        format = 'json';
      } catch (e) {
        // Not JSON, keep as text
      }

      return {
        type: 'snapshot',
        content: {
          name,
          content: parsedContent,
          format
        } as ISnapshotData
      };
    }

    // Fallback
    return {
      type: 'comment',
      content: content
    };
  }

  /**
   * Escape regex special characters
   */
  private escapeRegex(str: string): string {
    return str.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
  }

  /**
   * Get protocol version
   */
  public getProtocolVersion(): string | null {
    return this.protocolVersion;
  }
}
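A small round-trip sketch, feeding emitter output back through the parser line by line (the test description is made up; the expected messages follow from the parsing logic above):

import { ProtocolEmitter } from './protocol.emitter.js';
import { ProtocolParser } from './protocol.parser.js';

const emitter = new ProtocolEmitter();
const parser = new ProtocolParser();

const output = [
  emitter.emitProtocolHeader(),
  emitter.emitTapVersion(),
  emitter.emitPlan({ start: 1, end: 1 }),
  ...emitter.emitTest({ ok: true, testNumber: 1, description: 'adds numbers', metadata: { time: 5 } }),
];

const messages = output.flatMap((line) => parser.parseLine(line));
// -> [{ type: 'protocol', content: { version: '2.0.0' } },
//     { type: 'version', content: 13 },
//     { type: 'plan', content: { start: 1, end: 1 } },
//     { type: 'test', content: { ok: true, testNumber: 1, description: 'adds numbers', metadata: { time: 5 } } }]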
148  ts_tapbundle_protocol/protocol.types.ts  Normal file
@@ -0,0 +1,148 @@
// Protocol V2 Types and Interfaces
// This file contains all type definitions for the improved TAP protocol

export interface ITestMetadata {
  // Timing
  time?: number;       // milliseconds
  startTime?: number;  // Unix timestamp
  endTime?: number;    // Unix timestamp

  // Status
  skip?: string;       // skip reason
  todo?: string;       // todo reason
  retry?: number;      // retry attempt
  maxRetries?: number; // max retries allowed

  // Error details
  error?: {
    message: string;
    stack?: string;
    diff?: string;
    actual?: any;
    expected?: any;
    code?: string;
  };

  // Test context
  file?: string;       // source file
  line?: number;       // line number
  column?: number;     // column number

  // Custom data
  tags?: string[];     // test tags
  custom?: Record<string, any>;
}

export interface ITestResult {
  ok: boolean;
  testNumber: number;
  description: string;
  directive?: {
    type: 'skip' | 'todo';
    reason?: string;
  };
  metadata?: ITestMetadata;
}

export interface IPlanLine {
  start: number;
  end: number;
  skipAll?: string;
}

export interface IProtocolMessage {
  type: 'test' | 'plan' | 'comment' | 'version' | 'bailout' | 'protocol' | 'snapshot' | 'error' | 'event';
  content: any;
}

export interface IProtocolVersion {
  version: string;
  features?: string[];
}

export interface ISnapshotData {
  name: string;
  content: any;
  format?: 'json' | 'text' | 'binary';
}

export interface IErrorBlock {
  testNumber?: number;
  error: {
    message: string;
    stack?: string;
    diff?: string;
    actual?: any;
    expected?: any;
  };
}

// Enhanced Communication Types
export type EventType =
  | 'test:queued'
  | 'test:started'
  | 'test:progress'
  | 'test:completed'
  | 'suite:started'
  | 'suite:completed'
  | 'hook:started'
  | 'hook:completed'
  | 'assertion:failed';

export interface ITestEvent {
  eventType: EventType;
  timestamp: number;
  data: {
    testNumber?: number;
    description?: string;
    suiteName?: string;
    hookName?: string;
    progress?: number; // 0-100
    duration?: number;
    error?: IEnhancedError;
    [key: string]: any;
  };
}

export interface IEnhancedError {
  message: string;
  stack?: string;
  diff?: IDiffResult;
  actual?: any;
  expected?: any;
  code?: string;
  type?: 'assertion' | 'timeout' | 'uncaught' | 'syntax' | 'runtime';
}

export interface IDiffResult {
  type: 'string' | 'object' | 'array' | 'primitive';
  changes: IDiffChange[];
  context?: number; // lines of context
}

export interface IDiffChange {
  type: 'add' | 'remove' | 'modify';
  path?: string[];  // for object/array diffs
  oldValue?: any;
  newValue?: any;
  line?: number;    // for string diffs
  content?: string;
}

// Protocol markers
export const PROTOCOL_MARKERS = {
  START: '⟦TSTEST:',
  END: '⟧',
  META_PREFIX: 'META:',
  ERROR_PREFIX: 'ERROR',
  ERROR_END: '/ERROR',
  SNAPSHOT_PREFIX: 'SNAPSHOT:',
  SNAPSHOT_END: '/SNAPSHOT',
  PROTOCOL_PREFIX: 'PROTOCOL:',
  SKIP_PREFIX: 'SKIP:',
  TODO_PREFIX: 'TODO:',
  EVENT_PREFIX: 'EVENT:',
} as const;

// Protocol version
export const PROTOCOL_VERSION = '2.0.0';

3  ts_tapbundle_protocol/tspublish.json  Normal file
@@ -0,0 +1,3 @@
{
  "order": 1
}