Compare commits
189 Commits
SHA1 | Author | Date | |
---|---|---|---|
d05ec21b73 | |||
956a880a4a | |||
ee11b1ac17 | |||
054cbb6b3c | |||
ecf11efb4c | |||
1de674e91d | |||
9fa2c23ab2 | |||
36715c9139 | |||
ee0aca9ff7 | |||
aaebe75326 | |||
265ed702ee | |||
efbaded1f3 | |||
799a60188f | |||
3c38a53d9d | |||
cca01b51ec | |||
84843ad359 | |||
7a8ae95be2 | |||
133e0eda8b | |||
14e32b06de | |||
48aebb1eac | |||
733b2249d0 | |||
008844a9e2 | |||
e4fc6623ea | |||
70435cce45 | |||
c26145205f | |||
82fc22653b | |||
3d85f54be0 | |||
9464c17c15 | |||
91b99ce304 | |||
899045e6aa | |||
845f146e91 | |||
d1f8652fc7 | |||
f717078558 | |||
d2c0e533b5 | |||
d3c7fce595 | |||
570e2d6b3b | |||
b7f4b7b3b8 | |||
424046b0de | |||
0f762f2063 | |||
82757c4abc | |||
7aaeed0dc6 | |||
c98bd85829 | |||
33d2ff1d4f | |||
91880f8d42 | |||
7b1732abcc | |||
7d09b39f2b | |||
96efba5903 | |||
3c535a8a77 | |||
0954265095 | |||
e1d90589bc | |||
33f705d961 | |||
13b11ab1bf | |||
63280e4a9a | |||
23addc2d2f | |||
3649114c8d | |||
2841aba8a4 | |||
31bf090410 | |||
b525754035 | |||
aa10fc4ab3 | |||
3eb8ef22e5 | |||
763dc89f59 | |||
e0d8ede450 | |||
27c950c1a1 | |||
83b324b09f | |||
63a2879cb4 | |||
1a375fa689 | |||
c48887a820 | |||
02aeb8195e | |||
53d3dc55e6 | |||
a82fdc0f26 | |||
cfcb99de76 | |||
a3a4ded41e | |||
03d478d6ff | |||
77e53bd68a | |||
946e467c26 | |||
f452a58fff | |||
2b01d949f2 | |||
1c5cf46ba9 | |||
b28e2eace3 | |||
cc388f1408 | |||
bac2f852c5 | |||
d9e0f1f758 | |||
42cd08eb1c | |||
553d5f0df7 | |||
6cc883dede | |||
fa9abbc4db | |||
56f0f0be16 | |||
dc0f859fad | |||
78ffad2f7d | |||
3fc4cee2b1 | |||
a57edeef64 | |||
1f73751a8c | |||
90741ed917 | |||
962fa2cd4d | |||
c085a20a4f | |||
1f355a10a1 | |||
a73ce99564 | |||
64f825091d | |||
5ddc2d2de0 | |||
85fec03878 | |||
61c3226156 | |||
f0bf778810 | |||
a8e9f67810 | |||
4cce132472 | |||
dc250804f5 | |||
9669445646 | |||
928d9d0616 | |||
3655b2f734 | |||
6712ff6b07 | |||
ef5efc0a93 | |||
f305547116 | |||
033a0a806c | |||
7f87c24ad8 | |||
ac08bdffe5 | |||
eb64cb4f71 | |||
3b56c6ce9f | |||
722d777f80 | |||
f1a0455662 | |||
3c62129e02 | |||
ac5e036967 | |||
6ccd0281b9 | |||
d0f85b026f | |||
4376cafabb | |||
1a6e449b8d | |||
6ec99e7276 | |||
e958417d47 | |||
24416c1b5c | |||
d6c8fcc1cf | |||
53bb97c6db | |||
4f35b101ec | |||
549ae53a00 | |||
d9aa2984ef | |||
37e6d94c9f | |||
74e1df6824 | |||
6eb86c63c3 | |||
e919a4a2e9 | |||
85f6703696 | |||
4c2812f671 | |||
bab93448e8 | |||
3db913eb59 | |||
28e0c32944 | |||
0be6b3400a | |||
eeba113e09 | |||
19e45b305c | |||
f9f4150cff | |||
710548911e | |||
23f9a28fa0 | |||
e1d2f1fd68 | |||
3116c5a818 | |||
568772734b | |||
30525e7e55 | |||
f7483ef995 | |||
1460f97c52 | |||
dfac554303 | |||
1d751bdcdf | |||
bd6713eee8 | |||
440b41611b | |||
78a40de700 | |||
e0eb00d755 | |||
dbbcbf4ea2 | |||
8c13b9db89 | |||
f813a79124 | |||
766138aa25 | |||
72880b4a2d | |||
bd5731c439 | |||
0caebb7448 | |||
ed896b7f1c | |||
69ec5a98ab | |||
3b93886147 | |||
5949988293 | |||
04f7be07a3 | |||
d331f90d24 | |||
224400dcb5 | |||
7601ca599a | |||
994a1bc98d | |||
ca51c9e15b | |||
4c4f08152b | |||
39bd80106a | |||
d6b94b534b | |||
d19d3fc51e | |||
f7f1bf25f6 | |||
42fd414609 | |||
8f16f46c37 | |||
f8afb2c7f6 | |||
a3d1fbb2da | |||
0da1a1bc5b | |||
1ede0b476a | |||
1d251689bb | |||
8f1492dfbd |
.gitignore (2 changes, vendored)

@@ -17,4 +17,4 @@ node_modules/
-dist/
+dist_*/
 
 # custom
.gitlab-ci.yml (127 changes, file removed)

@@ -1,127 +0,0 @@
# gitzone ci_default
image: registry.gitlab.com/hosttoday/ht-docker-node:npmci

cache:
  paths:
    - .npmci_cache/
  key: '$CI_BUILD_STAGE'

stages:
  - security
  - test
  - release
  - metadata

# ====================
# security stage
# ====================
mirror:
  stage: security
  script:
    - npmci git mirror
  tags:
    - lossless
    - docker
    - notpriv

audit:
  image: registry.gitlab.com/hosttoday/ht-docker-node:npmci
  stage: security
  script:
    - npmci npm prepare
    - npmci command npm install --ignore-scripts
    - npmci command npm config set registry https://registry.npmjs.org
    - npmci command npm audit --audit-level=high
  tags:
    - lossless
    - docker
    - notpriv

# ====================
# test stage
# ====================

testStable:
  stage: test
  script:
    - npmci npm prepare
    - npmci node install stable
    - npmci npm install
    - npmci npm test
  coverage: /\d+.?\d+?\%\s*coverage/
  tags:
    - lossless
    - docker
    - priv

testBuild:
  stage: test
  script:
    - npmci npm prepare
    - npmci node install stable
    - npmci npm install
    - npmci command npm run build
  coverage: /\d+.?\d+?\%\s*coverage/
  tags:
    - lossless
    - docker
    - notpriv

release:
  stage: release
  script:
    - npmci node install stable
    - npmci npm publish
  only:
    - tags
  tags:
    - lossless
    - docker
    - notpriv

# ====================
# metadata stage
# ====================
codequality:
  stage: metadata
  allow_failure: true
  script:
    - npmci command npm install -g tslint typescript
    - npmci npm prepare
    - npmci npm install
    - npmci command "tslint -c tslint.json ./ts/**/*.ts"
  tags:
    - lossless
    - docker
    - priv

trigger:
  stage: metadata
  script:
    - npmci trigger
  only:
    - tags
  tags:
    - lossless
    - docker
    - notpriv

pages:
  stage: metadata
  script:
    - npmci node install lts
    - npmci command npm install -g @gitzone/tsdoc
    - npmci npm prepare
    - npmci npm install
    - npmci command tsdoc
  tags:
    - lossless
    - docker
    - notpriv
  only:
    - tags
  artifacts:
    expire_in: 1 week
    paths:
      - public
  allow_failure: true
.serena/.gitignore (1 change, new file, vendored)

@@ -0,0 +1 @@
/cache
.serena/project.yml (68 changes, new file)

@@ -0,0 +1,68 @@
# language of the project (csharp, python, rust, java, typescript, go, cpp, or ruby)
# * For C, use cpp
# * For JavaScript, use typescript
# Special requirements:
# * csharp: Requires the presence of a .sln file in the project folder.
language: typescript

# whether to use the project's gitignore file to ignore files
# Added on 2025-04-07
ignore_all_files_in_gitignore: true
# list of additional paths to ignore
# same syntax as gitignore, so you can use * and **
# Was previously called `ignored_dirs`, please update your config if you are using that.
# Added (renamed) on 2025-04-07
ignored_paths: []

# whether the project is in read-only mode
# If set to true, all editing tools will be disabled and attempts to use them will result in an error
# Added on 2025-04-18
read_only: false


# list of tool names to exclude. We recommend not excluding any tools, see the readme for more details.
# Below is the complete list of tools for convenience.
# To make sure you have the latest list of tools, and to view their descriptions,
# execute `uv run scripts/print_tool_overview.py`.
#
# * `activate_project`: Activates a project by name.
# * `check_onboarding_performed`: Checks whether project onboarding was already performed.
# * `create_text_file`: Creates/overwrites a file in the project directory.
# * `delete_lines`: Deletes a range of lines within a file.
# * `delete_memory`: Deletes a memory from Serena's project-specific memory store.
# * `execute_shell_command`: Executes a shell command.
# * `find_referencing_code_snippets`: Finds code snippets in which the symbol at the given location is referenced.
# * `find_referencing_symbols`: Finds symbols that reference the symbol at the given location (optionally filtered by type).
# * `find_symbol`: Performs a global (or local) search for symbols with/containing a given name/substring (optionally filtered by type).
# * `get_current_config`: Prints the current configuration of the agent, including the active and available projects, tools, contexts, and modes.
# * `get_symbols_overview`: Gets an overview of the top-level symbols defined in a given file.
# * `initial_instructions`: Gets the initial instructions for the current project.
# Should only be used in settings where the system prompt cannot be set,
# e.g. in clients you have no control over, like Claude Desktop.
# * `insert_after_symbol`: Inserts content after the end of the definition of a given symbol.
# * `insert_at_line`: Inserts content at a given line in a file.
# * `insert_before_symbol`: Inserts content before the beginning of the definition of a given symbol.
# * `list_dir`: Lists files and directories in the given directory (optionally with recursion).
# * `list_memories`: Lists memories in Serena's project-specific memory store.
# * `onboarding`: Performs onboarding (identifying the project structure and essential tasks, e.g. for testing or building).
# * `prepare_for_new_conversation`: Provides instructions for preparing for a new conversation (in order to continue with the necessary context).
# * `read_file`: Reads a file within the project directory.
# * `read_memory`: Reads the memory with the given name from Serena's project-specific memory store.
# * `remove_project`: Removes a project from the Serena configuration.
# * `replace_lines`: Replaces a range of lines within a file with new content.
# * `replace_symbol_body`: Replaces the full definition of a symbol.
# * `restart_language_server`: Restarts the language server, may be necessary when edits not through Serena happen.
# * `search_for_pattern`: Performs a search for a pattern in the project.
# * `summarize_changes`: Provides instructions for summarizing the changes made to the codebase.
# * `switch_modes`: Activates modes by providing a list of their names
# * `think_about_collected_information`: Thinking tool for pondering the completeness of collected information.
# * `think_about_task_adherence`: Thinking tool for determining whether the agent is still on track with the current task.
# * `think_about_whether_you_are_done`: Thinking tool for determining whether the task is truly completed.
# * `write_memory`: Writes a named memory (for future reference) to Serena's project-specific memory store.
excluded_tools: []

# initial prompt for the project. It will always be given to the LLM upon activating the project
# (contrary to the memories, which are loaded on demand).
initial_prompt: ""

project_name: "tstest"
.vscode/launch.json (24 changes, vendored)

@@ -2,28 +2,10 @@
   "version": "0.2.0",
   "configurations": [
     {
-      "name": "current file",
-      "type": "node",
+      "command": "npm test",
+      "name": "Run npm test",
       "request": "launch",
-      "args": [
-        "${relativeFile}"
-      ],
-      "runtimeArgs": ["-r", "@gitzone/tsrun"],
-      "cwd": "${workspaceRoot}",
-      "protocol": "inspector",
-      "internalConsoleOptions": "openOnSessionStart"
-    },
-    {
-      "name": "test.ts",
-      "type": "node",
-      "request": "launch",
-      "args": [
-        "test/test.ts"
-      ],
-      "runtimeArgs": ["-r", "@gitzone/tsrun"],
-      "cwd": "${workspaceRoot}",
-      "protocol": "inspector",
-      "internalConsoleOptions": "openOnSessionStart"
+      "type": "node-terminal"
     }
   ]
 }
.vscode/settings.json (2 changes, vendored)

@@ -15,7 +15,7 @@
       "properties": {
         "projectType": {
           "type": "string",
-          "enum": ["website", "element", "service", "npm"]
+          "enum": ["website", "element", "service", "npm", "wcc"]
         }
       }
     }
changelog.md (515 changes, new file)

@@ -0,0 +1,515 @@
# Changelog

## 2025-10-11 - 2.4.3 - fix(docs)
Update documentation: expand README with multi-runtime architecture, add module READMEs, and add local dev settings

- Expanded project README: fixed typos, clarified availability header, and added a detailed Multi-Runtime Architecture section (runtimes, naming conventions, migration tool, examples, and runtime-specific notes).
- Inserted additional example output and adjusted JSON/example sections to reflect multi-runtime flows and updated totals/durations in examples.
- Added dedicated README files for ts_tapbundle, ts_tapbundle_node, and ts_tapbundle_protocol modules with API overviews and usage guides.
- Added .claude/settings.local.json to provide local development permissions/settings used by the project tooling.
- Minor formatting and documentation cleanup (whitespace, headings, and changelog entries).

## 2025-10-10 - 2.4.2 - fix(deno)
Enable additional Deno permissions for runtime adapters and add local dev settings

- Add --allow-sys, --allow-import and --node-modules-dir to the default Deno permission set used by the Deno runtime adapter
- Include the new permission flags in the fallback permissions array when constructing Deno command args
- Add .claude/settings.local.json to capture local development permissions and helper commands

## 2025-10-10 - 2.4.1 - fix(runtime/deno)
Enable Deno runtime tests by adding required permissions and local settings

- ts/tstest.classes.runtime.deno.ts: expanded default Deno permissions to include --allow-net, --allow-write and --sloppy-imports to allow network access, file writes and permissive JS/TS imports
- ts/tstest.classes.runtime.deno.ts: updated fallback permissions used when building the Deno command to match the new default set
- Added .claude/settings.local.json with a set of allowed local commands/permissions used for local development/CI tooling

## 2025-10-10 - 2.4.0 - feat(runtime)
Add runtime adapters, filename runtime parser and migration tool; integrate runtime selection into TsTest and add tests

- Introduce RuntimeAdapter abstraction and RuntimeAdapterRegistry to manage multiple runtimes
- Add runtime adapters: NodeRuntimeAdapter, ChromiumRuntimeAdapter, DenoRuntimeAdapter and BunRuntimeAdapter
- Add filename runtime parser utilities: parseTestFilename, isLegacyFilename and getLegacyMigrationTarget
- Add Migration class to detect and (dry-run) migrate legacy test filenames to the new naming convention
- Integrate runtime registry into TsTest and choose execution adapters based on parsed runtimes; show deprecation warnings for legacy naming
- Add tests covering runtime parsing and migration: test/test.runtime.parser.node.ts and test/test.migration.node.ts

## 2025-09-12 - 2.3.8 - fix(tstest)
Improve free port selection for Chrome runner and bump smartnetwork dependency

- Use randomized port selection when finding free HTTP and WebSocket ports to reduce collision probability in concurrent runs
- Ensure WebSocket port search excludes the chosen HTTP port so the two ports will not conflict
- Simplify failure handling: throw early if a free WebSocket port cannot be found instead of retrying with a less robust fallback
- Bump @push.rocks/smartnetwork dependency from ^4.2.0 to ^4.4.0 to pick up new findFreePort options

## 2025-09-12 - 2.3.7 - fix(tests)
Remove flaky dynamic-ports browser test and add local dev tool settings

- Removed test/tapbundle/test.dynamicports.ts — deletes a browser test that relied on injected dynamic WebSocket ports (reduces flaky CI/browser runs).
- Added .claude/settings.local.json — local development settings for the CLAUDE helper (grants allowed dev/automation commands and webfetch permissions).

## 2025-09-03 - 2.3.6 - fix(tstest)
Update deps, fix chrome server route for static bundles, add local tool settings and CI ignore

- Bump devDependency @git.zone/tsbuild to ^2.6.8
- Bump dependencies: @api.global/typedserver to ^3.0.78, @push.rocks/smartlog to ^3.1.9, @push.rocks/smartrequest to ^4.3.1
- Fix test server static route in ts/tstest.classes.tstest.ts: replace '(.*)' with '/*splat' so bundled test files are served correctly in Chromium runs
- Add .claude/settings.local.json with local permissions for development tasks
- Add .serena/.gitignore to ignore /cache

## 2025-08-18 - 2.3.5 - fix(core)
Use SmartRequest with Buffer for binary downloads, tighten static route handling, bump dependencies and add workspace/config files

- ts_tapbundle_node/classes.testfileprovider.ts: switch to SmartRequest.create().url(...).get() and convert response to a Buffer before writing to disk to fix binary download handling for the Docker Alpine image.
- ts/tstest.classes.tstest.ts: change server.addRoute from '*' to '(.*)' so the typedserver static handler uses a proper regex route.
- package.json: bump several dependencies (e.g. @api.global/typedserver, @git.zone/tsbuild, @push.rocks/smartfile, @push.rocks/smartpath, @push.rocks/smartrequest, @push.rocks/smartshell) to newer patch/minor versions.
- pnpm-workspace.yaml: add onlyBuiltDependencies list (esbuild, mongodb-memory-server, puppeteer).
- Remove registry setting from .npmrc (cleanup).
- Add project/agent config files: .serena/project.yml and .claude/settings.local.json for local tooling/agent configuration.

## 2025-08-16 - 2.3.4 - fix(ci)
Add local Claude settings to allow required WebFetch and Bash permissions for local tooling and tests

- Add .claude/settings.local.json to configure allowed permissions for local assistant/automation
- Grants WebFetch access for code.foss.global and www.npmjs.com
- Allows various Bash commands used by local tasks and test runs (mkdir, tsbuild, pnpm, node, tsx, tstest, ls, rm, grep, cat)
- No runtime/library code changes — configuration only

## 2025-08-16 - 2.3.3 - fix(dependencies)
Bump dependency versions and add local Claude settings

- Bumped devDependency @git.zone/tsbuild ^2.6.3 → ^2.6.4
- Updated @git.zone/tsbundle ^2.2.5 → ^2.5.1
- Updated @push.rocks/consolecolor ^2.0.2 → ^2.0.3
- Updated @push.rocks/qenv ^6.1.0 → ^6.1.3
- Updated @push.rocks/smartchok ^1.0.34 → ^1.1.1
- Updated @push.rocks/smartenv ^5.0.12 → ^5.0.13
- Updated @push.rocks/smartfile ^11.2.3 → ^11.2.5
- Updated @push.rocks/smarts3 ^2.2.5 → ^2.2.6
- Updated @push.rocks/smartshell ^3.2.3 → ^3.2.4
- Updated ws ^8.18.2 → ^8.18.3
- Added .claude/settings.local.json for local Claude permissions and tooling (local-only configuration)

## 2025-07-24 - 2.3.2 - fix(tapbundle)
Fix TypeScript IDE warning about tapTools parameter possibly being undefined

- Changed ITestFunction from interface with optional parameter to union type
- Updated test runner to handle both function signatures (with and without tapTools)
- Resolves IDE warnings while maintaining backward compatibility

## 2025-05-26 - 2.3.1 - fix(tapParser/logger)
Fix test duration reporting and summary formatting in TAP parser and logger

- Introduce startTime in TapParser to capture the overall test duration
- Pass computed duration to logger methods in evaluateFinalResult for accurate timing
- Update summary output to format duration in a human-readable way (ms vs. s)
- Add local permission settings configuration to .claude/settings.local.json

## 2025-05-26 - 2.3.0 - feat(cli)
Add '--version' option and warn against global tstest usage in the tstest project

- Introduced a new '--version' CLI flag that prints the version from package.json
- Added logic in ts/index.ts to detect if tstest is run globally within its own project and issue a warning
- Added .claude/settings.local.json to configure allowed permissions for various commands

## 2025-05-26 - 2.2.6 - fix(tstest)
Improve timeout warning timer management and summary output formatting in the test runner.

- Removed the global timeoutWarningTimer and replaced it with local warning timers in runInNode and runInChrome methods.
- Added warnings when test files run for over one minute if no timeout is specified.
- Ensured proper clearing of warning timers on successful completion or timeout.
- Enhanced quiet mode summary output to clearly display passed and failed test counts.

## 2025-05-26 - 2.2.5 - fix(protocol)
Fix inline timing metadata parsing and enhance test coverage for performance metrics and timing edge cases

- Updated the protocol parser to correctly parse inline key:value pairs while excluding prefixed formats (META:, SKIP:, TODO:, EVENT:)
- Added new tests for performance metrics, timing edge cases, and protocol timing to verify accurate timing capture and retry handling
- Expanded documentation in readme.hints.md to detail the updated timing implementation and parser fixes

## 2025-05-26 - 2.2.4 - fix(logging)
Improve performance metrics reporting and add local permissions configuration

- Add .claude/settings.local.json to configure allowed permissions for various commands
- Update tstest logging: compute average test duration from actual durations and adjust slowest test display formatting

## 2025-05-26 - 2.2.3 - fix(readme/ts/tstest.plugins)
Update npm package scope and documentation to use '@git.zone' instead of '@gitzone', and add local settings configuration.

- Changed npm package links and source repository URLs in readme from '@gitzone/tstest' to '@git.zone/tstest'.
- Updated comments in ts/tstest.plugins.ts to reflect the correct '@git.zone' scope.
- Added .claude/settings.local.json file with local permission settings.

## 2025-05-26 - 2.2.2 - fix(config)
Cleanup project configuration by adding local CLAUDE settings and removing redundant license files

- Added .claude/settings.local.json with updated permissions for CLI and build tasks
- Removed license and license.md files to streamline repository content

## 2025-05-26 - 2.2.1 - fix(repo configuration)
Update repository metadata to use 'git.zone' naming and add local permission settings

- Changed githost from 'gitlab.com' to 'code.foss.global' and gitscope from 'gitzone' to 'git.zone' in npmextra.json
- Updated npm package name from '@gitzone/tstest' to '@git.zone/tstest' in npmextra.json and readme.md
- Added .claude/settings.local.json with new permission configuration

## 2025-05-26 - 2.2.0 - feat(watch mode)
Add watch mode support with CLI options and enhanced documentation

- Introduce '--watch' (or '-w') and '--watch-ignore' CLI flags for automatic test re-runs
- Integrate @push.rocks/smartchok for file watching with 300ms debouncing
- Update readme.md and readme.hints.md with detailed instructions and examples for watch mode
- Add a demo test file (test/watch-demo/test.demo.ts) to illustrate the new feature
- Add smartchok dependency in package.json

## 2025-05-26 - 2.1.0 - feat(core)
Implement Protocol V2 with enhanced settings and lifecycle hooks

- Migrated to Protocol V2 using Unicode markers and structured metadata with new ts_tapbundle_protocol module
- Refactored TAP parser/emitter to support improved protocol parsing and error reporting
- Integrated global settings via tap.settings() and lifecycle hooks (beforeAll/afterAll, beforeEach/afterEach)
- Enhanced expect wrapper with diff generation for clearer assertion failures
- Updated test loader to automatically run 00init.ts for proper test configuration
- Revised documentation (readme.hints.md, readme.plan.md) to reflect current implementation status and remaining work

## 2025-05-25 - 2.0.0 - BREAKING CHANGE(protocol)
Introduce protocol v2 implementation and update build configuration with revised build order, new tspublish files, and enhanced documentation

- Added ts_tapbundle_protocol directory with isomorphic implementation for protocol v2
- Updated readme.hints.md and readme.plan.md to explain the complete replacement of the v1 protocol and new build process
- Revised build order in tspublish.json files across ts, ts_tapbundle, ts_tapbundle_node, and ts_tapbundle_protocol
- Introduced .claude/settings.local.json with updated permission settings for CLI and build tools
## 2025-05-24 - 1.11.5 - fix(tstest)
Fix timeout handling to correctly evaluate TAP results after killing the test process.

- Added call to evaluateFinalResult() after killing the process in runInNode to ensure final TAP output is processed.

## 2025-05-24 - 1.11.4 - fix(logging)
Improve warning logging and add permission settings file

- Replace multiple logger.error calls with logger.warning for tests running over 1 minute
- Add warning method in tstest logger to display warning messages consistently
- Introduce .claude/settings.local.json to configure allowed permissions

## 2025-05-24 - 1.11.3 - fix(tstest)
Add timeout warning for long-running tests and introduce local settings configuration

- Add .claude/settings.local.json with permission configuration for local development
- Implement a timeout warning timer that notifies when tests run longer than 1 minute without an explicit timeout
- Clear the timeout warning timer upon test completion
- Remove unused import of logPrefixes in tstest.classes.tstest.ts

## 2025-05-24 - 1.11.2 - fix(tstest)
Improve timeout and error handling in test execution along with TAP parser timeout logic improvements.

- In the TAP parser, ensure that expected tests are properly set when no tests are defined to avoid false negatives on timeout.
- Use smartshell's terminate method and fallback kill to properly stop the entire process tree on timeout.
- Clean up browser, server, and WebSocket instances reliably even when a timeout occurs.
- Minor improvements in log file filtering and error logging for better clarity.

## 2025-05-24 - 1.11.1 - fix(tstest)
Clear timeout identifiers after successful test execution and add local CLAUDE settings

- Ensure timeout IDs are cleared when tests complete to prevent lingering timeouts
- Add .claude/settings.local.json with updated permission settings for CLI commands

## 2025-05-24 - 1.11.0 - feat(cli)
Add new timeout and file range options with enhanced logfile diff logging

- Introduce --timeout <seconds> option to safeguard tests from running too long
- Add --startFrom and --stopAt options to control the range of test files executed
- Enhance logfile organization by automatically moving previous logs and generating diff reports for failed or changed test outputs
- Update CLI argument parsing and internal timeout handling for both Node.js and browser tests

## 2025-05-24 - 1.10.2 - fix(tstest-logging)
Improve log file handling with log rotation and diff reporting

- Add .claude/settings.local.json to configure allowed shell and web operations
- Introduce movePreviousLogFiles function to archive previous log files when --logfile is used
- Enhance logging to generate error copies and diff reports between current and previous logs
- Add type annotations for console overrides in browser evaluations for improved stability

## 2025-05-23 - 1.10.1 - fix(tstest)
Improve file range filtering and summary logging by skipping test files outside the specified range and reporting them in the final summary.

- Introduce runSingleTestOrSkip to check file index against startFrom/stopAt values.
- Log skipped files with appropriate messages and add them to the summary.
- Update the logger to include total skipped files in the test summary.
- Add permission settings in .claude/settings.local.json to support new operations.

## 2025-05-23 - 1.10.0 - feat(cli)
Add --startFrom and --stopAt options to filter test files by range

- Introduced CLI options --startFrom and --stopAt in ts/index.ts for selective test execution
- Added validation to ensure provided range values are positive and startFrom is not greater than stopAt
- Propagated file range filtering into test grouping in tstest.classes.tstest.ts, applying the range filter across serial and parallel groups
- Updated usage messages to include the new options

## 2025-05-23 - 1.9.4 - fix(docs)
Update documentation and configuration for legal notices and CI permissions. This commit adds a new local settings file for tool permissions, refines the legal and trademark sections in the readme, and improves glob test files with clearer log messages.

- Added .claude/settings.local.json to configure permissions for various CLI commands
- Revised legal and trademark documentation in the readme to clarify company ownership and usage guidelines
- Updated glob test files with improved console log messages for better clarity during test discovery

## 2025-05-23 - 1.9.3 - fix(tstest)
Fix test timing display issue and update TAP protocol documentation

- Changed TAP parser regex to non-greedy pattern to correctly separate test timing metadata
- Enhanced readme.hints.md with detailed explanation of test timing fix and planned protocol upgrades
- Updated readme.md with improved usage examples for tapbundle and comprehensive test framework documentation
- Added new protocol design document (readme.protocol.md) and improvement plan (readme.plan.md) outlining future changes
- Introduced .claude/settings.local.json update for npm and CLI permissions
- Exported protocol utilities and added tapbundle protocol implementation for future enhancements

## 2025-05-23 - 1.9.2 - fix(logging)
Fix log file naming to prevent collisions and update logging system documentation.

- Enhance safe filename generation in tstest logging to preserve directory structure using double underscores.
- Update readme.hints.md to include detailed logging system documentation and behavior.
- Add .claude/settings.local.json with updated permissions for build tools.

## 2025-05-23 - 1.9.1 - fix(dependencies)
Update dependency versions and add local configuration files

- Bump @git.zone/tsbuild from ^2.5.1 to ^2.6.3
- Bump @types/node from ^22.15.18 to ^22.15.21
- Bump @push.rocks/smartexpect from ^2.4.2 to ^2.5.0
- Bump @push.rocks/smartfile from ^11.2.0 to ^11.2.3
- Bump @push.rocks/smartlog from ^3.1.1 to ^3.1.8
- Add .npmrc with npm registry configuration
- Add .claude/settings.local.json for local permissions

## 2025-05-16 - 1.9.0 - feat(docs)
Update documentation to embed tapbundle and clarify module exports for browser compatibility; also add CI permission settings.

- Embed tapbundle directly into tstest to simplify usage and ensure browser support.
- Update import paths in examples from '@push.rocks/tapbundle' to '@git.zone/tstest/tapbundle'.
- Revise the changelog to reflect version 1.8.0 improvements including enhanced test lifecycle hooks and parallel execution fixes.
- Add .claude/settings.local.json to configure CI-related permissions and tool operations.

## 2025-05-16 - 1.8.0 - feat(documentation)
Enhance README with detailed test features and update local settings for build permissions.

- Expanded the documentation to include tag filtering, parallel test execution groups, lifecycle hooks, snapshot testing, timeout control, retry logic, and test fixtures
- Updated .claude/settings.local.json to allow additional permissions for various build and test commands

## 2025-05-16 - 1.7.0 - feat(tstest)
Enhance tstest with fluent API, suite grouping, tag filtering, fixture & snapshot testing, and parallel execution improvements

- Updated npm scripts to run tests in verbose mode and support glob patterns with quotes
- Introduced tag filtering support (--tags) in the CLI to run tests by specified tags
- Implemented fluent syntax methods (tags, priority, retry, timeout) for defining tests and applying settings
- Added test suite grouping with describe(), along with beforeEach and afterEach lifecycle hooks
- Integrated a fixture system and snapshot testing via TapTools with base64 snapshot communication
- Enhanced TAP parser regex, error collection, and snapshot handling for improved debugging
- Improved parallel test execution by grouping files with a 'para__' pattern and running them concurrently

## 2025-05-15 - 1.6.0 - feat(package)
Revamp package exports and update permissions with an extensive improvement plan for test runner enhancements.

- Replaced 'main' and 'typings' in package.json with explicit exports for improved module resolution.
- Added .claude/settings.local.json to configure permissions for bash commands and web fetches.
- Updated readme.plan.md with a comprehensive roadmap covering enhanced error reporting, rich test metadata, nested test suites, and advanced test features.

## 2025-05-15 - 1.5.0 - feat(cli)
Improve test runner configuration: update test scripts, reorganize test directories, update dependencies and add local settings for command permissions.

- Updated package.json scripts to use pnpm and separate commands for tapbundle and tstest.
- Reorganized tests into dedicated directories (test/tapbundle and test/tstest) and removed deprecated test files.
- Refactored import paths and bumped dependency versions in tapbundle, tstest, and associated node utilities.
- Added .claude/settings.local.json to configure local permissions for bash and web fetch commands.
- Introduced ts/tspublish.json to define publish order.

## 2025-05-15 - 1.4.0 - feat(logging)
Display failed test console logs in default mode

- Introduce log buffering in TsTestLogger to capture console output for failed tests
- Enhance TapParser to collect and display error details when tests fail
- Update README and project plan to document log improvements for debugging

## 2025-05-15 - 1.3.1 - fix(settings)
Add local permissions configuration and remove obsolete test output log

- Added .claude/settings.local.json to configure allowed permissions for web fetch and bash commands
- Removed test-output.log to eliminate accidental commit of test artifacts

## 2025-05-15 - 1.3.0 - feat(logger)
Improve logging output and add --logfile support for persistent logs

- Add new .claude/settings.local.json with logging permissions configuration
- Remove obsolete readme.plan.md
- Introduce test/test.console.ts to capture and display console outputs during tests
- Update CLI in ts/index.ts to replace '--log-file' with '--logfile' flag
- Enhance TsTestLogger to support file logging, clean ANSI sequences, and improved JSON output
- Forward TAP protocol logs to testConsoleOutput in TapParser for better console distinction

## 2025-05-15 - 1.2.0 - feat(logging)
Improve logging output, CLI option parsing, and test report formatting.

- Added a centralized TsTestLogger with support for multiple verbosity levels, JSON output, and file logging (TODO).
- Integrated new logger into CLI parsing, TapParser, TapCombinator, and TsTest classes to ensure consistent and structured output.
- Introduced new CLI options (--quiet, --verbose, --no-color, --json, --log-file) for enhanced user control.
- Enhanced visual design with progress indicators, detailed error aggregation, and performance summaries.
- Updated documentation and logging code to align with improved CI/CD behavior, including skipping non-CI tests.
## 2025-05-15 - 1.1.0 - feat(cli)
Enhance test discovery with support for single file and glob pattern execution using improved CLI argument detection

- Detect execution mode (file, glob, directory) based on CLI input in ts/index.ts
- Refactor TestDirectory to load test files using SmartFile for single file and glob patterns
- Update TsTest to pass execution mode and adjust test discovery accordingly
- Bump dependency versions for typedserver, tsbundle, tapbundle, and others
- Add .claude/settings.local.json for updated permissions configuration

## 2025-01-23 - 1.0.96 - fix(TsTest)
Fixed improper type-check for promise-like testModule defaults

- Corrected the type-check for promise-like default exports in test modules
- Removed unnecessary setTimeout used for async execution

## 2025-01-23 - 1.0.95 - fix(core)
Fix delay handling in Chrome test execution

- Replaced smartdelay.delayFor with native Promise-based delay mechanism in runInChrome method.

## 2025-01-23 - 1.0.94 - fix(TsTest)
Fix test module execution by ensuring promise resolution delay

- Added a delay to ensure promise resolution when dynamically importing test modules in the runInChrome method.

## 2025-01-23 - 1.0.93 - fix(tstest)
Handle globalThis.tapPromise in browser runtime evaluation

- Added support for using globalThis.tapPromise in the browser evaluation logic.
- Added log messages to indicate the usage of globalThis.tapPromise.

## 2025-01-23 - 1.0.92 - fix(core)
Improve error logging for test modules without default promise

- Added logging to display the exported test module content when it does not export a default promise.

## 2025-01-23 - 1.0.91 - fix(core)
Refactored tstest class to enhance promise handling for test modules.

- Removed .gitlab-ci.yml configuration file.
- Updated package.json dependency versions.
- Added a condition to handle promiselike objects in tests.

## 2024-04-18 - 1.0.89 to 1.0.90 - Enhancements and Bug Fixes
Multiple updates and fixes have been made.

- Updated core components to enhance stability and performance.

## 2024-03-07 - 1.0.86 to 1.0.88 - Core Updates
Continued improvements and updates in the core module.

- Applied critical fixes to enhance core stability.

## 2024-01-19 - 1.0.85 to 1.0.89 - Bug Fixes
Series of core updates have been implemented.

- Addressed known bugs and improved overall system functionality.

## 2023-11-09 - 1.0.81 to 1.0.84 - Maintenance Updates
Maintenance updates focusing on core reliability.

- Improved core module through systematic updates.
- Strengthened system robustness.

## 2023-08-26 - 1.0.77 to 1.0.80 - Critical Fixes
Critical fixes implemented in core functionality.

- Enhanced core processing to fix existing issues.

## 2023-07-13 - 1.0.75 to 1.0.76 - Stability Improvements
Stability enhancements and minor improvements.

- Focused on ensuring a stable operational core.

## 2022-11-08 - 1.0.73 to 1.0.74 - Routine Fixes
Routine core fixes to address reported issues.

- Addressed minor issues in the core module.

## 2022-08-03 - 1.0.71 to 1.0.72 - Core Enhancements
Enhancements applied to core systems.

- Tweaked core components for enhanced reliability.

## 2022-05-04 - 1.0.69 to 1.0.70 - System Reliability Fixes
Fixes targeting the reliability of the core systems.

- Improved system reliability through targeted core updates.

## 2022-03-17 - 1.0.65 to 1.0.68 - Major Core Updates
Major updates and bug fixes delivered for core components.

- Enhanced central operations through key updates.

## 2022-02-15 - 1.0.60 to 1.0.64 - Core Stability Improvements
Focused updates on core stability and performance.

- Reinforced stability through systematic core changes.

## 2021-11-07 - 1.0.54 to 1.0.59 - Core Fixes and Improvements
Multiple core updates aimed at fixing and improving the system.

- Addressed outstanding bugs and improved performance in the core.

## 2021-08-20 - 1.0.50 to 1.0.53 - Core Functionality Updates
Continued updates to improve core functionality and user experience.

- Implemented essential core fixes to enhance user experience.

## 2020-10-01 - 1.0.44 to 1.0.49 - Core System Enhancements
Critical enhancements to core systems.

- Improved core operations and tackled existing issues.

## 2020-09-29 - 1.0.40 to 1.0.43 - Essential Fixes
Series of essential fixes for the core system.

- Rectified known issues and bolstered core functionalities.

## 2020-07-10 - 1.0.35 to 1.0.39 - Core Function Fixes
Focused improvements and fixes for critical components.

- Addressed critical core functions to boost system performance.

## 2020-06-01 - 1.0.31 to 1.0.34 - Core Updates
Updates to maintain core functionality efficacy.

- Fixed inefficiencies and updated essential components.

## 2019-10-02 - 1.0.26 to 1.0.29 - Core Maintenance
Regular maintenance and updates for core reliability.

- Addressed multiple core issues and enhanced system stability.

## 2019-05-28 - 1.0.20 to 1.0.25 - Core Improvements
General improvements targeting core functionalities.

- Made systematic improvements to core processes.

## 2019-04-08 - 1.0.16 to 1.0.19 - Bug Squashing
Resolved numerous issues within core operations.

- Fixed and optimized core functionalities for better performance.

## 2018-12-06 - 1.0.15 - Dependency Updates
Updates aimed at improving dependency management.

- Ensured dependencies are up-to-date for optimal performance.

## 2018-08-14 - 1.0.14 - Test Improvement
Major improvements in testing mechanisms and logging.

- Improved test results handling for accuracy and reliability.
- Enhanced logging features for increased clarity.

## 2018-08-04 - 1.0.1 to 1.0.13 - Initial Implementation and Fixes
Initial release and critical updates focusing on core stability and functionality.

- Implemented core components and established initial system structure.
- Addressed key bugs and enhanced initial functionality.
cli.child.ts (4 changes, new file)

@@ -0,0 +1,4 @@
#!/usr/bin/env node
process.env.CLI_CALL = 'true';
import * as cliTool from './ts/index.js';
cliTool.runCli();
cli.js (2 changes)

@@ -1,4 +1,4 @@
 #!/usr/bin/env node
 process.env.CLI_CALL = 'true';
-const cliTool = require('./dist_ts/index');
+const cliTool = await import('./dist_ts/index.js');
 cliTool.runCli();

The next two hunks belong to files whose headers were not captured in this export; the first most likely belongs to cli.ts.js (the CLI entry chain described in readme.hints.md), the second to a removed contribution note.

@@ -1,5 +1,5 @@
 #!/usr/bin/env node
 process.env.CLI_CALL = 'true';
-require('@gitzone/tsrun');
-const cliTool = require('./ts/index');
-cliTool.runCli();
+import * as tsrun from '@git.zone/tsrun';
+tsrun.runPath('./cli.child.js', import.meta.url);

@@ -1,3 +0,0 @@
-# How to contribute
-
-Start with `tstest.classes.tstest.ts` to understand whats happening
license.md (19 changes, new file)

@@ -0,0 +1,19 @@
Copyright (c) 2014 Task Venture Capital GmbH (hello@task.vc)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
npmextra.json (file header not captured in this export; identified via the 2.2.1 changelog entry). Old and new values of changed lines appear on adjacent lines:

@@ -6,11 +6,11 @@
  "gitzone": {
    "projectType": "npm",
    "module": {
      "githost": "gitlab.com",
      "gitscope": "gitzone",
      "githost": "code.foss.global",
      "gitscope": "git.zone",
      "gitrepo": "tstest",
      "shortDescription": "a test utility to run tests that match test/**/*.ts",
      "npmPackagename": "@gitzone/tstest",
      "description": "a test utility to run tests that match test/**/*.ts",
      "npmPackagename": "@git.zone/tstest",
      "license": "MIT"
    }
  }
package-lock.json (9926 changes, generated): file diff suppressed because it is too large.
package.json (70 changes)

@@ -1,41 +1,58 @@
 {
-  "name": "@gitzone/tstest",
-  "version": "1.0.47",
+  "name": "@git.zone/tstest",
+  "version": "2.4.3",
+  "private": false,
   "description": "a test utility to run tests that match test/**/*.ts",
-  "main": "dist_ts/index.js",
-  "typings": "dist_ts/index.d.ts",
+  "exports": {
+    ".": "./dist_ts/index.js",
+    "./tapbundle": "./dist_ts_tapbundle/index.js",
+    "./tapbundle_node": "./dist_ts_tapbundle_node/index.js"
+  },
   "type": "module",
   "author": "Lossless GmbH",
   "license": "MIT",
   "bin": {
     "tstest": "./cli.js"
   },
   "scripts": {
-    "test": "(npm run cleanUp && npm run prepareTest && npm run tstest)",
-    "prepareTest": "git clone https://gitlab.com/sandboxzone/sandbox-npmts.git .nogit/sandbox-npmts && cd .nogit/sandbox-npmts && npm install",
-    "tstest": "cd .nogit/sandbox-npmts && node ../../cli.ts.js test/ --web",
-    "cleanUp": "rm -rf .nogit/sandbox-npmts",
-    "build": "(tsbuild --web)"
+    "test": "pnpm run build && pnpm run test:tapbundle:verbose && pnpm run test:tstest:verbose",
+    "test:tapbundle": "tsx ./cli.child.ts \"test/tapbundle/**/*.ts\"",
+    "test:tapbundle:verbose": "tsx ./cli.child.ts \"test/tapbundle/**/*.ts\" --verbose",
+    "test:tstest": "tsx ./cli.child.ts \"test/tstest/**/*.ts\"",
+    "test:tstest:verbose": "tsx ./cli.child.ts \"test/tstest/**/*.ts\" --verbose",
+    "build": "(tsbuild tsfolders)",
+    "buildDocs": "tsdoc"
   },
   "devDependencies": {
-    "@gitzone/tsbuild": "^2.1.25",
-    "tslint": "^6.1.3",
-    "tslint-config-prettier": "^1.18.0"
+    "@git.zone/tsbuild": "^2.6.8",
+    "@types/node": "^22.15.21"
   },
   "dependencies": {
-    "@gitzone/tsbundle": "^1.0.78",
-    "@gitzone/tsrun": "^1.2.12",
-    "@pushrocks/consolecolor": "^2.0.1",
-    "@pushrocks/smartbrowser": "^1.0.17",
-    "@pushrocks/smartdelay": "^2.0.10",
-    "@pushrocks/smartexpress": "^3.0.76",
-    "@pushrocks/smartfile": "^8.0.0",
-    "@pushrocks/smartlog": "^2.0.39",
-    "@pushrocks/smartpromise": "^3.0.6",
-    "@pushrocks/smartshell": "^2.0.25",
-    "@pushrocks/tapbundle": "^3.2.9",
-    "@types/figures": "^3.0.1",
-    "figures": "^3.0.0"
+    "@api.global/typedserver": "^3.0.78",
+    "@git.zone/tsbundle": "^2.5.1",
+    "@git.zone/tsrun": "^1.3.3",
+    "@push.rocks/consolecolor": "^2.0.3",
+    "@push.rocks/qenv": "^6.1.3",
+    "@push.rocks/smartbrowser": "^2.0.8",
+    "@push.rocks/smartchok": "^1.1.1",
+    "@push.rocks/smartcrypto": "^2.0.4",
+    "@push.rocks/smartdelay": "^3.0.5",
+    "@push.rocks/smartenv": "^5.0.13",
+    "@push.rocks/smartexpect": "^2.5.0",
+    "@push.rocks/smartfile": "^11.2.7",
+    "@push.rocks/smartjson": "^5.0.20",
+    "@push.rocks/smartlog": "^3.1.9",
+    "@push.rocks/smartmongo": "^2.0.12",
+    "@push.rocks/smartnetwork": "^4.4.0",
+    "@push.rocks/smartpath": "^6.0.0",
+    "@push.rocks/smartpromise": "^4.2.3",
+    "@push.rocks/smartrequest": "^4.3.1",
+    "@push.rocks/smarts3": "^2.2.6",
+    "@push.rocks/smartshell": "^3.3.0",
+    "@push.rocks/smarttime": "^4.1.1",
+    "@types/ws": "^8.18.1",
+    "figures": "^6.1.0",
+    "ws": "^8.18.3"
   },
   "files": [
     "ts/**/*",

@@ -51,5 +68,6 @@
   ],
   "browserslist": [
     "last 1 chrome versions"
-  ]
+  ],
+  "packageManager": "pnpm@10.10.0+sha512.d615db246fe70f25dcfea6d8d73dee782ce23e2245e3c4f6f888249fb568149318637dca73c2c5c8ef2a4ca0d5657fb9567188bfab47f566d1ee6ce987815c39"
 }
pnpm-lock.yaml (9240 changes, generated, new file): file diff suppressed because it is too large.
pnpm-workspace.yaml (4 changes, new file)

@@ -0,0 +1,4 @@
onlyBuiltDependencies:
  - esbuild
  - mongodb-memory-server
  - puppeteer
readme.hints.md (323 changes, new file)

@@ -0,0 +1,323 @@
# Architecture Overview

## Project Structure

This project integrates tstest with tapbundle through a modular architecture:

1. **tstest** (`/ts/`) - The test runner that discovers and executes test files
2. **tapbundle** (`/ts_tapbundle/`) - The TAP testing framework for writing tests
3. **tapbundle_node** (`/ts_tapbundle_node/`) - Node.js-specific testing utilities

## How Components Work Together

### Test Execution Flow

1. **CLI Entry Point** (`cli.js` → `cli.ts.js` → `cli.child.ts`)
   - The CLI uses tsx to run TypeScript files directly
   - Accepts glob patterns to find test files
   - Supports options like `--verbose`, `--quiet`, `--web`

2. **Test Discovery**
   - tstest scans for test files matching the provided pattern
   - Defaults to `test/**/*.ts` when no pattern is specified
   - Supports both file and directory modes

3. **Test Runner**
   - Each test file imports `tap` and `expect` from tapbundle
   - Tests are written using `tap.test()` with async functions
   - Browser tests are compiled with esbuild and run in Chromium via Puppeteer
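For reference, a minimal test file following this flow might look as follows. This is a sketch based on the conventions described in this document; the relative import path and the `export default tap.start()` shape are assumptions drawn from the notes on promise-like default exports elsewhere in this compare view.

```typescript
// Sketch only: a minimal tapbundle test file as described above.
import { tap, expect } from '../../ts_tapbundle/index.js';

tap.test('adds two numbers', async () => {
  expect(1 + 1).toEqual(2);
});

// The runner treats the default export as a promise-like value (assumption).
export default tap.start();
```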
### Key Integration Points

1. **Import Structure**
   - Test files import from local tapbundle: `import { tap, expect } from '../../ts_tapbundle/index.js'`
   - Node-specific tests also import from tapbundle_node: `import { tapNodeTools } from '../../ts_tapbundle_node/index.js'`

2. **WebHelpers**
   - Browser tests can use webhelpers for DOM manipulation
   - `webhelpers.html` - Template literal for creating HTML strings
   - `webhelpers.fixture` - Creates DOM elements from HTML strings
   - Automatically detects browser environment and only enables in browser context

3. **Build System**
   - Uses `tsbuild tsfolders` to compile TypeScript (invoked by `pnpm build`)
   - Maintains separate output directories: `/dist_ts/`, `/dist_ts_tapbundle/`, `/dist_ts_tapbundle_node/`, `/dist_ts_tapbundle_protocol/`
   - Compilation order is resolved automatically based on dependencies in tspublish.json files
   - Protocol imports use compiled dist directories:
     ```typescript
     // In ts/tstest.classes.tap.parser.ts
     import { ProtocolParser } from '../dist_ts_tapbundle_protocol/index.js';

     // In ts_tapbundle/tapbundle.classes.tap.ts
     import { ProtocolEmitter } from '../dist_ts_tapbundle_protocol/index.js';
     ```
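To illustrate the WebHelpers integration point above, a browser test might use the two helpers roughly like this. This is a hedged sketch only: the `webhelpers` export name, the exact signatures, and the return type of `fixture` are assumptions based on the one-line descriptions above, not a verified API.

```typescript
// Sketch only: browser-side DOM test using the webhelpers described above.
import { tap, expect, webhelpers } from '../../ts_tapbundle/index.js';

tap.test('creates a DOM fixture from an HTML string', async () => {
  // webhelpers.html is described as a template literal tag returning an HTML string
  const markup = webhelpers.html`<div class="demo">hello</div>`;
  // webhelpers.fixture is described as turning an HTML string into a DOM element
  const element = await webhelpers.fixture(markup);
  expect(element.className).toEqual('demo');
});

export default tap.start();
```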
### Test Scripts

The package.json defines several test scripts:

- `test` - Builds and runs all tests (tapbundle and tstest)
- `test:tapbundle` - Runs tapbundle framework tests
- `test:tstest` - Runs tstest's own tests
- Both support `:verbose` variants for detailed output

### Environment Detection

The framework automatically detects the runtime environment:

- Node.js tests run directly via tsx
- Browser tests are compiled and served via a local server
- WebHelpers are only enabled in browser environment

This architecture allows for seamless testing across both Node.js and browser environments while maintaining a clean separation of concerns.

## Logging System

### Log File Naming (Fixed in v1.9.1)

When using the `--logfile` flag, tstest creates log files in `.nogit/testlogs/`. The log file naming was updated to preserve directory structure and prevent collisions:

- **Old behavior**: `test/tapbundle/test.ts` → `.nogit/testlogs/test.log`
- **New behavior**: `test/tapbundle/test.ts` → `.nogit/testlogs/test__tapbundle__test.log`

This fix ensures that test files with the same basename in different directories don't overwrite each other's logs. The implementation:

1. Takes the relative path from the current working directory
2. Replaces path separators (`/`) with double underscores (`__`)
3. Removes the `.ts` extension
4. Creates a flat filename that preserves the directory structure
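A minimal sketch of that mapping (illustrative only, not the actual tstest implementation):

```typescript
// Turns a relative test path into the flat log file name described above.
function toLogFileName(relativeTestPath: string): string {
  return relativeTestPath
    .split('/')            // 2) path separators ...
    .join('__')            //    ... become double underscores
    .replace(/\.ts$/, '')  // 3) drop the .ts extension
    + '.log';              // 4) flat filename placed under .nogit/testlogs/
}

// toLogFileName('test/tapbundle/test.ts') === 'test__tapbundle__test.log'
```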
### Test Timing Display (Fixed in v1.9.2)

Fixed an issue where test timing was displayed incorrectly with duplicate values like:

- Before: `✅ test name # time=133ms (0ms)`
- After: `✅ test name (133ms)`

The issue was in the TAP parser regex which was greedily capturing the entire line including the TAP timing comment. Changed the regex from `(.*)` to `(.*?)` to make it non-greedy, properly separating the test name from the timing metadata.
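The difference can be seen in a small, self-contained example; the real parser regex is more involved, so this only isolates the greedy vs. non-greedy capture:

```typescript
const line = 'ok 1 - test name # time=133ms';

// Greedy: (.*) swallows the timing comment into the captured test name.
const greedy = /^ok \d+ - (.*)(?: # time=(\d+)ms)?$/.exec(line);
// greedy?.[1] === 'test name # time=133ms'

// Non-greedy: (.*?) stops before the "# time=..." metadata.
const lazy = /^ok \d+ - (.*?)(?: # time=(\d+)ms)?$/.exec(line);
// lazy?.[1] === 'test name', lazy?.[2] === '133'
```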
## Protocol Limitations and Improvements

### Current TAP Protocol Issues
The current implementation uses standard TAP format with metadata in comments:
```
ok 1 - test name # time=123ms
```

This has several limitations:
1. **Delimiter Conflict**: Test descriptions containing `#` can break parsing
2. **Regex Fragility**: Complex regex patterns that are hard to maintain
3. **Limited Metadata**: Difficult to add rich error information or custom data

### Planned Protocol V2
A new internal protocol is being designed that will:
- Use Unicode delimiters `⟦TSTEST:⟧` that won't conflict with test content
- Support structured JSON metadata
- Allow rich error reporting with stack traces and diffs
- Completely replace v1 protocol (no backwards compatibility)
### ts_tapbundle_protocol Directory
|
||||
The protocol v2 implementation is contained in a separate `ts_tapbundle_protocol` directory:
|
||||
- **Isomorphic Code**: All protocol code works in both browser and Node.js environments
|
||||
- **No Platform Dependencies**: No Node.js-specific imports, ensuring true cross-platform compatibility
|
||||
- **Clean Separation**: Protocol logic is isolated from platform-specific code in tstest and tapbundle
|
||||
- **Shared Implementation**: Both tstest (parser) and tapbundle (emitter) use the same protocol classes
|
||||
- **Build Process**:
|
||||
- Compiled by `pnpm build` via tsbuild to `dist_ts_tapbundle_protocol/`
|
||||
- Build order managed through tspublish.json files
|
||||
- Other modules import from the compiled dist directory, not source
|
||||
|
||||
This architectural decision ensures the protocol can be used in any JavaScript environment without modification and maintains proper build dependencies.
|
||||
|
||||
See `readme.protocol.md` for the full specification and `ts_tapbundle_protocol/` for the implementation.
|
||||
|
||||
## Protocol V2 Implementation Status
|
||||
|
||||
The Protocol V2 has been implemented to fix issues with TAP protocol parsing when test descriptions contain special characters like `#`, `###SNAPSHOT###`, or protocol markers like `⟦TSTEST:ERROR⟧`.
|
||||
|
||||
### Implementation Details:
|
||||
|
||||
1. **Protocol Components**:
|
||||
- `ProtocolEmitter` - Generates protocol v2 messages (used by tapbundle)
|
||||
- `ProtocolParser` - Parses protocol v2 messages (used by tstest)
|
||||
- Uses Unicode markers `⟦TSTEST:` and `⟧` to avoid conflicts with test content
|
||||
|
||||
2. **Current Status**:
|
||||
- ✅ Basic protocol emission and parsing works
|
||||
- ✅ Handles test descriptions with special characters correctly
|
||||
- ✅ Supports metadata for timing, tags, errors
|
||||
- ⚠️ Protocol messages sometimes appear in console output (parsing not catching all cases)
|
||||
|
||||
3. **Key Findings**:
|
||||
- `tap.skip.test()` originally didn't create actual test objects; it just logged and incremented a counter (since fixed; see Fixed Issues below)
- `tap.todo()` was not implemented at the time (no `addTodo` method in the Tap class); it has since been added (see Fixed Issues below)
|
||||
- Protocol parser's `isBlockStart` was fixed to only match exact block markers, not partial matches in test descriptions
|
||||
|
||||
4. **Import Paths**:
|
||||
- tstest imports from: `import { ProtocolParser } from '../dist_ts_tapbundle_protocol/index.js';`
|
||||
- tapbundle imports from: `import { ProtocolEmitter } from '../dist_ts_tapbundle_protocol/index.js';`
|
||||
|
||||
## Test Configuration System (Phase 2)
|
||||
|
||||
The Test Configuration System has been implemented to provide global settings and lifecycle hooks for tests.
|
||||
|
||||
### Key Features:
|
||||
|
||||
1. **00init.ts Discovery**:
|
||||
- Automatically detects `00init.ts` files in the same directory as test files
|
||||
- Creates a temporary loader file that imports both `00init.ts` and the test file (see the sketch after this list)
|
||||
- Loader files are cleaned up automatically after test execution
|
||||
|
||||
2. **Settings Inheritance**:
|
||||
- Global settings from `00init.ts` → File-level settings → Test-level settings
|
||||
- Settings include: timeout, retries, retryDelay, bail, concurrency
|
||||
- Lifecycle hooks: beforeAll, afterAll, beforeEach, afterEach
|
||||
|
||||
3. **Implementation Details**:
|
||||
- `SettingsManager` class handles settings inheritance and merging
|
||||
- `tap.settings()` API allows configuration at any level
|
||||
- Lifecycle hooks are integrated into test execution flow
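A sketch of what the temporary loader mentioned under 00init.ts Discovery could look like (the file name and exact contents are assumptions, not the literal output tstest generates):

```typescript
// e.g. test/config-test/.tmp-loader-test.config.ts (hypothetical name)
// Importing 00init.ts first ensures tap.settings() and global hooks are registered
// before the test file registers its tests.
import './00init.js';
import './test.config.js';
```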
|
||||
|
||||
### Important Development Notes:
|
||||
|
||||
1. **Local Development**: When developing tstest itself, use `node cli.js` instead of globally installed `tstest` to test changes
|
||||
|
||||
2. **Console Output Buffering**: Console output from tests is buffered and only displayed for failing tests. TAP-compliant comments (lines starting with `#`) are always shown.
|
||||
|
||||
3. **TypeScript Warnings**: Fixed async/await warnings in `movePreviousLogFiles()` by using sync versions of file operations
|
||||
|
||||
## Enhanced Communication Features (Phase 3)
|
||||
|
||||
The Enhanced Communication system has been implemented to provide rich, real-time feedback during test execution.
|
||||
|
||||
### Key Features:
|
||||
|
||||
1. **Event-Based Test Lifecycle Reporting**:
|
||||
- `test:queued` - Test is ready to run
|
||||
- `test:started` - Test execution begins
|
||||
- `test:completed` - Test finishes (with pass/fail status)
|
||||
- `suite:started` - Test suite/describe block begins
|
||||
- `suite:completed` - Test suite/describe block ends
|
||||
- `hook:started` - Lifecycle hook (beforeEach/afterEach) begins
|
||||
- `hook:completed` - Lifecycle hook finishes
|
||||
- `assertion:failed` - Assertion failure with detailed information
|
||||
|
||||
2. **Visual Diff Output for Assertion Failures**:
|
||||
- **String Diffs**: Character-by-character comparison with colored output
|
||||
- **Object/Array Diffs**: Deep property comparison showing added/removed/changed properties
|
||||
- **Primitive Diffs**: Clear display of expected vs actual values
|
||||
- **Colorized Output**: Green for expected, red for actual, yellow for differences
|
||||
- **Smart Formatting**: Multi-line strings and complex objects are formatted for readability
|
||||
|
||||
3. **Real-Time Test Progress API**:
|
||||
- Tests emit progress events as they execute
|
||||
- tstest parser processes events and updates display in real-time
|
||||
- Structured event format carries rich metadata (timing, errors, diffs)
|
||||
- Seamless integration with existing TAP protocol via Protocol V2
|
||||
|
||||
### Implementation Details:
|
||||
- Events are transmitted via Protocol V2's `EVENT` block type
|
||||
- Event data is JSON-encoded within protocol markers
|
||||
- Parser handles events asynchronously for real-time updates
|
||||
- Visual diffs are generated using custom diff algorithms for each data type
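An illustrative event message as it could travel over the protocol (the exact JSON field names are an assumption; only the `⟦TSTEST:EVENT:...⟧` framing comes from the notes above):

```
⟦TSTEST:EVENT:{"eventType":"test:completed","data":{"testName":"should login","passed":true,"duration":133}}⟧
```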
|
||||
|
||||
## Watch Mode (Phase 4)
|
||||
|
||||
tstest now supports watch mode for automatic test re-runs on file changes.
|
||||
|
||||
### Usage
|
||||
```bash
|
||||
tstest test/**/*.ts --watch
|
||||
tstest test/specific.ts -w
|
||||
```
|
||||
|
||||
### Features
|
||||
- **Automatic Re-runs**: Tests re-run when any watched file changes
|
||||
- **Debouncing**: Multiple rapid changes are batched (300ms delay)
|
||||
- **Clear Output**: Console is cleared before each run for clean results
|
||||
- **Status Updates**: Shows which files triggered the re-run
|
||||
- **Graceful Exit**: Press Ctrl+C to stop watching
|
||||
|
||||
### Options
|
||||
- `--watch` or `-w`: Enable watch mode
|
||||
- `--watch-ignore`: Comma-separated patterns to ignore (e.g., `--watch-ignore node_modules,dist`)
|
||||
|
||||
### Implementation Details
|
||||
- Uses `@push.rocks/smartchok` for cross-platform file watching
|
||||
- Watches the entire project directory from where tests are run
|
||||
- Ignores changes matching the ignore patterns
|
||||
- Shows "Waiting for file changes..." between runs
|
||||
|
||||
## Fixed Issues
|
||||
|
||||
### tap.skip.test(), tap.todo(), and tap.only.test() (Fixed)
|
||||
|
||||
Previously reported issues with these methods have been resolved:
|
||||
|
||||
1. **tap.skip.test()** - Now properly creates test objects that are counted in test results
|
||||
- Tests marked with `skip.test()` appear in the test count
|
||||
- Shows as passed with skip directive in TAP output
|
||||
- `markAsSkipped()` method added to handle pre-test skip marking
|
||||
|
||||
2. **tap.todo.test()** - Fully implemented with test object creation
|
||||
- Supports both `tap.todo.test('description')` and `tap.todo.test('description', testFunc)`
|
||||
- Todo tests are counted and marked with todo directive
|
||||
- Both regular and parallel todo tests supported
|
||||
|
||||
3. **tap.only.test()** - Works correctly for focused testing
|
||||
- When `.only` tests exist, only those tests run
|
||||
- Other tests are not executed but still counted
|
||||
- Both regular and parallel only tests supported
|
||||
|
||||
These fixes ensure accurate test counts and proper TAP-compliant output for all test states.
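A combined usage sketch of the three modifiers discussed above (the import path matches the repo's own tests):

```typescript
import { tap, expect } from '../../ts_tapbundle/index.js';

tap.skip.test('not ready yet', async () => {
  expect(false).toBeTrue(); // never executed, but still counted and reported as skipped
});

tap.todo.test('spec written, implementation pending'); // counted with a todo directive

tap.only.test('focus on this one', async () => {
  expect(true).toBeTrue(); // when .only tests exist, only those run
});

tap.start();
```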
|
||||
|
||||
## Test Timing Implementation
|
||||
|
||||
### Timing Architecture
|
||||
|
||||
Test timing is captured using `@push.rocks/smarttime`'s `HrtMeasurement` class, which provides high-resolution timing (a usage sketch follows this list):
|
||||
|
||||
1. **Timing Capture**:
|
||||
- Each `TapTest` instance has its own `HrtMeasurement`
|
||||
- Timer starts immediately before test function execution
|
||||
- Timer stops after test completes (or fails/times out)
|
||||
- Millisecond precision is used for reporting
|
||||
|
||||
2. **Protocol Integration**:
|
||||
- Timing is embedded in TAP output using Protocol V2 markers
|
||||
- Inline format for simple timing: `ok 1 - test name ⟦TSTEST:time:123⟧`
|
||||
- Block format for complex metadata: `⟦TSTEST:META:{"time":456,"file":"test.ts"}⟧`
|
||||
|
||||
3. **Performance Metrics Calculation**:
|
||||
- Average is calculated from sum of individual test times, not total runtime
|
||||
- Slowest test detection prefers tests with >0ms duration
|
||||
- Failed tests still contribute their execution time to metrics
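A usage sketch of the timing capture described above (`.start()`/`.stop()` method names are assumed from typical usage; `.milliSeconds` is the property the repo's own tests read):

```typescript
import { HrtMeasurement } from '@push.rocks/smarttime';

async function runTimedTest(testFunction: () => Promise<void>) {
  const hrtMeasurement = new HrtMeasurement();
  hrtMeasurement.start();             // started immediately before the test body
  try {
    await testFunction();
  } finally {
    hrtMeasurement.stop();            // stopped even when the test fails or times out
  }
  return hrtMeasurement.milliSeconds; // millisecond precision for reporting
}
```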
|
||||
|
||||
### Edge Cases and Considerations
|
||||
|
||||
1. **Sub-millisecond Tests**:
|
||||
- Very fast tests may report 0ms due to millisecond rounding
|
||||
- Performance metrics handle this by showing "All tests completed in <1ms" when appropriate
|
||||
|
||||
2. **Special Test States**:
|
||||
- **Skipped tests**: Report 0ms (not executed)
|
||||
- **Todo tests**: Report 0ms (not executed)
|
||||
- **Failed tests**: Report actual execution time before failure
|
||||
- **Timeout tests**: Report time until timeout occurred
|
||||
|
||||
3. **Parallel Test Timing**:
|
||||
- Each parallel test tracks its own execution time independently
|
||||
- Parallel tests may have overlapping execution periods
|
||||
- Total suite time reflects wall-clock time, not sum of test times
|
||||
|
||||
4. **Hook Timing**:
|
||||
- `beforeEach`/`afterEach` hooks are not included in individual test times
|
||||
- Only the actual test function execution is measured
|
||||
|
||||
5. **Retry Timing**:
|
||||
- When tests retry, only the final attempt's duration is reported
|
||||
- Each retry attempt emits separate `test:started` events
|
||||
|
||||
### Parser Fix for Timing Metadata
|
||||
|
||||
The protocol parser was fixed to correctly handle inline timing metadata:
|
||||
- The condition was changed from `!simpleMatch[1].includes(':')` to an explicit check for simple `key:value` pairs
- Prefixed formats (`META:`, `SKIP:`, `TODO:`, `EVENT:`) are excluded, while simple forms like `time:250` are parsed
|
||||
|
||||
This ensures timing metadata is correctly extracted and displayed in test results.
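A sketch of that check (identifier names are illustrative, not the actual ones in the protocol parser):

```typescript
const PREFIXED = ['META:', 'SKIP:', 'TODO:', 'EVENT:'];

function parseInlineMetadata(content: string): Record<string, string> | null {
  // Prefixed formats are handled by the block/complex parsers instead
  if (PREFIXED.some((prefix) => content.startsWith(prefix))) {
    return null;
  }
  // Simple forms like "time:250" or "time:250,retry:1"
  const result: Record<string, string> = {};
  for (const pair of content.split(',')) {
    const [key, value] = pair.split(':');
    if (key && value !== undefined) result[key] = value;
  }
  return result;
}

// parseInlineMetadata('time:250')        -> { time: '250' }
// parseInlineMetadata('META:{"time":1}') -> null
```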
|
321 readme.plan.md Normal file
@@ -0,0 +1,321 @@
|
||||
# Improvement Plan for tstest and tapbundle
|
||||
|
||||
!! FIRST: Reread /home/philkunz/.claude/CLAUDE.md to ensure following all guidelines !!
|
||||
|
||||
## Improved Internal Protocol (NEW - Critical) ✅ COMPLETED
|
||||
|
||||
### Current Issues ✅ RESOLVED
|
||||
- ✅ TAP protocol uses `#` for metadata which conflicts with test descriptions containing `#`
|
||||
- ✅ Fragile regex parsing that breaks with special characters
|
||||
- ✅ Limited extensibility for new metadata types
|
||||
|
||||
### Proposed Solution: Protocol V2 ✅ IMPLEMENTED
|
||||
- ✅ Use Unicode delimiters `⟦TSTEST:META:{}⟧` that won't appear in test names
|
||||
- ✅ Structured JSON metadata format
|
||||
- ✅ Separate protocol blocks for complex data (errors, snapshots)
|
||||
- ✅ Complete replacement of v1 (no backwards compatibility needed)
|
||||
|
||||
### Implementation ✅ COMPLETED
|
||||
- ✅ Phase 1: Create protocol v2 implementation in ts_tapbundle_protocol
|
||||
- ✅ Phase 2: Replace all v1 code in both tstest and tapbundle with v2
|
||||
- ✅ Phase 3: Delete all v1 parsing and generation code
|
||||
|
||||
#### ts_tapbundle_protocol Directory
|
||||
The protocol v2 implementation will be contained in the `ts_tapbundle_protocol` directory as isomorphic TypeScript code:
|
||||
- **Isomorphic Design**: All code must work in both browser and Node.js environments
|
||||
- **No Node.js Imports**: No Node.js-specific modules allowed (no fs, path, child_process, etc.)
|
||||
- **Protocol Classes**: Contains classes implementing all sides of the protocol:
|
||||
- ✅ `ProtocolEmitter`: For generating protocol v2 messages (used by tapbundle)
|
||||
- ✅ `ProtocolParser`: For parsing protocol v2 messages (used by tstest)
|
||||
- ✅ `ProtocolMessage`: Base classes for different message types
|
||||
- ✅ `ProtocolTypes`: TypeScript interfaces and types for protocol structures
|
||||
- **Pure TypeScript**: Only browser-compatible APIs and pure TypeScript/JavaScript code
|
||||
- **Build Integration**:
|
||||
- Compiled by `pnpm build` (via tsbuild) to `dist_ts_tapbundle_protocol/`
|
||||
- Build order defined in tspublish.json files
|
||||
- Imported by ts and ts_tapbundle modules from the compiled dist directory
|
||||
|
||||
See `readme.protocol.md` for detailed specification.
|
||||
|
||||
## Test Configuration System (NEW)
|
||||
|
||||
### Global Test Configuration via 00init.ts
|
||||
- **Discovery**: Check for `test/00init.ts` before running tests
|
||||
- **Execution**: Import and execute before any test files if found
|
||||
- **Purpose**: Define project-wide default test settings
|
||||
|
||||
### tap.settings() API
|
||||
```typescript
|
||||
interface TapSettings {
|
||||
// Timing
|
||||
timeout?: number; // Default timeout for all tests (ms)
|
||||
slowThreshold?: number; // Mark tests as slow if they exceed this (ms)
|
||||
|
||||
// Execution Control
|
||||
bail?: boolean; // Stop on first test failure
|
||||
retries?: number; // Number of retries for failed tests
|
||||
retryDelay?: number; // Delay between retries (ms)
|
||||
|
||||
// Output Control
|
||||
suppressConsole?: boolean; // Suppress console output in passing tests
|
||||
verboseErrors?: boolean; // Show full stack traces
|
||||
showTestDuration?: boolean; // Show duration for each test
|
||||
|
||||
// Parallel Execution
|
||||
maxConcurrency?: number; // Max parallel tests (for .para files)
|
||||
isolateTests?: boolean; // Run each test in fresh context
|
||||
|
||||
// Lifecycle Hooks
|
||||
beforeAll?: () => Promise<void> | void;
|
||||
afterAll?: () => Promise<void> | void;
|
||||
beforeEach?: (testName: string) => Promise<void> | void;
|
||||
afterEach?: (testName: string, passed: boolean) => Promise<void> | void;
|
||||
|
||||
// Environment
|
||||
env?: Record<string, string>; // Additional environment variables
|
||||
|
||||
// Features
|
||||
enableSnapshots?: boolean; // Enable snapshot testing
|
||||
snapshotDirectory?: string; // Custom snapshot directory
|
||||
updateSnapshots?: boolean; // Update snapshots instead of comparing
|
||||
}
|
||||
```
|
||||
|
||||
### Settings Inheritance
|
||||
- Global (00init.ts) → File level → Test level
|
||||
- More specific settings override less specific ones
|
||||
- Arrays/objects are merged, primitives are replaced
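A sketch of the merge rule described above (not the actual SettingsManager code):

```typescript
function mergeSettings<T extends Record<string, any>>(base: T, override: Partial<T>): T {
  const result: Record<string, any> = { ...base };
  for (const [key, value] of Object.entries(override)) {
    const current = result[key];
    if (Array.isArray(current) && Array.isArray(value)) {
      result[key] = [...current, ...value];        // arrays are merged
    } else if (
      current && value &&
      typeof current === 'object' && typeof value === 'object' &&
      !Array.isArray(current) && !Array.isArray(value)
    ) {
      result[key] = mergeSettings(current, value); // nested objects are merged
    } else {
      result[key] = value;                         // primitives (and functions) are replaced
    }
  }
  return result as T;
}

// mergeSettings({ timeout: 5000, env: { A: '1' } }, { timeout: 2000, env: { B: '2' } })
// -> { timeout: 2000, env: { A: '1', B: '2' } }
```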
|
||||
|
||||
### Implementation Phases
|
||||
1. **Core Infrastructure**: Settings storage and merge logic
|
||||
2. **Discovery**: 00init.ts loading mechanism
|
||||
3. **Application**: Apply settings to test execution
|
||||
4. **Advanced**: Parallel execution and snapshot configuration
|
||||
|
||||
## 1. Enhanced Communication Between tapbundle and tstest ✅ COMPLETED
|
||||
|
||||
### 1.1 Real-time Test Progress API ✅ COMPLETED
|
||||
- ✅ Create a bidirectional communication channel between tapbundle and tstest
|
||||
- ✅ Emit events for test lifecycle stages (start, progress, completion)
|
||||
- ✅ Allow tstest to subscribe to tapbundle events for better progress reporting
|
||||
- ✅ Implement a standardized message format for test metadata
|
||||
|
||||
### 1.2 Rich Error Reporting ✅ COMPLETED
|
||||
- ✅ Pass structured error objects from tapbundle to tstest
|
||||
- ✅ Include stack traces, code snippets, and contextual information
|
||||
- ✅ Support for error categorization (assertion failures, timeouts, uncaught exceptions)
|
||||
- ✅ Visual diff output for failed assertions
|
||||
|
||||
## 2. Enhanced toolsArg Functionality
|
||||
|
||||
### 2.3 Test Data and Context Sharing (Partial)
|
||||
```typescript
|
||||
tap.test('data-driven test', async (toolsArg) => {
|
||||
// Parameterized test data (not yet implemented)
|
||||
const testData = toolsArg.data<TestInput>();
|
||||
expect(processData(testData)).toEqual(expected);
|
||||
});
|
||||
```
|
||||
|
||||
## 3. Nested Tests and Test Suites
|
||||
|
||||
### 3.2 Hierarchical Test Organization (Not yet implemented)
|
||||
- Support for multiple levels of nesting
|
||||
- Inherited context and configuration from parent suites
|
||||
- Aggregated reporting for test suites
|
||||
- Suite-level lifecycle hooks
|
||||
|
||||
## 4. Advanced Test Features
|
||||
|
||||
### 4.1 Snapshot Testing ✅ (Basic implementation complete)
|
||||
|
||||
### 4.2 Performance Benchmarking
|
||||
```typescript
|
||||
tap.test('performance test', async (toolsArg) => {
|
||||
const benchmark = toolsArg.benchmark();
|
||||
|
||||
// Run operation
|
||||
await expensiveOperation();
|
||||
|
||||
// Assert performance constraints
|
||||
benchmark.expect({
|
||||
maxDuration: 1000,
|
||||
maxMemory: '100MB'
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
|
||||
## 5. Test Execution Improvements
|
||||
|
||||
|
||||
### 5.2 Watch Mode ✅ COMPLETED
|
||||
- Automatically re-run tests on file changes
|
||||
- Debounced file change detection (300ms)
|
||||
- Clear console output between runs
|
||||
- Shows which files triggered re-runs
|
||||
- Graceful exit with Ctrl+C
|
||||
- `--watch-ignore` option for excluding patterns
|
||||
|
||||
### 5.3 Advanced Test Filtering (Partial) ⚠️
|
||||
```typescript
|
||||
// Exclude tests by pattern (not yet implemented)
|
||||
tstest --exclude "**/slow/**"
|
||||
|
||||
// Run only failed tests from last run (not yet implemented)
|
||||
tstest --failed
|
||||
|
||||
// Run tests modified in git (not yet implemented)
|
||||
tstest --changed
|
||||
```
|
||||
|
||||
## 6. Reporting and Analytics
|
||||
|
||||
### 6.1 Custom Reporters
|
||||
- Plugin architecture for custom reporters
|
||||
- Built-in reporters: JSON, JUnit, HTML, Markdown
|
||||
- Real-time streaming reporters
|
||||
- Aggregated test metrics and trends
|
||||
|
||||
### 6.2 Coverage Integration
|
||||
- Built-in code coverage collection
|
||||
- Coverage thresholds and enforcement
|
||||
- Coverage trending over time
|
||||
- Integration with CI/CD pipelines
|
||||
|
||||
### 6.3 Test Analytics Dashboard
|
||||
- Web-based dashboard for test results
|
||||
- Historical test performance data
|
||||
- Flaky test detection
|
||||
- Test impact analysis
|
||||
|
||||
## 7. Developer Experience
|
||||
|
||||
### 7.1 Better Error Messages
|
||||
- Clear, actionable error messages
|
||||
- Suggestions for common issues
|
||||
- Links to documentation
|
||||
- Code examples in error output
|
||||
|
||||
## Implementation Phases
|
||||
|
||||
### Phase 1: Improved Internal Protocol (Priority: Critical) ✅ COMPLETED
|
||||
1. ✅ Create ts_tapbundle_protocol directory with isomorphic protocol v2 implementation
|
||||
- ✅ Implement ProtocolEmitter class for message generation
|
||||
- ✅ Implement ProtocolParser class for message parsing
|
||||
- ✅ Define ProtocolMessage types and interfaces
|
||||
- ✅ Ensure all code is browser and Node.js compatible
|
||||
- ✅ Add tspublish.json to configure build order
|
||||
2. ✅ Update build configuration to compile ts_tapbundle_protocol first
|
||||
3. ✅ Replace TAP parser in tstest with Protocol V2 parser importing from dist_ts_tapbundle_protocol
|
||||
4. ✅ Replace TAP generation in tapbundle with Protocol V2 emitter importing from dist_ts_tapbundle_protocol
|
||||
5. ✅ Delete all v1 TAP parsing code from tstest
|
||||
6. ✅ Delete all v1 TAP generation code from tapbundle
|
||||
7. ✅ Test with real-world test suites containing special characters
|
||||
|
||||
### Phase 2: Test Configuration System (Priority: High) ✅ COMPLETED
|
||||
1. ✅ Implement tap.settings() API with TypeScript interfaces
|
||||
2. ✅ Add 00init.ts discovery and loading mechanism
|
||||
3. ✅ Implement settings inheritance and merge logic
|
||||
4. ✅ Apply settings to test execution (timeouts, retries, etc.)
|
||||
|
||||
### Phase 3: Enhanced Communication (Priority: High) ✅ COMPLETED
|
||||
1. ✅ Build on Protocol V2 for richer communication
|
||||
2. ✅ Implement real-time test progress API
|
||||
3. ✅ Add structured error reporting with diffs and traces
|
||||
|
||||
### Phase 4: Developer Experience (Priority: Medium) ⚠️ PARTIALLY COMPLETE

1. ✅ Add watch mode (completed; see section 5.2)
|
||||
2. Implement custom reporters
|
||||
3. Complete advanced test filtering options
|
||||
4. Add performance benchmarking API
|
||||
|
||||
### Phase 5: Analytics and Performance (Priority: Low) ❌ NOT STARTED
|
||||
1. Build test analytics dashboard
|
||||
2. Implement coverage integration
|
||||
3. Create trend analysis tools
|
||||
4. Add test impact analysis
|
||||
|
||||
## Technical Considerations
|
||||
|
||||
### API Design Principles
|
||||
- Clean, modern API design without legacy constraints
|
||||
- Progressive enhancement approach
|
||||
- Well-documented features and APIs
|
||||
- Clear, simple interfaces
|
||||
|
||||
### Performance Goals
|
||||
- Minimal overhead for test execution
|
||||
- Efficient parallel execution
|
||||
- Fast test discovery
|
||||
- Optimized browser test bundling
|
||||
|
||||
### Integration Points
|
||||
- Clean interfaces between tstest and tapbundle
|
||||
- Extensible plugin architecture
|
||||
- Standard test result format
|
||||
- Compatible with existing CI/CD tools
|
||||
|
||||
## Summary of Remaining Work
|
||||
|
||||
### ✅ Completed
|
||||
- **Protocol V2**: Full implementation with Unicode delimiters, structured metadata, and special character handling
|
||||
- **Test Configuration System**: tap.settings() API, 00init.ts discovery, settings inheritance, lifecycle hooks
|
||||
- **Enhanced Communication**: Event-based test lifecycle reporting, visual diff output for assertion failures, real-time test progress API
|
||||
- **Rich Error Reporting**: Stack traces, error metadata, and visual diffs through protocol
|
||||
- **Tags Filtering**: `--tags` option for running specific tagged tests
|
||||
|
||||
### ✅ Existing Features (Not in Plan)
|
||||
- **Timeout Support**: `--timeout` option and per-test timeouts
|
||||
- **Test Retries**: `tap.retry()` for flaky test handling
|
||||
- **Parallel Tests**: `.testParallel()` for concurrent execution
|
||||
- **Snapshot Testing**: Basic implementation with `toMatchSnapshot()`
|
||||
- **Test Lifecycle**: `describe()` blocks with `beforeEach`/`afterEach`
|
||||
- **Skip Tests**: `tap.skip.test()` (though it doesn't create test objects)
|
||||
- **Log Files**: `--logfile` option saves output to `.nogit/testlogs/`
|
||||
- **Test Range**: `--startFrom` and `--stopAt` for partial runs
|
||||
|
||||
### ⚠️ Partially Completed
|
||||
- **Advanced Test Filtering**: Have `--tags` but missing `--exclude`, `--failed`, `--changed`
|
||||
|
||||
### ❌ Not Started
|
||||
|
||||
#### High Priority
|
||||
|
||||
#### Medium Priority
|
||||
2. **Developer Experience**
|
||||
- Watch mode for file changes
|
||||
- Custom reporters (JSON, JUnit, HTML, Markdown)
|
||||
- Performance benchmarking API
|
||||
- Better error messages with suggestions
|
||||
|
||||
3. **Enhanced toolsArg**
|
||||
- Test data injection
|
||||
- Context sharing between tests
|
||||
- Parameterized tests
|
||||
|
||||
4. **Test Organization**
|
||||
- Hierarchical test suites
|
||||
- Nested describe blocks
|
||||
- Suite-level lifecycle hooks
|
||||
|
||||
#### Low Priority
|
||||
5. **Analytics and Performance**
|
||||
- Test analytics dashboard
|
||||
- Code coverage integration
|
||||
- Trend analysis
|
||||
- Flaky test detection
|
||||
|
||||
### Recently Fixed Issues ✅
|
||||
- **tap.todo()**: Now fully implemented with test object creation
|
||||
- **tap.skip.test()**: Now creates test objects and maintains accurate test count
|
||||
- **tap.only.test()**: Works correctly - when .only tests exist, only those run
|
||||
|
||||
### Remaining Minor Issues
|
||||
- **Protocol Output**: Some protocol messages still appear in console output
|
||||
|
||||
### Next Recommended Steps
|
||||
1. Add Watch Mode (Phase 4) - high developer value for fast feedback
|
||||
2. Implement Custom Reporters - important for CI/CD integration
|
||||
3. Implement performance benchmarking API
|
||||
4. Add better error messages with suggestions
|
287 readme.protocol.md Normal file
@@ -0,0 +1,287 @@
|
||||
# Improved Internal Protocol Design
|
||||
|
||||
## Current Issues with TAP Protocol
|
||||
|
||||
1. **Delimiter Conflict**: Using `#` for metadata conflicts with test descriptions containing `#`
|
||||
2. **Ambiguous Parsing**: No clear boundary between test name and metadata
|
||||
3. **Limited Extensibility**: Adding new metadata requires regex changes
|
||||
4. **Mixed Concerns**: Protocol data mixed with human-readable output
|
||||
|
||||
## Proposed Internal Protocol v2
|
||||
|
||||
### Design Principles
|
||||
|
||||
1. **Clear Separation**: Protocol data must be unambiguously separated from user content
|
||||
2. **Extensibility**: Easy to add new metadata without breaking parsers
|
||||
3. **Backwards Compatible**: Can coexist with standard TAP for gradual migration
|
||||
4. **Machine Readable**: Structured format for reliable parsing
|
||||
5. **Human Friendly**: Still readable in raw form
|
||||
|
||||
### Protocol Options
|
||||
|
||||
#### Option 1: Special Delimiters
|
||||
```
|
||||
ok 1 - test description ::TSTEST:: {"time":123,"retry":0}
|
||||
not ok 2 - another test ::TSTEST:: {"time":45,"error":"timeout"}
|
||||
ok 3 - skipped test ::TSTEST:: {"time":0,"skip":"not ready"}
|
||||
```
|
||||
|
||||
**Pros**:
|
||||
- Simple to implement
|
||||
- Backwards compatible with TAP parsers (they ignore the suffix)
|
||||
- Easy to parse with split()
|
||||
|
||||
**Cons**:
|
||||
- Still could conflict if test name contains `::TSTEST::`
|
||||
- Not standard TAP
|
||||
|
||||
#### Option 2: Separate Metadata Lines
|
||||
```
|
||||
ok 1 - test description
|
||||
::METADATA:: {"test":1,"time":123,"retry":0}
|
||||
not ok 2 - another test
|
||||
::METADATA:: {"test":2,"time":45,"error":"timeout"}
|
||||
```
|
||||
|
||||
**Pros**:
|
||||
- Complete separation of concerns
|
||||
- No chance of conflicts
|
||||
- Can include arbitrary metadata
|
||||
|
||||
**Cons**:
|
||||
- Requires correlation between lines
|
||||
- More complex parsing
|
||||
|
||||
#### Option 3: YAML Blocks (TAP 13 Compatible)
|
||||
```
|
||||
ok 1 - test description
|
||||
---
|
||||
time: 123
|
||||
retry: 0
|
||||
...
|
||||
not ok 2 - another test
|
||||
---
|
||||
time: 45
|
||||
error: timeout
|
||||
stack: |
|
||||
Error: timeout
|
||||
at Test.run (test.js:10:5)
|
||||
...
|
||||
```
|
||||
|
||||
**Pros**:
|
||||
- Standard TAP 13 feature
|
||||
- Structured data format
|
||||
- Human readable
|
||||
- Extensible
|
||||
|
||||
**Cons**:
|
||||
- More verbose
|
||||
- YAML parsing overhead
|
||||
|
||||
#### Option 4: Binary Protocol Markers (Recommended)
|
||||
```
|
||||
ok 1 - test description
|
||||
␛[TSTEST:eyJ0aW1lIjoxMjMsInJldHJ5IjowfQ==]␛
|
||||
not ok 2 - another test
|
||||
␛[TSTEST:eyJ0aW1lIjo0NSwiZXJyb3IiOiJ0aW1lb3V0In0=]␛
|
||||
```
|
||||
|
||||
Using ASCII escape character (␛ = \x1B) with base64 encoded JSON.
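A sketch of how such markers could be produced and consumed (illustrative only; Node's `Buffer` is used here even though the final protocol code is isomorphic):

```typescript
const ESC = '\x1B';

function encodeMarker(meta: object): string {
  return `${ESC}[TSTEST:${Buffer.from(JSON.stringify(meta)).toString('base64')}]${ESC}`;
}

function decodeMarker(line: string): object | null {
  const match = line.match(/\x1B\[TSTEST:([A-Za-z0-9+/=]+)\]\x1B/);
  return match ? JSON.parse(Buffer.from(match[1], 'base64').toString('utf8')) : null;
}

// decodeMarker(encodeMarker({ time: 123, retry: 0 })) -> { time: 123, retry: 0 }
```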
|
||||
|
||||
**Pros**:
|
||||
- Zero chance of accidental conflicts
|
||||
- Compact
|
||||
- Fast to parse
|
||||
- Invisible in most terminals
|
||||
|
||||
**Cons**:
|
||||
- Not human readable in raw form
|
||||
- Requires base64 encoding/decoding
|
||||
|
||||
### Recommended Implementation: Hybrid Approach
|
||||
|
||||
Use multiple strategies based on context:
|
||||
|
||||
1. **For timing and basic metadata**: Use structured delimiters
|
||||
```
|
||||
ok 1 - test name ⟦time:123,retry:0⟧
|
||||
```
|
||||
|
||||
2. **For complex data (errors, snapshots)**: Use separate protocol lines
|
||||
```
|
||||
ok 1 - test failed
|
||||
⟦TSTEST:ERROR⟧
|
||||
{"message":"Assertion failed","stack":"...","diff":"..."}
|
||||
⟦/TSTEST:ERROR⟧
|
||||
```
|
||||
|
||||
3. **For human-readable output**: Keep standard TAP comments
|
||||
```
|
||||
# Test suite: User Authentication
|
||||
ok 1 - should login
|
||||
```
|
||||
|
||||
### Implementation Plan
|
||||
|
||||
#### Phase 1: Parser Enhancement
|
||||
1. Add new protocol parser alongside existing TAP parser
|
||||
2. Support both old and new formats during transition
|
||||
3. Add protocol version negotiation
|
||||
|
||||
#### Phase 2: Metadata Structure
|
||||
```typescript
|
||||
interface TestMetadata {
|
||||
// Timing
|
||||
time: number; // milliseconds
|
||||
startTime?: number; // Unix timestamp
|
||||
endTime?: number; // Unix timestamp
|
||||
|
||||
// Status
|
||||
skip?: string; // skip reason
|
||||
todo?: string; // todo reason
|
||||
retry?: number; // retry attempt
|
||||
maxRetries?: number; // max retries allowed
|
||||
|
||||
// Error details
|
||||
error?: {
|
||||
message: string;
|
||||
stack?: string;
|
||||
diff?: string;
|
||||
actual?: any;
|
||||
expected?: any;
|
||||
};
|
||||
|
||||
// Test context
|
||||
file?: string; // source file
|
||||
line?: number; // line number
|
||||
column?: number; // column number
|
||||
|
||||
// Custom data
|
||||
tags?: string[]; // test tags
|
||||
custom?: Record<string, any>;
|
||||
}
|
||||
```
|
||||
|
||||
#### Phase 3: Protocol Messages
|
||||
|
||||
##### Success Message
|
||||
```
|
||||
ok 1 - user authentication works
|
||||
⟦TSTEST:META:{"time":123,"tags":["auth","unit"]}⟧
|
||||
```
|
||||
|
||||
##### Failure Message
|
||||
```
|
||||
not ok 2 - login fails with invalid password
|
||||
⟦TSTEST:META:{"time":45,"retry":1,"maxRetries":3}⟧
|
||||
⟦TSTEST:ERROR⟧
|
||||
{
|
||||
"message": "Expected 401 but got 500",
|
||||
"stack": "Error: Expected 401 but got 500\n at Test.run (auth.test.ts:25:10)",
|
||||
"actual": 500,
|
||||
"expected": 401
|
||||
}
|
||||
⟦/TSTEST:ERROR⟧
|
||||
```
|
||||
|
||||
##### Skip Message
|
||||
```
|
||||
ok 3 - database integration test ⟦TSTEST:SKIP:No database connection⟧
|
||||
```
|
||||
|
||||
##### Snapshot Communication
|
||||
```
|
||||
⟦TSTEST:SNAPSHOT:user-profile⟧
|
||||
{
|
||||
"name": "John Doe",
|
||||
"email": "john@example.com",
|
||||
"roles": ["user", "admin"]
|
||||
}
|
||||
⟦/TSTEST:SNAPSHOT⟧
|
||||
```
|
||||
|
||||
### Migration Strategy
|
||||
|
||||
1. **Version Detection**: First line indicates protocol version
|
||||
```
|
||||
⟦TSTEST:PROTOCOL:2.0⟧
|
||||
TAP version 13
|
||||
```
|
||||
|
||||
2. **Gradual Rollout**:
|
||||
- v1.10: Add protocol v2 parser, keep v1 generator
|
||||
- v1.11: Generate v2 by default, v1 with --legacy flag
|
||||
- v2.0: Remove v1 support
|
||||
|
||||
3. **Feature Flags**:
|
||||
```typescript
|
||||
tap.settings({
|
||||
protocol: 'v2', // or 'v1', 'auto'
|
||||
protocolFeatures: {
|
||||
structuredErrors: true,
|
||||
enhancedTiming: true,
|
||||
binaryMarkers: false
|
||||
}
|
||||
});
|
||||
```
|
||||
|
||||
### Benefits of New Protocol
|
||||
|
||||
1. **Reliability**: No more regex fragility or description conflicts
|
||||
2. **Performance**: Faster parsing with clear boundaries
|
||||
3. **Extensibility**: Easy to add new metadata fields
|
||||
4. **Debugging**: Rich error information with stack traces and diffs
|
||||
5. **Integration**: Better IDE and CI/CD tool integration
|
||||
6. **Forward Compatible**: Room for future enhancements
|
||||
|
||||
### Example Parser Implementation
|
||||
|
||||
```typescript
|
||||
class ProtocolV2Parser {
|
||||
private readonly MARKER_START = '⟦TSTEST:';
|
||||
private readonly MARKER_END = '⟧';
|
||||
|
||||
parseMetadata(line: string): TestMetadata | null {
|
||||
const start = line.lastIndexOf(this.MARKER_START);
|
||||
if (start === -1) return null;
|
||||
|
||||
const end = line.indexOf(this.MARKER_END, start);
|
||||
if (end === -1) return null;
|
||||
|
||||
const content = line.substring(start + this.MARKER_START.length, end);
|
||||
    // split only at the first ':' so JSON payloads containing ':' are not truncated
    const sepIndex = content.indexOf(':');
    const type = sepIndex === -1 ? content : content.substring(0, sepIndex);
    const data = sepIndex === -1 ? '' : content.substring(sepIndex + 1);
|
||||
|
||||
switch (type) {
|
||||
case 'META':
|
||||
return JSON.parse(data);
|
||||
case 'SKIP':
|
||||
return { skip: data };
|
||||
case 'TODO':
|
||||
return { todo: data };
|
||||
default:
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
parseTestLine(line: string): ParsedTest {
|
||||
// First extract any metadata
|
||||
const metadata = this.parseMetadata(line);
|
||||
|
||||
// Then parse the TAP part (without metadata)
|
||||
const cleanLine = this.removeMetadata(line);
|
||||
const tapResult = this.parseTAP(cleanLine);
|
||||
|
||||
return { ...tapResult, metadata };
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Next Steps
|
||||
|
||||
1. Implement proof of concept with basic metadata support
|
||||
2. Test with real-world test suites for edge cases
|
||||
3. Benchmark parsing performance
|
||||
4. Get feedback from users
|
||||
5. Finalize protocol specification
|
||||
6. Implement in both tapbundle and tstest
|
41 test/config-test/00init.ts Normal file
@@ -0,0 +1,41 @@
|
||||
import { tap } from '../../ts_tapbundle/index.js';
|
||||
|
||||
// TAP-compliant comment output
|
||||
console.log('# 🚀 00init.ts: LOADED AND EXECUTING');
|
||||
console.log('# 🚀 00init.ts: Setting up global test configuration');
|
||||
|
||||
// Add a global variable to verify 00init.ts was loaded
|
||||
(global as any).__00INIT_LOADED = true;
|
||||
|
||||
// Configure global test settings
|
||||
tap.settings({
|
||||
// Set a default timeout of 5 seconds for all tests
|
||||
timeout: 5000,
|
||||
|
||||
// Enable retries for flaky tests
|
||||
retries: 2,
|
||||
retryDelay: 1000,
|
||||
|
||||
// Show test duration
|
||||
showTestDuration: true,
|
||||
|
||||
// Global lifecycle hooks
|
||||
beforeAll: async () => {
|
||||
console.log('Global beforeAll: Initializing test environment');
|
||||
},
|
||||
|
||||
afterAll: async () => {
|
||||
console.log('Global afterAll: Cleaning up test environment');
|
||||
},
|
||||
|
||||
beforeEach: async (testName: string) => {
|
||||
console.log(`Global beforeEach: Starting test "${testName}"`);
|
||||
},
|
||||
|
||||
afterEach: async (testName: string, passed: boolean) => {
|
||||
console.log(`Global afterEach: Test "${testName}" ${passed ? 'passed' : 'failed'}`);
|
||||
}
|
||||
});
|
||||
|
||||
console.log('# 🚀 00init.ts: Configuration COMPLETE');
|
||||
console.log('# 🚀 00init.ts: tap.settings() called successfully');
|
44 test/config-test/test.config.ts Normal file
@@ -0,0 +1,44 @@
|
||||
import { tap, expect } from '../../ts_tapbundle/index.js';
|
||||
|
||||
// TAP-compliant comment output
|
||||
console.log('# 🔍 TEST FILE LOADED - test.config.ts');
|
||||
|
||||
// Check if 00init.ts was loaded
|
||||
const initLoaded = (global as any).__00INIT_LOADED;
|
||||
console.log(`# 🔍 00init.ts loaded: ${initLoaded === true}`);
|
||||
|
||||
// Test that uses the global timeout setting
|
||||
tap.test('Test with global timeout', async (toolsArg) => {
|
||||
// This test should complete within the 5 second timeout set in 00init.ts
|
||||
await toolsArg.delayFor(2000); // 2 seconds
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
// Test that demonstrates retries
|
||||
tap.test('Test with retries', async () => {
|
||||
// This test will use the global retry setting (2 retries)
|
||||
console.log('Running test that might be flaky');
|
||||
|
||||
// Simulate a flaky test that passes on second try
|
||||
const randomValue = Math.random();
|
||||
console.log(`Random value: ${randomValue}`);
|
||||
|
||||
// Always pass for demonstration
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
// Test with custom timeout that overrides global
|
||||
tap.timeout(1000).test('Test with custom timeout', async (toolsArg) => {
|
||||
// This test has a 1 second timeout, overriding the global 5 seconds
|
||||
await toolsArg.delayFor(500); // 500ms - should pass
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
// Test to verify lifecycle hooks are working
|
||||
tap.test('Test lifecycle hooks', async () => {
|
||||
console.log('Inside test: lifecycle hooks should have run');
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
// Start the test suite
|
||||
tap.start();
|
22 test/config-test/test.file-settings.ts Normal file
@@ -0,0 +1,22 @@
|
||||
import { tap, expect } from '../../ts_tapbundle/index.js';
|
||||
|
||||
// Override global settings for this file
|
||||
tap.settings({
|
||||
timeout: 2000, // Override global timeout to 2 seconds
|
||||
retries: 0, // Disable retries for this file
|
||||
});
|
||||
|
||||
tap.test('Test with file-level timeout', async (toolsArg) => {
|
||||
// This should use the file-level timeout of 2 seconds
|
||||
console.log('Running with file-level timeout of 2 seconds');
|
||||
await toolsArg.delayFor(1000); // 1 second - should pass
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.test('Test without retries', async () => {
|
||||
// This test should not retry even if it fails
|
||||
console.log('This test has no retries (file-level setting)');
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.start();
|
3 test/debug.js Normal file
@@ -0,0 +1,3 @@
|
||||
// Direct run to see TAP output
|
||||
const { execSync } = require('child_process');
|
||||
console.log(execSync('tsx test/tapbundle/test.debug.ts', { cwd: '/mnt/data/lossless/git.zone/tstest' }).toString());
|
8 test/glob-test/another.spec.ts Normal file
@@ -0,0 +1,8 @@
|
||||
import { tap } from '../../ts_tapbundle/index.js';
|
||||
|
||||
tap.test('spec file test', async () => {
|
||||
console.log('This is a .spec.ts file that should be found by glob');
|
||||
return true;
|
||||
});
|
||||
|
||||
tap.start();
|
8 test/glob-test/nested/test.nested-glob.ts Normal file
@@ -0,0 +1,8 @@
|
||||
import { tap } from '../../../ts_tapbundle/index.js';
|
||||
|
||||
tap.test('nested glob pattern test', async () => {
|
||||
console.log('This test file is in a nested directory');
|
||||
return true;
|
||||
});
|
||||
|
||||
tap.start();
|
8 test/glob-test/test.glob-test.ts Normal file
@@ -0,0 +1,8 @@
|
||||
import { tap } from '../../ts_tapbundle/index.js';
|
||||
|
||||
tap.test('glob pattern test', async () => {
|
||||
console.log('This test file should be found by glob patterns');
|
||||
return true;
|
||||
});
|
||||
|
||||
tap.start();
|
55 test/tapbundle/test.browser.nonci.ts Normal file
@@ -0,0 +1,55 @@
|
||||
import { tap, expect, webhelpers } from '../../ts_tapbundle/index.js';
|
||||
|
||||
tap.preTask('custompretask', async () => {
|
||||
console.log('this is a pretask');
|
||||
});
|
||||
|
||||
tap.test('should have access to webhelpers', async () => {
|
||||
const myElement = await webhelpers.fixture(webhelpers.html`<div></div>`);
|
||||
expect(myElement).toBeInstanceOf(HTMLElement);
|
||||
console.log(myElement);
|
||||
});
|
||||
|
||||
const test1 = tap.test('my first test -> expect true to be true', async () => {
|
||||
return expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
const test2 = tap.test('my second test', async (tools) => {
|
||||
await tools.delayFor(50);
|
||||
});
|
||||
|
||||
const test3 = tap.test(
|
||||
'my third test -> test2 should take longer than test1 and endure at least 1000ms',
|
||||
async () => {
|
||||
expect(
|
||||
(await test1.testPromise).hrtMeasurement.milliSeconds <
|
||||
(await test2).hrtMeasurement.milliSeconds,
|
||||
).toBeTrue();
|
||||
expect((await test2.testPromise).hrtMeasurement.milliSeconds > 10).toBeTrue();
|
||||
},
|
||||
);
|
||||
|
||||
const test4 = tap.skip.test('my 4th test -> should fail', async (tools) => {
|
||||
tools.allowFailure();
|
||||
expect(false).toBeTrue();
|
||||
});
|
||||
|
||||
const test5 = tap.test('my 5th test -> should pass in about 500ms', async (tools) => {
|
||||
tools.timeout(1000);
|
||||
await tools.delayFor(500);
|
||||
});
|
||||
|
||||
const test6 = tap.skip.test('my 6th test -> should fail after 1000ms', async (tools) => {
|
||||
tools.allowFailure();
|
||||
tools.timeout(1000);
|
||||
await tools.delayFor(100);
|
||||
});
|
||||
|
||||
const testPromise = tap.start();
|
||||
|
||||
// Export promise for browser compatibility
|
||||
if (typeof globalThis !== 'undefined') {
|
||||
(globalThis as any).tapPromise = testPromise;
|
||||
}
|
||||
|
||||
export default testPromise;
|
19 test/tapbundle/test.debug.ts Normal file
@@ -0,0 +1,19 @@
|
||||
import { tap, expect } from '../../ts_tapbundle/index.js';
|
||||
|
||||
// Simple test to debug TAP output
|
||||
tap.test('test 1', async () => {
|
||||
console.log('Test 1 running');
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.test('test 2 - skip', async (toolsArg) => {
|
||||
toolsArg.skip('Skipping test 2');
|
||||
expect(false).toBeTrue();
|
||||
});
|
||||
|
||||
tap.test('test 3', async () => {
|
||||
console.log('Test 3 running');
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.start();
|
101 test/tapbundle/test.describe.ts Normal file
@@ -0,0 +1,101 @@
|
||||
import { tap, expect } from '../../ts_tapbundle/index.js';
|
||||
|
||||
// Global state for testing lifecycle hooks
|
||||
const lifecycleOrder: string[] = [];
|
||||
|
||||
tap.describe('Test Suite A', () => {
|
||||
tap.beforeEach(async (toolsArg) => {
|
||||
lifecycleOrder.push('Suite A - beforeEach');
|
||||
});
|
||||
|
||||
tap.afterEach(async (toolsArg) => {
|
||||
lifecycleOrder.push('Suite A - afterEach');
|
||||
});
|
||||
|
||||
tap.test('test 1 in suite A', async (toolsArg) => {
|
||||
lifecycleOrder.push('Test 1');
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.test('test 2 in suite A', async (toolsArg) => {
|
||||
lifecycleOrder.push('Test 2');
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.describe('Nested Suite B', () => {
|
||||
tap.beforeEach(async (toolsArg) => {
|
||||
lifecycleOrder.push('Suite B - beforeEach');
|
||||
});
|
||||
|
||||
tap.afterEach(async (toolsArg) => {
|
||||
lifecycleOrder.push('Suite B - afterEach');
|
||||
});
|
||||
|
||||
tap.test('test 1 in nested suite B', async (toolsArg) => {
|
||||
lifecycleOrder.push('Nested Test 1');
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
// Test outside any suite
|
||||
tap.test('test outside suites', async (toolsArg) => {
|
||||
lifecycleOrder.push('Outside Test');
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.describe('Test Suite with errors', () => {
|
||||
tap.beforeEach(async (toolsArg) => {
|
||||
// Setup that might fail
|
||||
const data = await Promise.resolve({ value: 42 });
|
||||
toolsArg.testData = data;
|
||||
});
|
||||
|
||||
tap.test('test with error', async (toolsArg) => {
|
||||
// Verify that data from beforeEach is available
|
||||
expect(toolsArg.testData).toBeDefined();
|
||||
expect(toolsArg.testData.value).toEqual(42);
|
||||
|
||||
// Test that error handling works by catching an error
|
||||
try {
|
||||
throw new Error('Intentional error');
|
||||
} catch (error) {
|
||||
expect(error.message).toEqual('Intentional error');
|
||||
}
|
||||
});
|
||||
|
||||
tap.test('test with skip in suite', async (toolsArg) => {
|
||||
toolsArg.skip('Skipping this test in a suite');
|
||||
expect(false).toBeTrue();
|
||||
});
|
||||
});
|
||||
|
||||
// Verify lifecycle order - this test runs last to check if all hooks were called properly
|
||||
tap.test('verify lifecycle hook order', async (toolsArg) => {
|
||||
// Wait a bit to ensure all tests have completed
|
||||
await new Promise(resolve => setTimeout(resolve, 50));
|
||||
console.log('Lifecycle order:', lifecycleOrder);
|
||||
|
||||
// Check that the tests we expect to have run actually did
|
||||
expect(lifecycleOrder).toContain('Test 1');
|
||||
expect(lifecycleOrder).toContain('Test 2');
|
||||
expect(lifecycleOrder).toContain('Nested Test 1');
|
||||
|
||||
// Check that beforeEach was called before each test in Suite A
|
||||
const test1Index = lifecycleOrder.indexOf('Test 1');
|
||||
expect(test1Index).toBeGreaterThan(-1);
|
||||
const beforeTest1 = lifecycleOrder.slice(0, test1Index);
|
||||
expect(beforeTest1).toContain('Suite A - beforeEach');
|
||||
|
||||
// Check that afterEach was called after test 1
|
||||
const afterTest1 = lifecycleOrder.slice(test1Index + 1);
|
||||
expect(afterTest1).toContain('Suite A - afterEach');
|
||||
|
||||
// Check nested suite lifecycle
|
||||
const nestedTest1Index = lifecycleOrder.indexOf('Nested Test 1');
|
||||
expect(nestedTest1Index).toBeGreaterThan(-1);
|
||||
const beforeNestedTest1 = lifecycleOrder.slice(0, nestedTest1Index);
|
||||
expect(beforeNestedTest1).toContain('Suite B - beforeEach');
|
||||
});
|
||||
|
||||
tap.start();
|
120 test/tapbundle/test.fixtures.ts Normal file
@@ -0,0 +1,120 @@
|
||||
import { tap, TapTools } from '../../ts_tapbundle/index.js';
|
||||
import { expect } from '@push.rocks/smartexpect';
|
||||
|
||||
// Define fixture factories
|
||||
interface User {
|
||||
id: number;
|
||||
name: string;
|
||||
email: string;
|
||||
role: string;
|
||||
}
|
||||
|
||||
interface Post {
|
||||
id: number;
|
||||
title: string;
|
||||
content: string;
|
||||
authorId: number;
|
||||
tags: string[];
|
||||
}
|
||||
|
||||
// Define user fixture factory
|
||||
TapTools.defineFixture<User>('user', (data) => {
|
||||
const id = data?.id || Math.floor(Math.random() * 10000);
|
||||
return {
|
||||
id,
|
||||
name: data?.name || `Test User ${id}`,
|
||||
email: data?.email || `user${id}@test.com`,
|
||||
role: data?.role || 'user'
|
||||
};
|
||||
});
|
||||
|
||||
// Define post fixture factory
|
||||
TapTools.defineFixture<Post>('post', async (data) => {
|
||||
const id = data?.id || Math.floor(Math.random() * 10000);
|
||||
return {
|
||||
id,
|
||||
title: data?.title || `Post ${id}`,
|
||||
content: data?.content || `Content for post ${id}`,
|
||||
authorId: data?.authorId || 1,
|
||||
tags: data?.tags || ['test', 'sample']
|
||||
};
|
||||
});
|
||||
|
||||
tap.describe('Fixture System', () => {
|
||||
tap.afterEach(async () => {
|
||||
// Clean up fixtures after each test
|
||||
await TapTools.cleanupFixtures();
|
||||
});
|
||||
|
||||
tap.tags('unit', 'fixtures')
|
||||
.test('should create a simple fixture', async (toolsArg) => {
|
||||
const user = await toolsArg.fixture<User>('user');
|
||||
|
||||
expect(user).toHaveProperty('id');
|
||||
expect(user).toHaveProperty('name');
|
||||
expect(user).toHaveProperty('email');
|
||||
expect(user.role).toEqual('user');
|
||||
});
|
||||
|
||||
tap.tags('unit', 'fixtures')
|
||||
.test('should create fixture with custom data', async (toolsArg) => {
|
||||
const admin = await toolsArg.fixture<User>('user', {
|
||||
name: 'Admin User',
|
||||
role: 'admin'
|
||||
});
|
||||
|
||||
expect(admin.name).toEqual('Admin User');
|
||||
expect(admin.role).toEqual('admin');
|
||||
expect(admin.email).toContain('@test.com');
|
||||
});
|
||||
|
||||
tap.tags('unit', 'fixtures')
|
||||
.test('should create multiple fixtures with factory', async (toolsArg) => {
|
||||
const userFactory = toolsArg.factory<User>('user');
|
||||
const users = await userFactory.createMany(3);
|
||||
|
||||
// Try different approach
|
||||
expect(users.length).toEqual(3);
|
||||
expect(users[0].id).not.toEqual(users[1].id);
|
||||
expect(users[0].email).not.toEqual(users[1].email);
|
||||
});
|
||||
|
||||
tap.tags('unit', 'fixtures')
|
||||
.test('should create fixtures with custom data per instance', async (toolsArg) => {
|
||||
const postFactory = toolsArg.factory<Post>('post');
|
||||
const posts = await postFactory.createMany(3, (index) => ({
|
||||
title: `Post ${index + 1}`,
|
||||
tags: [`tag${index + 1}`]
|
||||
}));
|
||||
|
||||
expect(posts[0].title).toEqual('Post 1');
|
||||
expect(posts[1].title).toEqual('Post 2');
|
||||
expect(posts[2].title).toEqual('Post 3');
|
||||
|
||||
expect(posts[0].tags).toContain('tag1');
|
||||
expect(posts[1].tags).toContain('tag2');
|
||||
});
|
||||
|
||||
tap.tags('unit', 'fixtures')
|
||||
.test('should handle related fixtures', async (toolsArg) => {
|
||||
const user = await toolsArg.fixture<User>('user', { name: 'Author' });
|
||||
const post = await toolsArg.fixture<Post>('post', {
|
||||
title: 'My Article',
|
||||
authorId: user.id
|
||||
});
|
||||
|
||||
expect(post.authorId).toEqual(user.id);
|
||||
});
|
||||
|
||||
tap.tags('unit', 'fixtures', 'error')
|
||||
.test('should throw error for undefined fixture', async (toolsArg) => {
|
||||
try {
|
||||
await toolsArg.fixture('nonexistent');
|
||||
expect(true).toBeFalse(); // Should not reach here
|
||||
} catch (error: any) {
|
||||
expect(error.message).toContain('Fixture \'nonexistent\' not found');
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
tap.start();
|
32 test/tapbundle/test.fluent-syntax.ts Normal file
@@ -0,0 +1,32 @@
|
||||
import { tap, expect } from '../../ts_tapbundle/index.js';
|
||||
|
||||
// Test with fluent syntax
|
||||
tap.tags('unit', 'fluent')
|
||||
.priority('high')
|
||||
.test('test with fluent syntax', async (toolsArg) => {
|
||||
expect(true).toBeTrue();
|
||||
toolsArg.context.set('fluentTest', 'works');
|
||||
});
|
||||
|
||||
// Chain multiple settings
|
||||
tap.tags('integration')
|
||||
.priority('low')
|
||||
.retry(3)
|
||||
.timeout(5000)
|
||||
.test('test with multiple settings', async (toolsArg) => {
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
// Test context access from fluent test
|
||||
tap.tags('unit')
|
||||
.test('verify fluent context', async (toolsArg) => {
|
||||
const fluentValue = toolsArg.context.get('fluentTest');
|
||||
expect(fluentValue).toEqual('works');
|
||||
});
|
||||
|
||||
// Test without tags - should show all tests run without filtering
|
||||
tap.test('regular test without tags', async (toolsArg) => {
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.start();
|
28 test/tapbundle/test.node.ts Normal file
@@ -0,0 +1,28 @@
|
||||
import { tap, expect } from '../../ts_tapbundle/index.js';
|
||||
|
||||
import { tapNodeTools } from '../../ts_tapbundle_node/index.js';
|
||||
|
||||
tap.test('should execute a command', async () => {
|
||||
const result = await tapNodeTools.runCommand('ls -la');
|
||||
expect(result.exitCode).toEqual(0);
|
||||
});
|
||||
|
||||
tap.test('should create a https cert', async () => {
|
||||
const { key, cert } = await tapNodeTools.createHttpsCert('localhost');
|
||||
console.log(key);
|
||||
console.log(cert);
|
||||
expect(key).toInclude('-----BEGIN RSA PRIVATE KEY-----');
|
||||
expect(cert).toInclude('-----BEGIN CERTIFICATE-----');
|
||||
});
|
||||
|
||||
tap.test('should create a smartmongo instance', async () => {
|
||||
const smartmongo = await tapNodeTools.createSmartmongo();
|
||||
await smartmongo.stop();
|
||||
});
|
||||
|
||||
tap.test('should create a smarts3 instance', async () => {
|
||||
const smarts3 = await tapNodeTools.createSmarts3();
|
||||
await smarts3.stop();
|
||||
});
|
||||
|
||||
tap.start();
|
167 test/tapbundle/test.performance-metrics.ts Normal file
@@ -0,0 +1,167 @@
|
||||
import { tap, expect } from '../../ts_tapbundle/index.js';
|
||||
|
||||
// Create tests with known, distinct timing patterns to verify metrics calculation
|
||||
tap.test('metric test 1 - 10ms baseline', async (tools) => {
|
||||
await tools.delayFor(10);
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.test('metric test 2 - 20ms double baseline', async (tools) => {
|
||||
await tools.delayFor(20);
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.test('metric test 3 - 30ms triple baseline', async (tools) => {
|
||||
await tools.delayFor(30);
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.test('metric test 4 - 40ms quadruple baseline', async (tools) => {
|
||||
await tools.delayFor(40);
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.test('metric test 5 - 50ms quintuple baseline', async (tools) => {
|
||||
await tools.delayFor(50);
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
// Test that should be the slowest
|
||||
tap.test('metric test slowest - 200ms intentionally slow', async (tools) => {
|
||||
await tools.delayFor(200);
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
// Tests to verify edge cases in average calculation
|
||||
tap.test('metric test fast 1 - minimal work', async () => {
|
||||
expect(1).toEqual(1);
|
||||
});
|
||||
|
||||
tap.test('metric test fast 2 - minimal work', async () => {
|
||||
expect(2).toEqual(2);
|
||||
});
|
||||
|
||||
tap.test('metric test fast 3 - minimal work', async () => {
|
||||
expect(3).toEqual(3);
|
||||
});
|
||||
|
||||
// Test to verify that failed tests still contribute to timing metrics
|
||||
tap.test('metric test that fails - 60ms before failure', async (tools) => {
|
||||
await tools.delayFor(60);
|
||||
expect(true).toBeFalse(); // This will fail
|
||||
});
|
||||
|
||||
// Describe block with timing to test aggregation
|
||||
tap.describe('performance metrics in describe block', () => {
|
||||
tap.test('described test 1 - 15ms', async (tools) => {
|
||||
await tools.delayFor(15);
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.test('described test 2 - 25ms', async (tools) => {
|
||||
await tools.delayFor(25);
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.test('described test 3 - 35ms', async (tools) => {
|
||||
await tools.delayFor(35);
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
});
|
||||
|
||||
// Test timing with hooks
|
||||
tap.describe('performance with hooks', () => {
|
||||
let hookTime = 0;
|
||||
|
||||
tap.beforeEach(async () => {
|
||||
// Hooks shouldn't count toward test time
|
||||
await new Promise(resolve => setTimeout(resolve, 10));
|
||||
hookTime += 10;
|
||||
});
|
||||
|
||||
tap.afterEach(async () => {
|
||||
// Hooks shouldn't count toward test time
|
||||
await new Promise(resolve => setTimeout(resolve, 10));
|
||||
hookTime += 10;
|
||||
});
|
||||
|
||||
tap.test('test with hooks 1 - should only count test time', async (tools) => {
|
||||
await tools.delayFor(30);
|
||||
expect(true).toBeTrue();
|
||||
// Test time should be ~30ms, not 50ms (including hooks)
|
||||
});
|
||||
|
||||
tap.test('test with hooks 2 - should only count test time', async (tools) => {
|
||||
await tools.delayFor(40);
|
||||
expect(true).toBeTrue();
|
||||
// Test time should be ~40ms, not 60ms (including hooks)
|
||||
});
|
||||
});
|
||||
|
||||
// Parallel tests to verify timing is captured correctly
|
||||
tap.describe('parallel timing verification', () => {
|
||||
const startTimes: Map<string, number> = new Map();
|
||||
const endTimes: Map<string, number> = new Map();
|
||||
|
||||
tap.testParallel('parallel metric 1 - 80ms', async (tools) => {
|
||||
startTimes.set('p1', Date.now());
|
||||
await tools.delayFor(80);
|
||||
endTimes.set('p1', Date.now());
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.testParallel('parallel metric 2 - 90ms', async (tools) => {
|
||||
startTimes.set('p2', Date.now());
|
||||
await tools.delayFor(90);
|
||||
endTimes.set('p2', Date.now());
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.testParallel('parallel metric 3 - 100ms', async (tools) => {
|
||||
startTimes.set('p3', Date.now());
|
||||
await tools.delayFor(100);
|
||||
endTimes.set('p3', Date.now());
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.test('verify parallel execution', async () => {
|
||||
// This test runs after parallel tests
|
||||
// Verify they actually ran in parallel by checking overlapping times
|
||||
if (startTimes.size === 3 && endTimes.size === 3) {
|
||||
const p1Start = startTimes.get('p1')!;
|
||||
const p2Start = startTimes.get('p2')!;
|
||||
const p3Start = startTimes.get('p3')!;
|
||||
const p1End = endTimes.get('p1')!;
|
||||
const p2End = endTimes.get('p2')!;
|
||||
const p3End = endTimes.get('p3')!;
|
||||
|
||||
// Start times should be very close (within 50ms)
|
||||
expect(Math.abs(p1Start - p2Start)).toBeLessThan(50);
|
||||
expect(Math.abs(p2Start - p3Start)).toBeLessThan(50);
|
||||
|
||||
// There should be overlap in execution
|
||||
const p1Overlaps = p1Start < p2End && p1End > p2Start;
|
||||
const p2Overlaps = p2Start < p3End && p2End > p3Start;
|
||||
|
||||
expect(p1Overlaps || p2Overlaps).toBeTrue();
|
||||
} else {
|
||||
// Skip verification if parallel tests didn't run yet
|
||||
expect(true).toBeTrue();
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
// Test to ensure average calculation handles mixed timing correctly
|
||||
tap.test('final metrics test - 5ms minimal', async (tools) => {
|
||||
await tools.delayFor(5);
|
||||
expect(true).toBeTrue();
|
||||
|
||||
console.log('\n📊 Expected Performance Metrics Summary:');
|
||||
console.log('- Tests include a mix of durations from <1ms to 200ms');
|
||||
console.log('- Slowest test should be "metric test slowest" at ~200ms');
|
||||
console.log('- Average should be calculated from individual test times');
|
||||
console.log('- Failed test should still contribute its 60ms to timing');
|
||||
console.log('- Parallel tests should show their individual times (80ms, 90ms, 100ms)');
|
||||
});
|
||||
|
||||
tap.start();
|
52
test/tapbundle/test.snapshot.ts
Normal file
@@ -0,0 +1,52 @@
|
||||
import { tap, expect } from '../../ts_tapbundle/index.js';
|
||||
|
||||
// Test basic snapshot functionality
|
||||
tap.tags('unit', 'snapshot')
|
||||
.test('should match string snapshot', async (toolsArg) => {
|
||||
const testString = 'Hello, World!';
|
||||
await toolsArg.matchSnapshot(testString);
|
||||
});
|
||||
|
||||
// Test object snapshot
|
||||
tap.tags('unit', 'snapshot')
|
||||
.test('should match object snapshot', async (toolsArg) => {
|
||||
const testObject = {
|
||||
name: 'Test User',
|
||||
age: 30,
|
||||
hobbies: ['reading', 'coding', 'gaming'],
|
||||
metadata: {
|
||||
created: '2024-01-01',
|
||||
updated: '2024-01-15'
|
||||
}
|
||||
};
|
||||
await toolsArg.matchSnapshot(testObject);
|
||||
});
|
||||
|
||||
// Test named snapshots
|
||||
tap.tags('unit', 'snapshot')
|
||||
.test('should handle multiple named snapshots', async (toolsArg) => {
|
||||
const config1 = { version: '1.0.0', features: ['a', 'b'] };
|
||||
const config2 = { version: '2.0.0', features: ['a', 'b', 'c'] };
|
||||
|
||||
await toolsArg.matchSnapshot(config1, 'config_v1');
|
||||
await toolsArg.matchSnapshot(config2, 'config_v2');
|
||||
});
|
||||
|
||||
// Test dynamic content with snapshot
|
||||
tap.tags('unit', 'snapshot')
|
||||
.test('should handle template snapshot', async (toolsArg) => {
|
||||
const template = `
|
||||
<div class="container">
|
||||
<h1>Welcome</h1>
|
||||
<p>This is a test template</p>
|
||||
<ul>
|
||||
<li>Item 1</li>
|
||||
<li>Item 2</li>
|
||||
</ul>
|
||||
</div>
|
||||
`.trim();
|
||||
|
||||
await toolsArg.matchSnapshot(template, 'html_template');
|
||||
});
|
||||
|
||||
tap.start();
|
49
test/tapbundle/test.tags-context.ts
Normal file
@@ -0,0 +1,49 @@
|
||||
import { tap, expect } from '../../ts_tapbundle/index.js';
|
||||
|
||||
// First test sets some data and has tags
|
||||
tap.tags('unit', 'context')
|
||||
.priority('high')
|
||||
.test('test with tags and context setting', async (toolsArg) => {
|
||||
// Set some data in context
|
||||
toolsArg.context.set('testData', { value: 42 });
|
||||
toolsArg.context.set('users', ['alice', 'bob']);
|
||||
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
// Second test reads the context data
|
||||
tap.tags('unit', 'context')
|
||||
.test('test reading context', async (toolsArg) => {
|
||||
// Read data from context
|
||||
const testData = toolsArg.context.get('testData');
|
||||
const users = toolsArg.context.get('users');
|
||||
|
||||
expect(testData).toEqual({ value: 42 });
|
||||
expect(users).toContain('alice');
|
||||
expect(users).toContain('bob');
|
||||
});
|
||||
|
||||
// Test without tags - should be skipped when filtering by tags
|
||||
tap.test('test without tags', async (toolsArg) => {
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
// Test with different tags
|
||||
tap.tags('integration')
|
||||
.priority('low')
|
||||
.test('integration test', async (toolsArg) => {
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
// Test context cleanup
|
||||
tap.tags('unit')
|
||||
.test('test context operations', async (toolsArg) => {
|
||||
// Set and delete
|
||||
toolsArg.context.set('temp', 'value');
|
||||
expect(toolsArg.context.get('temp')).toEqual('value');
|
||||
|
||||
toolsArg.context.delete('temp');
|
||||
expect(toolsArg.context.get('temp')).toBeUndefined();
|
||||
});
|
||||
|
||||
tap.start();
|
5
test/tapbundle/test.tapwrap.ts
Normal file
@@ -0,0 +1,5 @@
|
||||
import { tap, expect, TapWrap } from '../../ts_tapbundle/index.js';
|
||||
|
||||
tap.test('should run a test', async () => {});
|
||||
|
||||
tap.start();
|
214
test/tapbundle/test.timing-edge-cases.ts
Normal file
@@ -0,0 +1,214 @@
|
||||
import { tap, expect } from '../../ts_tapbundle/index.js';
|
||||
|
||||
tap.test('ultra-fast test - should capture sub-millisecond timing', async () => {
|
||||
// This test does almost nothing, should complete in < 1ms
|
||||
const x = 1 + 1;
|
||||
expect(x).toEqual(2);
|
||||
});
|
||||
|
||||
tap.test('test with exact 1ms delay', async (tools) => {
|
||||
const start = Date.now();
|
||||
await tools.delayFor(1);
|
||||
const elapsed = Date.now() - start;
|
||||
// Should be at least 1ms but could be more due to event loop
|
||||
expect(elapsed).toBeGreaterThanOrEqual(1);
|
||||
});
|
||||
|
||||
tap.test('test with 10ms delay', async (tools) => {
|
||||
await tools.delayFor(10);
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.test('test with 100ms delay', async (tools) => {
|
||||
await tools.delayFor(100);
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.test('test with 250ms delay', async (tools) => {
|
||||
await tools.delayFor(250);
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.test('test with 500ms delay', async (tools) => {
|
||||
await tools.delayFor(500);
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.test('test with variable processing time', async (tools) => {
|
||||
// Simulate variable processing
|
||||
const iterations = 1000000;
|
||||
let sum = 0;
|
||||
for (let i = 0; i < iterations; i++) {
|
||||
sum += Math.sqrt(i);
|
||||
}
|
||||
expect(sum).toBeGreaterThan(0);
|
||||
|
||||
// Add a small delay to ensure measurable time
|
||||
await tools.delayFor(5);
|
||||
});
|
||||
|
||||
tap.test('test with multiple async operations', async () => {
|
||||
// Multiple promises in parallel
|
||||
const results = await Promise.all([
|
||||
new Promise(resolve => setTimeout(() => resolve(1), 10)),
|
||||
new Promise(resolve => setTimeout(() => resolve(2), 20)),
|
||||
new Promise(resolve => setTimeout(() => resolve(3), 30))
|
||||
]);
|
||||
|
||||
expect(results).toEqual([1, 2, 3]);
|
||||
// This should take at least 30ms (the longest delay)
|
||||
});
|
||||
|
||||
tap.test('test with synchronous heavy computation', async () => {
|
||||
// Heavy synchronous computation
|
||||
const fibonacci = (n: number): number => {
|
||||
if (n <= 1) return n;
|
||||
return fibonacci(n - 1) + fibonacci(n - 2);
|
||||
};
|
||||
|
||||
// Calculate fibonacci(30) - should take measurable time
|
||||
const result = fibonacci(30);
|
||||
expect(result).toEqual(832040);
|
||||
});
|
||||
|
||||
// Test with retry to see if timing accumulates correctly
|
||||
tap.retry(2).test('test with retry - fails first then passes', async (tools) => {
|
||||
// Get or initialize retry count
|
||||
const retryCount = tools.context.get('retryCount') || 0;
|
||||
tools.context.set('retryCount', retryCount + 1);
|
||||
|
||||
await tools.delayFor(50);
|
||||
|
||||
if (retryCount === 0) {
|
||||
throw new Error('First attempt fails');
|
||||
}
|
||||
|
||||
expect(retryCount).toEqual(1);
|
||||
});
|
||||
|
||||
// Test timeout handling
|
||||
tap.timeout(100).test('test with timeout - should complete just in time', async (tools) => {
|
||||
await tools.delayFor(80); // Just under the timeout
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
// Skip test - should show 0ms
|
||||
tap.skip.test('skipped test - should report 0ms', async (tools) => {
|
||||
await tools.delayFor(1000); // This won't execute
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
// Todo test - should show 0ms
|
||||
tap.todo.test('todo test - should report 0ms', async (tools) => {
|
||||
await tools.delayFor(1000); // This won't execute
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
// Test with skip inside
|
||||
tap.test('test that skips conditionally - should show time until skip', async (tools) => {
|
||||
await tools.delayFor(25);
|
||||
|
||||
const shouldSkip = true;
|
||||
if (shouldSkip) {
|
||||
tools.skip('Skipping after 25ms');
|
||||
}
|
||||
|
||||
// This won't execute
|
||||
await tools.delayFor(1000);
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
// Test with very precise timing
|
||||
tap.test('test with precise timing measurements', async (tools) => {
|
||||
const measurements: number[] = [];
|
||||
|
||||
for (let i = 0; i < 5; i++) {
|
||||
const start = process.hrtime.bigint();
|
||||
await tools.delayFor(10);
|
||||
const end = process.hrtime.bigint();
|
||||
const durationMs = Number(end - start) / 1_000_000;
|
||||
measurements.push(durationMs);
|
||||
}
|
||||
|
||||
// All measurements should be at least 10ms
|
||||
measurements.forEach(m => {
|
||||
expect(m).toBeGreaterThanOrEqual(10);
|
||||
});
|
||||
|
||||
// But not too much more (accounting for timer precision)
|
||||
measurements.forEach(m => {
|
||||
expect(m).toBeLessThan(20);
|
||||
});
|
||||
});
|
||||
|
||||
// Test that intentionally has 0 actual work
|
||||
tap.test('empty test - absolute minimum execution time', async () => {
|
||||
// Literally nothing
|
||||
});
|
||||
|
||||
// Test with promise that resolves immediately
|
||||
tap.test('test with immediate promise resolution', async () => {
|
||||
await Promise.resolve();
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
// Test with microtask queue
|
||||
tap.test('test with microtask queue processing', async () => {
|
||||
let value = 0;
|
||||
|
||||
await Promise.resolve().then(() => {
|
||||
value = 1;
|
||||
return Promise.resolve();
|
||||
}).then(() => {
|
||||
value = 2;
|
||||
return Promise.resolve();
|
||||
}).then(() => {
|
||||
value = 3;
|
||||
});
|
||||
|
||||
expect(value).toEqual(3);
|
||||
});
|
||||
|
||||
// Test to verify timing accumulation in describe blocks
|
||||
tap.describe('timing in describe blocks', () => {
|
||||
let startTime: number;
|
||||
|
||||
tap.beforeEach(async () => {
|
||||
startTime = Date.now();
|
||||
await new Promise(resolve => setTimeout(resolve, 5));
|
||||
});
|
||||
|
||||
tap.afterEach(async () => {
|
||||
await new Promise(resolve => setTimeout(resolve, 5));
|
||||
});
|
||||
|
||||
tap.test('first test in describe', async (tools) => {
|
||||
await tools.delayFor(10);
|
||||
const elapsed = Date.now() - startTime;
|
||||
expect(elapsed).toBeGreaterThanOrEqual(10);
|
||||
});
|
||||
|
||||
tap.test('second test in describe', async (tools) => {
|
||||
await tools.delayFor(20);
|
||||
const elapsed = Date.now() - startTime;
|
||||
expect(elapsed).toBeGreaterThanOrEqual(20);
|
||||
});
|
||||
});
|
||||
|
||||
// Parallel tests to see timing differences
|
||||
tap.testParallel('parallel test 1 - 100ms', async (tools) => {
|
||||
await tools.delayFor(100);
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.testParallel('parallel test 2 - 50ms', async (tools) => {
|
||||
await tools.delayFor(50);
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.testParallel('parallel test 3 - 150ms', async (tools) => {
|
||||
await tools.delayFor(150);
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.start();
|
204
test/tapbundle/test.timing-protocol.ts
Normal file
@@ -0,0 +1,204 @@
|
||||
import { tap, expect } from '../../ts_tapbundle/index.js';
|
||||
import { ProtocolParser, ProtocolEmitter } from '../../ts_tapbundle_protocol/index.js';
|
||||
|
||||
// Test the protocol's ability to emit and parse timing metadata
|
||||
tap.test('protocol should correctly emit timing metadata', async () => {
|
||||
const emitter = new ProtocolEmitter();
|
||||
|
||||
const testResult = {
|
||||
ok: true,
|
||||
testNumber: 1,
|
||||
description: 'test with timing',
|
||||
metadata: {
|
||||
time: 123
|
||||
}
|
||||
};
|
||||
|
||||
const lines = emitter.emitTest(testResult);
|
||||
|
||||
// Should have inline timing metadata
|
||||
expect(lines.length).toEqual(1);
|
||||
expect(lines[0]).toInclude('⟦TSTEST:time:123⟧');
|
||||
});
|
||||
|
||||
tap.test('protocol should correctly parse timing metadata', async () => {
|
||||
const parser = new ProtocolParser();
|
||||
|
||||
const line = 'ok 1 - test with timing ⟦TSTEST:time:456⟧';
|
||||
const messages = parser.parseLine(line);
|
||||
|
||||
expect(messages.length).toEqual(1);
|
||||
expect(messages[0].type).toEqual('test');
|
||||
|
||||
const content = messages[0].content as any;
|
||||
expect(content.metadata).toBeDefined();
|
||||
expect(content.metadata.time).toEqual(456);
|
||||
});
|
||||
|
||||
tap.test('protocol should handle 0ms timing', async () => {
|
||||
const parser = new ProtocolParser();
|
||||
|
||||
const line = 'ok 1 - ultra fast test ⟦TSTEST:time:0⟧';
|
||||
const messages = parser.parseLine(line);
|
||||
|
||||
const content = messages[0].content as any;
|
||||
expect(content.metadata.time).toEqual(0);
|
||||
});
|
||||
|
||||
tap.test('protocol should handle large timing values', async () => {
|
||||
const parser = new ProtocolParser();
|
||||
|
||||
const line = 'ok 1 - slow test ⟦TSTEST:time:999999⟧';
|
||||
const messages = parser.parseLine(line);
|
||||
|
||||
const content = messages[0].content as any;
|
||||
expect(content.metadata.time).toEqual(999999);
|
||||
});
|
||||
|
||||
tap.test('protocol should handle timing with other metadata', async () => {
|
||||
const emitter = new ProtocolEmitter();
|
||||
|
||||
const testResult = {
|
||||
ok: true,
|
||||
testNumber: 1,
|
||||
description: 'complex test',
|
||||
metadata: {
|
||||
time: 789,
|
||||
file: 'test.ts',
|
||||
tags: ['slow', 'integration']
|
||||
}
|
||||
};
|
||||
|
||||
const lines = emitter.emitTest(testResult);
|
||||
|
||||
// Should use block metadata format for complex metadata
|
||||
expect(lines.length).toBeGreaterThan(1);
|
||||
expect(lines[1]).toInclude('META:');
|
||||
expect(lines[1]).toInclude('"time":789');
|
||||
});
|
||||
|
||||
tap.test('protocol should parse timing from block metadata', async () => {
|
||||
const parser = new ProtocolParser();
|
||||
|
||||
const lines = [
|
||||
'ok 1 - complex test',
|
||||
'⟦TSTEST:META:{"time":321,"file":"test.ts"}⟧'
|
||||
];
|
||||
|
||||
let testResult: any;
|
||||
|
||||
for (const line of lines) {
|
||||
const messages = parser.parseLine(line);
|
||||
if (messages.length > 0 && messages[0].type === 'test') {
|
||||
testResult = messages[0].content;
|
||||
}
|
||||
}
|
||||
|
||||
expect(testResult).toBeDefined();
|
||||
expect(testResult.metadata).toBeUndefined(); // Metadata comes separately in block format
|
||||
});
|
||||
|
||||
tap.test('timing for skipped tests should be 0 or missing', async () => {
|
||||
const emitter = new ProtocolEmitter();
|
||||
|
||||
const testResult = {
|
||||
ok: true,
|
||||
testNumber: 1,
|
||||
description: 'skipped test',
|
||||
directive: {
|
||||
type: 'skip' as const,
|
||||
reason: 'Not ready'
|
||||
},
|
||||
metadata: {
|
||||
time: 0
|
||||
}
|
||||
};
|
||||
|
||||
const lines = emitter.emitTest(testResult);
|
||||
expect(lines[0]).toInclude('# SKIP');
|
||||
|
||||
// If time is 0, it might be included or omitted
|
||||
if (lines[0].includes('⟦TSTEST:')) {
|
||||
expect(lines[0]).toInclude('time:0');
|
||||
}
|
||||
});
|
||||
|
||||
tap.test('protocol should handle fractional milliseconds', async () => {
|
||||
const emitter = new ProtocolEmitter();
|
||||
|
||||
// Even though we use integers, test that protocol handles them correctly
|
||||
const testResult = {
|
||||
ok: true,
|
||||
testNumber: 1,
|
||||
description: 'precise test',
|
||||
metadata: {
|
||||
time: 123 // Protocol uses integers for milliseconds
|
||||
}
|
||||
};
|
||||
|
||||
const lines = emitter.emitTest(testResult);
|
||||
expect(lines[0]).toInclude('time:123');
|
||||
});
|
||||
|
||||
tap.test('protocol should handle timing in retry scenarios', async () => {
|
||||
const emitter = new ProtocolEmitter();
|
||||
|
||||
const testResult = {
|
||||
ok: true,
|
||||
testNumber: 1,
|
||||
description: 'retry test',
|
||||
metadata: {
|
||||
time: 200,
|
||||
retry: 2
|
||||
}
|
||||
};
|
||||
|
||||
const lines = emitter.emitTest(testResult);
|
||||
// Should include both time and retry
|
||||
expect(lines[0]).toMatch(/time:200.*retry:2|retry:2.*time:200/);
|
||||
});
|
||||
|
||||
// Test actual timing capture
|
||||
tap.test('HrtMeasurement should capture accurate timing', async (tools) => {
|
||||
// Import HrtMeasurement
|
||||
const { HrtMeasurement } = await import('@push.rocks/smarttime');
|
||||
|
||||
const measurement = new HrtMeasurement();
|
||||
measurement.start();
|
||||
|
||||
await tools.delayFor(50);
|
||||
|
||||
measurement.stop();
|
||||
|
||||
// Should be at least 50ms
|
||||
expect(measurement.milliSeconds).toBeGreaterThanOrEqual(50);
|
||||
// But not too much more (allow for some overhead)
|
||||
expect(measurement.milliSeconds).toBeLessThan(100);
|
||||
});
|
||||
|
||||
tap.test('multiple timing measurements should be independent', async (tools) => {
|
||||
const { HrtMeasurement } = await import('@push.rocks/smarttime');
|
||||
|
||||
const measurement1 = new HrtMeasurement();
|
||||
const measurement2 = new HrtMeasurement();
|
||||
|
||||
measurement1.start();
|
||||
await tools.delayFor(25);
|
||||
|
||||
measurement2.start();
|
||||
await tools.delayFor(25);
|
||||
|
||||
measurement1.stop();
|
||||
await tools.delayFor(25);
|
||||
measurement2.stop();
|
||||
|
||||
// measurement1 should be ~50ms (25ms + 25ms)
|
||||
expect(measurement1.milliSeconds).toBeGreaterThanOrEqual(50);
|
||||
expect(measurement1.milliSeconds).toBeLessThan(70);
|
||||
|
||||
// measurement2 should be ~50ms (25ms + 25ms)
|
||||
expect(measurement2.milliSeconds).toBeGreaterThanOrEqual(50);
|
||||
expect(measurement2.milliSeconds).toBeLessThan(70);
|
||||
});
|
||||
|
||||
tap.start();
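The tests above exercise the emitter and the parser separately; the following is a minimal, illustrative round-trip sketch that uses only the ProtocolEmitter/ProtocolParser calls shown in this file (the description and the 77 ms value are invented for the example):

// Illustrative round-trip: emit a result with inline timing, then parse it back.
const emitter = new ProtocolEmitter();
const parser = new ProtocolParser();

const emitted = emitter.emitTest({
  ok: true,
  testNumber: 1,
  description: 'round-trip test',
  metadata: { time: 77 },
});

// Simple metadata is emitted inline, so a single TAP line comes back out.
const messages = parser.parseLine(emitted[0]);
console.log((messages[0].content as any).metadata.time); // 77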
|
85
test/tapbundle/test.toolsarg.ts
Normal file
@@ -0,0 +1,85 @@
|
||||
import { tap, expect } from '../../ts_tapbundle/index.js';
|
||||
|
||||
// Test skip functionality
|
||||
tap.test('should skip a test with skip()', async (toolsArg) => {
|
||||
toolsArg.skip('This test is skipped');
|
||||
// This code should not run
|
||||
expect(false).toBeTrue();
|
||||
});
|
||||
|
||||
tap.test('should conditionally skip with skipIf()', async (toolsArg) => {
|
||||
const shouldSkip = true;
|
||||
toolsArg.skipIf(shouldSkip, 'Condition met, skipping');
|
||||
// This code should not run
|
||||
expect(false).toBeTrue();
|
||||
});
|
||||
|
||||
tap.test('should not skip when skipIf condition is false', async (toolsArg) => {
|
||||
const shouldSkip = false;
|
||||
toolsArg.skipIf(shouldSkip, 'Should not skip');
|
||||
// This code should run
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
// Test todo functionality
|
||||
tap.test('should mark test as todo', async (toolsArg) => {
|
||||
toolsArg.todo('Not implemented yet');
|
||||
// Test code that would be implemented later
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
// Test timeout functionality
|
||||
tap.test('should set custom timeout', async (toolsArg) => {
|
||||
toolsArg.timeout(5000);
|
||||
// Simulate a task that takes 100ms
|
||||
await toolsArg.delayFor(100);
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
// This test is expected to fail due to timeout
|
||||
tap.test('should timeout when exceeding limit', async (toolsArg) => {
|
||||
toolsArg.timeout(100);
|
||||
// This test will timeout and be marked as failed by the test runner
|
||||
await toolsArg.delayFor(2000);
|
||||
// This line should not be reached due to timeout
|
||||
});
|
||||
|
||||
tap.test('timeout should work properly', async (toolsArg) => {
|
||||
toolsArg.timeout(200);
|
||||
// This test should complete successfully within the timeout
|
||||
await toolsArg.delayFor(50);
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
// Test retry functionality
|
||||
tap.retry(3)
|
||||
.test('should retry on failure', async (toolsArg) => {
|
||||
// Use retry count to determine success
|
||||
const currentRetry = toolsArg.retryCount;
|
||||
|
||||
// Fail on first two attempts (0 and 1), succeed on third (2)
|
||||
if (currentRetry < 2) {
|
||||
throw new Error(`Attempt ${currentRetry + 1} failed`);
|
||||
}
|
||||
|
||||
expect(currentRetry).toEqual(2);
|
||||
});
|
||||
|
||||
tap.test('should expose retry count', async (toolsArg) => {
|
||||
toolsArg.retry(2);
|
||||
|
||||
// The retry count should be available
|
||||
expect(toolsArg.retryCount).toBeLessThanOrEqual(2);
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
// Test allowFailure
|
||||
tap.test('should allow failure', async (toolsArg) => {
|
||||
// Just verify that allowFailure() can be called without throwing
|
||||
toolsArg.allowFailure();
|
||||
expect(true).toBeTrue();
|
||||
// Note: In a real implementation, we would see "please note: failure allowed!"
|
||||
// in the output when this test fails, but the test itself will still be marked as failed
|
||||
});
|
||||
|
||||
tap.start();
|
49
test/tapbundle/test.ts
Normal file
@@ -0,0 +1,49 @@
|
||||
import { tap, expect } from '../../ts_tapbundle/index.js';
|
||||
|
||||
tap.preTask('hi there', async () => {
|
||||
console.log('this is a pretask');
|
||||
});
|
||||
|
||||
const test1 = tap.test('my first test -> expect true to be true', async () => {
|
||||
return expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
const test2 = tap.test('my second test', async (tools) => {
|
||||
await tools.delayFor(1000);
|
||||
});
|
||||
|
||||
const test3 = tap.test(
|
||||
'my third test -> test2 should take longer than test1 and endure at least 1000ms',
|
||||
async () => {
|
||||
expect(
|
||||
(await test1.testPromise).hrtMeasurement.milliSeconds <
|
||||
(await test2.testPromise).hrtMeasurement.milliSeconds,
|
||||
).toBeTrue();
|
||||
expect((await test2.testPromise).hrtMeasurement.milliSeconds >= 1000).toBeTrue();
|
||||
},
|
||||
);
|
||||
|
||||
const test4 = tap.test('my 4th test -> should fail', async (tools) => {
|
||||
tools.allowFailure();
|
||||
expect(false).toBeFalse();
|
||||
return 'hello';
|
||||
});
|
||||
|
||||
const test5 = tap.test('my 5th test -> should pass in about 500ms', async (tools) => {
|
||||
const test4Result = await test4.testResultPromise;
|
||||
tools.timeout(1000);
|
||||
await tools.delayFor(500);
|
||||
});
|
||||
|
||||
const test6 = tap.skip.test('my 6th test -> should fail after 1000ms', async (tools) => {
|
||||
tools.allowFailure();
|
||||
tools.timeout(1000);
|
||||
await tools.delayFor(2000);
|
||||
});
|
||||
|
||||
const test7 = tap.test('my 7th test -> should print a colored string', async (tools) => {
|
||||
const cs = await tools.coloredString('hello', 'red', 'cyan');
|
||||
console.log(cs);
|
||||
});
|
||||
|
||||
tap.start();
|
111
test/test.migration.node.ts
Normal file
@@ -0,0 +1,111 @@
|
||||
import { expect, tap } from '../ts_tapbundle/index.js';
|
||||
import { Migration } from '../ts/tstest.classes.migration.js';
|
||||
import * as plugins from '../ts/tstest.plugins.js';
|
||||
import * as paths from '../ts/tstest.paths.js';
|
||||
|
||||
tap.test('Migration - can initialize', async () => {
|
||||
const migration = new Migration({
|
||||
baseDir: process.cwd(),
|
||||
dryRun: true,
|
||||
});
|
||||
|
||||
expect(migration).toBeInstanceOf(Migration);
|
||||
});
|
||||
|
||||
tap.test('Migration - findLegacyFiles returns empty for no legacy files', async () => {
|
||||
const migration = new Migration({
|
||||
baseDir: process.cwd(),
|
||||
pattern: 'test/test.migration.node.ts', // This file itself, not legacy
|
||||
dryRun: true,
|
||||
});
|
||||
|
||||
const legacyFiles = await migration.findLegacyFiles();
|
||||
expect(legacyFiles).toEqual([]);
|
||||
});
|
||||
|
||||
tap.test('Migration - generateReport works', async () => {
|
||||
const migration = new Migration({
|
||||
baseDir: process.cwd(),
|
||||
dryRun: true,
|
||||
});
|
||||
|
||||
const report = await migration.generateReport();
|
||||
expect(report).toBeTypeOf('string');
|
||||
expect(report).toContain('Test File Migration Report');
|
||||
});
|
||||
|
||||
tap.test('Migration - detects legacy files when they exist', async () => {
|
||||
// Create a temporary legacy test file
|
||||
const tempDir = plugins.path.join(process.cwd(), '.nogit', 'test_migration');
|
||||
await plugins.smartfile.fs.ensureEmptyDir(tempDir);
|
||||
|
||||
const legacyFile = plugins.path.join(tempDir, 'test.browser.ts');
|
||||
await plugins.smartfile.memory.toFs('// Legacy test file\nexport default Promise.resolve();', legacyFile);
|
||||
|
||||
const migration = new Migration({
|
||||
baseDir: tempDir,
|
||||
pattern: '**/*.ts',
|
||||
dryRun: true,
|
||||
});
|
||||
|
||||
const legacyFiles = await migration.findLegacyFiles();
|
||||
expect(legacyFiles.length).toEqual(1);
|
||||
expect(legacyFiles[0]).toContain('test.browser.ts');
|
||||
|
||||
// Clean up
|
||||
await plugins.smartfile.fs.removeSync(tempDir);
|
||||
});
|
||||
|
||||
tap.test('Migration - detects both legacy pattern', async () => {
|
||||
// Create temporary legacy files
|
||||
const tempDir = plugins.path.join(process.cwd(), '.nogit', 'test_migration_both');
|
||||
await plugins.smartfile.fs.ensureEmptyDir(tempDir);
|
||||
|
||||
const browserFile = plugins.path.join(tempDir, 'test.browser.ts');
|
||||
const bothFile = plugins.path.join(tempDir, 'test.both.ts');
|
||||
await plugins.smartfile.memory.toFs('// Browser test\nexport default Promise.resolve();', browserFile);
|
||||
await plugins.smartfile.memory.toFs('// Both test\nexport default Promise.resolve();', bothFile);
|
||||
|
||||
const migration = new Migration({
|
||||
baseDir: tempDir,
|
||||
pattern: '**/*.ts',
|
||||
dryRun: true,
|
||||
});
|
||||
|
||||
const legacyFiles = await migration.findLegacyFiles();
|
||||
expect(legacyFiles.length).toEqual(2);
|
||||
|
||||
// Clean up
|
||||
await plugins.smartfile.fs.removeSync(tempDir);
|
||||
});
|
||||
|
||||
tap.test('Migration - dry run does not modify files', async () => {
|
||||
// Create a temporary legacy test file
|
||||
const tempDir = plugins.path.join(process.cwd(), '.nogit', 'test_migration_dryrun');
|
||||
await plugins.smartfile.fs.ensureEmptyDir(tempDir);
|
||||
|
||||
const legacyFile = plugins.path.join(tempDir, 'test.browser.ts');
|
||||
await plugins.smartfile.memory.toFs('// Legacy test file\nexport default Promise.resolve();', legacyFile);
|
||||
|
||||
const migration = new Migration({
|
||||
baseDir: tempDir,
|
||||
pattern: '**/*.ts',
|
||||
dryRun: true,
|
||||
verbose: false,
|
||||
});
|
||||
|
||||
const summary = await migration.run();
|
||||
|
||||
expect(summary.dryRun).toEqual(true);
|
||||
expect(summary.totalLegacyFiles).toEqual(1);
|
||||
expect(summary.migratedCount).toEqual(1); // Dry run still counts as "would migrate"
|
||||
|
||||
// Verify original file still exists
|
||||
const fileExists = await plugins.smartfile.fs.fileExists(legacyFile);
|
||||
expect(fileExists).toEqual(true);
|
||||
|
||||
// Clean up
|
||||
await plugins.smartfile.fs.removeSync(tempDir);
|
||||
});
|
||||
|
||||
export default tap.start();
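A short, illustrative sketch of driving the Migration class programmatically, limited to the options and methods exercised by the tests above (the pattern shown is the constructor default; nothing else is assumed):

// Illustrative only: generate a report, then do a dry run that would rename legacy files.
const migration = new Migration({
  baseDir: process.cwd(),
  pattern: '**/test*.ts',
  dryRun: true,
});

console.log(await migration.generateReport()); // contains 'Test File Migration Report'

const summary = await migration.run();
console.log(summary.totalLegacyFiles, summary.migratedCount, summary.dryRun);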
|
167
test/test.runtime.parser.node.ts
Normal file
@@ -0,0 +1,167 @@
|
||||
import { expect, tap } from '../ts_tapbundle/index.js';
|
||||
import { parseTestFilename, isLegacyFilename, getLegacyMigrationTarget } from '../ts/tstest.classes.runtime.parser.js';
|
||||
|
||||
tap.test('parseTestFilename - single runtime', async () => {
|
||||
const parsed = parseTestFilename('test.node.ts');
|
||||
expect(parsed.baseName).toEqual('test');
|
||||
expect(parsed.runtimes).toEqual(['node']);
|
||||
expect(parsed.modifiers).toEqual([]);
|
||||
expect(parsed.extension).toEqual('ts');
|
||||
expect(parsed.isLegacy).toEqual(false);
|
||||
});
|
||||
|
||||
tap.test('parseTestFilename - chromium runtime', async () => {
|
||||
const parsed = parseTestFilename('test.chromium.ts');
|
||||
expect(parsed.baseName).toEqual('test');
|
||||
expect(parsed.runtimes).toEqual(['chromium']);
|
||||
expect(parsed.modifiers).toEqual([]);
|
||||
expect(parsed.extension).toEqual('ts');
|
||||
expect(parsed.isLegacy).toEqual(false);
|
||||
});
|
||||
|
||||
tap.test('parseTestFilename - multiple runtimes', async () => {
|
||||
const parsed = parseTestFilename('test.node+chromium.ts');
|
||||
expect(parsed.baseName).toEqual('test');
|
||||
expect(parsed.runtimes).toEqual(['node', 'chromium']);
|
||||
expect(parsed.modifiers).toEqual([]);
|
||||
expect(parsed.extension).toEqual('ts');
|
||||
expect(parsed.isLegacy).toEqual(false);
|
||||
});
|
||||
|
||||
tap.test('parseTestFilename - deno+bun runtime', async () => {
|
||||
const parsed = parseTestFilename('test.deno+bun.ts');
|
||||
expect(parsed.baseName).toEqual('test');
|
||||
expect(parsed.runtimes).toEqual(['deno', 'bun']);
|
||||
expect(parsed.modifiers).toEqual([]);
|
||||
expect(parsed.extension).toEqual('ts');
|
||||
expect(parsed.isLegacy).toEqual(false);
|
||||
});
|
||||
|
||||
tap.test('parseTestFilename - with nonci modifier', async () => {
|
||||
const parsed = parseTestFilename('test.chromium.nonci.ts');
|
||||
expect(parsed.baseName).toEqual('test');
|
||||
expect(parsed.runtimes).toEqual(['chromium']);
|
||||
expect(parsed.modifiers).toEqual(['nonci']);
|
||||
expect(parsed.extension).toEqual('ts');
|
||||
expect(parsed.isLegacy).toEqual(false);
|
||||
});
|
||||
|
||||
tap.test('parseTestFilename - multi-runtime with nonci', async () => {
|
||||
const parsed = parseTestFilename('test.node+chromium.nonci.ts');
|
||||
expect(parsed.baseName).toEqual('test');
|
||||
expect(parsed.runtimes).toEqual(['node', 'chromium']);
|
||||
expect(parsed.modifiers).toEqual(['nonci']);
|
||||
expect(parsed.extension).toEqual('ts');
|
||||
expect(parsed.isLegacy).toEqual(false);
|
||||
});
|
||||
|
||||
tap.test('parseTestFilename - legacy browser', async () => {
|
||||
const parsed = parseTestFilename('test.browser.ts');
|
||||
expect(parsed.baseName).toEqual('test');
|
||||
expect(parsed.runtimes).toEqual(['chromium']);
|
||||
expect(parsed.modifiers).toEqual([]);
|
||||
expect(parsed.extension).toEqual('ts');
|
||||
expect(parsed.isLegacy).toEqual(true);
|
||||
});
|
||||
|
||||
tap.test('parseTestFilename - legacy both', async () => {
|
||||
const parsed = parseTestFilename('test.both.ts');
|
||||
expect(parsed.baseName).toEqual('test');
|
||||
expect(parsed.runtimes).toEqual(['node', 'chromium']);
|
||||
expect(parsed.modifiers).toEqual([]);
|
||||
expect(parsed.extension).toEqual('ts');
|
||||
expect(parsed.isLegacy).toEqual(true);
|
||||
});
|
||||
|
||||
tap.test('parseTestFilename - legacy browser with nonci', async () => {
|
||||
const parsed = parseTestFilename('test.browser.nonci.ts');
|
||||
expect(parsed.baseName).toEqual('test');
|
||||
expect(parsed.runtimes).toEqual(['chromium']);
|
||||
expect(parsed.modifiers).toEqual(['nonci']);
|
||||
expect(parsed.extension).toEqual('ts');
|
||||
expect(parsed.isLegacy).toEqual(true);
|
||||
});
|
||||
|
||||
tap.test('parseTestFilename - complex basename', async () => {
|
||||
const parsed = parseTestFilename('test.some.feature.node.ts');
|
||||
expect(parsed.baseName).toEqual('test.some.feature');
|
||||
expect(parsed.runtimes).toEqual(['node']);
|
||||
expect(parsed.modifiers).toEqual([]);
|
||||
expect(parsed.extension).toEqual('ts');
|
||||
expect(parsed.isLegacy).toEqual(false);
|
||||
});
|
||||
|
||||
tap.test('parseTestFilename - default to node when no runtime', async () => {
|
||||
const parsed = parseTestFilename('test.ts');
|
||||
expect(parsed.baseName).toEqual('test');
|
||||
expect(parsed.runtimes).toEqual(['node']);
|
||||
expect(parsed.modifiers).toEqual([]);
|
||||
expect(parsed.extension).toEqual('ts');
|
||||
expect(parsed.isLegacy).toEqual(false);
|
||||
});
|
||||
|
||||
tap.test('parseTestFilename - tsx extension', async () => {
|
||||
const parsed = parseTestFilename('test.chromium.tsx');
|
||||
expect(parsed.baseName).toEqual('test');
|
||||
expect(parsed.runtimes).toEqual(['chromium']);
|
||||
expect(parsed.modifiers).toEqual([]);
|
||||
expect(parsed.extension).toEqual('tsx');
|
||||
expect(parsed.isLegacy).toEqual(false);
|
||||
});
|
||||
|
||||
tap.test('parseTestFilename - deduplicates runtime tokens', async () => {
|
||||
const parsed = parseTestFilename('test.node+node.ts');
|
||||
expect(parsed.baseName).toEqual('test');
|
||||
expect(parsed.runtimes).toEqual(['node']);
|
||||
expect(parsed.modifiers).toEqual([]);
|
||||
expect(parsed.extension).toEqual('ts');
|
||||
expect(parsed.isLegacy).toEqual(false);
|
||||
});
|
||||
|
||||
tap.test('isLegacyFilename - detects browser', async () => {
|
||||
expect(isLegacyFilename('test.browser.ts')).toEqual(true);
|
||||
});
|
||||
|
||||
tap.test('isLegacyFilename - detects both', async () => {
|
||||
expect(isLegacyFilename('test.both.ts')).toEqual(true);
|
||||
});
|
||||
|
||||
tap.test('isLegacyFilename - rejects new naming', async () => {
|
||||
expect(isLegacyFilename('test.node.ts')).toEqual(false);
|
||||
expect(isLegacyFilename('test.chromium.ts')).toEqual(false);
|
||||
expect(isLegacyFilename('test.node+chromium.ts')).toEqual(false);
|
||||
});
|
||||
|
||||
tap.test('getLegacyMigrationTarget - browser to chromium', async () => {
|
||||
const target = getLegacyMigrationTarget('test.browser.ts');
|
||||
expect(target).toEqual('test.chromium.ts');
|
||||
});
|
||||
|
||||
tap.test('getLegacyMigrationTarget - both to node+chromium', async () => {
|
||||
const target = getLegacyMigrationTarget('test.both.ts');
|
||||
expect(target).toEqual('test.node+chromium.ts');
|
||||
});
|
||||
|
||||
tap.test('getLegacyMigrationTarget - browser with nonci', async () => {
|
||||
const target = getLegacyMigrationTarget('test.browser.nonci.ts');
|
||||
expect(target).toEqual('test.chromium.nonci.ts');
|
||||
});
|
||||
|
||||
tap.test('getLegacyMigrationTarget - both with nonci', async () => {
|
||||
const target = getLegacyMigrationTarget('test.both.nonci.ts');
|
||||
expect(target).toEqual('test.node+chromium.nonci.ts');
|
||||
});
|
||||
|
||||
tap.test('getLegacyMigrationTarget - returns null for non-legacy', async () => {
|
||||
const target = getLegacyMigrationTarget('test.node.ts');
|
||||
expect(target).toEqual(null);
|
||||
});
|
||||
|
||||
tap.test('parseTestFilename - handles full paths', async () => {
|
||||
const parsed = parseTestFilename('/path/to/test.node+chromium.ts');
|
||||
expect(parsed.baseName).toEqual('test');
|
||||
expect(parsed.runtimes).toEqual(['node', 'chromium']);
|
||||
expect(parsed.original).toEqual('test.node+chromium.ts');
|
||||
});
|
||||
|
||||
export default tap.start();
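A compact, illustrative sketch tying the three helpers together, using only the signatures exercised above (the filename is an example value):

// Illustrative only: decide how a test file should be handled based on its name.
const fileName = 'test.browser.nonci.ts';

if (isLegacyFilename(fileName)) {
  // Legacy names map onto the new runtime naming, e.g. browser → chromium.
  console.log(getLegacyMigrationTarget(fileName)); // 'test.chromium.nonci.ts'
}

const parsed = parseTestFilename(fileName);
console.log(parsed.runtimes);  // ['chromium']
console.log(parsed.modifiers); // ['nonci']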
|
@@ -1,6 +0,0 @@
|
||||
import { expect, tap } from '@pushrocks/tapbundle';
|
||||
import * as tstest from '../ts/index';
|
||||
|
||||
tap.test('prepare test', async () => {});
|
||||
|
||||
tap.start();
|
8
test/tstest/subdir/test.sub.ts
Normal file
@@ -0,0 +1,8 @@
|
||||
import { expect, tap } from '../../../ts_tapbundle/index.js';
|
||||
|
||||
tap.test('subdirectory test execution', async () => {
|
||||
console.log('This test verifies subdirectory test discovery works');
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.start();
|
16
test/tstest/test-parallel-demo.ts
Normal file
@@ -0,0 +1,16 @@
|
||||
import { tap, expect } from '../../ts_tapbundle/index.js';
|
||||
import * as fs from 'fs';
|
||||
|
||||
// Test to demonstrate parallel execution timing - run with glob pattern
|
||||
// This will give us a clear view of execution order with timestamps
|
||||
|
||||
const timestamp = () => new Date().toISOString().substr(11, 12);
|
||||
|
||||
tap.test('demo test in main file', async (toolsArg) => {
|
||||
console.log(`[${timestamp()}] Test parallel demo started`);
|
||||
await toolsArg.delayFor(1000);
|
||||
console.log(`[${timestamp()}] Test parallel demo completed`);
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.start();
|
11
test/tstest/test.api.para__2.ts
Normal file
@@ -0,0 +1,11 @@
|
||||
import { tap, expect } from '../../ts_tapbundle/index.js';
|
||||
|
||||
// This test runs in parallel group 2
|
||||
tap.test('api test in parallel group 2', async (toolsArg) => {
|
||||
console.log('API test started');
|
||||
await toolsArg.delayFor(800);
|
||||
console.log('API test completed');
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.start();
|
13
test/tstest/test.auth.para__1.ts
Normal file
@@ -0,0 +1,13 @@
|
||||
import { tap, expect } from '../../ts_tapbundle/index.js';
|
||||
|
||||
// This test runs in parallel group 1
|
||||
const timestamp = () => new Date().toISOString().substr(11, 12);
|
||||
|
||||
tap.test('auth test in parallel group 1', async (toolsArg) => {
|
||||
console.log(`[${timestamp()}] Auth test started`);
|
||||
await toolsArg.delayFor(1000);
|
||||
console.log(`[${timestamp()}] Auth test completed`);
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.start();
|
11
test/tstest/test.console.ts
Normal file
@@ -0,0 +1,11 @@
|
||||
import { expect, tap } from '../../ts_tapbundle/index.js';
|
||||
|
||||
tap.test('Test with console output', async () => {
|
||||
console.log('Log message 1 from test');
|
||||
console.log('Log message 2 from test');
|
||||
console.error('Error message from test');
|
||||
console.warn('Warning message from test');
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.start();
|
11
test/tstest/test.db.para__2.ts
Normal file
@@ -0,0 +1,11 @@
|
||||
import { tap, expect } from '../../ts_tapbundle/index.js';
|
||||
|
||||
// This test runs in parallel group 2
|
||||
tap.test('db test in parallel group 2', async (toolsArg) => {
|
||||
console.log('DB test started');
|
||||
await toolsArg.delayFor(800);
|
||||
console.log('DB test completed');
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.start();
|
13
test/tstest/test.fail.ts
Normal file
@@ -0,0 +1,13 @@
|
||||
import { expect, tap } from '../../ts_tapbundle/index.js';
|
||||
|
||||
tap.test('This test should fail', async () => {
|
||||
console.log('This test will fail on purpose');
|
||||
expect(true).toBeFalse();
|
||||
});
|
||||
|
||||
tap.test('This test should pass', async () => {
|
||||
console.log('This test will pass');
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.start();
|
23
test/tstest/test.failing-with-logs.ts
Normal file
@@ -0,0 +1,23 @@
|
||||
import { expect, tap } from '../../ts_tapbundle/index.js';
|
||||
|
||||
tap.test('Test that will fail with console logs', async () => {
|
||||
console.log('Starting the test...');
|
||||
console.log('Doing some setup work');
|
||||
console.log('About to check assertion');
|
||||
|
||||
const value = 42;
|
||||
console.log(`The value is: ${value}`);
|
||||
|
||||
// This will fail
|
||||
expect(value).toEqual(100);
|
||||
|
||||
console.log('This log will not be reached');
|
||||
});
|
||||
|
||||
tap.test('Test that passes', async () => {
|
||||
console.log('This test passes');
|
||||
console.log('So these logs should not show in default mode');
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.start();
|
8
test/tstest/test.glob.ts
Normal file
@@ -0,0 +1,8 @@
|
||||
import { expect, tap } from '../../ts_tapbundle/index.js';
|
||||
|
||||
tap.test('glob pattern test execution', async () => {
|
||||
console.log('This test verifies glob pattern execution works');
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.start();
|
10
test/tstest/test.serial1.ts
Normal file
@@ -0,0 +1,10 @@
|
||||
import { tap, expect } from '../../ts_tapbundle/index.js';
|
||||
|
||||
// This test runs serially (no para__ in filename)
|
||||
tap.test('serial test 1', async (toolsArg) => {
|
||||
await toolsArg.delayFor(500);
|
||||
console.log('Serial test 1 completed');
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.start();
|
10
test/tstest/test.serial2.ts
Normal file
@@ -0,0 +1,10 @@
|
||||
import { tap, expect } from '../../ts_tapbundle/index.js';
|
||||
|
||||
// This test runs serially (no para__ in filename)
|
||||
tap.test('serial test 2', async (toolsArg) => {
|
||||
await toolsArg.delayFor(500);
|
||||
console.log('Serial test 2 completed');
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.start();
|
8
test/tstest/test.single.ts
Normal file
@@ -0,0 +1,8 @@
|
||||
import { expect, tap } from '../../ts_tapbundle/index.js';
|
||||
|
||||
tap.test('single file test execution', async () => {
|
||||
console.log('This test verifies single file execution works');
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.start();
|
6
test/tstest/test.ts
Normal file
@@ -0,0 +1,6 @@
|
||||
import { expect, tap } from '../../ts_tapbundle/index.js';
|
||||
import * as tstest from '../../ts/index.js';
|
||||
|
||||
tap.test('prepare test', async () => {});
|
||||
|
||||
tap.start();
|
13
test/tstest/test.user.para__1.ts
Normal file
@@ -0,0 +1,13 @@
|
||||
import { tap, expect } from '../../ts_tapbundle/index.js';
|
||||
|
||||
// This test runs in parallel group 1
|
||||
const timestamp = () => new Date().toISOString().substr(11, 12);
|
||||
|
||||
tap.test('user test in parallel group 1', async (toolsArg) => {
|
||||
console.log(`[${timestamp()}] User test started`);
|
||||
await toolsArg.delayFor(1000);
|
||||
console.log(`[${timestamp()}] User test completed`);
|
||||
expect(true).toBeTrue();
|
||||
});
|
||||
|
||||
tap.start();
|
17
test/watch-demo/test.demo.ts
Normal file
@@ -0,0 +1,17 @@
|
||||
import { tap, expect } from '../../ts_tapbundle/index.js';
|
||||
|
||||
// This test file demonstrates watch mode
|
||||
// Try modifying this file while running: tstest test/watch-demo --watch
|
||||
|
||||
let counter = 1;
|
||||
|
||||
tap.test('demo test that changes', async () => {
|
||||
expect(counter).toEqual(1);
|
||||
console.log(`Test run at: ${new Date().toISOString()}`);
|
||||
});
|
||||
|
||||
tap.test('another test', async () => {
|
||||
expect('hello').toEqual('hello');
|
||||
});
|
||||
|
||||
tap.start();
|
8
ts/00_commitinfo_data.ts
Normal file
@@ -0,0 +1,8 @@
|
||||
/**
|
||||
* autocreated commitinfo by @push.rocks/commitinfo
|
||||
*/
|
||||
export const commitinfo = {
|
||||
name: '@git.zone/tstest',
|
||||
version: '2.4.3',
|
||||
description: 'a test utility to run tests that match test/**/*.ts'
|
||||
}
|
202
ts/index.ts
@@ -1,6 +1,202 @@
|
||||
import { TsTest } from './tstest.classes.tstest';
|
||||
import { TsTest } from './tstest.classes.tstest.js';
|
||||
import type { LogOptions } from './tstest.logging.js';
|
||||
|
||||
export enum TestExecutionMode {
|
||||
DIRECTORY = 'directory',
|
||||
FILE = 'file',
|
||||
GLOB = 'glob'
|
||||
}
|
||||
|
||||
export const runCli = async () => {
|
||||
const tsTestInstance = new TsTest(process.cwd(), process.argv[2]);
|
||||
await tsTestInstance.run();
|
||||
// Check if we're using global tstest in the tstest project itself
|
||||
try {
|
||||
const packageJsonPath = `${process.cwd()}/package.json`;
|
||||
const fs = await import('fs');
|
||||
if (fs.existsSync(packageJsonPath)) {
|
||||
const packageJson = JSON.parse(fs.readFileSync(packageJsonPath, 'utf8'));
|
||||
if (packageJson.name === '@git.zone/tstest') {
|
||||
// Check if we're running from a global installation
|
||||
const execPath = process.argv[1];
|
||||
// Debug: log the paths (uncomment for debugging)
|
||||
// console.log('DEBUG: Checking global tstest usage...');
|
||||
// console.log('execPath:', execPath);
|
||||
// console.log('cwd:', process.cwd());
|
||||
// console.log('process.argv:', process.argv);
|
||||
|
||||
// Check if this is running from global installation
|
||||
const isLocalCli = execPath.includes(process.cwd());
|
||||
const isGlobalPnpm = process.argv.some(arg => arg.includes('.pnpm') && !arg.includes(process.cwd()));
|
||||
const isGlobalNpm = process.argv.some(arg => arg.includes('npm/node_modules') && !arg.includes(process.cwd()));
|
||||
|
||||
if (!isLocalCli && (isGlobalPnpm || isGlobalNpm || !execPath.includes('node_modules'))) {
|
||||
console.error('\n⚠️ WARNING: You are using a globally installed tstest in the tstest project itself!');
|
||||
console.error(' This means you are NOT testing your local changes.');
|
||||
console.error(' Please use one of these commands instead:');
|
||||
console.error(' • node cli.js <test-path>');
|
||||
console.error(' • pnpm test <test-path>');
|
||||
console.error(' • ./cli.js <test-path> (if executable)\n');
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
// Silently ignore any errors in this check
|
||||
}
|
||||
|
||||
// Parse command line arguments
|
||||
const args = process.argv.slice(2);
|
||||
const logOptions: LogOptions = {};
|
||||
let testPath: string | null = null;
|
||||
let tags: string[] = [];
|
||||
let startFromFile: number | null = null;
|
||||
let stopAtFile: number | null = null;
|
||||
let timeoutSeconds: number | null = null;
|
||||
let watchMode: boolean = false;
|
||||
let watchIgnorePatterns: string[] = [];
|
||||
|
||||
// Parse options
|
||||
for (let i = 0; i < args.length; i++) {
|
||||
const arg = args[i];
|
||||
|
||||
switch (arg) {
|
||||
case '--version':
|
||||
// Get version from package.json
|
||||
try {
|
||||
const fs = await import('fs');
|
||||
const packagePath = new URL('../package.json', import.meta.url).pathname;
|
||||
const packageData = JSON.parse(await fs.promises.readFile(packagePath, 'utf8'));
|
||||
console.log(`tstest version ${packageData.version}`);
|
||||
} catch (error) {
|
||||
console.log('tstest version unknown');
|
||||
}
|
||||
process.exit(0);
|
||||
break;
|
||||
case '--quiet':
|
||||
case '-q':
|
||||
logOptions.quiet = true;
|
||||
break;
|
||||
case '--verbose':
|
||||
case '-v':
|
||||
logOptions.verbose = true;
|
||||
break;
|
||||
case '--no-color':
|
||||
logOptions.noColor = true;
|
||||
break;
|
||||
case '--json':
|
||||
logOptions.json = true;
|
||||
break;
|
||||
case '--log-file':
|
||||
case '--logfile':
|
||||
logOptions.logFile = true; // Set this as a flag, not a value
|
||||
break;
|
||||
case '--tags':
|
||||
if (i + 1 < args.length) {
|
||||
tags = args[++i].split(',');
|
||||
}
|
||||
break;
|
||||
case '--startFrom':
|
||||
if (i + 1 < args.length) {
|
||||
const value = parseInt(args[++i], 10);
|
||||
if (isNaN(value) || value < 1) {
|
||||
console.error('Error: --startFrom must be a positive integer');
|
||||
process.exit(1);
|
||||
}
|
||||
startFromFile = value;
|
||||
} else {
|
||||
console.error('Error: --startFrom requires a number argument');
|
||||
process.exit(1);
|
||||
}
|
||||
break;
|
||||
case '--stopAt':
|
||||
if (i + 1 < args.length) {
|
||||
const value = parseInt(args[++i], 10);
|
||||
if (isNaN(value) || value < 1) {
|
||||
console.error('Error: --stopAt must be a positive integer');
|
||||
process.exit(1);
|
||||
}
|
||||
stopAtFile = value;
|
||||
} else {
|
||||
console.error('Error: --stopAt requires a number argument');
|
||||
process.exit(1);
|
||||
}
|
||||
break;
|
||||
case '--timeout':
|
||||
if (i + 1 < args.length) {
|
||||
const value = parseInt(args[++i], 10);
|
||||
if (isNaN(value) || value < 1) {
|
||||
console.error('Error: --timeout must be a positive integer (seconds)');
|
||||
process.exit(1);
|
||||
}
|
||||
timeoutSeconds = value;
|
||||
} else {
|
||||
console.error('Error: --timeout requires a number argument (seconds)');
|
||||
process.exit(1);
|
||||
}
|
||||
break;
|
||||
case '--watch':
|
||||
case '-w':
|
||||
watchMode = true;
|
||||
break;
|
||||
case '--watch-ignore':
|
||||
if (i + 1 < args.length) {
|
||||
watchIgnorePatterns = args[++i].split(',');
|
||||
} else {
|
||||
console.error('Error: --watch-ignore requires a comma-separated list of patterns');
|
||||
process.exit(1);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
if (!arg.startsWith('-')) {
|
||||
testPath = arg;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Validate test file range options
|
||||
if (startFromFile !== null && stopAtFile !== null && startFromFile > stopAtFile) {
|
||||
console.error('Error: --startFrom cannot be greater than --stopAt');
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
if (!testPath) {
|
||||
console.error('You must specify a test directory/file/pattern as argument. Please try again.');
|
||||
console.error('\nUsage: tstest <path> [options]');
|
||||
console.error('\nOptions:');
|
||||
console.error(' --version Show version information');
|
||||
console.error(' --quiet, -q Minimal output');
|
||||
console.error(' --verbose, -v Verbose output');
|
||||
console.error(' --no-color Disable colored output');
|
||||
console.error(' --json Output results as JSON');
|
||||
console.error(' --logfile Write logs to .nogit/testlogs/[testfile].log');
|
||||
console.error(' --tags <tags> Run only tests with specified tags (comma-separated)');
|
||||
console.error(' --startFrom <n> Start running from test file number n');
|
||||
console.error(' --stopAt <n> Stop running at test file number n');
|
||||
console.error(' --timeout <s> Timeout test files after s seconds');
|
||||
console.error(' --watch, -w Watch for file changes and re-run tests');
|
||||
console.error(' --watch-ignore Patterns to ignore in watch mode (comma-separated)');
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
let executionMode: TestExecutionMode;
|
||||
|
||||
// Detect execution mode based on the argument
|
||||
if (testPath.includes('*') || testPath.includes('?') || testPath.includes('[') || testPath.includes('{')) {
|
||||
executionMode = TestExecutionMode.GLOB;
|
||||
} else if (testPath.endsWith('.ts')) {
|
||||
executionMode = TestExecutionMode.FILE;
|
||||
} else {
|
||||
executionMode = TestExecutionMode.DIRECTORY;
|
||||
}
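// Illustration only (not part of the change above): with this detection,
//   tstest "test/**/*.ts"  → TestExecutionMode.GLOB
//   tstest test/test.ts    → TestExecutionMode.FILE
//   tstest test/           → TestExecutionMode.DIRECTORY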
|
||||
|
||||
const tsTestInstance = new TsTest(process.cwd(), testPath, executionMode, logOptions, tags, startFromFile, stopAtFile, timeoutSeconds);
|
||||
|
||||
if (watchMode) {
|
||||
await tsTestInstance.runWatch(watchIgnorePatterns);
|
||||
} else {
|
||||
await tsTestInstance.run();
|
||||
}
|
||||
};
|
||||
|
||||
// Execute CLI when this file is run directly
|
||||
if (import.meta.url === `file://${process.argv[1]}`) {
|
||||
runCli();
|
||||
}
|
||||
|
3
ts/tspublish.json
Normal file
@@ -0,0 +1,3 @@
|
||||
{
|
||||
"order": 4
|
||||
}
|
316
ts/tstest.classes.migration.ts
Normal file
@@ -0,0 +1,316 @@
|
||||
import * as plugins from './tstest.plugins.js';
|
||||
import { coloredString as cs } from '@push.rocks/consolecolor';
|
||||
import { parseTestFilename, getLegacyMigrationTarget, isLegacyFilename } from './tstest.classes.runtime.parser.js';
|
||||
|
||||
/**
|
||||
* Migration result for a single file
|
||||
*/
|
||||
export interface MigrationResult {
|
||||
/**
|
||||
* Original file path
|
||||
*/
|
||||
oldPath: string;
|
||||
|
||||
/**
|
||||
* New file path after migration
|
||||
*/
|
||||
newPath: string;
|
||||
|
||||
/**
|
||||
* Whether the migration was performed
|
||||
*/
|
||||
migrated: boolean;
|
||||
|
||||
/**
|
||||
* Error message if migration failed
|
||||
*/
|
||||
error?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Migration summary
|
||||
*/
|
||||
export interface MigrationSummary {
|
||||
/**
|
||||
* Total number of legacy files found
|
||||
*/
|
||||
totalLegacyFiles: number;
|
||||
|
||||
/**
|
||||
* Number of files successfully migrated
|
||||
*/
|
||||
migratedCount: number;
|
||||
|
||||
/**
|
||||
* Number of files that failed to migrate
|
||||
*/
|
||||
errorCount: number;
|
||||
|
||||
/**
|
||||
* Individual migration results
|
||||
*/
|
||||
results: MigrationResult[];
|
||||
|
||||
/**
|
||||
* Whether this was a dry run
|
||||
*/
|
||||
dryRun: boolean;
|
||||
}
|
||||
|
||||
/**
|
||||
* Migration options
|
||||
*/
|
||||
export interface MigrationOptions {
|
||||
/**
|
||||
* Base directory to search for test files
|
||||
* Default: process.cwd()
|
||||
*/
|
||||
baseDir?: string;
|
||||
|
||||
/**
|
||||
* Glob pattern for finding test files
|
||||
* Default: '** /test*.ts' (written with a space here only to avoid closing this comment block)
|
||||
*/
|
||||
pattern?: string;
|
||||
|
||||
/**
|
||||
* Dry run mode - don't actually rename files
|
||||
* Default: true
|
||||
*/
|
||||
dryRun?: boolean;
|
||||
|
||||
/**
|
||||
* Verbose output
|
||||
* Default: false
|
||||
*/
|
||||
verbose?: boolean;
|
||||
}
|
||||
|
||||
/**
|
||||
* Migration class for renaming legacy test files to new naming convention
|
||||
*
|
||||
* Migrations:
|
||||
* - .browser.ts → .chromium.ts
|
||||
* - .both.ts → .node+chromium.ts
|
||||
* - .both.nonci.ts → .node+chromium.nonci.ts
|
||||
* - .browser.nonci.ts → .chromium.nonci.ts
|
||||
*/
|
||||
export class Migration {
|
||||
private options: Required<MigrationOptions>;
|
||||
|
||||
constructor(options: MigrationOptions = {}) {
|
||||
this.options = {
|
||||
baseDir: options.baseDir || process.cwd(),
|
||||
pattern: options.pattern || '**/test*.ts',
|
||||
dryRun: options.dryRun !== undefined ? options.dryRun : true,
|
||||
verbose: options.verbose || false,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Find all legacy test files in the base directory
|
||||
*/
|
||||
async findLegacyFiles(): Promise<string[]> {
|
||||
const files = await plugins.smartfile.fs.listFileTree(
|
||||
this.options.baseDir,
|
||||
this.options.pattern
|
||||
);
|
||||
|
||||
const legacyFiles: string[] = [];
|
||||
|
||||
for (const file of files) {
|
||||
const fileName = plugins.path.basename(file);
|
||||
if (isLegacyFilename(fileName)) {
|
||||
const absolutePath = plugins.path.isAbsolute(file)
|
||||
? file
|
||||
: plugins.path.join(this.options.baseDir, file);
|
||||
legacyFiles.push(absolutePath);
|
||||
}
|
||||
}
|
||||
|
||||
return legacyFiles;
|
||||
}
|
||||
|
||||
/**
|
||||
* Migrate a single file
|
||||
*/
|
||||
private async migrateFile(filePath: string): Promise<MigrationResult> {
|
||||
const fileName = plugins.path.basename(filePath);
|
||||
const dirName = plugins.path.dirname(filePath);
|
||||
|
||||
try {
|
||||
// Get the new filename
|
||||
const newFileName = getLegacyMigrationTarget(fileName);
|
||||
|
||||
if (!newFileName) {
|
||||
return {
|
||||
oldPath: filePath,
|
||||
newPath: filePath,
|
||||
migrated: false,
|
||||
error: 'File is not a legacy file',
|
||||
};
|
||||
}
|
||||
|
||||
const newPath = plugins.path.join(dirName, newFileName);
|
||||
|
||||
// Check if target file already exists
|
||||
if (await plugins.smartfile.fs.fileExists(newPath)) {
|
||||
return {
|
||||
oldPath: filePath,
|
||||
newPath,
|
||||
migrated: false,
|
||||
error: `Target file already exists: ${newPath}`,
|
||||
};
|
||||
}
|
||||
|
||||
if (!this.options.dryRun) {
|
||||
// Check if we're in a git repository
|
||||
const isGitRepo = await this.isGitRepository(this.options.baseDir);
|
||||
|
||||
if (isGitRepo) {
|
||||
// Use git mv to preserve history
|
||||
const smartshell = new plugins.smartshell.Smartshell({
|
||||
executor: 'bash',
|
||||
pathDirectories: [],
|
||||
});
|
||||
const gitCommand = `cd "${this.options.baseDir}" && git mv "${filePath}" "${newPath}"`;
|
||||
const result = await smartshell.exec(gitCommand);
|
||||
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`git mv failed: ${result.stderr}`);
|
||||
}
|
||||
} else {
|
||||
// Not a git repository - cannot migrate without git
|
||||
        throw new Error('Migration requires a git repository so that git mv can preserve file history.');
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
oldPath: filePath,
|
||||
newPath,
|
||||
migrated: true,
|
||||
};
|
||||
} catch (error) {
|
||||
return {
|
||||
oldPath: filePath,
|
||||
newPath: filePath,
|
||||
migrated: false,
|
||||
error: error.message,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a directory is a git repository
|
||||
*/
|
||||
private async isGitRepository(dir: string): Promise<boolean> {
|
||||
try {
|
||||
const gitDir = plugins.path.join(dir, '.git');
|
||||
return await plugins.smartfile.fs.isDirectory(gitDir);
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Run the migration
|
||||
*/
|
||||
async run(): Promise<MigrationSummary> {
|
||||
const legacyFiles = await this.findLegacyFiles();
|
||||
|
||||
console.log('');
|
||||
console.log(cs('='.repeat(60), 'blue'));
|
||||
console.log(cs('Test File Migration Tool', 'blue'));
|
||||
console.log(cs('='.repeat(60), 'blue'));
|
||||
console.log('');
|
||||
|
||||
if (this.options.dryRun) {
|
||||
console.log(cs('🔍 DRY RUN MODE - No files will be modified', 'orange'));
|
||||
console.log('');
|
||||
}
|
||||
|
||||
console.log(`Found ${legacyFiles.length} legacy test file(s)`);
|
||||
console.log('');
|
||||
|
||||
const results: MigrationResult[] = [];
|
||||
let migratedCount = 0;
|
||||
let errorCount = 0;
|
||||
|
||||
for (const file of legacyFiles) {
|
||||
const result = await this.migrateFile(file);
|
||||
results.push(result);
|
||||
|
||||
if (result.migrated) {
|
||||
migratedCount++;
|
||||
const oldName = plugins.path.basename(result.oldPath);
|
||||
const newName = plugins.path.basename(result.newPath);
|
||||
|
||||
if (this.options.dryRun) {
|
||||
console.log(cs(` Would migrate:`, 'cyan'));
|
||||
} else {
|
||||
console.log(cs(` ✓ Migrated:`, 'green'));
|
||||
}
|
||||
console.log(` ${oldName}`);
|
||||
console.log(cs(` → ${newName}`, 'green'));
|
||||
console.log('');
|
||||
} else if (result.error) {
|
||||
errorCount++;
|
||||
console.log(cs(` ✗ Failed: ${plugins.path.basename(result.oldPath)}`, 'red'));
|
||||
console.log(cs(` ${result.error}`, 'red'));
|
||||
console.log('');
|
||||
}
|
||||
}
|
||||
|
||||
console.log(cs('='.repeat(60), 'blue'));
|
||||
console.log(`Summary:`);
|
||||
console.log(` Total legacy files: ${legacyFiles.length}`);
|
||||
console.log(` Successfully migrated: ${migratedCount}`);
|
||||
console.log(` Errors: ${errorCount}`);
|
||||
console.log(cs('='.repeat(60), 'blue'));
|
||||
|
||||
if (this.options.dryRun && legacyFiles.length > 0) {
|
||||
console.log('');
|
||||
console.log(cs('To apply these changes, run:', 'orange'));
|
||||
console.log(cs(' tstest migrate --write', 'orange'));
|
||||
}
|
||||
|
||||
console.log('');
|
||||
|
||||
return {
|
||||
totalLegacyFiles: legacyFiles.length,
|
||||
migratedCount,
|
||||
errorCount,
|
||||
results,
|
||||
dryRun: this.options.dryRun,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a migration report without performing the migration
|
||||
*/
|
||||
async generateReport(): Promise<string> {
|
||||
const legacyFiles = await this.findLegacyFiles();
|
||||
|
||||
let report = '';
|
||||
report += 'Test File Migration Report\n';
|
||||
report += '='.repeat(60) + '\n';
|
||||
report += '\n';
|
||||
report += `Found ${legacyFiles.length} legacy test file(s)\n`;
|
||||
report += '\n';
|
||||
|
||||
for (const file of legacyFiles) {
|
||||
const fileName = plugins.path.basename(file);
|
||||
const newFileName = getLegacyMigrationTarget(fileName);
|
||||
|
||||
if (newFileName) {
|
||||
report += `${fileName}\n`;
|
||||
report += ` → ${newFileName}\n`;
|
||||
report += '\n';
|
||||
}
|
||||
}
|
||||
|
||||
report += '='.repeat(60) + '\n';
|
||||
|
||||
return report;
|
||||
}
|
||||
}
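For orientation, here is a minimal usage sketch of the Migration class added above. The import path and the wrapper script are assumptions for illustration; only the option names and the run() return shape come from the code itself.

// migrate-tests.ts — hypothetical helper script, not part of this diff
import { Migration } from './ts/tstest.classes.migration.js'; // assumed module path

async function main() {
  // Dry run first: reports what would be renamed without touching the repo
  const dryRun = new Migration({ baseDir: process.cwd(), dryRun: true, verbose: true });
  const summary = await dryRun.run();

  // Apply the renames (via git mv) only once the dry run looks clean
  if (summary.totalLegacyFiles > 0 && summary.errorCount === 0) {
    await new Migration({ baseDir: process.cwd(), dryRun: false }).run();
  }
}

main();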
|
245
ts/tstest.classes.runtime.adapter.ts
Normal file
@@ -0,0 +1,245 @@
|
||||
import * as plugins from './tstest.plugins.js';
|
||||
import type { Runtime } from './tstest.classes.runtime.parser.js';
|
||||
import { TapParser } from './tstest.classes.tap.parser.js';
|
||||
|
||||
/**
|
||||
* Runtime-specific configuration options
|
||||
*/
|
||||
export interface RuntimeOptions {
|
||||
/**
|
||||
* Environment variables to pass to the runtime
|
||||
*/
|
||||
env?: Record<string, string>;
|
||||
|
||||
/**
|
||||
* Additional command-line arguments
|
||||
*/
|
||||
extraArgs?: string[];
|
||||
|
||||
/**
|
||||
* Working directory for test execution
|
||||
*/
|
||||
cwd?: string;
|
||||
|
||||
/**
|
||||
* Timeout in milliseconds (0 = no timeout)
|
||||
*/
|
||||
timeout?: number;
|
||||
}
|
||||
|
||||
/**
|
||||
* Deno-specific configuration options
|
||||
*/
|
||||
export interface DenoOptions extends RuntimeOptions {
|
||||
/**
|
||||
* Permissions to grant to Deno
|
||||
* Default: ['--allow-read', '--allow-env']
|
||||
*/
|
||||
permissions?: string[];
|
||||
|
||||
/**
|
||||
* Path to deno.json config file
|
||||
*/
|
||||
configPath?: string;
|
||||
|
||||
/**
|
||||
* Path to import map file
|
||||
*/
|
||||
importMap?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Chromium-specific configuration options
|
||||
*/
|
||||
export interface ChromiumOptions extends RuntimeOptions {
|
||||
/**
|
||||
* Chromium launch arguments
|
||||
*/
|
||||
launchArgs?: string[];
|
||||
|
||||
/**
|
||||
* Headless mode (default: true)
|
||||
*/
|
||||
headless?: boolean;
|
||||
|
||||
/**
|
||||
* Port range for HTTP server
|
||||
*/
|
||||
portRange?: { min: number; max: number };
|
||||
}
|
||||
|
||||
/**
|
||||
* Command configuration returned by createCommand()
|
||||
*/
|
||||
export interface RuntimeCommand {
|
||||
/**
|
||||
* The main command executable (e.g., 'node', 'deno', 'bun')
|
||||
*/
|
||||
command: string;
|
||||
|
||||
/**
|
||||
* Command-line arguments
|
||||
*/
|
||||
args: string[];
|
||||
|
||||
/**
|
||||
* Environment variables
|
||||
*/
|
||||
env?: Record<string, string>;
|
||||
|
||||
/**
|
||||
* Working directory
|
||||
*/
|
||||
cwd?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Runtime availability check result
|
||||
*/
|
||||
export interface RuntimeAvailability {
|
||||
/**
|
||||
* Whether the runtime is available
|
||||
*/
|
||||
available: boolean;
|
||||
|
||||
/**
|
||||
* Version string if available
|
||||
*/
|
||||
version?: string;
|
||||
|
||||
/**
|
||||
* Error message if not available
|
||||
*/
|
||||
error?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Abstract base class for runtime adapters
|
||||
* Each runtime (Node, Chromium, Deno, Bun) implements this interface
|
||||
*/
|
||||
export abstract class RuntimeAdapter {
|
||||
/**
|
||||
* Runtime identifier
|
||||
*/
|
||||
abstract readonly id: Runtime;
|
||||
|
||||
/**
|
||||
* Human-readable display name
|
||||
*/
|
||||
abstract readonly displayName: string;
|
||||
|
||||
/**
|
||||
* Check if this runtime is available on the system
|
||||
* @returns Availability information including version
|
||||
*/
|
||||
abstract checkAvailable(): Promise<RuntimeAvailability>;
|
||||
|
||||
/**
|
||||
* Create the command configuration for executing a test
|
||||
* @param testFile - Absolute path to the test file
|
||||
* @param options - Runtime-specific options
|
||||
* @returns Command configuration
|
||||
*/
|
||||
abstract createCommand(testFile: string, options?: RuntimeOptions): RuntimeCommand;
|
||||
|
||||
/**
|
||||
* Execute a test file and return a TAP parser
|
||||
* @param testFile - Absolute path to the test file
|
||||
* @param index - Test index (for display)
|
||||
* @param total - Total number of tests (for display)
|
||||
* @param options - Runtime-specific options
|
||||
* @returns TAP parser with test results
|
||||
*/
|
||||
abstract run(
|
||||
testFile: string,
|
||||
index: number,
|
||||
total: number,
|
||||
options?: RuntimeOptions
|
||||
): Promise<TapParser>;
|
||||
|
||||
/**
|
||||
* Get the default options for this runtime
|
||||
* Can be overridden by subclasses
|
||||
*/
|
||||
protected getDefaultOptions(): RuntimeOptions {
|
||||
return {
|
||||
timeout: 0,
|
||||
extraArgs: [],
|
||||
env: {},
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Merge user options with defaults
|
||||
*/
|
||||
protected mergeOptions<T extends RuntimeOptions>(userOptions?: T): T {
|
||||
const defaults = this.getDefaultOptions();
|
||||
return {
|
||||
...defaults,
|
||||
...userOptions,
|
||||
env: { ...defaults.env, ...userOptions?.env },
|
||||
extraArgs: [...(defaults.extraArgs || []), ...(userOptions?.extraArgs || [])],
|
||||
} as T;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Registry for runtime adapters
|
||||
* Manages all available runtime implementations
|
||||
*/
|
||||
export class RuntimeAdapterRegistry {
|
||||
private adapters: Map<Runtime, RuntimeAdapter> = new Map();
|
||||
|
||||
/**
|
||||
* Register a runtime adapter
|
||||
*/
|
||||
register(adapter: RuntimeAdapter): void {
|
||||
this.adapters.set(adapter.id, adapter);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get an adapter by runtime ID
|
||||
*/
|
||||
get(runtime: Runtime): RuntimeAdapter | undefined {
|
||||
return this.adapters.get(runtime);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all registered adapters
|
||||
*/
|
||||
getAll(): RuntimeAdapter[] {
|
||||
return Array.from(this.adapters.values());
|
||||
}
|
||||
|
||||
/**
|
||||
* Check which runtimes are available on the system
|
||||
*/
|
||||
async checkAvailability(): Promise<Map<Runtime, RuntimeAvailability>> {
|
||||
const results = new Map<Runtime, RuntimeAvailability>();
|
||||
|
||||
for (const [runtime, adapter] of this.adapters) {
|
||||
const availability = await adapter.checkAvailable();
|
||||
results.set(runtime, availability);
|
||||
}
|
||||
|
||||
return results;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get adapters for a list of runtimes, in order
|
||||
* @param runtimes - Ordered list of runtimes
|
||||
* @returns Adapters in the same order, skipping any that aren't registered
|
||||
*/
|
||||
getAdaptersForRuntimes(runtimes: Runtime[]): RuntimeAdapter[] {
|
||||
const adapters: RuntimeAdapter[] = [];
|
||||
|
||||
for (const runtime of runtimes) {
|
||||
const adapter = this.get(runtime);
|
||||
if (adapter) {
|
||||
adapters.push(adapter);
|
||||
}
|
||||
}
|
||||
|
||||
return adapters;
|
||||
}
|
||||
}
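A rough wiring sketch for the adapter registry above. The concrete adapter constructors appear later in this diff; the logger and smartshell instances are declared here only as assumed externals.

// Hypothetical wiring example
import { RuntimeAdapterRegistry } from './tstest.classes.runtime.adapter.js';
import { NodeRuntimeAdapter } from './tstest.classes.runtime.node.js';
import { BunRuntimeAdapter } from './tstest.classes.runtime.bun.js';

declare const logger: any;      // TsTestLogger instance, assumed to exist
declare const smartshell: any;  // @push.rocks/smartshell instance, assumed to exist

const registry = new RuntimeAdapterRegistry();
registry.register(new NodeRuntimeAdapter(logger, smartshell, null, []));
registry.register(new BunRuntimeAdapter(logger, smartshell, null, []));

// Report which runtimes the current machine can actually execute
const availability = await registry.checkAvailability();
for (const [runtime, info] of availability) {
  console.log(`${runtime}: ${info.available ? info.version : info.error}`);
}

// Run one file on each runtime its filename requests, in order
for (const adapter of registry.getAdaptersForRuntimes(['node', 'bun'])) {
  await adapter.run('test/test.node+bun.ts', 1, 1);
}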
|
219
ts/tstest.classes.runtime.bun.ts
Normal file
@@ -0,0 +1,219 @@
|
||||
import * as plugins from './tstest.plugins.js';
|
||||
import { coloredString as cs } from '@push.rocks/consolecolor';
|
||||
import {
|
||||
RuntimeAdapter,
|
||||
type RuntimeOptions,
|
||||
type RuntimeCommand,
|
||||
type RuntimeAvailability,
|
||||
} from './tstest.classes.runtime.adapter.js';
|
||||
import { TapParser } from './tstest.classes.tap.parser.js';
|
||||
import { TsTestLogger } from './tstest.logging.js';
|
||||
import type { Runtime } from './tstest.classes.runtime.parser.js';
|
||||
|
||||
/**
|
||||
* Bun runtime adapter
|
||||
* Executes tests using the Bun runtime with native TypeScript support
|
||||
*/
|
||||
export class BunRuntimeAdapter extends RuntimeAdapter {
|
||||
readonly id: Runtime = 'bun';
|
||||
readonly displayName: string = 'Bun';
|
||||
|
||||
constructor(
|
||||
private logger: TsTestLogger,
|
||||
private smartshellInstance: any, // SmartShell instance from @push.rocks/smartshell
|
||||
private timeoutSeconds: number | null,
|
||||
private filterTags: string[]
|
||||
) {
|
||||
super();
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if Bun is available
|
||||
*/
|
||||
async checkAvailable(): Promise<RuntimeAvailability> {
|
||||
try {
|
||||
const result = await this.smartshellInstance.exec('bun --version', {
|
||||
cwd: process.cwd(),
|
||||
onError: () => {
|
||||
// Ignore error
|
||||
}
|
||||
});
|
||||
|
||||
if (result.exitCode !== 0) {
|
||||
return {
|
||||
available: false,
|
||||
error: 'Bun not found. Install from: https://bun.sh/',
|
||||
};
|
||||
}
|
||||
|
||||
// Bun version is just the version number
|
||||
const version = result.stdout.trim();
|
||||
|
||||
return {
|
||||
available: true,
|
||||
version: `Bun ${version}`,
|
||||
};
|
||||
} catch (error) {
|
||||
return {
|
||||
available: false,
|
||||
error: error.message,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Create command configuration for Bun test execution
|
||||
*/
|
||||
createCommand(testFile: string, options?: RuntimeOptions): RuntimeCommand {
|
||||
const mergedOptions = this.mergeOptions(options);
|
||||
|
||||
const args: string[] = ['run'];
|
||||
|
||||
// Add extra args
|
||||
if (mergedOptions.extraArgs && mergedOptions.extraArgs.length > 0) {
|
||||
args.push(...mergedOptions.extraArgs);
|
||||
}
|
||||
|
||||
// Add test file
|
||||
args.push(testFile);
|
||||
|
||||
// Set environment variables
|
||||
const env = { ...mergedOptions.env };
|
||||
|
||||
if (this.filterTags.length > 0) {
|
||||
env.TSTEST_FILTER_TAGS = this.filterTags.join(',');
|
||||
}
|
||||
|
||||
return {
|
||||
command: 'bun',
|
||||
args,
|
||||
env,
|
||||
cwd: mergedOptions.cwd,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute a test file in Bun
|
||||
*/
|
||||
async run(
|
||||
testFile: string,
|
||||
index: number,
|
||||
total: number,
|
||||
options?: RuntimeOptions
|
||||
): Promise<TapParser> {
|
||||
this.logger.testFileStart(testFile, this.displayName, index, total);
|
||||
const tapParser = new TapParser(testFile + ':bun', this.logger);
|
||||
|
||||
const mergedOptions = this.mergeOptions(options);
|
||||
|
||||
// Build Bun command
|
||||
const command = this.createCommand(testFile, mergedOptions);
|
||||
const fullCommand = `${command.command} ${command.args.join(' ')}`;
|
||||
|
||||
// Set filter tags as environment variable
|
||||
if (this.filterTags.length > 0) {
|
||||
process.env.TSTEST_FILTER_TAGS = this.filterTags.join(',');
|
||||
}
|
||||
|
||||
// Check for 00init.ts file in test directory
|
||||
const testDir = plugins.path.dirname(testFile);
|
||||
const initFile = plugins.path.join(testDir, '00init.ts');
|
||||
const initFileExists = await plugins.smartfile.fs.fileExists(initFile);
|
||||
|
||||
let runCommand = fullCommand;
|
||||
let loaderPath: string | null = null;
|
||||
|
||||
// If 00init.ts exists, create a loader file
|
||||
if (initFileExists) {
|
||||
const absoluteInitFile = plugins.path.resolve(initFile);
|
||||
const absoluteTestFile = plugins.path.resolve(testFile);
|
||||
const loaderContent = `
|
||||
import '${absoluteInitFile.replace(/\\/g, '/')}';
|
||||
import '${absoluteTestFile.replace(/\\/g, '/')}';
|
||||
`;
|
||||
loaderPath = plugins.path.join(testDir, `.loader_${plugins.path.basename(testFile)}`);
|
||||
await plugins.smartfile.memory.toFs(loaderContent, loaderPath);
|
||||
|
||||
// Rebuild command with loader file
|
||||
const loaderCommand = this.createCommand(loaderPath, mergedOptions);
|
||||
runCommand = `${loaderCommand.command} ${loaderCommand.args.join(' ')}`;
|
||||
}
|
||||
|
||||
const execResultStreaming = await this.smartshellInstance.execStreamingSilent(runCommand);
|
||||
|
||||
// If we created a loader file, clean it up after test execution
|
||||
if (loaderPath) {
|
||||
const cleanup = () => {
|
||||
try {
|
||||
if (plugins.smartfile.fs.fileExistsSync(loaderPath)) {
|
||||
plugins.smartfile.fs.removeSync(loaderPath);
|
||||
}
|
||||
} catch (e) {
|
||||
// Ignore cleanup errors
|
||||
}
|
||||
};
|
||||
|
||||
execResultStreaming.childProcess.on('exit', cleanup);
|
||||
execResultStreaming.childProcess.on('error', cleanup);
|
||||
}
|
||||
|
||||
// Start warning timer if no timeout was specified
|
||||
let warningTimer: NodeJS.Timeout | null = null;
|
||||
if (this.timeoutSeconds === null) {
|
||||
warningTimer = setTimeout(() => {
|
||||
console.error('');
|
||||
console.error(cs('⚠️ WARNING: Test file is running for more than 1 minute', 'orange'));
|
||||
console.error(cs(` File: ${testFile}`, 'orange'));
|
||||
console.error(cs(' Consider using --timeout option to set a timeout for test files.', 'orange'));
|
||||
console.error(cs(' Example: tstest test --timeout=300 (for 5 minutes)', 'orange'));
|
||||
console.error('');
|
||||
}, 60000); // 1 minute
|
||||
}
|
||||
|
||||
// Handle timeout if specified
|
||||
if (this.timeoutSeconds !== null) {
|
||||
const timeoutMs = this.timeoutSeconds * 1000;
|
||||
let timeoutId: NodeJS.Timeout;
|
||||
|
||||
const timeoutPromise = new Promise<void>((_resolve, reject) => {
|
||||
timeoutId = setTimeout(async () => {
|
||||
// Use smartshell's terminate() to kill entire process tree
|
||||
await execResultStreaming.terminate();
|
||||
reject(new Error(`Test file timed out after ${this.timeoutSeconds} seconds`));
|
||||
}, timeoutMs);
|
||||
});
|
||||
|
||||
try {
|
||||
await Promise.race([
|
||||
tapParser.handleTapProcess(execResultStreaming.childProcess),
|
||||
timeoutPromise
|
||||
]);
|
||||
// Clear timeout if test completed successfully
|
||||
clearTimeout(timeoutId);
|
||||
} catch (error) {
|
||||
// Clear warning timer if it was set
|
||||
if (warningTimer) {
|
||||
clearTimeout(warningTimer);
|
||||
}
|
||||
// Handle timeout error
|
||||
tapParser.handleTimeout(this.timeoutSeconds);
|
||||
// Ensure entire process tree is killed if still running
|
||||
try {
|
||||
await execResultStreaming.kill(); // This kills the entire process tree with SIGKILL
|
||||
} catch (killError) {
|
||||
// Process tree might already be dead
|
||||
}
|
||||
await tapParser.evaluateFinalResult();
|
||||
}
|
||||
} else {
|
||||
await tapParser.handleTapProcess(execResultStreaming.childProcess);
|
||||
}
|
||||
|
||||
// Clear warning timer if it was set
|
||||
if (warningTimer) {
|
||||
clearTimeout(warningTimer);
|
||||
}
|
||||
|
||||
return tapParser;
|
||||
}
|
||||
}
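To make the Bun command construction above concrete, a small sketch of what createCommand() returns; the collaborator instances are placeholders, and only the option shapes come from the interfaces above.

import { BunRuntimeAdapter } from './tstest.classes.runtime.bun.js';

declare const logger: any, smartshell: any; // assumed collaborators

const bun = new BunRuntimeAdapter(logger, smartshell, null, ['unit']);
const cmd = bun.createCommand('test/test.bun.ts', { extraArgs: ['--smol'] });
// Per the code above, cmd is:
// { command: 'bun',
//   args: ['run', '--smol', 'test/test.bun.ts'],
//   env: { TSTEST_FILTER_TAGS: 'unit' },
//   cwd: undefined }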
|
293
ts/tstest.classes.runtime.chromium.ts
Normal file
@@ -0,0 +1,293 @@
|
||||
import * as plugins from './tstest.plugins.js';
|
||||
import * as paths from './tstest.paths.js';
|
||||
import { coloredString as cs } from '@push.rocks/consolecolor';
|
||||
import {
|
||||
RuntimeAdapter,
|
||||
type ChromiumOptions,
|
||||
type RuntimeCommand,
|
||||
type RuntimeAvailability,
|
||||
} from './tstest.classes.runtime.adapter.js';
|
||||
import { TapParser } from './tstest.classes.tap.parser.js';
|
||||
import { TsTestLogger } from './tstest.logging.js';
|
||||
import type { Runtime } from './tstest.classes.runtime.parser.js';
|
||||
|
||||
/**
|
||||
* Chromium runtime adapter
|
||||
* Executes tests in a headless Chromium browser
|
||||
*/
|
||||
export class ChromiumRuntimeAdapter extends RuntimeAdapter {
|
||||
readonly id: Runtime = 'chromium';
|
||||
readonly displayName: string = 'Chromium';
|
||||
|
||||
constructor(
|
||||
private logger: TsTestLogger,
|
||||
private tsbundleInstance: any, // TsBundle instance from @push.rocks/tsbundle
|
||||
private smartbrowserInstance: any, // SmartBrowser instance from @push.rocks/smartbrowser
|
||||
private timeoutSeconds: number | null
|
||||
) {
|
||||
super();
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if Chromium is available
|
||||
*/
|
||||
async checkAvailable(): Promise<RuntimeAvailability> {
|
||||
try {
|
||||
// Check if smartbrowser is available and can start
|
||||
// The browser binary is usually handled by @push.rocks/smartbrowser
|
||||
return {
|
||||
available: true,
|
||||
version: 'Chromium (via smartbrowser)',
|
||||
};
|
||||
} catch (error) {
|
||||
return {
|
||||
available: false,
|
||||
error: error.message || 'Chromium not available',
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Create command configuration for Chromium test execution
|
||||
* Note: Chromium tests don't use a traditional command, but this satisfies the interface
|
||||
*/
|
||||
createCommand(testFile: string, options?: ChromiumOptions): RuntimeCommand {
|
||||
const mergedOptions = this.mergeOptions(options);
|
||||
|
||||
return {
|
||||
command: 'chromium',
|
||||
args: [],
|
||||
env: mergedOptions.env,
|
||||
cwd: mergedOptions.cwd,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Find free ports for HTTP server and WebSocket
|
||||
*/
|
||||
private async findFreePorts(): Promise<{ httpPort: number; wsPort: number }> {
|
||||
const smartnetwork = new plugins.smartnetwork.SmartNetwork();
|
||||
|
||||
// Find random free HTTP port in range 30000-40000 to minimize collision chance
|
||||
const httpPort = await smartnetwork.findFreePort(30000, 40000, { randomize: true });
|
||||
if (!httpPort) {
|
||||
throw new Error('Could not find a free HTTP port in range 30000-40000');
|
||||
}
|
||||
|
||||
// Find random free WebSocket port, excluding the HTTP port to ensure they're different
|
||||
const wsPort = await smartnetwork.findFreePort(30000, 40000, {
|
||||
randomize: true,
|
||||
exclude: [httpPort]
|
||||
});
|
||||
if (!wsPort) {
|
||||
throw new Error('Could not find a free WebSocket port in range 30000-40000');
|
||||
}
|
||||
|
||||
// Log selected ports for debugging
|
||||
if (!this.logger.options.quiet) {
|
||||
console.log(`Selected ports - HTTP: ${httpPort}, WebSocket: ${wsPort}`);
|
||||
}
|
||||
return { httpPort, wsPort };
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute a test file in Chromium browser
|
||||
*/
|
||||
async run(
|
||||
testFile: string,
|
||||
index: number,
|
||||
total: number,
|
||||
options?: ChromiumOptions
|
||||
): Promise<TapParser> {
|
||||
this.logger.testFileStart(testFile, this.displayName, index, total);
|
||||
|
||||
    // let's get all our paths sorted
|
||||
const tsbundleCacheDirPath = plugins.path.join(paths.cwd, './.nogit/tstest_cache');
|
||||
const bundleFileName = testFile.replace('/', '__') + '.js';
|
||||
const bundleFilePath = plugins.path.join(tsbundleCacheDirPath, bundleFileName);
|
||||
|
||||
    // let's bundle the test
|
||||
await plugins.smartfile.fs.ensureEmptyDir(tsbundleCacheDirPath);
|
||||
await this.tsbundleInstance.build(process.cwd(), testFile, bundleFilePath, {
|
||||
bundler: 'esbuild',
|
||||
});
|
||||
|
||||
// Find free ports for HTTP and WebSocket
|
||||
const { httpPort, wsPort } = await this.findFreePorts();
|
||||
|
||||
    // let's create a server
|
||||
const server = new plugins.typedserver.servertools.Server({
|
||||
cors: true,
|
||||
port: httpPort,
|
||||
});
|
||||
server.addRoute(
|
||||
'/test',
|
||||
new plugins.typedserver.servertools.Handler('GET', async (_req, res) => {
|
||||
res.type('.html');
|
||||
res.write(`
|
||||
<html>
|
||||
<head>
|
||||
<script>
|
||||
globalThis.testdom = true;
|
||||
globalThis.wsPort = ${wsPort};
|
||||
</script>
|
||||
</head>
|
||||
<body></body>
|
||||
</html>
|
||||
`);
|
||||
res.end();
|
||||
})
|
||||
);
|
||||
server.addRoute('/*splat', new plugins.typedserver.servertools.HandlerStatic(tsbundleCacheDirPath));
|
||||
await server.start();
|
||||
|
||||
    // let's handle realtime comms
|
||||
const tapParser = new TapParser(testFile + ':chrome', this.logger);
|
||||
const wss = new plugins.ws.WebSocketServer({ port: wsPort });
|
||||
wss.on('connection', (ws) => {
|
||||
ws.on('message', (message) => {
|
||||
const messageStr = message.toString();
|
||||
if (messageStr.startsWith('console:')) {
|
||||
const [, level, ...messageParts] = messageStr.split(':');
|
||||
this.logger.browserConsole(messageParts.join(':'), level);
|
||||
} else {
|
||||
tapParser.handleTapLog(messageStr);
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
    // let's do the browser bit with timeout handling
|
||||
await this.smartbrowserInstance.start();
|
||||
|
||||
const evaluatePromise = this.smartbrowserInstance.evaluateOnPage(
|
||||
`http://localhost:${httpPort}/test?bundleName=${bundleFileName}`,
|
||||
async () => {
|
||||
        // let's enable real-time comms
|
||||
const ws = new WebSocket(`ws://localhost:${globalThis.wsPort}`);
|
||||
await new Promise((resolve) => (ws.onopen = resolve));
|
||||
|
||||
// Ensure this function is declared with 'async'
|
||||
const logStore = [];
|
||||
const originalLog = console.log;
|
||||
const originalError = console.error;
|
||||
|
||||
// Override console methods to capture the logs
|
||||
console.log = (...args: any[]) => {
|
||||
logStore.push(args.join(' '));
|
||||
ws.send(args.join(' '));
|
||||
originalLog(...args);
|
||||
};
|
||||
console.error = (...args: any[]) => {
|
||||
logStore.push(args.join(' '));
|
||||
ws.send(args.join(' '));
|
||||
originalError(...args);
|
||||
};
|
||||
|
||||
const bundleName = new URLSearchParams(window.location.search).get('bundleName');
|
||||
originalLog(`::TSTEST IN CHROMIUM:: Relevant Script name is: ${bundleName}`);
|
||||
|
||||
try {
|
||||
// Dynamically import the test module
|
||||
const testModule = await import(`/${bundleName}`);
|
||||
if (testModule && testModule.default && testModule.default instanceof Promise) {
|
||||
// Execute the exported test function
|
||||
await testModule.default;
|
||||
} else if (testModule && testModule.default && typeof testModule.default.then === 'function') {
|
||||
console.log('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!');
|
||||
console.log('Test module default export is just promiselike: Something might be messing with your Promise implementation.');
|
||||
console.log('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!');
|
||||
await testModule.default;
|
||||
} else if (globalThis.tapPromise && typeof globalThis.tapPromise.then === 'function') {
|
||||
console.log('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!');
|
||||
console.log('Using globalThis.tapPromise');
|
||||
console.log('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!');
|
||||
            // Await the globally exposed tap promise instead of the missing default export
            await globalThis.tapPromise;
|
||||
} else {
|
||||
console.error('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!');
|
||||
console.error('Test module does not export a default promise.');
|
||||
console.error('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!');
|
||||
console.log(`We got: ${JSON.stringify(testModule)}`);
|
||||
}
|
||||
} catch (err) {
|
||||
console.error(err);
|
||||
}
|
||||
|
||||
return logStore.join('\n');
|
||||
}
|
||||
);
|
||||
|
||||
// Start warning timer if no timeout was specified
|
||||
let warningTimer: NodeJS.Timeout | null = null;
|
||||
if (this.timeoutSeconds === null) {
|
||||
warningTimer = setTimeout(() => {
|
||||
console.error('');
|
||||
console.error(cs('⚠️ WARNING: Test file is running for more than 1 minute', 'orange'));
|
||||
console.error(cs(` File: ${testFile}`, 'orange'));
|
||||
console.error(cs(' Consider using --timeout option to set a timeout for test files.', 'orange'));
|
||||
console.error(cs(' Example: tstest test --timeout=300 (for 5 minutes)', 'orange'));
|
||||
console.error('');
|
||||
}, 60000); // 1 minute
|
||||
}
|
||||
|
||||
// Handle timeout if specified
|
||||
if (this.timeoutSeconds !== null) {
|
||||
const timeoutMs = this.timeoutSeconds * 1000;
|
||||
let timeoutId: NodeJS.Timeout;
|
||||
|
||||
const timeoutPromise = new Promise<void>((_resolve, reject) => {
|
||||
timeoutId = setTimeout(() => {
|
||||
reject(new Error(`Test file timed out after ${this.timeoutSeconds} seconds`));
|
||||
}, timeoutMs);
|
||||
});
|
||||
|
||||
try {
|
||||
await Promise.race([
|
||||
evaluatePromise,
|
||||
timeoutPromise
|
||||
]);
|
||||
// Clear timeout if test completed successfully
|
||||
clearTimeout(timeoutId);
|
||||
} catch (error) {
|
||||
// Clear warning timer if it was set
|
||||
if (warningTimer) {
|
||||
clearTimeout(warningTimer);
|
||||
}
|
||||
// Handle timeout error
|
||||
tapParser.handleTimeout(this.timeoutSeconds);
|
||||
}
|
||||
} else {
|
||||
await evaluatePromise;
|
||||
}
|
||||
|
||||
// Clear warning timer if it was set
|
||||
if (warningTimer) {
|
||||
clearTimeout(warningTimer);
|
||||
}
|
||||
|
||||
// Always clean up resources, even on timeout
|
||||
try {
|
||||
await this.smartbrowserInstance.stop();
|
||||
} catch (error) {
|
||||
// Browser might already be stopped
|
||||
}
|
||||
|
||||
try {
|
||||
await server.stop();
|
||||
} catch (error) {
|
||||
// Server might already be stopped
|
||||
}
|
||||
|
||||
try {
|
||||
wss.close();
|
||||
} catch (error) {
|
||||
// WebSocket server might already be closed
|
||||
}
|
||||
|
||||
console.log(
|
||||
`${cs('=> ', 'blue')} Stopped ${cs(testFile, 'orange')} chromium instance and server.`
|
||||
);
|
||||
// Always evaluate final result (handleTimeout just sets up the test state)
|
||||
await tapParser.evaluateFinalResult();
|
||||
return tapParser;
|
||||
}
|
||||
}
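The WebSocket bridge set up above expects two kinds of messages from the browser page: console output prefixed with 'console:<level>:' and raw TAP lines. A browser-side sketch follows; the page bootstrap around it is assumed, and globalThis.wsPort is injected by the /test route above.

const ws = new WebSocket(`ws://localhost:${(globalThis as any).wsPort}`);
ws.onopen = () => {
  // Routed to logger.browserConsole('hello from the page', 'log')
  ws.send('console:log:hello from the page');
  // Anything else is fed to tapParser.handleTapLog() as TAP output
  ws.send('ok 1 - my first test # time=12ms');
};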
|
262
ts/tstest.classes.runtime.deno.ts
Normal file
@@ -0,0 +1,262 @@
|
||||
import * as plugins from './tstest.plugins.js';
|
||||
import { coloredString as cs } from '@push.rocks/consolecolor';
|
||||
import {
|
||||
RuntimeAdapter,
|
||||
type DenoOptions,
|
||||
type RuntimeCommand,
|
||||
type RuntimeAvailability,
|
||||
} from './tstest.classes.runtime.adapter.js';
|
||||
import { TapParser } from './tstest.classes.tap.parser.js';
|
||||
import { TsTestLogger } from './tstest.logging.js';
|
||||
import type { Runtime } from './tstest.classes.runtime.parser.js';
|
||||
|
||||
/**
|
||||
* Deno runtime adapter
|
||||
* Executes tests using the Deno runtime
|
||||
*/
|
||||
export class DenoRuntimeAdapter extends RuntimeAdapter {
|
||||
readonly id: Runtime = 'deno';
|
||||
readonly displayName: string = 'Deno';
|
||||
|
||||
constructor(
|
||||
private logger: TsTestLogger,
|
||||
private smartshellInstance: any, // SmartShell instance from @push.rocks/smartshell
|
||||
private timeoutSeconds: number | null,
|
||||
private filterTags: string[]
|
||||
) {
|
||||
super();
|
||||
}
|
||||
|
||||
/**
|
||||
* Get default Deno options
|
||||
*/
|
||||
protected getDefaultOptions(): DenoOptions {
|
||||
return {
|
||||
...super.getDefaultOptions(),
|
||||
permissions: [
|
||||
'--allow-read',
|
||||
'--allow-env',
|
||||
'--allow-net',
|
||||
'--allow-write',
|
||||
'--allow-sys', // Allow system info access
|
||||
'--allow-import', // Allow npm/node imports
|
||||
'--node-modules-dir', // Enable Node.js compatibility mode
|
||||
'--sloppy-imports', // Allow .js imports to resolve to .ts files
|
||||
],
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if Deno is available
|
||||
*/
|
||||
async checkAvailable(): Promise<RuntimeAvailability> {
|
||||
try {
|
||||
const result = await this.smartshellInstance.exec('deno --version', {
|
||||
cwd: process.cwd(),
|
||||
onError: () => {
|
||||
// Ignore error
|
||||
}
|
||||
});
|
||||
|
||||
if (result.exitCode !== 0) {
|
||||
return {
|
||||
available: false,
|
||||
error: 'Deno not found. Install from: https://deno.land/',
|
||||
};
|
||||
}
|
||||
|
||||
// Parse Deno version from output (first line is "deno X.Y.Z")
|
||||
const versionMatch = result.stdout.match(/deno (\d+\.\d+\.\d+)/);
|
||||
const version = versionMatch ? versionMatch[1] : 'unknown';
|
||||
|
||||
return {
|
||||
available: true,
|
||||
version: `Deno ${version}`,
|
||||
};
|
||||
} catch (error) {
|
||||
return {
|
||||
available: false,
|
||||
error: error.message,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Create command configuration for Deno test execution
|
||||
*/
|
||||
createCommand(testFile: string, options?: DenoOptions): RuntimeCommand {
|
||||
const mergedOptions = this.mergeOptions(options) as DenoOptions;
|
||||
|
||||
const args: string[] = ['run'];
|
||||
|
||||
// Add permissions
|
||||
const permissions = mergedOptions.permissions || [
|
||||
'--allow-read',
|
||||
'--allow-env',
|
||||
'--allow-net',
|
||||
'--allow-write',
|
||||
'--allow-sys',
|
||||
'--allow-import',
|
||||
'--node-modules-dir',
|
||||
'--sloppy-imports',
|
||||
];
|
||||
args.push(...permissions);
|
||||
|
||||
// Add config file if specified
|
||||
if (mergedOptions.configPath) {
|
||||
args.push('--config', mergedOptions.configPath);
|
||||
}
|
||||
|
||||
// Add import map if specified
|
||||
if (mergedOptions.importMap) {
|
||||
args.push('--import-map', mergedOptions.importMap);
|
||||
}
|
||||
|
||||
// Add extra args
|
||||
if (mergedOptions.extraArgs && mergedOptions.extraArgs.length > 0) {
|
||||
args.push(...mergedOptions.extraArgs);
|
||||
}
|
||||
|
||||
// Add test file
|
||||
args.push(testFile);
|
||||
|
||||
// Set environment variables
|
||||
const env = { ...mergedOptions.env };
|
||||
|
||||
if (this.filterTags.length > 0) {
|
||||
env.TSTEST_FILTER_TAGS = this.filterTags.join(',');
|
||||
}
|
||||
|
||||
return {
|
||||
command: 'deno',
|
||||
args,
|
||||
env,
|
||||
cwd: mergedOptions.cwd,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute a test file in Deno
|
||||
*/
|
||||
async run(
|
||||
testFile: string,
|
||||
index: number,
|
||||
total: number,
|
||||
options?: DenoOptions
|
||||
): Promise<TapParser> {
|
||||
this.logger.testFileStart(testFile, this.displayName, index, total);
|
||||
const tapParser = new TapParser(testFile + ':deno', this.logger);
|
||||
|
||||
const mergedOptions = this.mergeOptions(options) as DenoOptions;
|
||||
|
||||
// Build Deno command
|
||||
const command = this.createCommand(testFile, mergedOptions);
|
||||
const fullCommand = `${command.command} ${command.args.join(' ')}`;
|
||||
|
||||
// Set filter tags as environment variable
|
||||
if (this.filterTags.length > 0) {
|
||||
process.env.TSTEST_FILTER_TAGS = this.filterTags.join(',');
|
||||
}
|
||||
|
||||
// Check for 00init.ts file in test directory
|
||||
const testDir = plugins.path.dirname(testFile);
|
||||
const initFile = plugins.path.join(testDir, '00init.ts');
|
||||
const initFileExists = await plugins.smartfile.fs.fileExists(initFile);
|
||||
|
||||
let runCommand = fullCommand;
|
||||
let loaderPath: string | null = null;
|
||||
|
||||
// If 00init.ts exists, create a loader file
|
||||
if (initFileExists) {
|
||||
const absoluteInitFile = plugins.path.resolve(initFile);
|
||||
const absoluteTestFile = plugins.path.resolve(testFile);
|
||||
const loaderContent = `
|
||||
import '${absoluteInitFile.replace(/\\/g, '/')}';
|
||||
import '${absoluteTestFile.replace(/\\/g, '/')}';
|
||||
`;
|
||||
loaderPath = plugins.path.join(testDir, `.loader_${plugins.path.basename(testFile)}`);
|
||||
await plugins.smartfile.memory.toFs(loaderContent, loaderPath);
|
||||
|
||||
// Rebuild command with loader file
|
||||
const loaderCommand = this.createCommand(loaderPath, mergedOptions);
|
||||
runCommand = `${loaderCommand.command} ${loaderCommand.args.join(' ')}`;
|
||||
}
|
||||
|
||||
const execResultStreaming = await this.smartshellInstance.execStreamingSilent(runCommand);
|
||||
|
||||
// If we created a loader file, clean it up after test execution
|
||||
if (loaderPath) {
|
||||
const cleanup = () => {
|
||||
try {
|
||||
if (plugins.smartfile.fs.fileExistsSync(loaderPath)) {
|
||||
plugins.smartfile.fs.removeSync(loaderPath);
|
||||
}
|
||||
} catch (e) {
|
||||
// Ignore cleanup errors
|
||||
}
|
||||
};
|
||||
|
||||
execResultStreaming.childProcess.on('exit', cleanup);
|
||||
execResultStreaming.childProcess.on('error', cleanup);
|
||||
}
|
||||
|
||||
// Start warning timer if no timeout was specified
|
||||
let warningTimer: NodeJS.Timeout | null = null;
|
||||
if (this.timeoutSeconds === null) {
|
||||
warningTimer = setTimeout(() => {
|
||||
console.error('');
|
||||
console.error(cs('⚠️ WARNING: Test file is running for more than 1 minute', 'orange'));
|
||||
console.error(cs(` File: ${testFile}`, 'orange'));
|
||||
console.error(cs(' Consider using --timeout option to set a timeout for test files.', 'orange'));
|
||||
console.error(cs(' Example: tstest test --timeout=300 (for 5 minutes)', 'orange'));
|
||||
console.error('');
|
||||
}, 60000); // 1 minute
|
||||
}
|
||||
|
||||
// Handle timeout if specified
|
||||
if (this.timeoutSeconds !== null) {
|
||||
const timeoutMs = this.timeoutSeconds * 1000;
|
||||
let timeoutId: NodeJS.Timeout;
|
||||
|
||||
const timeoutPromise = new Promise<void>((_resolve, reject) => {
|
||||
timeoutId = setTimeout(async () => {
|
||||
// Use smartshell's terminate() to kill entire process tree
|
||||
await execResultStreaming.terminate();
|
||||
reject(new Error(`Test file timed out after ${this.timeoutSeconds} seconds`));
|
||||
}, timeoutMs);
|
||||
});
|
||||
|
||||
try {
|
||||
await Promise.race([
|
||||
tapParser.handleTapProcess(execResultStreaming.childProcess),
|
||||
timeoutPromise
|
||||
]);
|
||||
// Clear timeout if test completed successfully
|
||||
clearTimeout(timeoutId);
|
||||
} catch (error) {
|
||||
// Clear warning timer if it was set
|
||||
if (warningTimer) {
|
||||
clearTimeout(warningTimer);
|
||||
}
|
||||
// Handle timeout error
|
||||
tapParser.handleTimeout(this.timeoutSeconds);
|
||||
// Ensure entire process tree is killed if still running
|
||||
try {
|
||||
await execResultStreaming.kill(); // This kills the entire process tree with SIGKILL
|
||||
} catch (killError) {
|
||||
// Process tree might already be dead
|
||||
}
|
||||
await tapParser.evaluateFinalResult();
|
||||
}
|
||||
} else {
|
||||
await tapParser.handleTapProcess(execResultStreaming.childProcess);
|
||||
}
|
||||
|
||||
// Clear warning timer if it was set
|
||||
if (warningTimer) {
|
||||
clearTimeout(warningTimer);
|
||||
}
|
||||
|
||||
return tapParser;
|
||||
}
|
||||
}
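As a quick illustration of the Deno defaults above, this is the command configuration produced for a plain test file; the collaborator instances are placeholders.

import { DenoRuntimeAdapter } from './tstest.classes.runtime.deno.js';

declare const logger: any, smartshell: any; // assumed collaborators

const deno = new DenoRuntimeAdapter(logger, smartshell, null, []);
const cmd = deno.createCommand('test/test.deno.ts');
// cmd.command === 'deno'
// cmd.args === ['run',
//   '--allow-read', '--allow-env', '--allow-net', '--allow-write',
//   '--allow-sys', '--allow-import', '--node-modules-dir', '--sloppy-imports',
//   'test/test.deno.ts']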
|
222
ts/tstest.classes.runtime.node.ts
Normal file
@@ -0,0 +1,222 @@
|
||||
import * as plugins from './tstest.plugins.js';
|
||||
import { coloredString as cs } from '@push.rocks/consolecolor';
|
||||
import {
|
||||
RuntimeAdapter,
|
||||
type RuntimeOptions,
|
||||
type RuntimeCommand,
|
||||
type RuntimeAvailability,
|
||||
} from './tstest.classes.runtime.adapter.js';
|
||||
import { TapParser } from './tstest.classes.tap.parser.js';
|
||||
import { TsTestLogger } from './tstest.logging.js';
|
||||
import type { Runtime } from './tstest.classes.runtime.parser.js';
|
||||
|
||||
/**
|
||||
* Node.js runtime adapter
|
||||
* Executes tests using tsrun (TypeScript runner for Node.js)
|
||||
*/
|
||||
export class NodeRuntimeAdapter extends RuntimeAdapter {
|
||||
readonly id: Runtime = 'node';
|
||||
readonly displayName: string = 'Node.js';
|
||||
|
||||
constructor(
|
||||
private logger: TsTestLogger,
|
||||
private smartshellInstance: any, // SmartShell instance from @push.rocks/smartshell
|
||||
private timeoutSeconds: number | null,
|
||||
private filterTags: string[]
|
||||
) {
|
||||
super();
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if Node.js and tsrun are available
|
||||
*/
|
||||
async checkAvailable(): Promise<RuntimeAvailability> {
|
||||
try {
|
||||
// Check Node.js version
|
||||
const nodeVersion = process.version;
|
||||
|
||||
// Check if tsrun is available
|
||||
const result = await this.smartshellInstance.exec('tsrun --version', {
|
||||
cwd: process.cwd(),
|
||||
onError: () => {
|
||||
// Ignore error
|
||||
}
|
||||
});
|
||||
|
||||
if (result.exitCode !== 0) {
|
||||
return {
|
||||
available: false,
|
||||
error: 'tsrun not found. Install with: pnpm install --save-dev @git.zone/tsrun',
|
||||
};
|
||||
}
|
||||
|
||||
return {
|
||||
available: true,
|
||||
version: nodeVersion,
|
||||
};
|
||||
} catch (error) {
|
||||
return {
|
||||
available: false,
|
||||
error: error.message,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Create command configuration for Node.js test execution
|
||||
*/
|
||||
createCommand(testFile: string, options?: RuntimeOptions): RuntimeCommand {
|
||||
const mergedOptions = this.mergeOptions(options);
|
||||
|
||||
// Build tsrun options
|
||||
const args: string[] = [];
|
||||
|
||||
if (process.argv.includes('--web')) {
|
||||
args.push('--web');
|
||||
}
|
||||
|
||||
// Add any extra args
|
||||
if (mergedOptions.extraArgs) {
|
||||
args.push(...mergedOptions.extraArgs);
|
||||
}
|
||||
|
||||
// Set environment variables
|
||||
const env = { ...mergedOptions.env };
|
||||
|
||||
if (this.filterTags.length > 0) {
|
||||
env.TSTEST_FILTER_TAGS = this.filterTags.join(',');
|
||||
}
|
||||
|
||||
return {
|
||||
command: 'tsrun',
|
||||
args: [testFile, ...args],
|
||||
env,
|
||||
cwd: mergedOptions.cwd,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute a test file in Node.js
|
||||
*/
|
||||
async run(
|
||||
testFile: string,
|
||||
index: number,
|
||||
total: number,
|
||||
options?: RuntimeOptions
|
||||
): Promise<TapParser> {
|
||||
this.logger.testFileStart(testFile, this.displayName, index, total);
|
||||
const tapParser = new TapParser(testFile + ':node', this.logger);
|
||||
|
||||
const mergedOptions = this.mergeOptions(options);
|
||||
|
||||
// Build tsrun command
|
||||
let tsrunOptions = '';
|
||||
if (process.argv.includes('--web')) {
|
||||
tsrunOptions += ' --web';
|
||||
}
|
||||
|
||||
// Set filter tags as environment variable
|
||||
if (this.filterTags.length > 0) {
|
||||
process.env.TSTEST_FILTER_TAGS = this.filterTags.join(',');
|
||||
}
|
||||
|
||||
// Check for 00init.ts file in test directory
|
||||
const testDir = plugins.path.dirname(testFile);
|
||||
const initFile = plugins.path.join(testDir, '00init.ts');
|
||||
let runCommand = `tsrun ${testFile}${tsrunOptions}`;
|
||||
|
||||
const initFileExists = await plugins.smartfile.fs.fileExists(initFile);
|
||||
|
||||
// If 00init.ts exists, run it first
|
||||
let loaderPath: string | null = null;
|
||||
if (initFileExists) {
|
||||
// Create a temporary loader file that imports both 00init.ts and the test file
|
||||
const absoluteInitFile = plugins.path.resolve(initFile);
|
||||
const absoluteTestFile = plugins.path.resolve(testFile);
|
||||
const loaderContent = `
|
||||
import '${absoluteInitFile.replace(/\\/g, '/')}';
|
||||
import '${absoluteTestFile.replace(/\\/g, '/')}';
|
||||
`;
|
||||
loaderPath = plugins.path.join(testDir, `.loader_${plugins.path.basename(testFile)}`);
|
||||
await plugins.smartfile.memory.toFs(loaderContent, loaderPath);
|
||||
runCommand = `tsrun ${loaderPath}${tsrunOptions}`;
|
||||
}
|
||||
|
||||
const execResultStreaming = await this.smartshellInstance.execStreamingSilent(runCommand);
|
||||
|
||||
// If we created a loader file, clean it up after test execution
|
||||
if (loaderPath) {
|
||||
const cleanup = () => {
|
||||
try {
|
||||
if (plugins.smartfile.fs.fileExistsSync(loaderPath)) {
|
||||
plugins.smartfile.fs.removeSync(loaderPath);
|
||||
}
|
||||
} catch (e) {
|
||||
// Ignore cleanup errors
|
||||
}
|
||||
};
|
||||
|
||||
execResultStreaming.childProcess.on('exit', cleanup);
|
||||
execResultStreaming.childProcess.on('error', cleanup);
|
||||
}
|
||||
|
||||
// Start warning timer if no timeout was specified
|
||||
let warningTimer: NodeJS.Timeout | null = null;
|
||||
if (this.timeoutSeconds === null) {
|
||||
warningTimer = setTimeout(() => {
|
||||
console.error('');
|
||||
console.error(cs('⚠️ WARNING: Test file is running for more than 1 minute', 'orange'));
|
||||
console.error(cs(` File: ${testFile}`, 'orange'));
|
||||
console.error(cs(' Consider using --timeout option to set a timeout for test files.', 'orange'));
|
||||
console.error(cs(' Example: tstest test --timeout=300 (for 5 minutes)', 'orange'));
|
||||
console.error('');
|
||||
}, 60000); // 1 minute
|
||||
}
|
||||
|
||||
// Handle timeout if specified
|
||||
if (this.timeoutSeconds !== null) {
|
||||
const timeoutMs = this.timeoutSeconds * 1000;
|
||||
let timeoutId: NodeJS.Timeout;
|
||||
|
||||
const timeoutPromise = new Promise<void>((_resolve, reject) => {
|
||||
timeoutId = setTimeout(async () => {
|
||||
// Use smartshell's terminate() to kill entire process tree
|
||||
await execResultStreaming.terminate();
|
||||
reject(new Error(`Test file timed out after ${this.timeoutSeconds} seconds`));
|
||||
}, timeoutMs);
|
||||
});
|
||||
|
||||
try {
|
||||
await Promise.race([
|
||||
tapParser.handleTapProcess(execResultStreaming.childProcess),
|
||||
timeoutPromise
|
||||
]);
|
||||
// Clear timeout if test completed successfully
|
||||
clearTimeout(timeoutId);
|
||||
} catch (error) {
|
||||
// Clear warning timer if it was set
|
||||
if (warningTimer) {
|
||||
clearTimeout(warningTimer);
|
||||
}
|
||||
// Handle timeout error
|
||||
tapParser.handleTimeout(this.timeoutSeconds);
|
||||
// Ensure entire process tree is killed if still running
|
||||
try {
|
||||
await execResultStreaming.kill(); // This kills the entire process tree with SIGKILL
|
||||
} catch (killError) {
|
||||
// Process tree might already be dead
|
||||
}
|
||||
await tapParser.evaluateFinalResult();
|
||||
}
|
||||
} else {
|
||||
await tapParser.handleTapProcess(execResultStreaming.childProcess);
|
||||
}
|
||||
|
||||
// Clear warning timer if it was set
|
||||
if (warningTimer) {
|
||||
clearTimeout(warningTimer);
|
||||
}
|
||||
|
||||
return tapParser;
|
||||
}
|
||||
}
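When a 00init.ts sits next to the test file, the Node adapter above does not run the test file directly; it writes a temporary loader and points tsrun at that instead. The generated loader is just two imports, roughly as sketched below (the paths are placeholders).

// .loader_test.node.ts — generated next to the test file, removed after the run
import '/abs/path/to/test/00init.ts';    // side-effect setup runs first
import '/abs/path/to/test/test.node.ts'; // then the actual test file
// The adapter then executes: tsrun .loader_test.node.ts [--web]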
|
211
ts/tstest.classes.runtime.parser.ts
Normal file
@@ -0,0 +1,211 @@
|
||||
/**
|
||||
* Runtime parser for test file naming convention
|
||||
* Supports: test.runtime1+runtime2.modifier.ts
|
||||
* Examples:
|
||||
* - test.node.ts
|
||||
* - test.chromium.ts
|
||||
* - test.node+chromium.ts
|
||||
* - test.deno+bun.ts
|
||||
* - test.chromium.nonci.ts
|
||||
*/
|
||||
|
||||
export type Runtime = 'node' | 'chromium' | 'deno' | 'bun';
|
||||
export type Modifier = 'nonci';
|
||||
|
||||
export interface ParsedFilename {
|
||||
baseName: string;
|
||||
runtimes: Runtime[];
|
||||
modifiers: Modifier[];
|
||||
extension: string;
|
||||
isLegacy: boolean;
|
||||
original: string;
|
||||
}
|
||||
|
||||
export interface ParserConfig {
|
||||
strictUnknownRuntime?: boolean; // default: true
|
||||
defaultRuntimes?: Runtime[]; // default: ['node']
|
||||
}
|
||||
|
||||
const KNOWN_RUNTIMES: Set<string> = new Set(['node', 'chromium', 'deno', 'bun']);
|
||||
const KNOWN_MODIFIERS: Set<string> = new Set(['nonci']);
|
||||
const VALID_EXTENSIONS: Set<string> = new Set(['ts', 'tsx', 'mts', 'cts']);
|
||||
|
||||
// Legacy mappings for backwards compatibility
|
||||
const LEGACY_RUNTIME_MAP: Record<string, Runtime[]> = {
|
||||
browser: ['chromium'],
|
||||
both: ['node', 'chromium'],
|
||||
};
|
||||
|
||||
/**
|
||||
* Parse a test filename to extract runtimes, modifiers, and detect legacy patterns
|
||||
* Algorithm: Right-to-left token analysis from the extension
|
||||
*/
|
||||
export function parseTestFilename(
|
||||
filePath: string,
|
||||
config: ParserConfig = {}
|
||||
): ParsedFilename {
|
||||
const strictUnknownRuntime = config.strictUnknownRuntime ?? true;
|
||||
const defaultRuntimes = config.defaultRuntimes ?? ['node'];
|
||||
|
||||
// Extract just the filename from the path
|
||||
const fileName = filePath.split('/').pop() || filePath;
|
||||
const original = fileName;
|
||||
|
||||
// Step 1: Extract and validate extension
|
||||
const lastDot = fileName.lastIndexOf('.');
|
||||
if (lastDot === -1) {
|
||||
throw new Error(`Invalid test file: no extension found in "${fileName}"`);
|
||||
}
|
||||
|
||||
const extension = fileName.substring(lastDot + 1);
|
||||
if (!VALID_EXTENSIONS.has(extension)) {
|
||||
throw new Error(
|
||||
`Invalid test file extension ".${extension}" in "${fileName}". ` +
|
||||
`Valid extensions: ${Array.from(VALID_EXTENSIONS).join(', ')}`
|
||||
);
|
||||
}
|
||||
|
||||
// Step 2: Split remaining basename by dots
|
||||
const withoutExtension = fileName.substring(0, lastDot);
|
||||
const tokens = withoutExtension.split('.');
|
||||
|
||||
if (tokens.length === 0) {
|
||||
throw new Error(`Invalid test file: empty basename in "${fileName}"`);
|
||||
}
|
||||
|
||||
// Step 3: Parse from right to left
|
||||
let isLegacy = false;
|
||||
const modifiers: Modifier[] = [];
|
||||
let runtimes: Runtime[] = [];
|
||||
let runtimeTokenIndex = -1;
|
||||
|
||||
// Scan from right to left
|
||||
for (let i = tokens.length - 1; i >= 0; i--) {
|
||||
const token = tokens[i];
|
||||
|
||||
// Check if this is a known modifier
|
||||
if (KNOWN_MODIFIERS.has(token)) {
|
||||
modifiers.unshift(token as Modifier);
|
||||
continue;
|
||||
}
|
||||
|
||||
// Check if this is a legacy runtime token
|
||||
if (LEGACY_RUNTIME_MAP[token]) {
|
||||
isLegacy = true;
|
||||
runtimes = LEGACY_RUNTIME_MAP[token];
|
||||
runtimeTokenIndex = i;
|
||||
break;
|
||||
}
|
||||
|
||||
// Check if this is a runtime chain (may contain + separators)
|
||||
if (token.includes('+')) {
|
||||
const runtimeCandidates = token.split('+').map(r => r.trim()).filter(Boolean);
|
||||
const validRuntimes: Runtime[] = [];
|
||||
const invalidRuntimes: string[] = [];
|
||||
|
||||
for (const candidate of runtimeCandidates) {
|
||||
if (KNOWN_RUNTIMES.has(candidate)) {
|
||||
// Dedupe: only add if not already in list
|
||||
if (!validRuntimes.includes(candidate as Runtime)) {
|
||||
validRuntimes.push(candidate as Runtime);
|
||||
}
|
||||
} else {
|
||||
invalidRuntimes.push(candidate);
|
||||
}
|
||||
}
|
||||
|
||||
if (invalidRuntimes.length > 0) {
|
||||
if (strictUnknownRuntime) {
|
||||
throw new Error(
|
||||
`Unknown runtime(s) in "${fileName}": ${invalidRuntimes.join(', ')}. ` +
|
||||
`Valid runtimes: ${Array.from(KNOWN_RUNTIMES).join(', ')}`
|
||||
);
|
||||
} else {
|
||||
console.warn(
|
||||
`⚠️ Warning: Unknown runtime(s) in "${fileName}": ${invalidRuntimes.join(', ')}. ` +
|
||||
`Defaulting to: ${defaultRuntimes.join('+')}`
|
||||
);
|
||||
runtimes = [...defaultRuntimes];
|
||||
runtimeTokenIndex = i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (validRuntimes.length > 0) {
|
||||
runtimes = validRuntimes;
|
||||
runtimeTokenIndex = i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// Check if this is a single runtime token
|
||||
if (KNOWN_RUNTIMES.has(token)) {
|
||||
runtimes = [token as Runtime];
|
||||
runtimeTokenIndex = i;
|
||||
break;
|
||||
}
|
||||
|
||||
// If we've scanned past modifiers and haven't found a runtime, stop looking
|
||||
if (modifiers.length > 0) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// Step 4: Determine base name
|
||||
// Everything before the runtime token (if found) is the base name
|
||||
const baseNameTokens = runtimeTokenIndex >= 0 ? tokens.slice(0, runtimeTokenIndex) : tokens;
|
||||
const baseName = baseNameTokens.join('.');
|
||||
|
||||
// Step 5: Apply defaults if no runtime was detected
|
||||
if (runtimes.length === 0) {
|
||||
runtimes = [...defaultRuntimes];
|
||||
}
|
||||
|
||||
return {
|
||||
baseName: baseName || 'test',
|
||||
runtimes,
|
||||
modifiers,
|
||||
extension,
|
||||
isLegacy,
|
||||
original,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a filename uses legacy naming convention
|
||||
*/
|
||||
export function isLegacyFilename(fileName: string): boolean {
|
||||
const tokens = fileName.split('.');
|
||||
for (const token of tokens) {
|
||||
if (LEGACY_RUNTIME_MAP[token]) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the suggested new filename for a legacy filename
|
||||
*/
|
||||
export function getLegacyMigrationTarget(fileName: string): string | null {
|
||||
const parsed = parseTestFilename(fileName, { strictUnknownRuntime: false });
|
||||
|
||||
if (!parsed.isLegacy) {
|
||||
return null;
|
||||
}
|
||||
|
||||
// Reconstruct filename with new naming
|
||||
const parts = [parsed.baseName];
|
||||
|
||||
if (parsed.runtimes.length > 0) {
|
||||
parts.push(parsed.runtimes.join('+'));
|
||||
}
|
||||
|
||||
if (parsed.modifiers.length > 0) {
|
||||
parts.push(...parsed.modifiers);
|
||||
}
|
||||
|
||||
parts.push(parsed.extension);
|
||||
|
||||
return parts.join('.');
|
||||
}
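A few worked examples of the parser above; the expected values follow directly from the parsing rules, not from a test suite in this diff.

import { parseTestFilename, getLegacyMigrationTarget } from './tstest.classes.runtime.parser.js';

parseTestFilename('test.node+chromium.nonci.ts');
// → { baseName: 'test', runtimes: ['node', 'chromium'], modifiers: ['nonci'],
//     extension: 'ts', isLegacy: false, original: 'test.node+chromium.nonci.ts' }

parseTestFilename('test.something.ts');
// → no runtime token found: runtimes defaults to ['node'],
//   and 'something' stays part of the base name ('test.something')

getLegacyMigrationTarget('test.both.nonci.ts'); // → 'test.node+chromium.nonci.ts'
getLegacyMigrationTarget('test.node.ts');       // → null (not a legacy name)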
|
@@ -1,64 +1,47 @@
|
||||
// ============
|
||||
// combines different tap test files to an overall result
|
||||
// ============
|
||||
import * as plugins from './tstest.plugins';
|
||||
import { coloredString as cs } from '@pushrocks/consolecolor';
|
||||
import * as plugins from './tstest.plugins.js';
|
||||
import { coloredString as cs } from '@push.rocks/consolecolor';
|
||||
|
||||
import { TapParser } from './tstest.classes.tap.parser';
|
||||
import * as logPrefixes from './tstest.logprefixes';
|
||||
import { TapParser } from './tstest.classes.tap.parser.js';
|
||||
import * as logPrefixes from './tstest.logprefixes.js';
|
||||
import { TsTestLogger } from './tstest.logging.js';
|
||||
|
||||
export class TapCombinator {
|
||||
tapParserStore: TapParser[] = [];
|
||||
skippedFiles: string[] = [];
|
||||
private logger: TsTestLogger;
|
||||
|
||||
constructor(logger: TsTestLogger) {
|
||||
this.logger = logger;
|
||||
}
|
||||
|
||||
addTapParser(tapParserArg: TapParser) {
|
||||
this.tapParserStore.push(tapParserArg);
|
||||
}
|
||||
|
||||
addSkippedFile(filename: string) {
|
||||
this.skippedFiles.push(filename);
|
||||
}
|
||||
|
||||
evaluate() {
|
||||
console.log(
|
||||
`${logPrefixes.TsTestPrefix} RESULTS FOR ${this.tapParserStore.length} TESTFILE(S):`
|
||||
);
|
||||
|
||||
    let failGlobal = false; // determine whether tstest should fail
|
||||
// Call the logger's summary method with skipped files
|
||||
this.logger.summary(this.skippedFiles);
|
||||
|
||||
// Check for failures
|
||||
let failGlobal = false;
|
||||
for (const tapParser of this.tapParserStore) {
|
||||
if (!tapParser.expectedTests) {
|
||||
if (!tapParser.expectedTests ||
|
||||
tapParser.expectedTests !== tapParser.receivedTests ||
|
||||
tapParser.getErrorTests().length > 0) {
|
||||
failGlobal = true;
|
||||
let overviewString =
|
||||
logPrefixes.TsTestPrefix +
|
||||
cs(` ${tapParser.fileName} ${plugins.figures.cross}`, 'red') +
|
||||
` ${plugins.figures.pointer} ` +
|
||||
`does not specify tests!`;
|
||||
console.log(overviewString);
|
||||
} else if (tapParser.expectedTests !== tapParser.receivedTests) {
|
||||
failGlobal = true;
|
||||
let overviewString =
|
||||
logPrefixes.TsTestPrefix +
|
||||
cs(` ${tapParser.fileName} ${plugins.figures.cross}`, 'red') +
|
||||
` ${plugins.figures.pointer} ` +
|
||||
tapParser.getTestOverviewAsString() +
|
||||
`did not execute all specified tests!`;
|
||||
console.log(overviewString);
|
||||
} else if (tapParser.getErrorTests().length === 0) {
|
||||
let overviewString =
|
||||
logPrefixes.TsTestPrefix +
|
||||
cs(` ${tapParser.fileName} ${plugins.figures.tick}`, 'green') +
|
||||
` ${plugins.figures.pointer} ` +
|
||||
tapParser.getTestOverviewAsString();
|
||||
console.log(overviewString);
|
||||
} else {
|
||||
failGlobal = true;
|
||||
let overviewString =
|
||||
logPrefixes.TsTestPrefix +
|
||||
cs(` ${tapParser.fileName} ${plugins.figures.cross}`, 'red') +
|
||||
` ${plugins.figures.pointer} ` +
|
||||
tapParser.getTestOverviewAsString();
|
||||
console.log(overviewString);
|
||||
break;
|
||||
}
|
||||
}
|
||||
console.log(cs(plugins.figures.hamburger.repeat(48), 'cyan'));
|
||||
if (!failGlobal) {
|
||||
console.log(cs('FINAL RESULT: SUCCESS!', 'green'));
|
||||
} else {
|
||||
console.log(cs('FINAL RESULT: FAIL!', 'red'));
|
||||
|
||||
// Exit with error code if tests failed
|
||||
if (failGlobal) {
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
|
@@ -1,27 +1,67 @@
import { ChildProcess } from 'child_process';
import { coloredString as cs } from '@pushrocks/consolecolor';
import { coloredString as cs } from '@push.rocks/consolecolor';

// ============
// combines different tap test files to an overall result
// ============
import * as plugins from './tstest.plugins';
import { TapTestResult } from './tstest.classes.tap.testresult';
import * as logPrefixes from './tstest.logprefixes';
import * as plugins from './tstest.plugins.js';
import { TapTestResult } from './tstest.classes.tap.testresult.js';
import * as logPrefixes from './tstest.logprefixes.js';
import { TsTestLogger } from './tstest.logging.js';
import { ProtocolParser } from '../dist_ts_tapbundle_protocol/index.js';
import type { IProtocolMessage, ITestResult, IPlanLine, IErrorBlock, ITestEvent } from '../dist_ts_tapbundle_protocol/index.js';

export class TapParser {
  testStore: TapTestResult[] = [];

  expectedTestsRegex = /([0-9]*)\.\.([0-9]*)$/;
  expectedTests: number;
  receivedTests: number;
  expectedTests: number = 0;
  receivedTests: number = 0;

  testStatusRegex = /(ok|not\sok)\s([0-9]+)\s-\s(.*)\s#\stime=(.*)ms$/;
  activeTapTestResult: TapTestResult;

  private logger: TsTestLogger;
  private protocolParser: ProtocolParser;
  private protocolVersion: string | null = null;
  private startTime: number;

  /**
   * the constructor for TapParser
   */
  constructor(public fileName: string) {}
  constructor(public fileName: string, logger?: TsTestLogger) {
    this.logger = logger;
    this.protocolParser = new ProtocolParser();
    this.startTime = Date.now();
  }

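  // Note on the two regexes above (sample lines are illustrative, not taken from the diff):
  // - expectedTestsRegex matches a TAP plan line such as `1..5`, which announces how many tests to expect.
  // - testStatusRegex matches a TAP result line such as `ok 1 - my test # time=12ms`,
  //   capturing the ok/not ok status, the test id, the description, and the duration in ms.
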
  /**
   * Handle test file timeout
   */
  public handleTimeout(timeoutSeconds: number) {
    // If no tests have been defined yet, set expected to 1
    if (this.expectedTests === 0) {
      this.expectedTests = 1;
    }

    // Create a fake failing test result for timeout
    this._getNewTapTestResult();
    this.activeTapTestResult.testOk = false;
    this.activeTapTestResult.testSettled = true;
    this.testStore.push(this.activeTapTestResult);

    // Log the timeout error
    if (this.logger) {
      // First log the test result
      this.logger.testResult(
        `Test file timeout`,
        false,
        timeoutSeconds * 1000,
        `Error: Test file exceeded timeout of ${timeoutSeconds} seconds`
      );
      this.logger.testErrorDetails(`Test execution was terminated after ${timeoutSeconds} seconds`);
    }

    // Don't call evaluateFinalResult here, let the caller handle it
  }

  private _getNewTapTestResult() {
    this.activeTapTestResult = new TapTestResult(this.testStore.length + 1);
@@ -36,71 +76,299 @@ export class TapParser {
|
||||
logLineArray.pop();
|
||||
}
|
||||
|
||||
// lets parse the log information
|
||||
// Process each line through the protocol parser
|
||||
for (const logLine of logLineArray) {
|
||||
let logLineIsTapProtocol = false;
|
||||
if (!this.expectedTests && this.expectedTestsRegex.test(logLine)) {
|
||||
logLineIsTapProtocol = true;
|
||||
const regexResult = this.expectedTestsRegex.exec(logLine);
|
||||
this.expectedTests = parseInt(regexResult[2]);
|
||||
console.log(
|
||||
`${logPrefixes.TapPrefix} ${cs(`Expecting ${this.expectedTests} tests!`, 'blue')}`
|
||||
);
|
||||
|
||||
// initiating first TapResult
|
||||
this._getNewTapTestResult();
|
||||
} else if (this.testStatusRegex.test(logLine)) {
|
||||
logLineIsTapProtocol = true;
|
||||
const regexResult = this.testStatusRegex.exec(logLine);
|
||||
const testId = parseInt(regexResult[2]);
|
||||
const testOk = (() => {
|
||||
if (regexResult[1] === 'ok') {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
})();
|
||||
|
||||
const testSubject = regexResult[3];
|
||||
const testDuration = parseInt(regexResult[4]);
|
||||
|
||||
// test for protocol error
|
||||
if (testId !== this.activeTapTestResult.id) {
|
||||
console.log(
|
||||
`${logPrefixes.TapErrorPrefix} Something is strange! Test Ids are not equal!`
|
||||
);
|
||||
const messages = this.protocolParser.parseLine(logLine);
|
||||
|
||||
if (messages.length > 0) {
|
||||
// Handle protocol messages
|
||||
for (const message of messages) {
|
||||
this._handleProtocolMessage(message, logLine);
|
||||
}
|
||||
this.activeTapTestResult.setTestResult(testOk);
|
||||
|
||||
if (testOk) {
|
||||
console.log(
|
||||
logPrefixes.TapPrefix,
|
||||
`${cs(`T${testId} ${plugins.figures.tick}`, 'green')} ${plugins.figures.arrowRight} ` +
|
||||
cs(testSubject, 'blue') +
|
||||
` | ${cs(`${testDuration} ms`, 'orange')}`
|
||||
);
|
||||
} else {
|
||||
console.log(
|
||||
logPrefixes.TapPrefix,
|
||||
`${cs(`T${testId} ${plugins.figures.cross}`, 'red')} ${plugins.figures.arrowRight} ` +
|
||||
cs(testSubject, 'blue') +
|
||||
` | ${cs(`${testDuration} ms`, 'orange')}`
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
if (!logLineIsTapProtocol) {
|
||||
} else {
|
||||
// Not a protocol message, handle as console output
|
||||
if (this.activeTapTestResult) {
|
||||
this.activeTapTestResult.addLogLine(logLine);
|
||||
}
|
||||
console.log(logLine);
|
||||
|
||||
// Check for snapshot communication (legacy)
|
||||
const snapshotMatch = logLine.match(/###SNAPSHOT###(.+)###SNAPSHOT###/);
|
||||
if (snapshotMatch) {
|
||||
const base64Data = snapshotMatch[1];
|
||||
try {
|
||||
const snapshotData = JSON.parse(Buffer.from(base64Data, 'base64').toString());
|
||||
this.handleSnapshot(snapshotData);
|
||||
} catch (error: any) {
|
||||
if (this.logger) {
|
||||
this.logger.testConsoleOutput(`Error parsing snapshot data: ${error.message}`);
|
||||
}
|
||||
}
|
||||
} else if (this.logger) {
|
||||
// This is console output from the test file
|
||||
this.logger.testConsoleOutput(logLine);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (this.activeTapTestResult && this.activeTapTestResult.testSettled) {
|
||||
private _handleProtocolMessage(message: IProtocolMessage, originalLine: string) {
|
||||
switch (message.type) {
|
||||
case 'protocol':
|
||||
this.protocolVersion = message.content.version;
|
||||
if (this.logger) {
|
||||
this.logger.tapOutput(`Protocol version: ${this.protocolVersion}`);
|
||||
}
|
||||
break;
|
||||
|
||||
case 'version':
|
||||
// TAP version, we can ignore this
|
||||
break;
|
||||
|
||||
case 'plan':
|
||||
const plan = message.content as IPlanLine;
|
||||
this.expectedTests = plan.end - plan.start + 1;
|
||||
if (plan.skipAll) {
|
||||
if (this.logger) {
|
||||
this.logger.tapOutput(`Skipping all tests: ${plan.skipAll}`);
|
||||
}
|
||||
} else {
|
||||
if (this.logger) {
|
||||
this.logger.tapOutput(`Expecting ${this.expectedTests} tests!`);
|
||||
}
|
||||
}
|
||||
// Initialize first TapResult
|
||||
this._getNewTapTestResult();
|
||||
break;
|
||||
|
||||
case 'test':
|
||||
const testResult = message.content as ITestResult;
|
||||
|
||||
// Update active test result
|
||||
this.activeTapTestResult.setTestResult(testResult.ok);
|
||||
|
||||
// Extract test duration from metadata
|
||||
let testDuration = 0;
|
||||
if (testResult.metadata?.time) {
|
||||
testDuration = testResult.metadata.time;
|
||||
}
|
||||
|
||||
// Log test result
|
||||
if (this.logger) {
|
||||
if (testResult.ok) {
|
||||
this.logger.testResult(testResult.description, true, testDuration);
|
||||
} else {
|
||||
this.logger.testResult(testResult.description, false, testDuration);
|
||||
|
||||
// If there's error metadata, show it
|
||||
if (testResult.metadata?.error) {
|
||||
const error = testResult.metadata.error;
|
||||
let errorDetails = error.message;
|
||||
if (error.stack) {
|
||||
errorDetails = error.stack;
|
||||
}
|
||||
this.logger.testErrorDetails(errorDetails);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Handle directives (skip/todo)
|
||||
if (testResult.directive) {
|
||||
if (this.logger) {
|
||||
if (testResult.directive.type === 'skip') {
|
||||
this.logger.testConsoleOutput(`Test skipped: ${testResult.directive.reason || 'No reason given'}`);
|
||||
} else if (testResult.directive.type === 'todo') {
|
||||
this.logger.testConsoleOutput(`Test todo: ${testResult.directive.reason || 'No reason given'}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Mark test as settled and move to next
|
||||
this.activeTapTestResult.testSettled = true;
|
||||
this.testStore.push(this.activeTapTestResult);
|
||||
this._getNewTapTestResult();
|
||||
break;
|
||||
|
||||
case 'comment':
|
||||
if (this.logger) {
|
||||
// Check if it's a pretask comment
|
||||
const pretaskMatch = message.content.match(/^Pretask -> (.+): Success\.$/);
|
||||
if (pretaskMatch) {
|
||||
this.logger.tapOutput(message.content);
|
||||
} else {
|
||||
this.logger.testConsoleOutput(message.content);
|
||||
}
|
||||
}
|
||||
break;
|
||||
|
||||
case 'bailout':
|
||||
if (this.logger) {
|
||||
this.logger.error(`Bail out! ${message.content}`);
|
||||
}
|
||||
break;
|
||||
|
||||
case 'error':
|
||||
const errorBlock = message.content as IErrorBlock;
|
||||
if (this.logger && errorBlock.error) {
|
||||
let errorDetails = errorBlock.error.message;
|
||||
if (errorBlock.error.stack) {
|
||||
errorDetails = errorBlock.error.stack;
|
||||
}
|
||||
this.logger.testErrorDetails(errorDetails);
|
||||
}
|
||||
break;
|
||||
|
||||
case 'snapshot':
|
||||
// Handle new protocol snapshot format
|
||||
const snapshot = message.content;
|
||||
this.handleSnapshot({
|
||||
path: snapshot.name,
|
||||
content: typeof snapshot.content === 'string' ? snapshot.content : JSON.stringify(snapshot.content),
|
||||
action: 'compare' // Default action
|
||||
});
|
||||
break;
|
||||
|
||||
case 'event':
|
||||
const event = message.content as ITestEvent;
|
||||
this._handleTestEvent(event);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
private _handleTestEvent(event: ITestEvent) {
|
||||
if (!this.logger) return;
|
||||
|
||||
switch (event.eventType) {
|
||||
case 'test:queued':
|
||||
// We can track queued tests if needed
|
||||
break;
|
||||
|
||||
case 'test:started':
|
||||
this.logger.testConsoleOutput(cs(`Test starting: ${event.data.description}`, 'cyan'));
|
||||
if (event.data.retry) {
|
||||
this.logger.testConsoleOutput(cs(` Retry attempt ${event.data.retry}`, 'orange'));
|
||||
}
|
||||
break;
|
||||
|
||||
case 'test:progress':
|
||||
if (event.data.progress !== undefined) {
|
||||
this.logger.testConsoleOutput(cs(` Progress: ${event.data.progress}%`, 'cyan'));
|
||||
}
|
||||
break;
|
||||
|
||||
case 'test:completed':
|
||||
// Test completion is already handled by the test result
|
||||
// This event provides additional timing info if needed
|
||||
break;
|
||||
|
||||
case 'suite:started':
|
||||
this.logger.testConsoleOutput(cs(`\nSuite: ${event.data.suiteName}`, 'blue'));
|
||||
break;
|
||||
|
||||
case 'suite:completed':
|
||||
this.logger.testConsoleOutput(cs(`Suite completed: ${event.data.suiteName}\n`, 'blue'));
|
||||
break;
|
||||
|
||||
case 'hook:started':
|
||||
this.logger.testConsoleOutput(cs(` Hook: ${event.data.hookName}`, 'cyan'));
|
||||
break;
|
||||
|
||||
case 'hook:completed':
|
||||
// Silent unless there's an error
|
||||
if (event.data.error) {
|
||||
this.logger.testConsoleOutput(cs(` Hook failed: ${event.data.hookName}`, 'red'));
|
||||
}
|
||||
break;
|
||||
|
||||
case 'assertion:failed':
|
||||
// Enhanced assertion failure with diff
|
||||
if (event.data.error) {
|
||||
this._displayAssertionError(event.data.error);
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
private _displayAssertionError(error: any) {
|
||||
if (!this.logger) return;
|
||||
|
||||
// Display error message
|
||||
if (error.message) {
|
||||
this.logger.testErrorDetails(error.message);
|
||||
}
|
||||
|
||||
// Display visual diff if available
|
||||
if (error.diff) {
|
||||
this._displayDiff(error.diff, error.expected, error.actual);
|
||||
}
|
||||
}
|
||||
|
||||
private _displayDiff(diff: any, expected: any, actual: any) {
|
||||
if (!this.logger) return;
|
||||
|
||||
this.logger.testConsoleOutput(cs('\n Diff:', 'cyan'));
|
||||
|
||||
switch (diff.type) {
|
||||
case 'string':
|
||||
this._displayStringDiff(diff.changes);
|
||||
break;
|
||||
|
||||
case 'object':
|
||||
this._displayObjectDiff(diff.changes, expected, actual);
|
||||
break;
|
||||
|
||||
case 'array':
|
||||
this._displayArrayDiff(diff.changes, expected, actual);
|
||||
break;
|
||||
|
||||
case 'primitive':
|
||||
this._displayPrimitiveDiff(diff.changes);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
private _displayStringDiff(changes: any[]) {
|
||||
for (const change of changes) {
|
||||
const linePrefix = ` Line ${change.line + 1}: `;
|
||||
if (change.type === 'add') {
|
||||
this.logger.testConsoleOutput(cs(`${linePrefix}+ ${change.content}`, 'green'));
|
||||
} else if (change.type === 'remove') {
|
||||
this.logger.testConsoleOutput(cs(`${linePrefix}- ${change.content}`, 'red'));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private _displayObjectDiff(changes: any[], expected: any, actual: any) {
|
||||
this.logger.testConsoleOutput(cs(' Expected:', 'red'));
|
||||
this.logger.testConsoleOutput(` ${JSON.stringify(expected, null, 2)}`);
|
||||
this.logger.testConsoleOutput(cs(' Actual:', 'green'));
|
||||
this.logger.testConsoleOutput(` ${JSON.stringify(actual, null, 2)}`);
|
||||
|
||||
this.logger.testConsoleOutput(cs('\n Changes:', 'cyan'));
|
||||
for (const change of changes) {
|
||||
const path = change.path.join('.');
|
||||
if (change.type === 'add') {
|
||||
this.logger.testConsoleOutput(cs(` + ${path}: ${JSON.stringify(change.newValue)}`, 'green'));
|
||||
} else if (change.type === 'remove') {
|
||||
this.logger.testConsoleOutput(cs(` - ${path}: ${JSON.stringify(change.oldValue)}`, 'red'));
|
||||
} else if (change.type === 'modify') {
|
||||
this.logger.testConsoleOutput(cs(` ~ ${path}:`, 'cyan'));
|
||||
this.logger.testConsoleOutput(cs(` - ${JSON.stringify(change.oldValue)}`, 'red'));
|
||||
this.logger.testConsoleOutput(cs(` + ${JSON.stringify(change.newValue)}`, 'green'));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private _displayArrayDiff(changes: any[], expected: any[], actual: any[]) {
|
||||
this._displayObjectDiff(changes, expected, actual);
|
||||
}
|
||||
|
||||
private _displayPrimitiveDiff(changes: any[]) {
|
||||
const change = changes[0];
|
||||
if (change) {
|
||||
this.logger.testConsoleOutput(cs(` Expected: ${JSON.stringify(change.oldValue)}`, 'red'));
|
||||
this.logger.testConsoleOutput(cs(` Actual: ${JSON.stringify(change.newValue)}`, 'green'));
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* returns all tests that are not completed
|
||||
@@ -149,7 +417,7 @@ export class TapParser {
      this._processLog(data);
    });
    childProcessArg.on('exit', async () => {
      await this._evaluateResult();
      await this.evaluateFinalResult();
      done.resolve();
    });
    await done.promise;
@@ -157,46 +425,96 @@ export class TapParser {

  public async handleTapLog(tapLog: string) {
    this._processLog(tapLog);
    await this._evaluateResult();
  }

/**
|
||||
* Handle snapshot data from the test
|
||||
*/
|
||||
private async handleSnapshot(snapshotData: { path: string; content: string; action: string }) {
|
||||
try {
|
||||
const smartfile = await import('@push.rocks/smartfile');
|
||||
|
||||
if (snapshotData.action === 'compare') {
|
||||
// Try to read existing snapshot
|
||||
try {
|
||||
const existingSnapshot = await smartfile.fs.toStringSync(snapshotData.path);
|
||||
if (existingSnapshot !== snapshotData.content) {
|
||||
// Snapshot mismatch
|
||||
if (this.logger) {
|
||||
this.logger.testConsoleOutput(`Snapshot mismatch: ${snapshotData.path}`);
|
||||
this.logger.testConsoleOutput(`Expected:\n${existingSnapshot}`);
|
||||
this.logger.testConsoleOutput(`Received:\n${snapshotData.content}`);
|
||||
}
|
||||
// TODO: Communicate failure back to the test
|
||||
} else {
|
||||
if (this.logger) {
|
||||
this.logger.testConsoleOutput(`Snapshot matched: ${snapshotData.path}`);
|
||||
}
|
||||
}
|
||||
} catch (error: any) {
|
||||
if (error.code === 'ENOENT') {
|
||||
// Snapshot doesn't exist, create it
|
||||
const dirPath = snapshotData.path.substring(0, snapshotData.path.lastIndexOf('/'));
|
||||
await smartfile.fs.ensureDir(dirPath);
|
||||
await smartfile.memory.toFs(snapshotData.content, snapshotData.path);
|
||||
if (this.logger) {
|
||||
this.logger.testConsoleOutput(`Snapshot created: ${snapshotData.path}`);
|
||||
}
|
||||
} else {
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
} else if (snapshotData.action === 'update') {
|
||||
// Update snapshot
|
||||
const dirPath = snapshotData.path.substring(0, snapshotData.path.lastIndexOf('/'));
|
||||
await smartfile.fs.ensureDir(dirPath);
|
||||
await smartfile.memory.toFs(snapshotData.content, snapshotData.path);
|
||||
if (this.logger) {
|
||||
this.logger.testConsoleOutput(`Snapshot updated: ${snapshotData.path}`);
|
||||
}
|
||||
}
|
||||
} catch (error: any) {
|
||||
if (this.logger) {
|
||||
this.logger.testConsoleOutput(`Error handling snapshot: ${error.message}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
  private async _evaluateResult() {
  public async evaluateFinalResult() {
    this.receivedTests = this.testStore.length;
    const duration = Date.now() - this.startTime;

    // check whether all tests ran
    if (this.expectedTests === this.receivedTests) {
      console.log(
        `${logPrefixes.TapPrefix} ${cs(
          `${this.receivedTests} out of ${this.expectedTests} Tests completed!`,
          'green'
        )}`
      );
      if (this.logger) {
        this.logger.tapOutput(`${this.receivedTests} out of ${this.expectedTests} Tests completed!`);
      }
    } else {
      console.log(
        `${logPrefixes.TapErrorPrefix} ${cs(
          `Only ${this.receivedTests} out of ${this.expectedTests} completed!`,
          'red'
        )}`
      );
      if (this.logger) {
        this.logger.error(`Only ${this.receivedTests} out of ${this.expectedTests} completed!`);
      }
    }
    if (!this.expectedTests) {
      console.log(cs('Error: No tests were defined. Therefore the testfile failed!', 'red'));
    if (!this.expectedTests && this.receivedTests === 0) {
      if (this.logger) {
        this.logger.error('No tests were defined. Therefore the testfile failed!');
        this.logger.testFileEnd(0, 1, duration); // Count as 1 failure
      }
    } else if (this.expectedTests !== this.receivedTests) {
      console.log(
        cs(
          'Error: The amount of received tests and expectedTests is unequal! Therefore the testfile failed',
          'red'
        )
      );
      if (this.logger) {
        this.logger.error('The amount of received tests and expectedTests is unequal! Therefore the testfile failed');
        const errorCount = this.getErrorTests().length || 1; // At least 1 error
        this.logger.testFileEnd(this.receivedTests - errorCount, errorCount, duration);
      }
    } else if (this.getErrorTests().length === 0) {
      console.log(`${logPrefixes.TapPrefix} ${cs(`All tests are successful!!!`, 'green')}`);
      if (this.logger) {
        this.logger.tapOutput('All tests are successful!!!');
        this.logger.testFileEnd(this.receivedTests, 0, duration);
      }
    } else {
      console.log(
        `${logPrefixes.TapPrefix} ${cs(
          `${this.getErrorTests().length} tests threw an error!!!`,
          'red'
        )}`
      );
      if (this.logger) {
        this.logger.tapOutput(`${this.getErrorTests().length} tests threw an error!!!`, true);
        this.logger.testFileEnd(this.receivedTests - this.getErrorTests().length, this.getErrorTests().length, duration);
      }
    }
  }
}
}

@@ -1,7 +1,7 @@
// ============
// combines different tap test files to an overall result
// ============
import * as plugins from './tstest.plugins';
import * as plugins from './tstest.plugins.js';

export class TapTestResult {
  testLogBuffer = Buffer.from('');

@@ -1,11 +1,12 @@
import * as plugins from './tstest.plugins';
import * as paths from './tstest.paths';
import { Smartfile } from '@pushrocks/smartfile';
import * as plugins from './tstest.plugins.js';
import * as paths from './tstest.paths.js';
import { SmartFile } from '@push.rocks/smartfile';
import { TestExecutionMode } from './index.js';

// tap related stuff
import { TapCombinator } from './tstest.classes.tap.combinator';
import { TapParser } from './tstest.classes.tap.parser';
import { TapTestResult } from './tstest.classes.tap.testresult';
import { TapCombinator } from './tstest.classes.tap.combinator.js';
import { TapParser } from './tstest.classes.tap.parser.js';
import { TapTestResult } from './tstest.classes.tap.testresult.js';

export class TestDirectory {
|
||||
/**
|
||||
@@ -14,44 +15,127 @@ export class TestDirectory {
|
||||
cwd: string;
|
||||
|
||||
/**
|
||||
* the relative location of the test dir
|
||||
* the test path or pattern
|
||||
*/
|
||||
relativePath: string;
|
||||
testPath: string;
|
||||
|
||||
/**
|
||||
* the absolute path of the test dir
|
||||
* the execution mode
|
||||
*/
|
||||
absolutePath: string;
|
||||
executionMode: TestExecutionMode;
|
||||
|
||||
/**
|
||||
* an array of Smartfiles
|
||||
*/
|
||||
testfileArray: Smartfile[] = [];
|
||||
testfileArray: SmartFile[] = [];
|
||||
|
||||
/**
|
||||
* the constructor for TestDirectory
|
||||
* tell it the path
|
||||
* @param pathToTestDirectory
|
||||
* @param cwdArg - the current working directory
|
||||
* @param testPathArg - the test path/pattern
|
||||
* @param executionModeArg - the execution mode
|
||||
*/
|
||||
constructor(cwdArg: string, relativePathToTestDirectory: string) {
|
||||
constructor(cwdArg: string, testPathArg: string, executionModeArg: TestExecutionMode) {
|
||||
this.cwd = cwdArg;
|
||||
this.relativePath = relativePathToTestDirectory;
|
||||
this.testPath = testPathArg;
|
||||
this.executionMode = executionModeArg;
|
||||
}
|
||||
|
||||
private async _init() {
|
||||
this.testfileArray = await plugins.smartfile.fs.fileTreeToObject(
|
||||
plugins.path.join(this.cwd, this.relativePath),
|
||||
'test*.ts'
|
||||
);
|
||||
switch (this.executionMode) {
|
||||
case TestExecutionMode.FILE:
|
||||
// Single file mode
|
||||
const filePath = plugins.path.isAbsolute(this.testPath)
|
||||
? this.testPath
|
||||
: plugins.path.join(this.cwd, this.testPath);
|
||||
|
||||
if (await plugins.smartfile.fs.fileExists(filePath)) {
|
||||
this.testfileArray = [await plugins.smartfile.SmartFile.fromFilePath(filePath)];
|
||||
} else {
|
||||
throw new Error(`Test file not found: ${filePath}`);
|
||||
}
|
||||
break;
|
||||
|
||||
case TestExecutionMode.GLOB:
|
||||
// Glob pattern mode - use listFileTree which supports glob patterns
|
||||
const globPattern = this.testPath;
|
||||
const matchedFiles = await plugins.smartfile.fs.listFileTree(this.cwd, globPattern);
|
||||
|
||||
this.testfileArray = await Promise.all(
|
||||
matchedFiles.map(async (filePath) => {
|
||||
const absolutePath = plugins.path.isAbsolute(filePath)
|
||||
? filePath
|
||||
: plugins.path.join(this.cwd, filePath);
|
||||
return await plugins.smartfile.SmartFile.fromFilePath(absolutePath);
|
||||
})
|
||||
);
|
||||
break;
|
||||
|
||||
case TestExecutionMode.DIRECTORY:
|
||||
// Directory mode - now recursive with ** pattern
|
||||
const dirPath = plugins.path.join(this.cwd, this.testPath);
|
||||
const testPattern = '**/test*.ts';
|
||||
|
||||
const testFiles = await plugins.smartfile.fs.listFileTree(dirPath, testPattern);
|
||||
|
||||
this.testfileArray = await Promise.all(
|
||||
testFiles.map(async (filePath) => {
|
||||
const absolutePath = plugins.path.isAbsolute(filePath)
|
||||
? filePath
|
||||
: plugins.path.join(dirPath, filePath);
|
||||
return await plugins.smartfile.SmartFile.fromFilePath(absolutePath);
|
||||
})
|
||||
);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
async getTestFilePathArray() {
|
||||
await this._init();
|
||||
const testFilePaths: string[] = [];
|
||||
for (const testFile of this.testfileArray) {
|
||||
const filePath = plugins.path.join(this.relativePath, testFile.path);
|
||||
testFilePaths.push(filePath);
|
||||
// Use the path directly from the SmartFile
|
||||
testFilePaths.push(testFile.path);
|
||||
}
|
||||
return testFilePaths;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get test files organized by parallel execution groups
|
||||
* @returns An object with grouped tests
|
||||
*/
|
||||
async getTestFileGroups(): Promise<{
|
||||
serial: string[];
|
||||
parallelGroups: { [groupName: string]: string[] };
|
||||
}> {
|
||||
await this._init();
|
||||
|
||||
const result = {
|
||||
serial: [] as string[],
|
||||
parallelGroups: {} as { [groupName: string]: string[] }
|
||||
};
|
||||
|
||||
for (const testFile of this.testfileArray) {
|
||||
const filePath = testFile.path;
|
||||
const fileName = plugins.path.basename(filePath);
|
||||
|
||||
// Check if file has parallel group pattern
|
||||
const parallelMatch = fileName.match(/\.para__(\d+)\./);
|
||||
|
||||
if (parallelMatch) {
|
||||
const groupNumber = parallelMatch[1];
|
||||
const groupName = `para__${groupNumber}`;
|
||||
|
||||
if (!result.parallelGroups[groupName]) {
|
||||
result.parallelGroups[groupName] = [];
|
||||
}
|
||||
result.parallelGroups[groupName].push(filePath);
|
||||
} else {
|
||||
// File runs serially
|
||||
result.serial.push(filePath);
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
}
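
// Example of the grouping convention implemented above (hypothetical file names):
//   test/test.basics.ts           -> serial
//   test/test.db.para__1.ts       -> parallelGroups['para__1']
//   test/test.cache.para__1.ts    -> parallelGroups['para__1'] (runs concurrently with the other para__1 file)
// Any file whose name does not contain `.para__<number>.` falls back to the serial list.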
@@ -1,15 +1,31 @@
|
||||
import * as plugins from './tstest.plugins';
|
||||
import * as paths from './tstest.paths';
|
||||
import * as logPrefixes from './tstest.logprefixes';
|
||||
import * as plugins from './tstest.plugins.js';
|
||||
import * as paths from './tstest.paths.js';
|
||||
|
||||
import { coloredString as cs } from '@pushrocks/consolecolor';
|
||||
import { coloredString as cs } from '@push.rocks/consolecolor';
|
||||
|
||||
import { TestDirectory } from './tstest.classes.testdirectory';
|
||||
import { TapCombinator } from './tstest.classes.tap.combinator';
|
||||
import { TapParser } from './tstest.classes.tap.parser';
|
||||
import { TestDirectory } from './tstest.classes.testdirectory.js';
|
||||
import { TapCombinator } from './tstest.classes.tap.combinator.js';
|
||||
import { TapParser } from './tstest.classes.tap.parser.js';
|
||||
import { TestExecutionMode } from './index.js';
|
||||
import { TsTestLogger } from './tstest.logging.js';
|
||||
import type { LogOptions } from './tstest.logging.js';
|
||||
|
||||
// Runtime adapters
|
||||
import { parseTestFilename } from './tstest.classes.runtime.parser.js';
|
||||
import { RuntimeAdapterRegistry } from './tstest.classes.runtime.adapter.js';
|
||||
import { NodeRuntimeAdapter } from './tstest.classes.runtime.node.js';
|
||||
import { ChromiumRuntimeAdapter } from './tstest.classes.runtime.chromium.js';
|
||||
import { DenoRuntimeAdapter } from './tstest.classes.runtime.deno.js';
|
||||
import { BunRuntimeAdapter } from './tstest.classes.runtime.bun.js';
|
||||
|
||||
export class TsTest {
|
||||
public testDir: TestDirectory;
|
||||
public executionMode: TestExecutionMode;
|
||||
public logger: TsTestLogger;
|
||||
public filterTags: string[];
|
||||
public startFromFile: number | null;
|
||||
public stopAtFile: number | null;
|
||||
public timeoutSeconds: number | null;
|
||||
|
||||
public smartshellInstance = new plugins.smartshell.Smartshell({
|
||||
executor: 'bash',
|
||||
@@ -20,71 +36,358 @@ export class TsTest {
|
||||
|
||||
public tsbundleInstance = new plugins.tsbundle.TsBundle();
|
||||
|
||||
constructor(cwdArg: string, relativePathToTestDirectory: string) {
|
||||
this.testDir = new TestDirectory(cwdArg, relativePathToTestDirectory);
|
||||
public runtimeRegistry = new RuntimeAdapterRegistry();
|
||||
|
||||
constructor(cwdArg: string, testPathArg: string, executionModeArg: TestExecutionMode, logOptions: LogOptions = {}, tags: string[] = [], startFromFile: number | null = null, stopAtFile: number | null = null, timeoutSeconds: number | null = null) {
|
||||
this.executionMode = executionModeArg;
|
||||
this.testDir = new TestDirectory(cwdArg, testPathArg, executionModeArg);
|
||||
this.logger = new TsTestLogger(logOptions);
|
||||
this.filterTags = tags;
|
||||
this.startFromFile = startFromFile;
|
||||
this.stopAtFile = stopAtFile;
|
||||
this.timeoutSeconds = timeoutSeconds;
|
||||
|
||||
// Register runtime adapters
|
||||
this.runtimeRegistry.register(
|
||||
new NodeRuntimeAdapter(this.logger, this.smartshellInstance, this.timeoutSeconds, this.filterTags)
|
||||
);
|
||||
this.runtimeRegistry.register(
|
||||
new ChromiumRuntimeAdapter(this.logger, this.tsbundleInstance, this.smartbrowserInstance, this.timeoutSeconds)
|
||||
);
|
||||
this.runtimeRegistry.register(
|
||||
new DenoRuntimeAdapter(this.logger, this.smartshellInstance, this.timeoutSeconds, this.filterTags)
|
||||
);
|
||||
this.runtimeRegistry.register(
|
||||
new BunRuntimeAdapter(this.logger, this.smartshellInstance, this.timeoutSeconds, this.filterTags)
|
||||
);
|
||||
}
|
||||
|
||||
async run() {
|
||||
const fileNamesToRun: string[] = await this.testDir.getTestFilePathArray();
|
||||
console.log(cs(plugins.figures.hamburger.repeat(80), 'cyan'));
|
||||
console.log('');
|
||||
console.log(`${logPrefixes.TsTestPrefix} FOUND ${fileNamesToRun.length} TESTFILE(S):`);
|
||||
for (const fileName of fileNamesToRun) {
|
||||
console.log(`${logPrefixes.TsTestPrefix} ${cs(fileName, 'orange')}`);
|
||||
// Move previous log files if --logfile option is used
|
||||
if (this.logger.options.logFile) {
|
||||
await this.movePreviousLogFiles();
|
||||
}
|
||||
console.log('-'.repeat(48));
|
||||
console.log(''); // force new line
|
||||
|
||||
const testGroups = await this.testDir.getTestFileGroups();
|
||||
const allFiles = [...testGroups.serial, ...Object.values(testGroups.parallelGroups).flat()];
|
||||
|
||||
// Log test discovery - always show full count
|
||||
this.logger.testDiscovery(
|
||||
allFiles.length,
|
||||
this.testDir.testPath,
|
||||
this.executionMode
|
||||
);
|
||||
|
||||
const tapCombinator = new TapCombinator(); // lets create the TapCombinator
|
||||
for (const fileNameArg of fileNamesToRun) {
|
||||
switch (true) {
|
||||
case fileNameArg.endsWith('.browser.ts'):
|
||||
const tapParserBrowser = await this.runInChrome(fileNameArg);
|
||||
tapCombinator.addTapParser(tapParserBrowser);
|
||||
break;
|
||||
case fileNameArg.endsWith('.both.ts'):
|
||||
console.log('>>>>>>> TEST PART 1: chrome');
|
||||
const tapParserBothBrowser = await this.runInChrome(fileNameArg);
|
||||
tapCombinator.addTapParser(tapParserBothBrowser);
|
||||
console.log(cs(`|`.repeat(16), 'cyan'));
|
||||
console.log(''); // force new line
|
||||
console.log('>>>>>>> TEST PART 2: node');
|
||||
const tapParserBothNode = await this.runInNode(fileNameArg);
|
||||
tapCombinator.addTapParser(tapParserBothBrowser);
|
||||
break;
|
||||
default:
|
||||
const tapParserNode = await this.runInNode(fileNameArg);
|
||||
tapCombinator.addTapParser(tapParserNode);
|
||||
break;
|
||||
}
|
||||
|
||||
console.log(cs(`^`.repeat(16), 'cyan'));
|
||||
console.log(''); // force new line
|
||||
const tapCombinator = new TapCombinator(this.logger); // lets create the TapCombinator
|
||||
let fileIndex = 0;
|
||||
|
||||
// Execute serial tests first
|
||||
for (const fileNameArg of testGroups.serial) {
|
||||
fileIndex++;
|
||||
await this.runSingleTestOrSkip(fileNameArg, fileIndex, allFiles.length, tapCombinator);
|
||||
}
|
||||
|
||||
// Execute parallel groups sequentially
|
||||
const groupNames = Object.keys(testGroups.parallelGroups).sort();
|
||||
for (const groupName of groupNames) {
|
||||
const groupFiles = testGroups.parallelGroups[groupName];
|
||||
|
||||
if (groupFiles.length > 0) {
|
||||
this.logger.sectionStart(`Parallel Group: ${groupName}`);
|
||||
|
||||
// Run all tests in this group in parallel
|
||||
const parallelPromises = groupFiles.map(async (fileNameArg) => {
|
||||
fileIndex++;
|
||||
return this.runSingleTestOrSkip(fileNameArg, fileIndex, allFiles.length, tapCombinator);
|
||||
});
|
||||
|
||||
await Promise.all(parallelPromises);
|
||||
this.logger.sectionEnd();
|
||||
}
|
||||
}
|
||||
|
||||
tapCombinator.evaluate();
|
||||
}
|
||||
|
||||
public async runWatch(ignorePatterns: string[] = []) {
|
||||
const smartchokInstance = new plugins.smartchok.Smartchok([this.testDir.cwd]);
|
||||
|
||||
console.clear();
|
||||
this.logger.watchModeStart();
|
||||
|
||||
// Initial run
|
||||
await this.run();
|
||||
|
||||
// Set up file watcher
|
||||
const fileChanges = new Map<string, NodeJS.Timeout>();
|
||||
const debounceTime = 300; // 300ms debounce
|
||||
|
||||
const runTestsAfterChange = async () => {
|
||||
console.clear();
|
||||
const changedFiles = Array.from(fileChanges.keys());
|
||||
fileChanges.clear();
|
||||
|
||||
this.logger.watchModeRerun(changedFiles);
|
||||
await this.run();
|
||||
this.logger.watchModeWaiting();
|
||||
};
|
||||
|
||||
// Start watching before subscribing to events
|
||||
await smartchokInstance.start();
|
||||
|
||||
// Subscribe to file change events
|
||||
const changeObservable = await smartchokInstance.getObservableFor('change');
|
||||
const addObservable = await smartchokInstance.getObservableFor('add');
|
||||
const unlinkObservable = await smartchokInstance.getObservableFor('unlink');
|
||||
|
||||
const handleFileChange = (changedPath: string) => {
|
||||
// Skip if path matches ignore patterns
|
||||
if (ignorePatterns.some(pattern => changedPath.includes(pattern))) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Clear existing timeout for this file if any
|
||||
if (fileChanges.has(changedPath)) {
|
||||
clearTimeout(fileChanges.get(changedPath));
|
||||
}
|
||||
|
||||
// Set new timeout for this file
|
||||
const timeout = setTimeout(() => {
|
||||
fileChanges.delete(changedPath);
|
||||
if (fileChanges.size === 0) {
|
||||
runTestsAfterChange();
|
||||
}
|
||||
}, debounceTime);
|
||||
|
||||
fileChanges.set(changedPath, timeout);
|
||||
};
|
||||
|
||||
// Subscribe to all relevant events
|
||||
changeObservable.subscribe(([path]) => handleFileChange(path));
|
||||
addObservable.subscribe(([path]) => handleFileChange(path));
|
||||
unlinkObservable.subscribe(([path]) => handleFileChange(path));
|
||||
|
||||
this.logger.watchModeWaiting();
|
||||
|
||||
// Handle Ctrl+C to exit gracefully
|
||||
process.on('SIGINT', async () => {
|
||||
this.logger.watchModeStop();
|
||||
await smartchokInstance.stop();
|
||||
process.exit(0);
|
||||
});
|
||||
|
||||
// Keep the process running
|
||||
await new Promise(() => {}); // This promise never resolves
|
||||
}
|
||||
|
||||
private async runSingleTestOrSkip(fileNameArg: string, fileIndex: number, totalFiles: number, tapCombinator: TapCombinator) {
|
||||
// Check if this file should be skipped based on range
|
||||
if (this.startFromFile !== null && fileIndex < this.startFromFile) {
|
||||
this.logger.testFileSkipped(fileNameArg, fileIndex, totalFiles, `before start range (${this.startFromFile})`);
|
||||
tapCombinator.addSkippedFile(fileNameArg);
|
||||
return;
|
||||
}
|
||||
|
||||
if (this.stopAtFile !== null && fileIndex > this.stopAtFile) {
|
||||
this.logger.testFileSkipped(fileNameArg, fileIndex, totalFiles, `after stop range (${this.stopAtFile})`);
|
||||
tapCombinator.addSkippedFile(fileNameArg);
|
||||
return;
|
||||
}
|
||||
|
||||
// File is in range, run it
|
||||
await this.runSingleTest(fileNameArg, fileIndex, totalFiles, tapCombinator);
|
||||
}
|
||||
|
||||
private async runSingleTest(fileNameArg: string, fileIndex: number, totalFiles: number, tapCombinator: TapCombinator) {
|
||||
// Parse the filename to determine runtimes and modifiers
|
||||
const fileName = plugins.path.basename(fileNameArg);
|
||||
const parsed = parseTestFilename(fileName, { strictUnknownRuntime: false });
|
||||
|
||||
public async runInNode(fileNameArg: string): Promise<TapParser> {
|
||||
console.log(`${cs('=> ', 'blue')} Running ${cs(fileNameArg, 'orange')} in node.js runtime.`);
|
||||
console.log(`${cs(`= `.repeat(32), 'cyan')}`);
|
||||
const tapParser = new TapParser(fileNameArg + ':node');
|
||||
// Check for nonci modifier in CI environment
|
||||
if (process.env.CI && parsed.modifiers.includes('nonci')) {
|
||||
this.logger.tapOutput(`Skipping ${fileNameArg} - marked as non-CI`);
|
||||
return;
|
||||
}
|
||||
|
||||
// Show deprecation warning for legacy naming
|
||||
if (parsed.isLegacy) {
|
||||
console.warn('');
|
||||
console.warn(cs('⚠️ DEPRECATION WARNING', 'orange'));
|
||||
console.warn(cs(` File: ${fileName}`, 'orange'));
|
||||
console.warn(cs(` Legacy naming detected. Please migrate to new naming convention.`, 'orange'));
|
||||
console.warn(cs(` Suggested: ${fileName.replace('.browser.', '.chromium.').replace('.both.', '.node+chromium.')}`, 'green'));
|
||||
console.warn(cs(` Run: tstest migrate --dry-run`, 'cyan'));
|
||||
console.warn('');
|
||||
}
|
||||
|
||||
// Get adapters for the specified runtimes
|
||||
const adapters = this.runtimeRegistry.getAdaptersForRuntimes(parsed.runtimes);
|
||||
|
||||
if (adapters.length === 0) {
|
||||
this.logger.tapOutput(`Skipping ${fileNameArg} - no runtime adapters available`);
|
||||
return;
|
||||
}
|
||||
|
||||
// Execute tests for each runtime
|
||||
if (adapters.length === 1) {
|
||||
// Single runtime - no sections needed
|
||||
const adapter = adapters[0];
|
||||
const tapParser = await adapter.run(fileNameArg, fileIndex, totalFiles);
|
||||
tapCombinator.addTapParser(tapParser);
|
||||
} else {
|
||||
// Multiple runtimes - use sections
|
||||
for (let i = 0; i < adapters.length; i++) {
|
||||
const adapter = adapters[i];
|
||||
this.logger.sectionStart(`Part ${i + 1}: ${adapter.displayName}`);
|
||||
const tapParser = await adapter.run(fileNameArg, fileIndex, totalFiles);
|
||||
tapCombinator.addTapParser(tapParser);
|
||||
this.logger.sectionEnd();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public async runInNode(fileNameArg: string, index: number, total: number): Promise<TapParser> {
|
||||
this.logger.testFileStart(fileNameArg, 'node.js', index, total);
|
||||
const tapParser = new TapParser(fileNameArg + ':node', this.logger);
|
||||
|
||||
// tsrun options
|
||||
let tsrunOptions = '';
|
||||
if (process.argv.includes('--web')) {
|
||||
tsrunOptions += ' --web';
|
||||
}
|
||||
|
||||
// Set filter tags as environment variable
|
||||
if (this.filterTags.length > 0) {
|
||||
process.env.TSTEST_FILTER_TAGS = this.filterTags.join(',');
|
||||
}
|
||||
|
||||
const execResultStreaming = await this.smartshellInstance.execStreamingSilent(
|
||||
`tsrun ${fileNameArg}${tsrunOptions}`
|
||||
);
|
||||
await tapParser.handleTapProcess(execResultStreaming.childProcess);
|
||||
// Check for 00init.ts file in test directory
|
||||
const testDir = plugins.path.dirname(fileNameArg);
|
||||
const initFile = plugins.path.join(testDir, '00init.ts');
|
||||
let runCommand = `tsrun ${fileNameArg}${tsrunOptions}`;
|
||||
|
||||
const initFileExists = await plugins.smartfile.fs.fileExists(initFile);
|
||||
|
||||
// If 00init.ts exists, run it first
|
||||
if (initFileExists) {
|
||||
// Create a temporary loader file that imports both 00init.ts and the test file
|
||||
const absoluteInitFile = plugins.path.resolve(initFile);
|
||||
const absoluteTestFile = plugins.path.resolve(fileNameArg);
|
||||
const loaderContent = `
|
||||
import '${absoluteInitFile.replace(/\\/g, '/')}';
|
||||
import '${absoluteTestFile.replace(/\\/g, '/')}';
|
||||
`;
|
||||
const loaderPath = plugins.path.join(testDir, `.loader_${plugins.path.basename(fileNameArg)}`);
|
||||
await plugins.smartfile.memory.toFs(loaderContent, loaderPath);
|
||||
runCommand = `tsrun ${loaderPath}${tsrunOptions}`;
|
||||
}
|
||||
|
||||
const execResultStreaming = await this.smartshellInstance.execStreamingSilent(runCommand);
|
||||
|
||||
// If we created a loader file, clean it up after test execution
|
||||
if (initFileExists) {
|
||||
const loaderPath = plugins.path.join(testDir, `.loader_${plugins.path.basename(fileNameArg)}`);
|
||||
const cleanup = () => {
|
||||
try {
|
||||
if (plugins.smartfile.fs.fileExistsSync(loaderPath)) {
|
||||
plugins.smartfile.fs.removeSync(loaderPath);
|
||||
}
|
||||
} catch (e) {
|
||||
// Ignore cleanup errors
|
||||
}
|
||||
};
|
||||
|
||||
execResultStreaming.childProcess.on('exit', cleanup);
|
||||
execResultStreaming.childProcess.on('error', cleanup);
|
||||
}
|
||||
|
||||
// Start warning timer if no timeout was specified
|
||||
let warningTimer: NodeJS.Timeout | null = null;
|
||||
if (this.timeoutSeconds === null) {
|
||||
warningTimer = setTimeout(() => {
|
||||
console.error('');
|
||||
console.error(cs('⚠️ WARNING: Test file is running for more than 1 minute', 'orange'));
|
||||
console.error(cs(` File: ${fileNameArg}`, 'orange'));
|
||||
console.error(cs(' Consider using --timeout option to set a timeout for test files.', 'orange'));
|
||||
console.error(cs(' Example: tstest test --timeout=300 (for 5 minutes)', 'orange'));
|
||||
console.error('');
|
||||
}, 60000); // 1 minute
|
||||
}
|
||||
|
||||
// Handle timeout if specified
|
||||
if (this.timeoutSeconds !== null) {
|
||||
const timeoutMs = this.timeoutSeconds * 1000;
|
||||
let timeoutId: NodeJS.Timeout;
|
||||
|
||||
const timeoutPromise = new Promise<void>((_resolve, reject) => {
|
||||
timeoutId = setTimeout(async () => {
|
||||
// Use smartshell's terminate() to kill entire process tree
|
||||
await execResultStreaming.terminate();
|
||||
reject(new Error(`Test file timed out after ${this.timeoutSeconds} seconds`));
|
||||
}, timeoutMs);
|
||||
});
|
||||
|
||||
try {
|
||||
await Promise.race([
|
||||
tapParser.handleTapProcess(execResultStreaming.childProcess),
|
||||
timeoutPromise
|
||||
]);
|
||||
// Clear timeout if test completed successfully
|
||||
clearTimeout(timeoutId);
|
||||
} catch (error) {
|
||||
// Clear warning timer if it was set
|
||||
if (warningTimer) {
|
||||
clearTimeout(warningTimer);
|
||||
}
|
||||
// Handle timeout error
|
||||
tapParser.handleTimeout(this.timeoutSeconds);
|
||||
// Ensure entire process tree is killed if still running
|
||||
try {
|
||||
await execResultStreaming.kill(); // This kills the entire process tree with SIGKILL
|
||||
} catch (killError) {
|
||||
// Process tree might already be dead
|
||||
}
|
||||
await tapParser.evaluateFinalResult();
|
||||
}
|
||||
} else {
|
||||
await tapParser.handleTapProcess(execResultStreaming.childProcess);
|
||||
}
|
||||
|
||||
// Clear warning timer if it was set
|
||||
if (warningTimer) {
|
||||
clearTimeout(warningTimer);
|
||||
}
|
||||
|
||||
return tapParser;
|
||||
}
|
||||
|
||||
public async runInChrome(fileNameArg: string): Promise<TapParser> {
|
||||
console.log(`${cs('=> ', 'blue')} Running ${cs(fileNameArg, 'orange')} in chromium runtime.`);
|
||||
console.log(`${cs(`= `.repeat(32), 'cyan')}`);
|
||||
private async findFreePorts(): Promise<{ httpPort: number; wsPort: number }> {
|
||||
const smartnetwork = new plugins.smartnetwork.SmartNetwork();
|
||||
|
||||
// Find random free HTTP port in range 30000-40000 to minimize collision chance
|
||||
const httpPort = await smartnetwork.findFreePort(30000, 40000, { randomize: true });
|
||||
if (!httpPort) {
|
||||
throw new Error('Could not find a free HTTP port in range 30000-40000');
|
||||
}
|
||||
|
||||
// Find random free WebSocket port, excluding the HTTP port to ensure they're different
|
||||
const wsPort = await smartnetwork.findFreePort(30000, 40000, {
|
||||
randomize: true,
|
||||
exclude: [httpPort]
|
||||
});
|
||||
if (!wsPort) {
|
||||
throw new Error('Could not find a free WebSocket port in range 30000-40000');
|
||||
}
|
||||
|
||||
// Log selected ports for debugging
|
||||
if (!this.logger.options.quiet) {
|
||||
console.log(`Selected ports - HTTP: ${httpPort}, WebSocket: ${wsPort}`);
|
||||
}
|
||||
return { httpPort, wsPort };
|
||||
}
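
// Both ports are drawn from the same 30000-40000 range; passing `exclude: [httpPort]` to the
// second findFreePort call is what guarantees the WebSocket port differs from the HTTP port,
// so the static test server and the realtime log channel can run side by side on localhost.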
public async runInChrome(fileNameArg: string, index: number, total: number): Promise<TapParser> {
|
||||
this.logger.testFileStart(fileNameArg, 'chromium', index, total);
|
||||
|
||||
// lets get all our paths sorted
|
||||
const tsbundleCacheDirPath = plugins.path.join(paths.cwd, './.nogit/tstest_cache');
|
||||
@@ -92,23 +395,29 @@ export class TsTest {
|
||||
const bundleFilePath = plugins.path.join(tsbundleCacheDirPath, bundleFileName);
|
||||
|
||||
// lets bundle the test
|
||||
await plugins.smartfile.fs.ensureDir(tsbundleCacheDirPath);
|
||||
await this.tsbundleInstance.buildTest(fileNameArg, bundleFilePath, 'parcel');
|
||||
await plugins.smartfile.fs.ensureEmptyDir(tsbundleCacheDirPath);
|
||||
await this.tsbundleInstance.build(process.cwd(), fileNameArg, bundleFilePath, {
|
||||
bundler: 'esbuild',
|
||||
});
|
||||
|
||||
// Find free ports for HTTP and WebSocket
|
||||
const { httpPort, wsPort } = await this.findFreePorts();
|
||||
|
||||
// lets create a server
|
||||
const server = new plugins.smartexpress.Server({
|
||||
const server = new plugins.typedserver.servertools.Server({
|
||||
cors: true,
|
||||
port: 3007,
|
||||
port: httpPort,
|
||||
});
|
||||
server.addRoute(
|
||||
'/test',
|
||||
new plugins.smartexpress.Handler('GET', async (req, res) => {
|
||||
new plugins.typedserver.servertools.Handler('GET', async (_req, res) => {
|
||||
res.type('.html');
|
||||
res.write(`
|
||||
<html>
|
||||
<head>
|
||||
<script>
|
||||
globalThis.testdom = true;
|
||||
globalThis.wsPort = ${wsPort};
|
||||
</script>
|
||||
</head>
|
||||
<body></body>
|
||||
@@ -117,99 +426,205 @@ export class TsTest {
|
||||
res.end();
|
||||
})
|
||||
);
|
||||
server.addRoute('*', new plugins.smartexpress.HandlerStatic(tsbundleCacheDirPath));
|
||||
server.addRoute('/*splat', new plugins.typedserver.servertools.HandlerStatic(tsbundleCacheDirPath));
|
||||
await server.start();
|
||||
|
||||
// lets do the browser bit
|
||||
// lets handle realtime comms
|
||||
const tapParser = new TapParser(fileNameArg + ':chrome', this.logger);
|
||||
const wss = new plugins.ws.WebSocketServer({ port: wsPort });
|
||||
wss.on('connection', (ws) => {
|
||||
ws.on('message', (message) => {
|
||||
const messageStr = message.toString();
|
||||
if (messageStr.startsWith('console:')) {
|
||||
const [, level, ...messageParts] = messageStr.split(':');
|
||||
this.logger.browserConsole(messageParts.join(':'), level);
|
||||
} else {
|
||||
tapParser.handleTapLog(messageStr);
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
// lets do the browser bit with timeout handling
|
||||
await this.smartbrowserInstance.start();
|
||||
const evaluation = await this.smartbrowserInstance.evaluateOnPage(
|
||||
`http://localhost:3007/test?bundleName=${bundleFileName}`,
|
||||
|
||||
const evaluatePromise = this.smartbrowserInstance.evaluateOnPage(
|
||||
`http://localhost:${httpPort}/test?bundleName=${bundleFileName}`,
|
||||
async () => {
|
||||
const convertToText = (obj) => {
|
||||
// create an array that will later be joined into a string.
|
||||
const stringArray = [];
|
||||
// lets enable real time comms
|
||||
const ws = new WebSocket(`ws://localhost:${globalThis.wsPort}`);
|
||||
await new Promise((resolve) => (ws.onopen = resolve));
|
||||
|
||||
if (typeof obj === 'object' && typeof obj.toString === 'function') {
|
||||
stringArray.push(obj.toString());
|
||||
} else if (typeof obj === 'object' && obj.join === undefined) {
|
||||
stringArray.push('{');
|
||||
for (const prop of Object.keys(obj)) {
|
||||
stringArray.push(prop, ': ', convertToText(obj[prop]), ',');
|
||||
}
|
||||
stringArray.push('}');
|
||||
// Ensure this function is declared with 'async'
|
||||
const logStore = [];
|
||||
const originalLog = console.log;
|
||||
const originalError = console.error;
|
||||
|
||||
// is array
|
||||
} else if (typeof obj === 'object' && !(obj.join === undefined)) {
|
||||
stringArray.push('[');
|
||||
for (const prop of Object.keys(obj)) {
|
||||
stringArray.push(convertToText(obj[prop]), ',');
|
||||
}
|
||||
stringArray.push(']');
|
||||
|
||||
// is function
|
||||
} else if (typeof obj === 'function') {
|
||||
stringArray.push(obj.toString());
|
||||
|
||||
// all other values can be done with JSON.stringify
|
||||
} else {
|
||||
stringArray.push(JSON.stringify(obj));
|
||||
}
|
||||
|
||||
return stringArray.join('');
|
||||
// Override console methods to capture the logs
|
||||
console.log = (...args: any[]) => {
|
||||
logStore.push(args.join(' '));
|
||||
ws.send(args.join(' '));
|
||||
originalLog(...args);
|
||||
};
|
||||
console.error = (...args: any[]) => {
|
||||
logStore.push(args.join(' '));
|
||||
ws.send(args.join(' '));
|
||||
originalError(...args);
|
||||
};
|
||||
|
||||
let logStore = '';
|
||||
// tslint:disable-next-line: max-classes-per-file
|
||||
const log = console.log.bind(console);
|
||||
console.log = (...args) => {
|
||||
args = args.map((argument) => {
|
||||
return typeof argument !== 'string' ? convertToText(argument) : argument;
|
||||
});
|
||||
logStore += `${args}\n`;
|
||||
log(...args);
|
||||
};
|
||||
const error = console.error;
|
||||
console.error = (...args) => {
|
||||
args = args.map((argument) => {
|
||||
return typeof argument !== 'string' ? convertToText(argument) : argument;
|
||||
});
|
||||
logStore += `${args}\n`;
|
||||
error(...args);
|
||||
};
|
||||
const bundleName = new URLSearchParams(window.location.search).get('bundleName');
|
||||
console.log(`::TSTEST IN CHROMIUM:: Relevant Script name is: ${bundleName}`);
|
||||
const bundleResponse = await fetch(`/${bundleName}`);
|
||||
console.log(
|
||||
`::TSTEST IN CHROMIUM:: Got ${bundleName} with STATUS ${bundleResponse.status}`
|
||||
);
|
||||
const bundle = await bundleResponse.text();
|
||||
console.log(`::TSTEST IN CHROMIUM:: Executing ${bundleName}`);
|
||||
originalLog(`::TSTEST IN CHROMIUM:: Relevant Script name is: ${bundleName}`);
|
||||
|
||||
try {
|
||||
// tslint:disable-next-line: no-eval
|
||||
eval(bundle);
|
||||
// Dynamically import the test module
|
||||
const testModule = await import(`/${bundleName}`);
|
||||
if (testModule && testModule.default && testModule.default instanceof Promise) {
|
||||
// Execute the exported test function
|
||||
await testModule.default;
|
||||
} else if (testModule && testModule.default && typeof testModule.default.then === 'function') {
|
||||
console.log('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!');
|
||||
console.log('Test module default export is just promiselike: Something might be messing with your Promise implementation.');
|
||||
console.log('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!');
|
||||
await testModule.default;
|
||||
} else if (globalThis.tapPromise && typeof globalThis.tapPromise.then === 'function') {
|
||||
console.log('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!');
|
||||
console.log('Using globalThis.tapPromise');
|
||||
console.log('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!');
|
||||
await testModule.default;
|
||||
} else {
|
||||
console.error('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!');
|
||||
console.error('Test module does not export a default promise.');
|
||||
console.error('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!');
|
||||
console.log(`We got: ${JSON.stringify(testModule)}`);
|
||||
|
||||
}
|
||||
} catch (err) {
|
||||
console.error(err);
|
||||
}
|
||||
|
||||
if (globalThis.tapbundleDeferred && globalThis.tapbundleDeferred.promise) {
|
||||
await globalThis.tapbundleDeferred.promise;
|
||||
} else {
|
||||
console.log('Error: Could not find tapbundle Deferred');
|
||||
}
|
||||
return logStore;
|
||||
return logStore.join('\n');
|
||||
}
|
||||
);
|
||||
await this.smartbrowserInstance.stop();
|
||||
await server.stop();
|
||||
|
||||
// Start warning timer if no timeout was specified
|
||||
let warningTimer: NodeJS.Timeout | null = null;
|
||||
if (this.timeoutSeconds === null) {
|
||||
warningTimer = setTimeout(() => {
|
||||
console.error('');
|
||||
console.error(cs('⚠️ WARNING: Test file is running for more than 1 minute', 'orange'));
|
||||
console.error(cs(` File: ${fileNameArg}`, 'orange'));
|
||||
console.error(cs(' Consider using --timeout option to set a timeout for test files.', 'orange'));
|
||||
console.error(cs(' Example: tstest test --timeout=300 (for 5 minutes)', 'orange'));
|
||||
console.error('');
|
||||
}, 60000); // 1 minute
|
||||
}
|
||||
|
||||
// Handle timeout if specified
|
||||
if (this.timeoutSeconds !== null) {
|
||||
const timeoutMs = this.timeoutSeconds * 1000;
|
||||
let timeoutId: NodeJS.Timeout;
|
||||
|
||||
const timeoutPromise = new Promise<void>((_resolve, reject) => {
|
||||
timeoutId = setTimeout(() => {
|
||||
reject(new Error(`Test file timed out after ${this.timeoutSeconds} seconds`));
|
||||
}, timeoutMs);
|
||||
});
|
||||
|
||||
try {
|
||||
await Promise.race([
|
||||
evaluatePromise,
|
||||
timeoutPromise
|
||||
]);
|
||||
// Clear timeout if test completed successfully
|
||||
clearTimeout(timeoutId);
|
||||
} catch (error) {
|
||||
// Clear warning timer if it was set
|
||||
if (warningTimer) {
|
||||
clearTimeout(warningTimer);
|
||||
}
|
||||
// Handle timeout error
|
||||
tapParser.handleTimeout(this.timeoutSeconds);
|
||||
}
|
||||
} else {
|
||||
await evaluatePromise;
|
||||
}
|
||||
|
||||
// Clear warning timer if it was set
|
||||
if (warningTimer) {
|
||||
clearTimeout(warningTimer);
|
||||
}
|
||||
|
||||
// Always clean up resources, even on timeout
|
||||
try {
|
||||
await this.smartbrowserInstance.stop();
|
||||
} catch (error) {
|
||||
// Browser might already be stopped
|
||||
}
|
||||
|
||||
try {
|
||||
await server.stop();
|
||||
} catch (error) {
|
||||
// Server might already be stopped
|
||||
}
|
||||
|
||||
try {
|
||||
wss.close();
|
||||
} catch (error) {
|
||||
// WebSocket server might already be closed
|
||||
}
|
||||
|
||||
console.log(
|
||||
`${cs('=> ', 'blue')} Stopped ${cs(fileNameArg, 'orange')} chromium instance and server.`
|
||||
);
|
||||
console.log(`${cs('=> ', 'blue')} See the result captured from the chromium execution:`);
|
||||
// lets create the tap parser
|
||||
const tapParser = new TapParser(fileNameArg + ':chrome');
|
||||
tapParser.handleTapLog(evaluation);
|
||||
// Always evaluate final result (handleTimeout just sets up the test state)
|
||||
await tapParser.evaluateFinalResult();
|
||||
return tapParser;
|
||||
}
|
||||
|
||||
public async runInDeno() {}
|
||||
|
||||
private async movePreviousLogFiles() {
|
||||
const logDir = plugins.path.join('.nogit', 'testlogs');
|
||||
const previousDir = plugins.path.join('.nogit', 'testlogs', 'previous');
|
||||
const errDir = plugins.path.join('.nogit', 'testlogs', '00err');
|
||||
const diffDir = plugins.path.join('.nogit', 'testlogs', '00diff');
|
||||
|
||||
try {
|
||||
// Delete 00err and 00diff directories if they exist
|
||||
if (plugins.smartfile.fs.isDirectorySync(errDir)) {
|
||||
plugins.smartfile.fs.removeSync(errDir);
|
||||
}
|
||||
if (plugins.smartfile.fs.isDirectorySync(diffDir)) {
|
||||
plugins.smartfile.fs.removeSync(diffDir);
|
||||
}
|
||||
|
||||
// Get all .log files in log directory (not in subdirectories)
|
||||
const files = await plugins.smartfile.fs.listFileTree(logDir, '*.log');
|
||||
const logFiles = files.filter((file: string) => !file.includes('/'));
|
||||
|
||||
if (logFiles.length === 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Ensure previous directory exists
|
||||
await plugins.smartfile.fs.ensureDir(previousDir);
|
||||
|
||||
// Move each log file to previous directory
|
||||
for (const file of logFiles) {
|
||||
const filename = plugins.path.basename(file);
|
||||
const sourcePath = plugins.path.join(logDir, filename);
|
||||
const destPath = plugins.path.join(previousDir, filename);
|
||||
|
||||
try {
|
||||
// Copy file to new location and remove original
|
||||
await plugins.smartfile.fs.copy(sourcePath, destPath);
|
||||
await plugins.smartfile.fs.remove(sourcePath);
|
||||
} catch (error) {
|
||||
// Silently continue if a file can't be moved
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
// Directory might not exist, which is fine
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
585 ts/tstest.logging.ts Normal file
@@ -0,0 +1,585 @@
import { coloredString as cs } from '@push.rocks/consolecolor';
import * as plugins from './tstest.plugins.js';
import * as fs from 'fs';
import * as path from 'path';

export interface LogOptions {
  quiet?: boolean;
  verbose?: boolean;
  noColor?: boolean;
  json?: boolean;
  logFile?: boolean;
}

export interface TestFileResult {
  file: string;
  passed: number;
  failed: number;
  total: number;
  duration: number;
  tests: Array<{
    name: string;
    passed: boolean;
    duration: number;
    error?: string;
  }>;
}

export interface TestSummary {
  totalFiles: number;
  totalTests: number;
  totalPassed: number;
  totalFailed: number;
  totalSkipped: number;
  totalDuration: number;
  fileResults: TestFileResult[];
  skippedFiles: string[];
}

export class TsTestLogger {
  public readonly options: LogOptions;
  private startTime: number;
  private fileResults: TestFileResult[] = [];
  private currentFileResult: TestFileResult | null = null;
  private currentTestLogFile: string | null = null;
  private currentTestLogs: string[] = []; // Buffer for current test logs
  private currentTestFailed: boolean = false;

  constructor(options: LogOptions = {}) {
    this.options = options;
    this.startTime = Date.now();
  }
|
||||
|
||||
private format(text: string, color?: string): string {
|
||||
if (this.options.noColor || !color) {
|
||||
return text;
|
||||
}
|
||||
return cs(text, color as any);
|
||||
}
|
||||
|
||||
private log(message: string) {
|
||||
if (this.options.json) {
|
||||
// For JSON mode, skip console output
|
||||
// JSON output is handled by logJson method
|
||||
return;
|
||||
}
|
||||
|
||||
console.log(message);
|
||||
|
||||
// Log to the current test file log if we're in a test and --logfile is specified
|
||||
if (this.currentTestLogFile) {
|
||||
this.logToTestFile(message);
|
||||
}
|
||||
}
|
||||
|
||||
private logToFile(message: string) {
|
||||
// This method is no longer used since we use logToTestFile for individual test logs
|
||||
// Keeping it for potential future use with a global log file
|
||||
}
|
||||
|
||||
private logToTestFile(message: string) {
|
||||
try {
|
||||
// Remove ANSI color codes for file logging
|
||||
const cleanMessage = message.replace(/\u001b\[[0-9;]*m/g, '');
|
||||
|
||||
// Append to test log file
|
||||
fs.appendFileSync(this.currentTestLogFile, cleanMessage + '\n');
|
||||
} catch (error) {
|
||||
// Silently fail to avoid disrupting the test run
|
||||
}
|
||||
}
|
||||
|
||||
private logJson(data: any) {
|
||||
const jsonString = JSON.stringify(data);
|
||||
console.log(jsonString);
|
||||
|
||||
// Also log to test file if --logfile is specified
|
||||
if (this.currentTestLogFile) {
|
||||
this.logToTestFile(jsonString);
|
||||
}
|
||||
}
|
||||
|
||||
// Section separators
|
||||
sectionStart(title: string) {
|
||||
if (this.options.quiet || this.options.json) return;
|
||||
this.log(this.format(`\n━━━ ${title} ━━━`, 'cyan'));
|
||||
}
|
||||
|
||||
sectionEnd() {
|
||||
if (this.options.quiet || this.options.json) return;
|
||||
this.log(this.format('─'.repeat(50), 'dim'));
|
||||
}
|
||||
|
||||
// Progress indication
|
||||
progress(current: number, total: number, message: string) {
|
||||
if (this.options.quiet || this.options.json) return;
|
||||
const percentage = Math.round((current / total) * 100);
|
||||
const filled = Math.round((current / total) * 20);
|
||||
const empty = 20 - filled;
|
||||
|
||||
this.log(this.format(`\n📊 Progress: ${current}/${total} (${percentage}%)`, 'cyan'));
|
||||
this.log(this.format(`[${'█'.repeat(filled)}${'░'.repeat(empty)}] ${message}`, 'dim'));
|
||||
}
|
||||
|
||||
// Test discovery
|
||||
testDiscovery(count: number, pattern: string, executionMode: string) {
|
||||
if (this.options.json) {
|
||||
this.logJson({ event: 'discovery', count, pattern, executionMode });
|
||||
return;
|
||||
}
|
||||
|
||||
if (this.options.quiet) {
|
||||
this.log(`Found ${count} tests`);
|
||||
} else {
|
||||
this.log(this.format(`\n🔍 Test Discovery`, 'bold'));
|
||||
this.log(this.format(` Mode: ${executionMode}`, 'dim'));
|
||||
this.log(this.format(` Pattern: ${pattern}`, 'dim'));
|
||||
this.log(this.format(` Found: ${count} test file(s)`, 'green'));
|
||||
}
|
||||
}
|
||||
|
||||
// Test execution
|
||||
testFileStart(filename: string, runtime: string, index: number, total: number) {
|
||||
this.currentFileResult = {
|
||||
file: filename,
|
||||
passed: 0,
|
||||
failed: 0,
|
||||
total: 0,
|
||||
duration: 0,
|
||||
tests: []
|
||||
};
|
||||
|
||||
// Reset test-specific state
|
||||
this.currentTestLogs = [];
|
||||
this.currentTestFailed = false;
|
||||
|
||||
// Only set up test log file if --logfile option is specified
|
||||
if (this.options.logFile) {
|
||||
// Create a safe filename that preserves directory structure
|
||||
// Convert relative path to a flat filename by replacing separators with __
|
||||
const relativeFilename = path.relative(process.cwd(), filename);
|
||||
const safeFilename = relativeFilename
|
||||
.replace(/\\/g, '/') // Normalize Windows paths
|
||||
.replace(/\//g, '__') // Replace path separators with double underscores
|
||||
.replace(/\.ts$/, '') // Remove .ts extension
|
||||
.replace(/^\.\.__|^\.__|^__/, ''); // Clean up leading separators from relative paths
|
||||
|
||||
this.currentTestLogFile = path.join('.nogit', 'testlogs', `${safeFilename}.log`);
|
||||
|
||||
// Ensure the directory exists
|
||||
const logDir = path.dirname(this.currentTestLogFile);
|
||||
if (!fs.existsSync(logDir)) {
|
||||
fs.mkdirSync(logDir, { recursive: true });
|
||||
}
|
||||
|
||||
// Clear the log file for this test
|
||||
fs.writeFileSync(this.currentTestLogFile, '');
|
||||
}
|
||||
|
||||
if (this.options.json) {
|
||||
this.logJson({ event: 'fileStart', filename, runtime, index, total });
|
||||
return;
|
||||
}
|
||||
|
||||
if (this.options.quiet) return;
|
||||
|
||||
this.log(this.format(`\n▶️ ${filename} (${index}/${total})`, 'blue'));
|
||||
this.log(this.format(` Runtime: ${runtime}`, 'dim'));
|
||||
}
|
||||
|
||||
testResult(testName: string, passed: boolean, duration: number, error?: string) {
|
||||
if (this.currentFileResult) {
|
||||
this.currentFileResult.tests.push({ name: testName, passed, duration, error });
|
||||
this.currentFileResult.total++;
|
||||
if (passed) {
|
||||
this.currentFileResult.passed++;
|
||||
} else {
|
||||
this.currentFileResult.failed++;
|
||||
this.currentTestFailed = true;
|
||||
}
|
||||
this.currentFileResult.duration += duration;
|
||||
}
|
||||
|
||||
if (this.options.json) {
|
||||
this.logJson({ event: 'testResult', testName, passed, duration, error });
|
||||
return;
|
||||
}
|
||||
|
||||
// If test failed and we have buffered logs, show them now
|
||||
if (!passed && this.currentTestLogs.length > 0 && !this.options.verbose) {
|
||||
this.log(this.format(' 📋 Console output from failed test:', 'yellow'));
|
||||
this.currentTestLogs.forEach(logMessage => {
|
||||
this.log(this.format(` ${logMessage}`, 'dim'));
|
||||
});
|
||||
}
|
||||
|
||||
const icon = passed ? '✅' : '❌';
|
||||
const color = passed ? 'green' : 'red';
|
||||
|
||||
if (this.options.quiet) {
|
||||
this.log(`${icon} ${testName}`);
|
||||
} else {
|
||||
this.log(this.format(` ${icon} ${testName} (${duration}ms)`, color));
|
||||
if (error && !passed) {
|
||||
this.log(this.format(` ${error}`, 'red'));
|
||||
}
|
||||
}
|
||||
|
||||
// Clear logs after each test
|
||||
this.currentTestLogs = [];
|
||||
}
|
||||
|
||||
testFileEnd(passed: number, failed: number, duration: number) {
|
||||
if (this.currentFileResult) {
|
||||
this.fileResults.push(this.currentFileResult);
|
||||
this.currentFileResult = null;
|
||||
}
|
||||
|
||||
if (this.options.json) {
|
||||
this.logJson({ event: 'fileEnd', passed, failed, duration });
|
||||
return;
|
||||
}
|
||||
|
||||
if (!this.options.quiet) {
|
||||
const total = passed + failed;
|
||||
const durationStr = duration >= 1000 ? `${(duration / 1000).toFixed(1)}s` : `${duration}ms`;
|
||||
|
||||
if (failed === 0) {
|
||||
this.log(this.format(` Summary: ${passed}/${total} PASSED in ${durationStr}`, 'green'));
|
||||
} else {
|
||||
this.log(this.format(` Summary: ${passed} passed, ${failed} failed of ${total} tests in ${durationStr}`, 'red'));
|
||||
}
|
||||
}
|
||||
|
||||
// If using --logfile, handle error copy and diff detection
|
||||
if (this.options.logFile && this.currentTestLogFile) {
|
||||
try {
|
||||
const logContent = fs.readFileSync(this.currentTestLogFile, 'utf-8');
|
||||
const logDir = path.dirname(this.currentTestLogFile);
|
||||
const logBasename = path.basename(this.currentTestLogFile);
|
||||
|
||||
// Create error copy if there were failures
|
||||
if (failed > 0) {
|
||||
const errorDir = path.join(logDir, '00err');
|
||||
if (!fs.existsSync(errorDir)) {
|
||||
fs.mkdirSync(errorDir, { recursive: true });
|
||||
}
|
||||
const errorLogPath = path.join(errorDir, logBasename);
|
||||
fs.writeFileSync(errorLogPath, logContent);
|
||||
}
|
||||
|
||||
// Check for previous version and create diff if changed
|
||||
const previousLogPath = path.join(logDir, 'previous', logBasename);
|
||||
if (fs.existsSync(previousLogPath)) {
|
||||
const previousContent = fs.readFileSync(previousLogPath, 'utf-8');
|
||||
|
||||
// Simple check if content differs
|
||||
if (previousContent !== logContent) {
|
||||
const diffDir = path.join(logDir, '00diff');
|
||||
if (!fs.existsSync(diffDir)) {
|
||||
fs.mkdirSync(diffDir, { recursive: true });
|
||||
}
|
||||
const diffLogPath = path.join(diffDir, logBasename);
|
||||
const diffContent = this.createDiff(previousContent, logContent, logBasename);
|
||||
fs.writeFileSync(diffLogPath, diffContent);
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
// Silently fail to avoid disrupting the test run
|
||||
}
|
||||
}
|
||||
|
||||
// Clear the current test log file reference only if using --logfile
|
||||
if (this.options.logFile) {
|
||||
this.currentTestLogFile = null;
|
||||
}
|
||||
}
|
||||
|
||||
// TAP output forwarding (for TAP protocol messages)
|
||||
tapOutput(message: string, _isError: boolean = false) {
|
||||
if (this.options.json) return;
|
||||
|
||||
// Never show raw TAP protocol messages in console
|
||||
// They are already processed by TapParser and shown in our format
|
||||
|
||||
// Always log to test file if --logfile is specified
|
||||
if (this.currentTestLogFile) {
|
||||
this.logToTestFile(` ${message}`);
|
||||
}
|
||||
}
|
||||
|
||||
// Console output from test files (non-TAP output)
|
||||
testConsoleOutput(message: string) {
|
||||
if (this.options.json) return;
|
||||
|
||||
// In verbose mode, show console output immediately
|
||||
if (this.options.verbose) {
|
||||
this.log(this.format(` ${message}`, 'dim'));
|
||||
} else {
|
||||
// In non-verbose mode, buffer the logs
|
||||
this.currentTestLogs.push(message);
|
||||
}
|
||||
|
||||
// Always log to test file if --logfile is specified
|
||||
if (this.currentTestLogFile) {
|
||||
this.logToTestFile(` ${message}`);
|
||||
}
|
||||
}
|
||||
|
||||
// Skipped test file
|
||||
testFileSkipped(filename: string, index: number, total: number, reason: string) {
|
||||
if (this.options.json) {
|
||||
this.logJson({ event: 'fileSkipped', filename, index, total, reason });
|
||||
return;
|
||||
}
|
||||
|
||||
if (this.options.quiet) return;
|
||||
|
||||
this.log(this.format(`\n⏭️ ${filename} (${index}/${total})`, 'yellow'));
|
||||
this.log(this.format(` Skipped: ${reason}`, 'dim'));
|
||||
}
|
||||
|
||||
// Browser console
|
||||
browserConsole(message: string, level: string = 'log') {
|
||||
if (this.options.json) {
|
||||
this.logJson({ event: 'browserConsole', message, level });
|
||||
return;
|
||||
}
|
||||
|
||||
if (!this.options.quiet) {
|
||||
const prefix = level === 'error' ? '🌐❌' : '🌐';
|
||||
const color = level === 'error' ? 'red' : 'magenta';
|
||||
this.log(this.format(` ${prefix} ${message}`, color));
|
||||
}
|
||||
}
|
||||
|
||||
// Test error details display
|
||||
testErrorDetails(errorMessage: string) {
|
||||
if (this.options.json) {
|
||||
this.logJson({ event: 'testError', error: errorMessage });
|
||||
return;
|
||||
}
|
||||
|
||||
if (!this.options.quiet) {
|
||||
this.log(this.format(' Error details:', 'red'));
|
||||
errorMessage.split('\n').forEach(line => {
|
||||
this.log(this.format(` ${line}`, 'red'));
|
||||
});
|
||||
}
|
||||
|
||||
// Always log to test file if --logfile is specified
|
||||
if (this.currentTestLogFile) {
|
||||
this.logToTestFile(` Error: ${errorMessage}`);
|
||||
}
|
||||
}
|
||||
|
||||
// Final summary
|
||||
summary(skippedFiles: string[] = []) {
|
||||
const totalDuration = Date.now() - this.startTime;
|
||||
const summary: TestSummary = {
|
||||
totalFiles: this.fileResults.length + skippedFiles.length,
|
||||
totalTests: this.fileResults.reduce((sum, r) => sum + r.total, 0),
|
||||
totalPassed: this.fileResults.reduce((sum, r) => sum + r.passed, 0),
|
||||
totalFailed: this.fileResults.reduce((sum, r) => sum + r.failed, 0),
|
||||
totalSkipped: skippedFiles.length,
|
||||
totalDuration,
|
||||
fileResults: this.fileResults,
|
||||
skippedFiles
|
||||
};
|
||||
|
||||
if (this.options.json) {
|
||||
this.logJson({ event: 'summary', summary });
|
||||
return;
|
||||
}
|
||||
|
||||
if (this.options.quiet) {
|
||||
const status = summary.totalFailed === 0 ? 'PASSED' : 'FAILED';
|
||||
const durationStr = totalDuration >= 1000 ? `${(totalDuration / 1000).toFixed(1)}s` : `${totalDuration}ms`;
|
||||
|
||||
if (summary.totalFailed === 0) {
|
||||
this.log(`\nSummary: ${summary.totalPassed}/${summary.totalTests} | ${durationStr} | ${status}`);
|
||||
} else {
|
||||
this.log(`\nSummary: ${summary.totalPassed} passed, ${summary.totalFailed} failed of ${summary.totalTests} tests | ${durationStr} | ${status}`);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
// Detailed summary
|
||||
this.log(this.format('\n📊 Test Summary', 'bold'));
|
||||
this.log(this.format('┌────────────────────────────────┐', 'dim'));
|
||||
this.log(this.format(`│ Total Files: ${summary.totalFiles.toString().padStart(14)} │`, 'white'));
|
||||
this.log(this.format(`│ Total Tests: ${summary.totalTests.toString().padStart(14)} │`, 'white'));
|
||||
this.log(this.format(`│ Passed: ${summary.totalPassed.toString().padStart(14)} │`, 'green'));
|
||||
this.log(this.format(`│ Failed: ${summary.totalFailed.toString().padStart(14)} │`, summary.totalFailed > 0 ? 'red' : 'green'));
|
||||
if (summary.totalSkipped > 0) {
|
||||
this.log(this.format(`│ Skipped: ${summary.totalSkipped.toString().padStart(14)} │`, 'yellow'));
|
||||
}
|
||||
const durationStrFormatted = totalDuration >= 1000 ? `${(totalDuration / 1000).toFixed(1)}s` : `${totalDuration}ms`;
|
||||
this.log(this.format(`│ Duration: ${durationStrFormatted.padStart(14)} │`, 'white'));
|
||||
this.log(this.format('└────────────────────────────────┘', 'dim'));
|
||||
|
||||
// File results
|
||||
if (summary.totalFailed > 0) {
|
||||
this.log(this.format('\n❌ Failed Tests:', 'red'));
|
||||
this.fileResults.forEach(fileResult => {
|
||||
if (fileResult.failed > 0) {
|
||||
this.log(this.format(`\n ${fileResult.file}`, 'yellow'));
|
||||
fileResult.tests.filter(t => !t.passed).forEach(test => {
|
||||
this.log(this.format(` ❌ ${test.name}`, 'red'));
|
||||
if (test.error) {
|
||||
this.log(this.format(` ${test.error}`, 'dim'));
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// Performance metrics
|
||||
if (this.options.verbose) {
|
||||
// Calculate metrics based on actual test durations
|
||||
const allTests = this.fileResults.flatMap(r => r.tests);
|
||||
const testDurations = allTests.map(t => t.duration);
|
||||
const sumOfTestDurations = testDurations.reduce((sum, d) => sum + d, 0);
|
||||
const avgTestDuration = allTests.length > 0 ? Math.round(sumOfTestDurations / allTests.length) : 0;
|
||||
|
||||
// Find slowest test (exclude 0ms durations unless all are 0)
|
||||
const nonZeroDurations = allTests.filter(t => t.duration > 0);
|
||||
const testsToSort = nonZeroDurations.length > 0 ? nonZeroDurations : allTests;
|
||||
const slowestTest = testsToSort.sort((a, b) => b.duration - a.duration)[0];
|
||||
|
||||
this.log(this.format('\n⏱️ Performance Metrics:', 'cyan'));
|
||||
this.log(this.format(` Average per test: ${avgTestDuration}ms`, 'white'));
|
||||
if (slowestTest && slowestTest.duration > 0) {
|
||||
this.log(this.format(` Slowest test: ${slowestTest.name} (${slowestTest.duration}ms)`, 'orange'));
|
||||
} else if (allTests.length > 0) {
|
||||
this.log(this.format(` All tests completed in <1ms`, 'dim'));
|
||||
}
|
||||
}
|
||||
|
||||
// Final status
|
||||
const status = summary.totalFailed === 0 ? 'ALL TESTS PASSED! 🎉' : 'SOME TESTS FAILED! ❌';
|
||||
const statusColor = summary.totalFailed === 0 ? 'green' : 'red';
|
||||
this.log(this.format(`\n${status}`, statusColor));
|
||||
}
|
||||
|
||||
// Warning display
|
||||
warning(message: string) {
|
||||
if (this.options.json) {
|
||||
this.logJson({ event: 'warning', message });
|
||||
return;
|
||||
}
|
||||
|
||||
if (this.options.quiet) {
|
||||
console.log(`WARNING: ${message}`);
|
||||
} else {
|
||||
this.log(this.format(` ⚠️ ${message}`, 'orange'));
|
||||
}
|
||||
}
|
||||
|
||||
// Error display
|
||||
error(message: string, file?: string, stack?: string) {
|
||||
if (this.options.json) {
|
||||
this.logJson({ event: 'error', message, file, stack });
|
||||
return;
|
||||
}
|
||||
|
||||
if (this.options.quiet) {
|
||||
console.error(`ERROR: ${message}`);
|
||||
} else {
|
||||
this.log(this.format('\n⚠️ Error', 'red'));
|
||||
if (file) this.log(this.format(` File: ${file}`, 'yellow'));
|
||||
this.log(this.format(` ${message}`, 'red'));
|
||||
if (stack && this.options.verbose) {
|
||||
this.log(this.format(` Stack:`, 'dim'));
|
||||
this.log(this.format(stack.split('\n').map(line => ` ${line}`).join('\n'), 'dim'));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Create a diff between two log contents
|
||||
private createDiff(previousContent: string, currentContent: string, filename: string): string {
|
||||
const previousLines = previousContent.split('\n');
|
||||
const currentLines = currentContent.split('\n');
|
||||
|
||||
let diff = `DIFF REPORT: ${filename}\n`;
|
||||
diff += `Generated: ${new Date().toISOString()}\n`;
|
||||
diff += '='.repeat(80) + '\n\n';
|
||||
|
||||
// Simple line-by-line comparison
|
||||
const maxLines = Math.max(previousLines.length, currentLines.length);
|
||||
let hasChanges = false;
|
||||
|
||||
for (let i = 0; i < maxLines; i++) {
|
||||
const prevLine = previousLines[i] || '';
|
||||
const currLine = currentLines[i] || '';
|
||||
|
||||
if (prevLine !== currLine) {
|
||||
hasChanges = true;
|
||||
if (i < previousLines.length && i >= currentLines.length) {
|
||||
// Line was removed
|
||||
diff += `- [Line ${i + 1}] ${prevLine}\n`;
|
||||
} else if (i >= previousLines.length && i < currentLines.length) {
|
||||
// Line was added
|
||||
diff += `+ [Line ${i + 1}] ${currLine}\n`;
|
||||
} else {
|
||||
// Line was modified
|
||||
diff += `- [Line ${i + 1}] ${prevLine}\n`;
|
||||
diff += `+ [Line ${i + 1}] ${currLine}\n`;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!hasChanges) {
|
||||
diff += 'No changes detected.\n';
|
||||
}
|
||||
|
||||
diff += '\n' + '='.repeat(80) + '\n';
|
||||
diff += `Previous version had ${previousLines.length} lines\n`;
|
||||
diff += `Current version has ${currentLines.length} lines\n`;
|
||||
|
||||
return diff;
|
||||
}
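To make the format produced by `createDiff` above concrete, a generated report would look roughly like the sketch below (filename and line contents are illustrative; the real separator is a run of 80 `=` characters):

```
DIFF REPORT: test__example.node.log
Generated: 2025-01-01T00:00:00.000Z
================================================================================

- [Line 12] ✅ should add numbers correctly (3ms)
+ [Line 12] ✅ should add numbers correctly (5ms)

================================================================================
Previous version had 40 lines
Current version has 40 lines
```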
|
||||
|
||||
// Watch mode methods
|
||||
watchModeStart() {
|
||||
if (this.options.json) {
|
||||
this.logJson({ event: 'watchModeStart' });
|
||||
return;
|
||||
}
|
||||
|
||||
this.log(this.format('\n👀 Watch Mode', 'cyan'));
|
||||
this.log(this.format(' Running tests in watch mode...', 'dim'));
|
||||
this.log(this.format(' Press Ctrl+C to exit\n', 'dim'));
|
||||
}
|
||||
|
||||
watchModeWaiting() {
|
||||
if (this.options.json) {
|
||||
this.logJson({ event: 'watchModeWaiting' });
|
||||
return;
|
||||
}
|
||||
|
||||
this.log(this.format('\n Waiting for file changes...', 'dim'));
|
||||
}
|
||||
|
||||
watchModeRerun(changedFiles: string[]) {
|
||||
if (this.options.json) {
|
||||
this.logJson({ event: 'watchModeRerun', changedFiles });
|
||||
return;
|
||||
}
|
||||
|
||||
this.log(this.format('\n🔄 File changes detected:', 'cyan'));
|
||||
changedFiles.forEach(file => {
|
||||
this.log(this.format(` • ${file}`, 'yellow'));
|
||||
});
|
||||
this.log(this.format('\n Re-running tests...\n', 'dim'));
|
||||
}
|
||||
|
||||
watchModeStop() {
|
||||
if (this.options.json) {
|
||||
this.logJson({ event: 'watchModeStop' });
|
||||
return;
|
||||
}
|
||||
|
||||
this.log(this.format('\n\n👋 Stopping watch mode...', 'cyan'));
|
||||
}
|
||||
}
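As a quick illustration of the `--json` mode implemented by this logger, every `logJson` call prints one JSON object per line; the final summary line would look roughly like this sketch (values invented, `fileResults` left empty for brevity, field names taken from the `TestSummary` interface above):

```
{"event":"summary","summary":{"totalFiles":2,"totalTests":10,"totalPassed":9,"totalFailed":1,"totalSkipped":0,"totalDuration":1234,"fileResults":[],"skippedFiles":[]}}
```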
|
@@ -1,7 +1,8 @@
import * as plugins from './tstest.plugins';
import { coloredString as cs } from '@pushrocks/consolecolor';
import * as plugins from './tstest.plugins.js';
import { coloredString as cs } from '@push.rocks/consolecolor';

export const TapPrefix = cs(`::TAP::`, 'pink', 'black');
export const TapPretaskPrefix = cs(`::PRETASK::`, 'cyan', 'black');
export const TapErrorPrefix = cs(` !!!TAP PROTOCOL ERROR!!! `, 'red', 'black');

export const TsTestPrefix = cs(`**TSTEST**`, 'pink', 'black');
@@ -1,5 +1,5 @@
import * as plugins from './tstest.plugins';
import * as plugins from './tstest.plugins.js';

export const cwd = process.cwd();
export const testDir = plugins.path.join(cwd, './test/');
export const binDirectory = plugins.path.join(cwd, 'node_modules/.bin');
export const binDirectory = plugins.path.join(cwd, './node_modules/.bin');
@@ -3,35 +3,51 @@ import * as path from 'path';
|
||||
|
||||
export { path };
|
||||
|
||||
// @pushrocks scope
|
||||
import * as consolecolor from '@pushrocks/consolecolor';
|
||||
import * as smartbrowser from '@pushrocks/smartbrowser';
|
||||
import * as smartexpress from '@pushrocks/smartexpress';
|
||||
import * as smartdelay from '@pushrocks/smartdelay';
|
||||
import * as smartfile from '@pushrocks/smartfile';
|
||||
import * as smartlog from '@pushrocks/smartlog';
|
||||
import * as smartpromise from '@pushrocks/smartpromise';
|
||||
import * as smartshell from '@pushrocks/smartshell';
|
||||
import * as tapbundle from '@pushrocks/tapbundle';
|
||||
// @apiglobal scope
|
||||
import * as typedserver from '@api.global/typedserver';
|
||||
|
||||
export {
|
||||
typedserver
|
||||
}
|
||||
|
||||
// @push.rocks scope
|
||||
import * as consolecolor from '@push.rocks/consolecolor';
|
||||
import * as smartbrowser from '@push.rocks/smartbrowser';
|
||||
import * as smartchok from '@push.rocks/smartchok';
|
||||
import * as smartdelay from '@push.rocks/smartdelay';
|
||||
import * as smartfile from '@push.rocks/smartfile';
|
||||
import * as smartlog from '@push.rocks/smartlog';
|
||||
import * as smartnetwork from '@push.rocks/smartnetwork';
|
||||
import * as smartpromise from '@push.rocks/smartpromise';
|
||||
import * as smartshell from '@push.rocks/smartshell';
|
||||
import * as tapbundle from '../dist_ts_tapbundle/index.js';
|
||||
|
||||
export {
|
||||
consolecolor,
|
||||
smartbrowser,
|
||||
smartexpress,
|
||||
smartchok,
|
||||
smartdelay,
|
||||
smartfile,
|
||||
smartlog,
|
||||
smartnetwork,
|
||||
smartpromise,
|
||||
smartshell,
|
||||
tapbundle,
|
||||
};
|
||||
|
||||
// @gitzone scope
|
||||
import * as tsbundle from '@gitzone/tsbundle';
|
||||
// @git.zone scope
|
||||
import * as tsbundle from '@git.zone/tsbundle';
|
||||
|
||||
export { tsbundle };
|
||||
|
||||
// sindresorhus
|
||||
import * as figures from 'figures';
|
||||
import figures from 'figures';
|
||||
|
||||
export { figures };
|
||||
|
||||
// third party
|
||||
import * as ws from 'ws';
|
||||
|
||||
export {
|
||||
ws
|
||||
}
|
||||
|
ts_tapbundle/00_commitinfo_data.ts (new file, 8 lines)
@@ -0,0 +1,8 @@
/**
 * autocreated commitinfo by @push.rocks/commitinfo
 */
export const commitinfo = {
  name: '@push.rocks/tapbundle',
  version: '6.0.3',
  description: 'A comprehensive testing automation library that provides a wide range of utilities and tools for TAP (Test Anything Protocol) based testing, especially suitable for projects using tapbuffer.'
}
ts_tapbundle/index.ts (new file, 7 lines)
@@ -0,0 +1,7 @@
export { tap } from './tapbundle.classes.tap.js';
export { TapWrap } from './tapbundle.classes.tapwrap.js';
export { webhelpers } from './webhelpers.js';
export { TapTools } from './tapbundle.classes.taptools.js';

// Export enhanced expect with diff generation
export { expect, setProtocolEmitter } from './tapbundle.expect.wrapper.js';
ts_tapbundle/readme.md (new file, 389 lines)
@@ -0,0 +1,389 @@
# @git.zone/tstest/tapbundle

> 🧪 Core TAP testing framework with enhanced assertions and lifecycle hooks

## Installation

```bash
# tapbundle is typically included as part of @git.zone/tstest
pnpm install --save-dev @git.zone/tstest
```

## Overview

`@git.zone/tstest/tapbundle` is the core testing framework module that provides the TAP (Test Anything Protocol) implementation for tstest. It offers a comprehensive API for writing and organizing tests with support for lifecycle hooks, test suites, enhanced assertions with diff generation, and flexible test configuration.

## Key Features

- 🎯 **TAP Protocol Compliant** - Full TAP version 13 support
- 🔍 **Enhanced Assertions** - Built on smartexpect with automatic diff generation
- 🏗️ **Test Suites** - Organize tests with `describe()` blocks
- 🔄 **Lifecycle Hooks** - beforeEach/afterEach at suite and global levels
- 🏷️ **Test Tagging** - Filter tests by tags for selective execution
- ⚡ **Parallel Testing** - Run tests concurrently with `testParallel()`
- 🔁 **Automatic Retries** - Configure retry logic for flaky tests
- ⏱️ **Timeout Control** - Set timeouts at global, file, or test level
- 🎨 **Fluent API** - Chain test configurations with builder pattern
- 📊 **Protocol Events** - Real-time test execution events

## Basic Usage

### Simple Test File

```typescript
import { tap, expect } from '@git.zone/tstest/tapbundle';

tap.test('should add numbers correctly', async () => {
  const result = 2 + 2;
  expect(result).toEqual(4);
});

export default tap.start();
```

### Using Test Suites

```typescript
import { tap, expect } from '@git.zone/tstest/tapbundle';

tap.describe('Calculator', () => {
  tap.beforeEach(async (tapTools) => {
    // Setup before each test in this suite
  });

  tap.test('should add', async () => {
    expect(2 + 2).toEqual(4);
  });

  tap.test('should subtract', async () => {
    expect(5 - 3).toEqual(2);
  });

  tap.afterEach(async (tapTools) => {
    // Cleanup after each test in this suite
  });
});

export default tap.start();
```
|
||||
|
||||
## API Reference
|
||||
|
||||
### Main Test Methods
|
||||
|
||||
#### `tap.test(description, testFunction)`
|
||||
|
||||
Define a standard test that runs sequentially.
|
||||
|
||||
```typescript
|
||||
tap.test('should validate user input', async () => {
|
||||
// test code
|
||||
});
|
||||
```
|
||||
|
||||
#### `tap.testParallel(description, testFunction)`
|
||||
|
||||
Define a test that runs in parallel with other parallel tests.
|
||||
|
||||
```typescript
|
||||
tap.testParallel('should fetch user data', async () => {
|
||||
// test code
|
||||
});
|
||||
```
|
||||
|
||||
#### `tap.describe(description, suiteFunction)`
|
||||
|
||||
Create a test suite to group related tests.
|
||||
|
||||
```typescript
|
||||
tap.describe('User Authentication', () => {
|
||||
tap.test('should login', async () => { });
|
||||
tap.test('should logout', async () => { });
|
||||
});
|
||||
```
|
||||
|
||||
### Test Modes
|
||||
|
||||
#### Skip Tests
|
||||
|
||||
```typescript
|
||||
tap.skip.test('not ready yet', async () => {
|
||||
// This test will be skipped
|
||||
});
|
||||
```
|
||||
|
||||
#### Only Mode
|
||||
|
||||
```typescript
|
||||
tap.only.test('focus on this test', async () => {
|
||||
// Only tests marked with 'only' will run
|
||||
});
|
||||
```
|
||||
|
||||
#### Todo Tests
|
||||
|
||||
```typescript
|
||||
tap.todo.test('implement feature X');
|
||||
```
|
||||
|
||||
### Fluent Test Builder
|
||||
|
||||
Chain test configurations for expressive test definitions:
|
||||
|
||||
```typescript
|
||||
tap
|
||||
.tags('integration', 'database')
|
||||
.priority('high')
|
||||
.retry(3)
|
||||
.timeout(5000)
|
||||
.test('should handle database connection', async () => {
|
||||
// test with configured settings
|
||||
});
|
||||
```
|
||||
|
||||
### Lifecycle Hooks
|
||||
|
||||
#### Suite-Level Hooks
|
||||
|
||||
```typescript
|
||||
tap.describe('Database Tests', () => {
|
||||
tap.beforeEach(async (tapTools) => {
|
||||
// Runs before each test in this suite
|
||||
});
|
||||
|
||||
tap.afterEach(async (tapTools) => {
|
||||
// Runs after each test in this suite
|
||||
});
|
||||
|
||||
tap.test('test 1', async () => { });
|
||||
tap.test('test 2', async () => { });
|
||||
});
|
||||
```
|
||||
|
||||
#### Global Hooks
|
||||
|
||||
```typescript
|
||||
tap.settings({
|
||||
beforeAll: async () => {
|
||||
// Runs once before all tests
|
||||
},
|
||||
afterAll: async () => {
|
||||
// Runs once after all tests
|
||||
},
|
||||
beforeEach: async (testName) => {
|
||||
// Runs before every test
|
||||
},
|
||||
afterEach: async (testName, passed) => {
|
||||
// Runs after every test
|
||||
}
|
||||
});
|
||||
```
|
||||
|
||||
### Global Settings
|
||||
|
||||
Configure test behavior at the file level:
|
||||
|
||||
```typescript
|
||||
tap.settings({
|
||||
timeout: 10000, // Default timeout for all tests
|
||||
retries: 2, // Retry failed tests
|
||||
retryDelay: 1000, // Delay between retries
|
||||
bail: false, // Stop on first failure
|
||||
suppressConsole: false, // Hide console output
|
||||
verboseErrors: true, // Show full stack traces
|
||||
showTestDuration: true, // Display test durations
|
||||
maxConcurrency: 4, // Max parallel tests
|
||||
});
|
||||
```
|
||||
|
||||
### Enhanced Assertions
|
||||
|
||||
The `expect` function is an enhanced wrapper around [@push.rocks/smartexpect](https://code.foss.global/push.rocks/smartexpect) that automatically generates diffs for failed assertions.
|
||||
|
||||
```typescript
|
||||
import { expect } from '@git.zone/tstest/tapbundle';
|
||||
|
||||
tap.test('should compare objects', async () => {
|
||||
const actual = { name: 'John', age: 30 };
|
||||
const expected = { name: 'John', age: 31 };
|
||||
|
||||
// Will show a detailed diff of the differences
|
||||
expect(actual).toEqual(expected);
|
||||
});
|
||||
```
|
||||
|
||||
#### Available Assertions
|
||||
|
||||
```typescript
|
||||
// Equality
|
||||
expect(value).toEqual(expected);
|
||||
expect(value).toBe(expected);
|
||||
|
||||
// Truthiness
|
||||
expect(value).toBeTruthy();
|
||||
expect(value).toBeFalsy();
|
||||
|
||||
// Type checks
|
||||
expect(value).toBeType('string');
|
||||
|
||||
// Strings
|
||||
expect(string).toMatch(/pattern/);
|
||||
expect(string).toContain('substring');
|
||||
|
||||
// Arrays
|
||||
expect(array).toContain(item);
|
||||
|
||||
// Exceptions
|
||||
expect(fn).toThrow();
|
||||
expect(fn).toThrow('error message');
|
||||
|
||||
// Async
|
||||
await expect(promise).toResolve();
|
||||
await expect(promise).toReject();
|
||||
```
|
||||
|
||||
### Test Tagging and Filtering
|
||||
|
||||
Tag tests for selective execution:
|
||||
|
||||
```typescript
|
||||
// Define tests with tags
|
||||
tap.tags('integration', 'slow').test('complex test', async () => {
|
||||
// test code
|
||||
});
|
||||
|
||||
tap.tags('unit').test('fast test', async () => {
|
||||
// test code
|
||||
});
|
||||
```
|
||||
|
||||
Filter tests by setting the environment variable:
|
||||
|
||||
```bash
|
||||
TSTEST_FILTER_TAGS=unit tstest test/mytest.node.ts
|
||||
```
|
||||
|
||||
### TapTools
|
||||
|
||||
Each test receives a `tapTools` instance with utilities:
|
||||
|
||||
```typescript
|
||||
tap.test('should have utilities', async (tapTools) => {
|
||||
// Mark test as skipped
|
||||
tapTools.markAsSkipped('reason');
|
||||
|
||||
// Mark as todo
|
||||
tapTools.todo('not implemented');
|
||||
|
||||
// Configure retries
|
||||
tapTools.retry(3);
|
||||
|
||||
// Log test output
|
||||
tapTools.log('debug message');
|
||||
});
|
||||
```
|
||||
|
||||
## Advanced Features
|
||||
|
||||
### Pre-Tasks
|
||||
|
||||
Run setup tasks before any tests execute:
|
||||
|
||||
```typescript
|
||||
tap.preTask('setup database', async () => {
|
||||
// Runs before any tests
|
||||
});
|
||||
|
||||
tap.test('first test', async () => {
|
||||
// Database is ready
|
||||
});
|
||||
```
|
||||
|
||||
### Test Priority
|
||||
|
||||
Organize tests by priority level:
|
||||
|
||||
```typescript
|
||||
tap.priority('high').test('critical test', async () => { });
|
||||
tap.priority('medium').test('normal test', async () => { });
|
||||
tap.priority('low').test('optional test', async () => { });
|
||||
```
|
||||
|
||||
### Nested Suites
|
||||
|
||||
Create deeply nested test organization:
|
||||
|
||||
```typescript
|
||||
tap.describe('API', () => {
|
||||
tap.describe('Users', () => {
|
||||
tap.describe('GET /users', () => {
|
||||
tap.test('should return all users', async () => { });
|
||||
});
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
### Protocol Events
|
||||
|
||||
Access real-time test events for custom tooling:
|
||||
|
||||
```typescript
|
||||
import { setProtocolEmitter } from '@git.zone/tstest/tapbundle';
|
||||
|
||||
// Get access to protocol emitter for custom event handling
|
||||
// Events: test:started, test:completed, assertion:failed, suite:started, suite:completed
|
||||
```
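
The exact signature of `setProtocolEmitter` is not documented here, but the events themselves carry an `eventType`, a `timestamp`, and a `data` payload. A rough, non-authoritative sketch of a consumer (the handler wiring is an assumption):

```typescript
// Sketch only: mirrors the { eventType, timestamp, data } shape used internally.
interface ObservedTestEvent {
  eventType: string; // e.g. 'test:started', 'test:completed', 'assertion:failed'
  timestamp: number; // Date.now() at emission time
  data: Record<string, unknown>;
}

// A hypothetical consumer that reacts to failed assertions.
function onProtocolEvent(event: ObservedTestEvent) {
  if (event.eventType === 'assertion:failed') {
    console.error(`Assertion failed at ${new Date(event.timestamp).toISOString()}`, event.data);
  }
}
```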
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Always export `tap.start()`** at the end of test files:
|
||||
```typescript
|
||||
export default tap.start();
|
||||
```
|
||||
|
||||
2. **Use descriptive test names** that explain what is being tested:
|
||||
```typescript
|
||||
tap.test('should return 404 when user does not exist', async () => { });
|
||||
```
|
||||
|
||||
3. **Group related tests** with `describe()` blocks:
|
||||
```typescript
|
||||
tap.describe('User validation', () => {
|
||||
// All user validation tests
|
||||
});
|
||||
```
|
||||
|
||||
4. **Leverage lifecycle hooks** to reduce duplication:
|
||||
```typescript
|
||||
tap.beforeEach(async () => {
|
||||
// Common setup
|
||||
});
|
||||
```
|
||||
|
||||
5. **Tag tests appropriately** for flexible test execution:
|
||||
```typescript
|
||||
tap.tags('integration', 'database').test('...', async () => { });
|
||||
```
|
||||
|
||||
## TypeScript Support
|
||||
|
||||
tapbundle is written in TypeScript and provides full type definitions. The `Tap` class accepts a generic type for shared context:
|
||||
|
||||
```typescript
|
||||
interface MyTestContext {
|
||||
db: DatabaseConnection;
|
||||
user: User;
|
||||
}
|
||||
|
||||
const tap = new Tap<MyTestContext>();
|
||||
|
||||
tap.test('should use context', async (tapTools) => {
|
||||
// tapTools is typed with MyTestContext
|
||||
});
|
||||
```
|
||||
|
||||
## Legal
|
||||
|
||||
This project is licensed under MIT.
|
||||
|
||||
© 2025 Task Venture Capital GmbH. All rights reserved.
ts_tapbundle/tapbundle.classes.pretask.ts (new file, 21 lines)
@@ -0,0 +1,21 @@
import * as plugins from './tapbundle.plugins.js';
import { TapTools } from './tapbundle.classes.taptools.js';

export interface IPreTaskFunction {
  (tapTools?: TapTools): Promise<any>;
}

export class PreTask {
  public description: string;
  public preTaskFunction: IPreTaskFunction;

  constructor(descriptionArg: string, preTaskFunctionArg: IPreTaskFunction) {
    this.description = descriptionArg;
    this.preTaskFunction = preTaskFunctionArg;
  }

  public async run() {
    console.log(`::__PRETASK: ${this.description}`);
    await this.preTaskFunction(new TapTools(null));
  }
}
ts_tapbundle/tapbundle.classes.settingsmanager.ts (new file, 117 lines)
@@ -0,0 +1,117 @@
import type { ITapSettings, ISettingsManager } from './tapbundle.interfaces.js';

export class SettingsManager implements ISettingsManager {
  private globalSettings: ITapSettings = {};
  private fileSettings: ITapSettings = {};
  private testSettings: Map<string, ITapSettings> = new Map();

  // Default settings
  private defaultSettings: ITapSettings = {
    timeout: undefined, // No timeout by default
    slowThreshold: 1000, // 1 second
    bail: false,
    retries: 0,
    retryDelay: 0,
    suppressConsole: false,
    verboseErrors: true,
    showTestDuration: true,
    maxConcurrency: 5,
    isolateTests: false,
    enableSnapshots: true,
    snapshotDirectory: '.snapshots',
    updateSnapshots: false,
  };
||||
|
||||
/**
|
||||
* Get merged settings for current context
|
||||
*/
|
||||
public getSettings(): ITapSettings {
|
||||
return this.mergeSettings(
|
||||
this.defaultSettings,
|
||||
this.globalSettings,
|
||||
this.fileSettings
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Set global settings (from 00init.ts or tap.settings())
|
||||
*/
|
||||
public setGlobalSettings(settings: ITapSettings): void {
|
||||
this.globalSettings = { ...this.globalSettings, ...settings };
|
||||
}
|
||||
|
||||
/**
|
||||
* Set file-level settings
|
||||
*/
|
||||
public setFileSettings(settings: ITapSettings): void {
|
||||
this.fileSettings = { ...this.fileSettings, ...settings };
|
||||
}
|
||||
|
||||
/**
|
||||
* Set test-specific settings
|
||||
*/
|
||||
public setTestSettings(testId: string, settings: ITapSettings): void {
|
||||
const existingSettings = this.testSettings.get(testId) || {};
|
||||
this.testSettings.set(testId, { ...existingSettings, ...settings });
|
||||
}
|
||||
|
||||
/**
|
||||
* Get settings for specific test
|
||||
*/
|
||||
public getTestSettings(testId: string): ITapSettings {
|
||||
const testSpecificSettings = this.testSettings.get(testId) || {};
|
||||
return this.mergeSettings(
|
||||
this.defaultSettings,
|
||||
this.globalSettings,
|
||||
this.fileSettings,
|
||||
testSpecificSettings
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Merge settings with proper inheritance
|
||||
* Later settings override earlier ones
|
||||
*/
|
||||
private mergeSettings(...settingsArray: ITapSettings[]): ITapSettings {
|
||||
const result: ITapSettings = {};
|
||||
|
||||
for (const settings of settingsArray) {
|
||||
// Simple properties - later values override
|
||||
if (settings.timeout !== undefined) result.timeout = settings.timeout;
|
||||
if (settings.slowThreshold !== undefined) result.slowThreshold = settings.slowThreshold;
|
||||
if (settings.bail !== undefined) result.bail = settings.bail;
|
||||
if (settings.retries !== undefined) result.retries = settings.retries;
|
||||
if (settings.retryDelay !== undefined) result.retryDelay = settings.retryDelay;
|
||||
if (settings.suppressConsole !== undefined) result.suppressConsole = settings.suppressConsole;
|
||||
if (settings.verboseErrors !== undefined) result.verboseErrors = settings.verboseErrors;
|
||||
if (settings.showTestDuration !== undefined) result.showTestDuration = settings.showTestDuration;
|
||||
if (settings.maxConcurrency !== undefined) result.maxConcurrency = settings.maxConcurrency;
|
||||
if (settings.isolateTests !== undefined) result.isolateTests = settings.isolateTests;
|
||||
if (settings.enableSnapshots !== undefined) result.enableSnapshots = settings.enableSnapshots;
|
||||
if (settings.snapshotDirectory !== undefined) result.snapshotDirectory = settings.snapshotDirectory;
|
||||
if (settings.updateSnapshots !== undefined) result.updateSnapshots = settings.updateSnapshots;
|
||||
|
||||
// Lifecycle hooks - later ones override
|
||||
if (settings.beforeAll !== undefined) result.beforeAll = settings.beforeAll;
|
||||
if (settings.afterAll !== undefined) result.afterAll = settings.afterAll;
|
||||
if (settings.beforeEach !== undefined) result.beforeEach = settings.beforeEach;
|
||||
if (settings.afterEach !== undefined) result.afterEach = settings.afterEach;
|
||||
|
||||
// Environment variables - merge
|
||||
if (settings.env) {
|
||||
result.env = { ...result.env, ...settings.env };
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* Clear all settings (useful for testing)
|
||||
*/
|
||||
public clearSettings(): void {
|
||||
this.globalSettings = {};
|
||||
this.fileSettings = {};
|
||||
this.testSettings.clear();
|
||||
}
|
||||
}
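Not part of the file above, but as a small usage sketch of the precedence that `mergeSettings` implements (defaults, then global, then file, then test-specific settings, with later layers winning; the test id is hypothetical):

```typescript
const settings = new SettingsManager();
settings.setGlobalSettings({ timeout: 10000, retries: 1 });
settings.setFileSettings({ retries: 2 });
settings.setTestSettings('flaky-test', { timeout: 500 });

settings.getSettings();                 // merged defaults + global + file: timeout 10000, retries 2
settings.getTestSettings('flaky-test'); // adds the test layer on top:     timeout 500,  retries 2
```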
ts_tapbundle/tapbundle.classes.tap.ts (new file, 710 lines)
@@ -0,0 +1,710 @@
|
||||
import * as plugins from './tapbundle.plugins.js';
|
||||
|
||||
import { type IPreTaskFunction, PreTask } from './tapbundle.classes.pretask.js';
|
||||
import { TapTest, type ITestFunction } from './tapbundle.classes.taptest.js';
|
||||
import { ProtocolEmitter, type ITestEvent } from '../dist_ts_tapbundle_protocol/index.js';
|
||||
import type { ITapSettings } from './tapbundle.interfaces.js';
|
||||
import { SettingsManager } from './tapbundle.classes.settingsmanager.js';
|
||||
|
||||
export interface ITestSuite {
|
||||
description: string;
|
||||
tests: TapTest<any>[];
|
||||
beforeEach?: ITestFunction<any>;
|
||||
afterEach?: ITestFunction<any>;
|
||||
parent?: ITestSuite;
|
||||
children: ITestSuite[];
|
||||
}
|
||||
|
||||
class TestBuilder<T> {
|
||||
private _tap: Tap<T>;
|
||||
private _tags: string[] = [];
|
||||
private _priority: 'high' | 'medium' | 'low' = 'medium';
|
||||
private _retryCount?: number;
|
||||
private _timeoutMs?: number;
|
||||
|
||||
constructor(tap: Tap<T>) {
|
||||
this._tap = tap;
|
||||
}
|
||||
|
||||
tags(...tags: string[]) {
|
||||
this._tags = tags;
|
||||
return this;
|
||||
}
|
||||
|
||||
priority(level: 'high' | 'medium' | 'low') {
|
||||
this._priority = level;
|
||||
return this;
|
||||
}
|
||||
|
||||
retry(count: number) {
|
||||
this._retryCount = count;
|
||||
return this;
|
||||
}
|
||||
|
||||
timeout(ms: number) {
|
||||
this._timeoutMs = ms;
|
||||
return this;
|
||||
}
|
||||
|
||||
test(description: string, testFunction: ITestFunction<T>) {
|
||||
const test = this._tap.test(description, testFunction, 'normal');
|
||||
|
||||
// Apply settings to the test
|
||||
if (this._tags.length > 0) {
|
||||
test.tags = this._tags;
|
||||
}
|
||||
test.priority = this._priority;
|
||||
|
||||
if (this._retryCount !== undefined) {
|
||||
test.tapTools.retry(this._retryCount);
|
||||
}
|
||||
if (this._timeoutMs !== undefined) {
|
||||
test.timeoutMs = this._timeoutMs;
|
||||
}
|
||||
|
||||
return test;
|
||||
}
|
||||
|
||||
testOnly(description: string, testFunction: ITestFunction<T>) {
|
||||
const test = this._tap.test(description, testFunction, 'only');
|
||||
|
||||
// Apply settings to the test
|
||||
if (this._tags.length > 0) {
|
||||
test.tags = this._tags;
|
||||
}
|
||||
test.priority = this._priority;
|
||||
|
||||
if (this._retryCount !== undefined) {
|
||||
test.tapTools.retry(this._retryCount);
|
||||
}
|
||||
if (this._timeoutMs !== undefined) {
|
||||
test.timeoutMs = this._timeoutMs;
|
||||
}
|
||||
|
||||
return test;
|
||||
}
|
||||
|
||||
testSkip(description: string, testFunction: ITestFunction<T>) {
|
||||
const test = this._tap.test(description, testFunction, 'skip');
|
||||
|
||||
// Apply settings to the test
|
||||
if (this._tags.length > 0) {
|
||||
test.tags = this._tags;
|
||||
}
|
||||
test.priority = this._priority;
|
||||
|
||||
if (this._retryCount !== undefined) {
|
||||
test.tapTools.retry(this._retryCount);
|
||||
}
|
||||
if (this._timeoutMs !== undefined) {
|
||||
test.timeoutMs = this._timeoutMs;
|
||||
}
|
||||
|
||||
return test;
|
||||
}
|
||||
}
|
||||
|
||||
export class Tap<T> {
|
||||
private protocolEmitter = new ProtocolEmitter();
|
||||
private settingsManager = new SettingsManager();
|
||||
private _skipCount = 0;
|
||||
private _filterTags: string[] = [];
|
||||
|
||||
constructor() {
|
||||
// Get filter tags from environment
|
||||
if (typeof process !== 'undefined' && process.env && process.env.TSTEST_FILTER_TAGS) {
|
||||
this._filterTags = process.env.TSTEST_FILTER_TAGS.split(',');
|
||||
}
|
||||
}
|
||||
|
||||
// Fluent test builder
|
||||
public tags(...tags: string[]) {
|
||||
const builder = new TestBuilder<T>(this);
|
||||
return builder.tags(...tags);
|
||||
}
|
||||
|
||||
public priority(level: 'high' | 'medium' | 'low') {
|
||||
const builder = new TestBuilder<T>(this);
|
||||
return builder.priority(level);
|
||||
}
|
||||
|
||||
public retry(count: number) {
|
||||
const builder = new TestBuilder<T>(this);
|
||||
return builder.retry(count);
|
||||
}
|
||||
|
||||
public timeout(ms: number) {
|
||||
const builder = new TestBuilder<T>(this);
|
||||
return builder.timeout(ms);
|
||||
}
|
||||
|
||||
/**
|
||||
* skips a test
|
||||
* tests marked with tap.skip.test() are never executed
|
||||
*/
|
||||
public skip = {
|
||||
test: (descriptionArg: string, functionArg: ITestFunction<T>) => {
|
||||
const skippedTest = this.test(descriptionArg, functionArg, 'skip');
|
||||
return skippedTest;
|
||||
},
|
||||
testParallel: (descriptionArg: string, functionArg: ITestFunction<T>) => {
|
||||
const skippedTest = new TapTest<T>({
|
||||
description: descriptionArg,
|
||||
testFunction: functionArg,
|
||||
parallel: true,
|
||||
});
|
||||
|
||||
// Mark as skip mode
|
||||
skippedTest.tapTools.markAsSkipped('Marked as skip');
|
||||
|
||||
// Add to appropriate test list
|
||||
if (this._currentSuite) {
|
||||
this._currentSuite.tests.push(skippedTest);
|
||||
} else {
|
||||
this._tapTests.push(skippedTest);
|
||||
}
|
||||
|
||||
return skippedTest;
|
||||
},
|
||||
};
|
||||
|
||||
/**
|
||||
* only executes tests marked as ONLY
|
||||
*/
|
||||
public only = {
|
||||
test: (descriptionArg: string, testFunctionArg: ITestFunction<T>) => {
|
||||
return this.test(descriptionArg, testFunctionArg, 'only');
|
||||
},
|
||||
testParallel: (descriptionArg: string, testFunctionArg: ITestFunction<T>) => {
|
||||
const onlyTest = new TapTest<T>({
|
||||
description: descriptionArg,
|
||||
testFunction: testFunctionArg,
|
||||
parallel: true,
|
||||
});
|
||||
|
||||
// Add to only tests list
|
||||
this._tapTestsOnly.push(onlyTest);
|
||||
|
||||
return onlyTest;
|
||||
},
|
||||
};
|
||||
|
||||
/**
|
||||
* mark a test as todo (not yet implemented)
|
||||
*/
|
||||
public todo = {
|
||||
test: (descriptionArg: string, functionArg?: ITestFunction<T>) => {
|
||||
const defaultFunc = (async () => {}) as ITestFunction<T>;
|
||||
const todoTest = new TapTest<T>({
|
||||
description: descriptionArg,
|
||||
testFunction: functionArg || defaultFunc,
|
||||
parallel: false,
|
||||
});
|
||||
|
||||
// Mark as todo
|
||||
todoTest.tapTools.todo('Marked as todo');
|
||||
|
||||
// Add to appropriate test list
|
||||
if (this._currentSuite) {
|
||||
this._currentSuite.tests.push(todoTest);
|
||||
} else {
|
||||
this._tapTests.push(todoTest);
|
||||
}
|
||||
|
||||
return todoTest;
|
||||
},
|
||||
testParallel: (descriptionArg: string, functionArg?: ITestFunction<T>) => {
|
||||
const defaultFunc = (async () => {}) as ITestFunction<T>;
|
||||
const todoTest = new TapTest<T>({
|
||||
description: descriptionArg,
|
||||
testFunction: functionArg || defaultFunc,
|
||||
parallel: true,
|
||||
});
|
||||
|
||||
// Mark as todo
|
||||
todoTest.tapTools.todo('Marked as todo');
|
||||
|
||||
// Add to appropriate test list
|
||||
if (this._currentSuite) {
|
||||
this._currentSuite.tests.push(todoTest);
|
||||
} else {
|
||||
this._tapTests.push(todoTest);
|
||||
}
|
||||
|
||||
return todoTest;
|
||||
},
|
||||
};
|
||||
|
||||
private _tapPreTasks: PreTask[] = [];
|
||||
private _tapTests: TapTest<any>[] = [];
|
||||
private _tapTestsOnly: TapTest<any>[] = [];
|
||||
private _currentSuite: ITestSuite | null = null;
|
||||
private _rootSuites: ITestSuite[] = [];
|
||||
|
||||
/**
|
||||
* Configure global test settings
|
||||
*/
|
||||
public settings(settings: ITapSettings): this {
|
||||
this.settingsManager.setGlobalSettings(settings);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get current test settings
|
||||
*/
|
||||
public getSettings(): ITapSettings {
|
||||
return this.settingsManager.getSettings();
|
||||
}
|
||||
|
||||
/**
|
||||
* Normal test function, will run one by one
|
||||
* @param testDescription - A description of what the test does
|
||||
* @param testFunction - A Function that returns a Promise and resolves or rejects
|
||||
*/
|
||||
public test(
|
||||
testDescription: string,
|
||||
testFunction: ITestFunction<T>,
|
||||
modeArg: 'normal' | 'only' | 'skip' = 'normal'
|
||||
): TapTest<T> {
|
||||
const localTest = new TapTest<T>({
|
||||
description: testDescription,
|
||||
testFunction,
|
||||
parallel: false,
|
||||
});
|
||||
|
||||
// Apply default settings from settings manager
|
||||
const settings = this.settingsManager.getSettings();
|
||||
if (settings.timeout !== undefined) {
|
||||
localTest.timeoutMs = settings.timeout;
|
||||
}
|
||||
if (settings.retries !== undefined) {
|
||||
localTest.tapTools.retry(settings.retries);
|
||||
}
|
||||
|
||||
// Handle skip mode
|
||||
if (modeArg === 'skip') {
|
||||
localTest.tapTools.markAsSkipped('Marked as skip');
|
||||
}
|
||||
|
||||
// If we're in a suite, add test to the suite
|
||||
if (this._currentSuite) {
|
||||
this._currentSuite.tests.push(localTest);
|
||||
} else {
|
||||
// Otherwise add to global test list
|
||||
if (modeArg === 'normal' || modeArg === 'skip') {
|
||||
this._tapTests.push(localTest);
|
||||
} else if (modeArg === 'only') {
|
||||
this._tapTestsOnly.push(localTest);
|
||||
}
|
||||
}
|
||||
return localTest;
|
||||
}
|
||||
|
||||
public preTask(descriptionArg: string, functionArg: IPreTaskFunction) {
|
||||
this._tapPreTasks.push(new PreTask(descriptionArg, functionArg));
|
||||
}
|
||||
|
||||
/**
|
||||
* A parallel test that will not be waited for before the next starts.
|
||||
* @param testDescription - A description of what the test does
|
||||
* @param testFunction - A Function that returns a Promise and resolves or rejects
|
||||
*/
|
||||
public testParallel(testDescription: string, testFunction: ITestFunction<T>) {
|
||||
const localTest = new TapTest({
|
||||
description: testDescription,
|
||||
testFunction,
|
||||
parallel: true,
|
||||
});
|
||||
|
||||
// Apply default settings from settings manager
|
||||
const settings = this.settingsManager.getSettings();
|
||||
if (settings.timeout !== undefined) {
|
||||
localTest.timeoutMs = settings.timeout;
|
||||
}
|
||||
if (settings.retries !== undefined) {
|
||||
localTest.tapTools.retry(settings.retries);
|
||||
}
|
||||
|
||||
if (this._currentSuite) {
|
||||
this._currentSuite.tests.push(localTest);
|
||||
} else {
|
||||
this._tapTests.push(localTest);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a test suite for grouping related tests
|
||||
*/
|
||||
public describe(description: string, suiteFunction: () => void) {
|
||||
const suite: ITestSuite = {
|
||||
description,
|
||||
tests: [],
|
||||
children: [],
|
||||
parent: this._currentSuite,
|
||||
};
|
||||
|
||||
// Add to parent or root
|
||||
if (this._currentSuite) {
|
||||
this._currentSuite.children.push(suite);
|
||||
} else {
|
||||
this._rootSuites.push(suite);
|
||||
}
|
||||
|
||||
// Execute suite function in context
|
||||
const previousSuite = this._currentSuite;
|
||||
this._currentSuite = suite;
|
||||
try {
|
||||
suiteFunction();
|
||||
} finally {
|
||||
this._currentSuite = previousSuite;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Set up a function to run before each test in the current suite
|
||||
*/
|
||||
public beforeEach(setupFunction: ITestFunction<any>) {
|
||||
if (this._currentSuite) {
|
||||
this._currentSuite.beforeEach = setupFunction;
|
||||
} else {
|
||||
throw new Error('beforeEach can only be used inside a describe block');
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Set up a function to run after each test in the current suite
|
||||
*/
|
||||
public afterEach(teardownFunction: ITestFunction<any>) {
|
||||
if (this._currentSuite) {
|
||||
this._currentSuite.afterEach = teardownFunction;
|
||||
} else {
|
||||
throw new Error('afterEach can only be used inside a describe block');
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* collect all tests from suites
|
||||
*/
|
||||
private _collectTests(suite: ITestSuite, tests: TapTest<any>[] = []): TapTest<any>[] {
|
||||
tests.push(...suite.tests);
|
||||
for (const childSuite of suite.children) {
|
||||
this._collectTests(childSuite, tests);
|
||||
}
|
||||
return tests;
|
||||
}
|
||||
|
||||
/**
|
||||
* starts the test evaluation
|
||||
*/
|
||||
public async start(optionsArg?: { throwOnError: boolean }) {
|
||||
// lets set the tapbundle promise
|
||||
const smartenvInstance = new plugins.smartenv.Smartenv();
|
||||
const globalPromise = plugins.smartpromise.defer();
|
||||
smartenvInstance.isBrowser
|
||||
? ((globalThis as any).tapbundleDeferred = globalPromise)
|
||||
: null;
|
||||
// Also set tapPromise for backwards compatibility
|
||||
smartenvInstance.isBrowser
|
||||
? ((globalThis as any).tapPromise = globalPromise.promise)
|
||||
: null;
|
||||
|
||||
// Path helpers will be initialized by the Node.js environment if available
|
||||
|
||||
// lets continue with running the tests
|
||||
const promiseArray: Array<Promise<any>> = [];
|
||||
|
||||
// Collect all tests including those in suites
|
||||
let allTests: TapTest<any>[] = [...this._tapTests];
|
||||
for (const suite of this._rootSuites) {
|
||||
this._collectTests(suite, allTests);
|
||||
}
|
||||
|
||||
// safeguard against empty test array
|
||||
if (allTests.length === 0 && this._tapTestsOnly.length === 0) {
|
||||
console.log('no tests specified. Ending here!');
|
||||
return;
|
||||
}
|
||||
|
||||
// determine which tests to run
|
||||
let concerningTests: TapTest[];
|
||||
if (this._tapTestsOnly.length > 0) {
|
||||
concerningTests = this._tapTestsOnly;
|
||||
} else {
|
||||
concerningTests = allTests;
|
||||
}
|
||||
|
||||
// Filter tests by tags if specified
|
||||
if (this._filterTags.length > 0) {
|
||||
concerningTests = concerningTests.filter(test => {
|
||||
// Skip tests without tags when filtering is active
|
||||
if (!test.tags || test.tags.length === 0) {
|
||||
return false;
|
||||
}
|
||||
// Check if test has any of the filter tags
|
||||
return test.tags.some(tag => this._filterTags.includes(tag));
|
||||
});
|
||||
}
|
||||
|
||||
// lets run the pretasks
|
||||
for (const preTask of this._tapPreTasks) {
|
||||
await preTask.run();
|
||||
}
|
||||
|
||||
// Emit protocol header and TAP version
|
||||
console.log(this.protocolEmitter.emitProtocolHeader());
|
||||
console.log(this.protocolEmitter.emitTapVersion(13));
|
||||
|
||||
// Emit test plan
|
||||
const plan = {
|
||||
start: 1,
|
||||
end: concerningTests.length
|
||||
};
|
||||
console.log(this.protocolEmitter.emitPlan(plan));
|
||||
|
||||
// Run global beforeAll hook if configured
|
||||
const settings = this.settingsManager.getSettings();
|
||||
if (settings.beforeAll) {
|
||||
try {
|
||||
await settings.beforeAll();
|
||||
} catch (error) {
|
||||
console.error('Error in beforeAll hook:', error);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
// Run tests from suites with lifecycle hooks
|
||||
let testKey = 0;
|
||||
|
||||
// Run root suite tests with lifecycle hooks
|
||||
if (this._rootSuites.length > 0) {
|
||||
await this._runSuite(null, this._rootSuites, promiseArray, { testKey });
|
||||
// Update testKey after running suite tests
|
||||
for (const suite of this._rootSuites) {
|
||||
const suiteTests = this._collectTests(suite);
|
||||
testKey += suiteTests.length;
|
||||
}
|
||||
}
|
||||
|
||||
// Run non-suite tests (tests added directly without describe)
|
||||
const nonSuiteTests = concerningTests.filter(test => {
|
||||
// Check if test is not in any suite
|
||||
for (const suite of this._rootSuites) {
|
||||
const suiteTests = this._collectTests(suite);
|
||||
if (suiteTests.includes(test)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
});
|
||||
|
||||
for (const currentTest of nonSuiteTests) {
|
||||
// Wrap test function with global lifecycle hooks
|
||||
const originalFunction = currentTest.testFunction;
|
||||
const testName = currentTest.description;
|
||||
currentTest.testFunction = async (tapTools) => {
|
||||
// Run global beforeEach if configured
|
||||
if (settings.beforeEach) {
|
||||
await settings.beforeEach(testName);
|
||||
}
|
||||
|
||||
// Run the actual test
|
||||
let testPassed = true;
|
||||
let result: any;
|
||||
try {
|
||||
result = await originalFunction(tapTools);
|
||||
} catch (error) {
|
||||
testPassed = false;
|
||||
throw error;
|
||||
} finally {
|
||||
// Run global afterEach if configured
|
||||
if (settings.afterEach) {
|
||||
await settings.afterEach(testName, testPassed);
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
};
|
||||
|
||||
const testPromise = currentTest.run(testKey++);
|
||||
if (currentTest.parallel) {
|
||||
promiseArray.push(testPromise);
|
||||
} else {
|
||||
await testPromise;
|
||||
}
|
||||
}
|
||||
|
||||
await Promise.all(promiseArray);
|
||||
|
||||
// when tests have been run and all promises are fulfilled
|
||||
const failReasons: string[] = [];
|
||||
const executionNotes: string[] = [];
|
||||
// collect failed tests
|
||||
for (const tapTest of concerningTests) {
|
||||
if (tapTest.status !== 'success' && tapTest.status !== 'skipped') {
|
||||
failReasons.push(
|
||||
`Test ${tapTest.testKey + 1} failed with status ${tapTest.status}:\n` +
|
||||
`|| ${tapTest.description}\n` +
|
||||
`|| for more information please take a look at the logs above`,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// render fail Reasons
|
||||
for (const failReason of failReasons) {
|
||||
console.log(failReason);
|
||||
}
|
||||
|
||||
// Run global afterAll hook if configured
|
||||
if (settings.afterAll) {
|
||||
try {
|
||||
await settings.afterAll();
|
||||
} catch (error) {
|
||||
console.error('Error in afterAll hook:', error);
|
||||
// Don't throw here, we want to complete the test run
|
||||
}
|
||||
}
|
||||
|
||||
if (optionsArg && optionsArg.throwOnError && failReasons.length > 0) {
|
||||
if (!smartenvInstance.isBrowser && typeof process !== 'undefined') process.exit(1);
|
||||
}
|
||||
if (smartenvInstance.isBrowser) {
|
||||
globalPromise.resolve();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Emit an event
|
||||
*/
|
||||
private emitEvent(event: ITestEvent) {
|
||||
console.log(this.protocolEmitter.emitEvent(event));
|
||||
}
|
||||
|
||||
/**
|
||||
* Run tests in a suite with lifecycle hooks
|
||||
*/
|
||||
private async _runSuite(
|
||||
parentSuite: ITestSuite | null,
|
||||
suites: ITestSuite[],
|
||||
promiseArray: Promise<any>[],
|
||||
context: { testKey: number }
|
||||
) {
|
||||
for (const suite of suites) {
|
||||
// Emit suite:started event
|
||||
this.emitEvent({
|
||||
eventType: 'suite:started',
|
||||
timestamp: Date.now(),
|
||||
data: {
|
||||
suiteName: suite.description
|
||||
}
|
||||
});
|
||||
// Run beforeEach from parent suites
|
||||
const beforeEachFunctions: ITestFunction<any>[] = [];
|
||||
let currentSuite: ITestSuite | null = suite;
|
||||
while (currentSuite) {
|
||||
if (currentSuite.beforeEach) {
|
||||
beforeEachFunctions.unshift(currentSuite.beforeEach);
|
||||
}
|
||||
currentSuite = currentSuite.parent || null;
|
||||
}
|
||||
|
||||
// Run tests in this suite
|
||||
for (const test of suite.tests) {
|
||||
// Create wrapper test function that includes lifecycle hooks
|
||||
const originalFunction = test.testFunction;
|
||||
const testName = test.description;
|
||||
test.testFunction = async (tapTools) => {
|
||||
// Run global beforeEach if configured
|
||||
const settings = this.settingsManager.getSettings();
|
||||
if (settings.beforeEach) {
|
||||
await settings.beforeEach(testName);
|
||||
}
|
||||
|
||||
// Run all suite beforeEach hooks
|
||||
for (const beforeEach of beforeEachFunctions) {
|
||||
await beforeEach(tapTools);
|
||||
}
|
||||
|
||||
// Run the actual test
|
||||
let testPassed = true;
|
||||
let result: any;
|
||||
try {
|
||||
result = await originalFunction(tapTools);
|
||||
} catch (error) {
|
||||
testPassed = false;
|
||||
throw error;
|
||||
} finally {
|
||||
// Run afterEach hooks in reverse order
|
||||
const afterEachFunctions: ITestFunction<any>[] = [];
|
||||
currentSuite = suite;
|
||||
while (currentSuite) {
|
||||
if (currentSuite.afterEach) {
|
||||
afterEachFunctions.push(currentSuite.afterEach);
|
||||
}
|
||||
currentSuite = currentSuite.parent || null;
|
||||
}
|
||||
|
||||
for (const afterEach of afterEachFunctions) {
|
||||
await afterEach(tapTools);
|
||||
}
|
||||
|
||||
// Run global afterEach if configured
|
||||
if (settings.afterEach) {
|
||||
await settings.afterEach(testName, testPassed);
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
};
|
||||
|
||||
const testPromise = test.run(context.testKey++);
|
||||
if (test.parallel) {
|
||||
promiseArray.push(testPromise);
|
||||
} else {
|
||||
await testPromise;
|
||||
}
|
||||
}
|
||||
|
||||
// Recursively run child suites
|
||||
await this._runSuite(suite, suite.children, promiseArray, context);
|
||||
|
||||
// Emit suite:completed event
|
||||
this.emitEvent({
|
||||
eventType: 'suite:completed',
|
||||
timestamp: Date.now(),
|
||||
data: {
|
||||
suiteName: suite.description
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
public async stopForcefully(codeArg = 0, directArg = false) {
|
||||
console.log(`tap stopping forcefully! Code: ${codeArg} / Direct: ${directArg}`);
|
||||
if (typeof process !== 'undefined') {
|
||||
if (directArg) {
|
||||
process.exit(codeArg);
|
||||
} else {
|
||||
setTimeout(() => {
|
||||
process.exit(codeArg);
|
||||
}, 10);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* handle errors
|
||||
*/
|
||||
public threw(err: Error) {
|
||||
console.log(err);
|
||||
}
|
||||
|
||||
/**
|
||||
* Explicitly fail the current test with a custom message
|
||||
* @param message - The failure message to display
|
||||
*/
|
||||
public fail(message: string = 'Test failed'): never {
|
||||
throw new Error(message);
|
||||
}
|
||||
}
|
||||
|
||||
export const tap = new Tap();
|
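To make the runner logic above concrete, here is a minimal, hypothetical usage sketch. The tap.test() registration and tap.start() entry points, and the import location, are assumptions (they live elsewhere in the module and are not part of this diff); only the throwOnError and lifecycle-hook behaviour is taken from the run logic shown above.

// sketch only: assumes `import { tap, expect }` from the tapbundle entry point (path not shown in this diff)
tap.test('adds numbers', async () => {
  expect(1 + 1).toEqual(2);
});

// assumed start call: with throwOnError, a failed run calls process.exit(1) when running under Node
await tap.start({ throwOnError: true });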
ts_tapbundle/tapbundle.classes.taptest.ts (new file, 318 lines)
@@ -0,0 +1,318 @@
|
||||
import * as plugins from './tapbundle.plugins.js';
|
||||
import { tapCreator } from './tapbundle.tapcreator.js';
|
||||
import { TapTools, SkipError } from './tapbundle.classes.taptools.js';
|
||||
import { ProtocolEmitter, type ITestEvent } from '../dist_ts_tapbundle_protocol/index.js';
|
||||
import { setProtocolEmitter } from './tapbundle.expect.wrapper.js';
|
||||
|
||||
// imported interfaces
|
||||
import { Deferred } from '@push.rocks/smartpromise';
|
||||
import { HrtMeasurement } from '@push.rocks/smarttime';
|
||||
|
||||
// interfaces
|
||||
export type TTestStatus = 'success' | 'error' | 'pending' | 'errorAfterSuccess' | 'timeout' | 'skipped';
|
||||
|
||||
export type ITestFunction<T> =
|
||||
| ((tapTools: TapTools) => Promise<T>)
|
||||
| (() => Promise<T>);
|
||||
|
||||
export class TapTest<T = unknown> {
|
||||
public description: string;
|
||||
public failureAllowed: boolean;
|
||||
public hrtMeasurement: HrtMeasurement;
|
||||
public parallel: boolean;
|
||||
public status: TTestStatus;
|
||||
public tapTools: TapTools;
|
||||
public testFunction: ITestFunction<T>;
|
||||
public testKey: number; // the testKey is the position in the test queue. Set upon calling .run()
|
||||
public timeoutMs?: number;
|
||||
public isTodo: boolean = false;
|
||||
public todoReason?: string;
|
||||
public tags: string[] = [];
|
||||
public priority: 'high' | 'medium' | 'low' = 'medium';
|
||||
public fileName?: string;
|
||||
private testDeferred: Deferred<TapTest<T>> = plugins.smartpromise.defer();
|
||||
public testPromise: Promise<TapTest<T>> = this.testDeferred.promise;
|
||||
private testResultDeferred: Deferred<T> = plugins.smartpromise.defer();
|
||||
public testResultPromise: Promise<T> = this.testResultDeferred.promise;
|
||||
private protocolEmitter = new ProtocolEmitter();
|
||||
/**
|
||||
* constructor
|
||||
*/
|
||||
constructor(optionsArg: {
|
||||
description: string;
|
||||
testFunction: ITestFunction<T>;
|
||||
parallel: boolean;
|
||||
}) {
|
||||
this.description = optionsArg.description;
|
||||
this.hrtMeasurement = new HrtMeasurement();
|
||||
this.parallel = optionsArg.parallel;
|
||||
this.status = 'pending';
|
||||
this.tapTools = new TapTools(this);
|
||||
this.testFunction = optionsArg.testFunction;
|
||||
}
|
||||
|
||||
/**
|
||||
* Emit an event
|
||||
*/
|
||||
private emitEvent(event: ITestEvent) {
|
||||
console.log(this.protocolEmitter.emitEvent(event));
|
||||
}
|
||||
|
||||
/**
|
||||
* run the test
|
||||
*/
|
||||
public async run(testKeyArg: number) {
|
||||
this.testKey = testKeyArg;
|
||||
const testNumber = testKeyArg + 1;
|
||||
|
||||
// Emit test:queued event
|
||||
this.emitEvent({
|
||||
eventType: 'test:queued',
|
||||
timestamp: Date.now(),
|
||||
data: {
|
||||
testNumber,
|
||||
description: this.description
|
||||
}
|
||||
});
|
||||
|
||||
// Handle todo tests
|
||||
if (this.isTodo) {
|
||||
const testResult = {
|
||||
ok: true,
|
||||
testNumber,
|
||||
description: this.description,
|
||||
directive: {
|
||||
type: 'todo' as const,
|
||||
reason: this.todoReason
|
||||
}
|
||||
};
|
||||
const lines = this.protocolEmitter.emitTest(testResult);
|
||||
lines.forEach((line: string) => console.log(line));
|
||||
this.status = 'success';
|
||||
|
||||
// Emit test:completed event for todo test
|
||||
this.emitEvent({
|
||||
eventType: 'test:completed',
|
||||
timestamp: Date.now(),
|
||||
data: {
|
||||
testNumber,
|
||||
description: this.description,
|
||||
duration: 0,
|
||||
error: undefined
|
||||
}
|
||||
});
|
||||
|
||||
this.testDeferred.resolve(this);
|
||||
return;
|
||||
}
|
||||
|
||||
// Handle pre-marked skip tests
|
||||
if (this.tapTools.isSkipped) {
|
||||
const testResult = {
|
||||
ok: true,
|
||||
testNumber,
|
||||
description: this.description,
|
||||
directive: {
|
||||
type: 'skip' as const,
|
||||
reason: this.tapTools.skipReason || 'Marked as skip'
|
||||
}
|
||||
};
|
||||
const lines = this.protocolEmitter.emitTest(testResult);
|
||||
lines.forEach((line: string) => console.log(line));
|
||||
this.status = 'skipped';
|
||||
|
||||
// Emit test:completed event for skipped test
|
||||
this.emitEvent({
|
||||
eventType: 'test:completed',
|
||||
timestamp: Date.now(),
|
||||
data: {
|
||||
testNumber,
|
||||
description: this.description,
|
||||
duration: 0,
|
||||
error: undefined
|
||||
}
|
||||
});
|
||||
|
||||
this.testDeferred.resolve(this);
|
||||
return;
|
||||
}
|
||||
|
||||
// Run test with retries
|
||||
let lastError: any;
|
||||
const maxRetries = this.tapTools.maxRetries;
|
||||
|
||||
for (let attempt = 0; attempt <= maxRetries; attempt++) {
|
||||
this.hrtMeasurement.start();
|
||||
|
||||
// Emit test:started event
|
||||
this.emitEvent({
|
||||
eventType: 'test:started',
|
||||
timestamp: Date.now(),
|
||||
data: {
|
||||
testNumber,
|
||||
description: this.description,
|
||||
retry: attempt > 0 ? attempt : undefined
|
||||
}
|
||||
});
|
||||
|
||||
// Set protocol emitter for enhanced expect
|
||||
setProtocolEmitter(this.protocolEmitter);
|
||||
|
||||
try {
|
||||
// Set up timeout if specified
|
||||
let timeoutHandle: any;
|
||||
let timeoutPromise: Promise<never> | null = null;
|
||||
|
||||
if (this.timeoutMs) {
|
||||
timeoutPromise = new Promise<never>((_, reject) => {
|
||||
timeoutHandle = setTimeout(() => {
|
||||
this.status = 'timeout';
|
||||
reject(new Error(`Test timed out after ${this.timeoutMs}ms`));
|
||||
}, this.timeoutMs);
|
||||
});
|
||||
}
|
||||
|
||||
// Run the test function with potential timeout
|
||||
const testPromise = this.testFunction.length === 0
|
||||
? (this.testFunction as () => Promise<T>)()
|
||||
: (this.testFunction as (tapTools: TapTools) => Promise<T>)(this.tapTools);
|
||||
const testReturnValue = timeoutPromise
|
||||
? await Promise.race([testPromise, timeoutPromise])
|
||||
: await testPromise;
|
||||
|
||||
// Clear timeout if test completed
|
||||
if (timeoutHandle) {
|
||||
clearTimeout(timeoutHandle);
|
||||
}
|
||||
|
||||
this.hrtMeasurement.stop();
|
||||
const testResult = {
|
||||
ok: true,
|
||||
testNumber,
|
||||
description: this.description,
|
||||
metadata: {
|
||||
time: this.hrtMeasurement.milliSeconds,
|
||||
tags: this.tags.length > 0 ? this.tags : undefined,
|
||||
file: this.fileName
|
||||
}
|
||||
};
|
||||
const lines = this.protocolEmitter.emitTest(testResult);
|
||||
lines.forEach((line: string) => console.log(line));
|
||||
this.status = 'success';
|
||||
|
||||
// Emit test:completed event
|
||||
this.emitEvent({
|
||||
eventType: 'test:completed',
|
||||
timestamp: Date.now(),
|
||||
data: {
|
||||
testNumber,
|
||||
description: this.description,
|
||||
duration: this.hrtMeasurement.milliSeconds,
|
||||
error: undefined
|
||||
}
|
||||
});
|
||||
|
||||
this.testDeferred.resolve(this);
|
||||
this.testResultDeferred.resolve(testReturnValue);
|
||||
return; // Success, exit retry loop
|
||||
|
||||
} catch (err: any) {
|
||||
this.hrtMeasurement.stop();
|
||||
|
||||
// Handle skip
|
||||
if (err instanceof SkipError || err.name === 'SkipError') {
|
||||
const testResult = {
|
||||
ok: true,
|
||||
testNumber,
|
||||
description: this.description,
|
||||
directive: {
|
||||
type: 'skip' as const,
|
||||
reason: err.message.replace('Skipped: ', '')
|
||||
}
|
||||
};
|
||||
const lines = this.protocolEmitter.emitTest(testResult);
|
||||
lines.forEach((line: string) => console.log(line));
|
||||
this.status = 'skipped';
|
||||
|
||||
// Emit test:completed event for skipped test
|
||||
this.emitEvent({
|
||||
eventType: 'test:completed',
|
||||
timestamp: Date.now(),
|
||||
data: {
|
||||
testNumber,
|
||||
description: this.description,
|
||||
duration: this.hrtMeasurement.milliSeconds,
|
||||
error: undefined
|
||||
}
|
||||
});
|
||||
|
||||
this.testDeferred.resolve(this);
|
||||
return;
|
||||
}
|
||||
|
||||
lastError = err;
|
||||
|
||||
// If we have retries left, try again
|
||||
if (attempt < maxRetries) {
|
||||
console.log(this.protocolEmitter.emitComment(`Retry ${attempt + 1}/${maxRetries} for test: ${this.description}`));
|
||||
this.tapTools._incrementRetryCount();
|
||||
continue;
|
||||
}
|
||||
|
||||
// Final failure
|
||||
const testResult = {
|
||||
ok: false,
|
||||
testNumber,
|
||||
description: this.description,
|
||||
metadata: {
|
||||
time: this.hrtMeasurement.milliSeconds,
|
||||
retry: this.tapTools.retryCount,
|
||||
maxRetries: maxRetries > 0 ? maxRetries : undefined,
|
||||
error: {
|
||||
message: lastError.message || String(lastError),
|
||||
stack: lastError.stack,
|
||||
code: lastError.code
|
||||
},
|
||||
tags: this.tags.length > 0 ? this.tags : undefined,
|
||||
file: this.fileName
|
||||
}
|
||||
};
|
||||
const lines = this.protocolEmitter.emitTest(testResult);
|
||||
lines.forEach((line: string) => console.log(line));
|
||||
|
||||
// Emit test:completed event for failed test
|
||||
this.emitEvent({
|
||||
eventType: 'test:completed',
|
||||
timestamp: Date.now(),
|
||||
data: {
|
||||
testNumber,
|
||||
description: this.description,
|
||||
duration: this.hrtMeasurement.milliSeconds,
|
||||
error: {
|
||||
message: lastError.message || String(lastError),
|
||||
stack: lastError.stack,
|
||||
type: 'runtime' as const
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
this.testDeferred.resolve(this);
|
||||
this.testResultDeferred.resolve(err);
|
||||
|
||||
// if the test has already succeeded before
|
||||
if (this.status === 'success') {
|
||||
this.status = 'errorAfterSuccess';
|
||||
console.log('!!! ALERT !!!: weird behaviour, since the test has already been successful');
|
||||
} else {
|
||||
this.status = 'error';
|
||||
}
|
||||
|
||||
// if the test is allowed to fail
|
||||
if (this.failureAllowed) {
|
||||
console.log(`please note: failure allowed!`);
|
||||
}
|
||||
console.log(err);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
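As a quick illustration of the TapTest API defined above (a sketch using only the constructor, timeout(), run() and testResultPromise shown in this file; in normal use tap.test() creates these instances):

// assumes TapTest is imported from this file
const demoTest = new TapTest({
  description: 'demo: resolves with its return value',
  parallel: false,
  testFunction: async (tapTools) => {
    tapTools.timeout(2000); // mark as 'timeout' if it runs longer than 2s
    return 42;
  },
});

await demoTest.run(0); // emits TAP output for test number 1
const value = await demoTest.testResultPromise; // resolves with 42 on success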
ts_tapbundle/tapbundle.classes.taptools.ts (new file, 299 lines)
@@ -0,0 +1,299 @@
|
||||
import * as plugins from './tapbundle.plugins.js';
|
||||
import { TapTest } from './tapbundle.classes.taptest.js';
|
||||
|
||||
export interface IPromiseFunc {
|
||||
(): Promise<any>;
|
||||
}
|
||||
|
||||
export class SkipError extends Error {
|
||||
constructor(message: string) {
|
||||
super(message);
|
||||
this.name = 'SkipError';
|
||||
}
|
||||
}
|
||||
|
||||
export class TapTools {
|
||||
/**
|
||||
* the referenced TapTest
|
||||
*/
|
||||
private _tapTest: TapTest;
|
||||
private _retries = 0;
|
||||
private _retryCount = 0;
|
||||
public testData: any = {};
|
||||
private static _sharedContext = new Map<string, any>();
|
||||
private _snapshotPath: string = '';
|
||||
|
||||
// Flags for skip/todo
|
||||
private _isSkipped = false;
|
||||
private _skipReason?: string;
|
||||
|
||||
constructor(TapTestArg: TapTest<any>) {
|
||||
this._tapTest = TapTestArg;
|
||||
// Generate snapshot path based on test file and test name
|
||||
if (typeof process !== 'undefined' && process.cwd && TapTestArg) {
|
||||
const testFile = TapTestArg.fileName || 'unknown';
|
||||
const testName = TapTestArg.description.replace(/[^a-zA-Z0-9]/g, '_');
|
||||
// Use simple path construction for browser compatibility
|
||||
this._snapshotPath = `${process.cwd()}/.nogit/test_snapshots/${testFile}/${testName}.snap`;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* allow failure
|
||||
*/
|
||||
public allowFailure() {
|
||||
this._tapTest.failureAllowed = true;
|
||||
}
|
||||
|
||||
/**
|
||||
* skip the rest of the test
|
||||
*/
|
||||
public skip(reason?: string): never {
|
||||
this._isSkipped = true;
|
||||
this._skipReason = reason;
|
||||
const skipMessage = reason ? `Skipped: ${reason}` : 'Skipped';
|
||||
throw new SkipError(skipMessage);
|
||||
}
|
||||
|
||||
/**
|
||||
* Mark test as skipped without throwing (for pre-marking)
|
||||
*/
|
||||
public markAsSkipped(reason?: string): void {
|
||||
this._isSkipped = true;
|
||||
this._skipReason = reason;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if test is marked as skipped
|
||||
*/
|
||||
public get isSkipped(): boolean {
|
||||
return this._isSkipped;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get skip reason
|
||||
*/
|
||||
public get skipReason(): string | undefined {
|
||||
return this._skipReason;
|
||||
}
|
||||
|
||||
/**
|
||||
* conditionally skip the rest of the test
|
||||
*/
|
||||
public skipIf(condition: boolean, reason?: string): void {
|
||||
if (condition) {
|
||||
this.skip(reason);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* mark test as todo
|
||||
*/
|
||||
public todo(reason?: string): void {
|
||||
this._tapTest.isTodo = true;
|
||||
this._tapTest.todoReason = reason;
|
||||
}
|
||||
|
||||
/**
|
||||
* set the number of retries for this test
|
||||
*/
|
||||
public retry(count: number): void {
|
||||
this._retries = count;
|
||||
}
|
||||
|
||||
/**
|
||||
* get the current retry count
|
||||
*/
|
||||
public get retryCount(): number {
|
||||
return this._retryCount;
|
||||
}
|
||||
|
||||
/**
|
||||
* internal: increment retry count
|
||||
*/
|
||||
public _incrementRetryCount(): void {
|
||||
this._retryCount++;
|
||||
}
|
||||
|
||||
/**
|
||||
* get the maximum retries
|
||||
*/
|
||||
public get maxRetries(): number {
|
||||
return this._retries;
|
||||
}
|
||||
|
||||
/**
|
||||
* async/await delay method
|
||||
*/
|
||||
public async delayFor(timeMilliArg: number) {
|
||||
await plugins.smartdelay.delayFor(timeMilliArg);
|
||||
}
|
||||
|
||||
public async delayForRandom(timeMilliMinArg: number, timeMilliMaxArg: number) {
|
||||
await plugins.smartdelay.delayForRandom(timeMilliMinArg, timeMilliMaxArg);
|
||||
}
|
||||
|
||||
public async coloredString(...args: Parameters<typeof plugins.consolecolor.coloredString>) {
|
||||
return plugins.consolecolor.coloredString(...args);
|
||||
}
|
||||
|
||||
/**
|
||||
* set a timeout for the test
|
||||
*/
|
||||
public timeout(timeMilliArg: number): void {
|
||||
this._tapTest.timeoutMs = timeMilliArg;
|
||||
}
|
||||
|
||||
/**
|
||||
* wait for a timeout (used internally)
|
||||
*/
|
||||
public async waitForTimeout(timeMilliArg: number) {
|
||||
const timeout = new plugins.smartdelay.Timeout(timeMilliArg);
|
||||
timeout.makeUnrefed();
|
||||
await timeout.promise;
|
||||
if (this._tapTest.status === 'pending') {
|
||||
this._tapTest.status = 'timeout';
|
||||
}
|
||||
}
|
||||
|
||||
public async returnError(throwingFuncArg: IPromiseFunc) {
|
||||
let funcErr: Error | undefined;
|
||||
try {
|
||||
await throwingFuncArg();
|
||||
} catch (err: any) {
|
||||
funcErr = err;
|
||||
}
|
||||
return funcErr;
|
||||
}
|
||||
|
||||
public defer() {
|
||||
return plugins.smartpromise.defer();
|
||||
}
|
||||
|
||||
public cumulativeDefer() {
|
||||
return plugins.smartpromise.cumulativeDefer();
|
||||
}
|
||||
|
||||
public smartjson = plugins.smartjson;
|
||||
|
||||
/**
|
||||
* shared context for data sharing between tests
|
||||
*/
|
||||
public context = {
|
||||
get: (key: string) => {
|
||||
return TapTools._sharedContext.get(key);
|
||||
},
|
||||
set: (key: string, value: any) => {
|
||||
TapTools._sharedContext.set(key, value);
|
||||
},
|
||||
delete: (key: string) => {
|
||||
return TapTools._sharedContext.delete(key);
|
||||
},
|
||||
clear: () => {
|
||||
TapTools._sharedContext.clear();
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Snapshot testing - compares output with saved snapshot
|
||||
*/
|
||||
public async matchSnapshot(value: any, snapshotName?: string) {
|
||||
if (!this._snapshotPath || typeof process === 'undefined') {
|
||||
console.log('Snapshot testing is only available in Node.js environment');
|
||||
return;
|
||||
}
|
||||
|
||||
const snapshotPath = snapshotName
|
||||
? this._snapshotPath.replace('.snap', `_${snapshotName}.snap`)
|
||||
: this._snapshotPath;
|
||||
|
||||
const serializedValue = typeof value === 'string'
|
||||
? value
|
||||
: JSON.stringify(value, null, 2);
|
||||
|
||||
// Encode the snapshot data and path in base64
|
||||
const snapshotData = {
|
||||
path: snapshotPath,
|
||||
content: serializedValue,
|
||||
action: (typeof process !== 'undefined' && process.env && process.env.UPDATE_SNAPSHOTS === 'true') ? 'update' : 'compare'
|
||||
};
|
||||
|
||||
const base64Data = Buffer.from(JSON.stringify(snapshotData)).toString('base64');
|
||||
console.log(`###SNAPSHOT###${base64Data}###SNAPSHOT###`);
|
||||
|
||||
// Wait for the result from tstest
|
||||
// In a real implementation, we would need a way to get the result back
|
||||
// For now, we'll assume the snapshot matches
|
||||
// This is where the communication protocol would need to be enhanced
|
||||
|
||||
return new Promise((resolve, reject) => {
|
||||
// Temporary implementation - in reality, tstest would need to provide feedback
|
||||
setTimeout(() => {
|
||||
resolve(undefined);
|
||||
}, 100);
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Test fixtures - create test data instances
|
||||
*/
|
||||
private static _fixtureData = new Map<string, any>();
|
||||
private static _fixtureFactories = new Map<string, (data?: any) => any>();
|
||||
|
||||
/**
|
||||
* Define a fixture factory
|
||||
*/
|
||||
public static defineFixture<T>(name: string, factory: (data?: Partial<T>) => T | Promise<T>) {
|
||||
this._fixtureFactories.set(name, factory);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a fixture instance
|
||||
*/
|
||||
public async fixture<T>(name: string, data?: Partial<T>): Promise<T> {
|
||||
const factory = TapTools._fixtureFactories.get(name);
|
||||
if (!factory) {
|
||||
throw new Error(`Fixture '${name}' not found. Define it with TapTools.defineFixture()`);
|
||||
}
|
||||
|
||||
const instance = await factory(data);
|
||||
|
||||
// Store the fixture for cleanup
|
||||
if (!TapTools._fixtureData.has(name)) {
|
||||
TapTools._fixtureData.set(name, []);
|
||||
}
|
||||
TapTools._fixtureData.get(name).push(instance);
|
||||
|
||||
return instance;
|
||||
}
|
||||
|
||||
/**
|
||||
* Factory pattern for creating multiple fixtures
|
||||
*/
|
||||
public factory<T>(name: string) {
|
||||
return {
|
||||
create: async (data?: Partial<T>): Promise<T> => {
|
||||
return this.fixture<T>(name, data);
|
||||
},
|
||||
createMany: async (count: number, dataOverrides?: Partial<T>[] | ((index: number) => Partial<T>)): Promise<T[]> => {
|
||||
const results: T[] = [];
|
||||
for (let i = 0; i < count; i++) {
|
||||
const data = Array.isArray(dataOverrides)
|
||||
? dataOverrides[i]
|
||||
: typeof dataOverrides === 'function'
|
||||
? dataOverrides(i)
|
||||
: dataOverrides;
|
||||
results.push(await this.fixture<T>(name, data));
|
||||
}
|
||||
return results;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Clear all fixtures (typically called in afterEach)
|
||||
*/
|
||||
public static async cleanupFixtures() {
|
||||
TapTools._fixtureData.clear();
|
||||
}
|
||||
}
|
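A short sketch of the fixture helpers above. The IUser shape and the factory body are illustrative assumptions; defineFixture(), fixture(), factory().createMany() and cleanupFixtures() are used as defined in the class.

interface IUser { id: number; name: string; }

// register a factory once, e.g. in a shared setup file
TapTools.defineFixture<IUser>('user', (data) => ({ id: 1, name: 'anon', ...data }));

// inside a test, using the tapTools instance that is passed to the test function
const alice = await tapTools.fixture<IUser>('user', { name: 'alice' });
const many = await tapTools.factory<IUser>('user').createMany(3, (i) => ({ id: i }));

await TapTools.cleanupFixtures(); // typically called in an afterEach hook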
ts_tapbundle/tapbundle.classes.tapwrap.ts (new file, 13 lines)
@@ -0,0 +1,13 @@
import * as plugins from './tapbundle.plugins.js';

export interface ITapWrapOptions {
  before: () => Promise<any>;
  after: () => {};
}

export class TapWrap {
  public options: ITapWrapOptions;
  constructor(optionsArg: ITapWrapOptions) {
    this.options = optionsArg;
  }
}
ts_tapbundle/tapbundle.expect.wrapper.ts (new file, 81 lines)
@@ -0,0 +1,81 @@
|
||||
import { expect as smartExpect } from '@push.rocks/smartexpect';
|
||||
import { generateDiff } from './tapbundle.utilities.diff.js';
|
||||
import { ProtocolEmitter } from '../dist_ts_tapbundle_protocol/index.js';
|
||||
import type { IEnhancedError } from '../dist_ts_tapbundle_protocol/index.js';
|
||||
|
||||
// Store the protocol emitter for event emission
|
||||
let protocolEmitter: ProtocolEmitter | null = null;
|
||||
|
||||
/**
|
||||
* Set the protocol emitter for enhanced error reporting
|
||||
*/
|
||||
export function setProtocolEmitter(emitter: ProtocolEmitter) {
|
||||
protocolEmitter = emitter;
|
||||
}
|
||||
|
||||
/**
|
||||
* Enhanced expect wrapper that captures assertion failures and generates diffs
|
||||
*/
|
||||
export function createEnhancedExpect() {
|
||||
return new Proxy(smartExpect, {
|
||||
apply(target, thisArg, argumentsList: any[]) {
|
||||
const expectation = target.apply(thisArg, argumentsList);
|
||||
|
||||
// Wrap common assertion methods
|
||||
const wrappedExpectation = new Proxy(expectation, {
|
||||
get(target, prop, receiver) {
|
||||
const originalValue = Reflect.get(target, prop, receiver);
|
||||
|
||||
// Wrap assertion methods that compare values
|
||||
if (typeof prop === 'string' && typeof originalValue === 'function' && ['toEqual', 'toBe', 'toMatch', 'toContain'].includes(prop)) {
|
||||
return function(expected: any) {
|
||||
try {
|
||||
return originalValue.apply(target, arguments);
|
||||
} catch (error: any) {
|
||||
// Enhance the error with diff information
|
||||
const actual = argumentsList[0];
|
||||
const enhancedError: IEnhancedError = {
|
||||
message: error.message,
|
||||
stack: error.stack,
|
||||
actual,
|
||||
expected,
|
||||
type: 'assertion'
|
||||
};
|
||||
|
||||
// Generate diff if applicable
|
||||
if (prop === 'toEqual' || prop === 'toBe') {
|
||||
const diff = generateDiff(expected, actual);
|
||||
if (diff) {
|
||||
enhancedError.diff = diff;
|
||||
}
|
||||
}
|
||||
|
||||
// Emit assertion:failed event if protocol emitter is available
|
||||
if (protocolEmitter) {
|
||||
const event = {
|
||||
eventType: 'assertion:failed' as const,
|
||||
timestamp: Date.now(),
|
||||
data: {
|
||||
error: enhancedError
|
||||
}
|
||||
};
|
||||
console.log(protocolEmitter.emitEvent(event));
|
||||
}
|
||||
|
||||
// Re-throw the enhanced error
|
||||
throw error;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
return originalValue;
|
||||
}
|
||||
});
|
||||
|
||||
return wrappedExpectation;
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// Create the enhanced expect function
|
||||
export const expect = createEnhancedExpect();
|
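Illustration of the wrapper above (a sketch; it assumes the underlying smartexpect expectation really exposes toEqual, as the wrapped method list implies): a failing toEqual still throws the original error, but the wrapper first prints an assertion:failed protocol event carrying the generated object diff once setProtocolEmitter() has been called.

setProtocolEmitter(new ProtocolEmitter()); // ProtocolEmitter as imported at the top of this file

try {
  expect({ a: 1, b: 2 }).toEqual({ a: 1, b: 3 });
} catch (err) {
  // the assertion:failed event line (including the diff) has already been logged;
  // the original assertion error is re-thrown unchanged
  console.log('assertion failed as expected:', (err as Error).message);
}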
ts_tapbundle/tapbundle.interfaces.ts (new file, 46 lines)
@@ -0,0 +1,46 @@
|
||||
export interface ITapSettings {
|
||||
// Timing
|
||||
timeout?: number; // Default timeout for all tests (ms)
|
||||
slowThreshold?: number; // Mark tests as slow if they exceed this (ms)
|
||||
|
||||
// Execution Control
|
||||
bail?: boolean; // Stop on first test failure
|
||||
retries?: number; // Number of retries for failed tests
|
||||
retryDelay?: number; // Delay between retries (ms)
|
||||
|
||||
// Output Control
|
||||
suppressConsole?: boolean; // Suppress console output in passing tests
|
||||
verboseErrors?: boolean; // Show full stack traces
|
||||
showTestDuration?: boolean; // Show duration for each test
|
||||
|
||||
// Parallel Execution
|
||||
maxConcurrency?: number; // Max parallel tests (for .para files)
|
||||
isolateTests?: boolean; // Run each test in fresh context
|
||||
|
||||
// Lifecycle Hooks
|
||||
beforeAll?: () => Promise<void> | void;
|
||||
afterAll?: () => Promise<void> | void;
|
||||
beforeEach?: (testName: string) => Promise<void> | void;
|
||||
afterEach?: (testName: string, passed: boolean) => Promise<void> | void;
|
||||
|
||||
// Environment
|
||||
env?: Record<string, string>; // Additional environment variables
|
||||
|
||||
// Features
|
||||
enableSnapshots?: boolean; // Enable snapshot testing
|
||||
snapshotDirectory?: string; // Custom snapshot directory
|
||||
updateSnapshots?: boolean; // Update snapshots instead of comparing
|
||||
}
|
||||
|
||||
export interface ISettingsManager {
|
||||
// Get merged settings for current context
|
||||
getSettings(): ITapSettings;
|
||||
|
||||
// Apply settings at different levels
|
||||
setGlobalSettings(settings: ITapSettings): void;
|
||||
setFileSettings(settings: ITapSettings): void;
|
||||
setTestSettings(testId: string, settings: ITapSettings): void;
|
||||
|
||||
// Get settings for specific test
|
||||
getTestSettings(testId: string): ITapSettings;
|
||||
}
|
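For reference, a settings object matching ITapSettings above might look like the sketch below. The values are illustrative, and how the object is applied (for example via a tap.settings() call) is not defined in this file and is only an assumption.

const settings: ITapSettings = {
  timeout: 5000,            // per-test timeout in ms
  retries: 2,               // retry failed tests twice
  showTestDuration: true,
  beforeEach: async (testName) => console.log(`starting ${testName}`),
  afterEach: async (testName, passed) => console.log(`${testName}: ${passed ? 'ok' : 'failed'}`),
};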
ts_tapbundle/tapbundle.plugins.ts (new file, 9 lines)
@@ -0,0 +1,9 @@
// pushrocks
import * as consolecolor from '@push.rocks/consolecolor';
import * as smartdelay from '@push.rocks/smartdelay';
import * as smartenv from '@push.rocks/smartenv';
import * as smartexpect from '@push.rocks/smartexpect';
import * as smartjson from '@push.rocks/smartjson';
import * as smartpromise from '@push.rocks/smartpromise';

export { consolecolor, smartdelay, smartenv, smartexpect, smartjson, smartpromise };
ts_tapbundle/tapbundle.tapcreator.ts (new file, 7 lines)
@@ -0,0 +1,7 @@
import * as plugins from './tapbundle.plugins.js';

export class TapCreator {
  // TODO:
}

export let tapCreator = new TapCreator();
ts_tapbundle/tapbundle.utilities.diff.ts (new file, 188 lines)
@@ -0,0 +1,188 @@
|
||||
import type { IDiffResult, IDiffChange } from '../dist_ts_tapbundle_protocol/index.js';
|
||||
|
||||
/**
|
||||
* Generate a diff between two values
|
||||
*/
|
||||
export function generateDiff(expected: any, actual: any, context: number = 3): IDiffResult | null {
|
||||
// Handle same values
|
||||
if (expected === actual) {
|
||||
return null;
|
||||
}
|
||||
|
||||
// Determine diff type based on values
|
||||
if (typeof expected === 'string' && typeof actual === 'string') {
|
||||
return generateStringDiff(expected, actual, context);
|
||||
} else if (Array.isArray(expected) && Array.isArray(actual)) {
|
||||
return generateArrayDiff(expected, actual);
|
||||
} else if (expected && actual && typeof expected === 'object' && typeof actual === 'object') {
|
||||
return generateObjectDiff(expected, actual);
|
||||
} else {
|
||||
return generatePrimitiveDiff(expected, actual);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Generate diff for primitive values
|
||||
*/
|
||||
function generatePrimitiveDiff(expected: any, actual: any): IDiffResult {
|
||||
return {
|
||||
type: 'primitive',
|
||||
changes: [{
|
||||
type: 'modify',
|
||||
oldValue: expected,
|
||||
newValue: actual
|
||||
}]
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Generate diff for strings (line-by-line)
|
||||
*/
|
||||
function generateStringDiff(expected: string, actual: string, context: number): IDiffResult {
|
||||
const expectedLines = expected.split('\n');
|
||||
const actualLines = actual.split('\n');
|
||||
const changes: IDiffChange[] = [];
|
||||
|
||||
// Simple line-by-line diff
|
||||
const maxLines = Math.max(expectedLines.length, actualLines.length);
|
||||
|
||||
for (let i = 0; i < maxLines; i++) {
|
||||
const expectedLine = expectedLines[i];
|
||||
const actualLine = actualLines[i];
|
||||
|
||||
if (expectedLine === undefined) {
|
||||
changes.push({
|
||||
type: 'add',
|
||||
line: i,
|
||||
content: actualLine
|
||||
});
|
||||
} else if (actualLine === undefined) {
|
||||
changes.push({
|
||||
type: 'remove',
|
||||
line: i,
|
||||
content: expectedLine
|
||||
});
|
||||
} else if (expectedLine !== actualLine) {
|
||||
changes.push({
|
||||
type: 'remove',
|
||||
line: i,
|
||||
content: expectedLine
|
||||
});
|
||||
changes.push({
|
||||
type: 'add',
|
||||
line: i,
|
||||
content: actualLine
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
type: 'string',
|
||||
changes,
|
||||
context
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Generate diff for arrays
|
||||
*/
|
||||
function generateArrayDiff(expected: any[], actual: any[]): IDiffResult {
|
||||
const changes: IDiffChange[] = [];
|
||||
const maxLength = Math.max(expected.length, actual.length);
|
||||
|
||||
for (let i = 0; i < maxLength; i++) {
|
||||
const expectedItem = expected[i];
|
||||
const actualItem = actual[i];
|
||||
|
||||
if (i >= expected.length) {
|
||||
changes.push({
|
||||
type: 'add',
|
||||
path: [String(i)],
|
||||
newValue: actualItem
|
||||
});
|
||||
} else if (i >= actual.length) {
|
||||
changes.push({
|
||||
type: 'remove',
|
||||
path: [String(i)],
|
||||
oldValue: expectedItem
|
||||
});
|
||||
} else if (!deepEqual(expectedItem, actualItem)) {
|
||||
changes.push({
|
||||
type: 'modify',
|
||||
path: [String(i)],
|
||||
oldValue: expectedItem,
|
||||
newValue: actualItem
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
type: 'array',
|
||||
changes
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Generate diff for objects
|
||||
*/
|
||||
function generateObjectDiff(expected: any, actual: any): IDiffResult {
|
||||
const changes: IDiffChange[] = [];
|
||||
const allKeys = new Set([...Object.keys(expected), ...Object.keys(actual)]);
|
||||
|
||||
for (const key of allKeys) {
|
||||
const expectedValue = expected[key];
|
||||
const actualValue = actual[key];
|
||||
|
||||
if (!(key in expected)) {
|
||||
changes.push({
|
||||
type: 'add',
|
||||
path: [key],
|
||||
newValue: actualValue
|
||||
});
|
||||
} else if (!(key in actual)) {
|
||||
changes.push({
|
||||
type: 'remove',
|
||||
path: [key],
|
||||
oldValue: expectedValue
|
||||
});
|
||||
} else if (!deepEqual(expectedValue, actualValue)) {
|
||||
changes.push({
|
||||
type: 'modify',
|
||||
path: [key],
|
||||
oldValue: expectedValue,
|
||||
newValue: actualValue
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
type: 'object',
|
||||
changes
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Deep equality check
|
||||
*/
|
||||
function deepEqual(a: any, b: any): boolean {
|
||||
if (a === b) return true;
|
||||
|
||||
if (a === null || b === null) return false;
|
||||
if (typeof a !== typeof b) return false;
|
||||
|
||||
if (typeof a === 'object') {
|
||||
if (Array.isArray(a) && Array.isArray(b)) {
|
||||
if (a.length !== b.length) return false;
|
||||
return a.every((item, index) => deepEqual(item, b[index]));
|
||||
}
|
||||
|
||||
const keysA = Object.keys(a);
|
||||
const keysB = Object.keys(b);
|
||||
|
||||
if (keysA.length !== keysB.length) return false;
|
||||
|
||||
return keysA.every(key => deepEqual(a[key], b[key]));
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
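Example of what generateDiff() above produces for two plain objects (a sketch derived directly from generateObjectDiff()):

const diff = generateDiff({ name: 'a', count: 1 }, { name: 'a', count: 2, extra: true });
// diff => {
//   type: 'object',
//   changes: [
//     { type: 'modify', path: ['count'], oldValue: 1, newValue: 2 },
//     { type: 'add', path: ['extra'], newValue: true },
//   ]
// }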
ts_tapbundle/tspublish.json (new file, 3 lines)
@@ -0,0 +1,3 @@
{
  "order": 2
}
ts_tapbundle/webhelpers.ts (new file, 40 lines)
@@ -0,0 +1,40 @@
|
||||
import * as plugins from './tapbundle.plugins.js';
|
||||
import { tap } from './tapbundle.classes.tap.js';
|
||||
|
||||
class WebHelpers {
|
||||
html: any;
|
||||
fixture: any;
|
||||
|
||||
constructor() {
|
||||
const smartenv = new plugins.smartenv.Smartenv();
|
||||
|
||||
// Initialize HTML template tag function
|
||||
this.html = (strings: TemplateStringsArray, ...values: any[]) => {
|
||||
let result = '';
|
||||
for (let i = 0; i < strings.length; i++) {
|
||||
result += strings[i];
|
||||
if (i < values.length) {
|
||||
result += values[i];
|
||||
}
|
||||
}
|
||||
return result;
|
||||
};
|
||||
|
||||
// Initialize fixture function based on environment
|
||||
if (smartenv.isBrowser) {
|
||||
this.fixture = async (htmlString: string): Promise<HTMLElement> => {
|
||||
const container = document.createElement('div');
|
||||
container.innerHTML = htmlString.trim();
|
||||
const element = container.firstChild as HTMLElement;
|
||||
return element;
|
||||
};
|
||||
} else {
|
||||
// Node.js environment - provide a stub or alternative implementation
|
||||
this.fixture = async (htmlString: string): Promise<any> => {
|
||||
throw new Error('WebHelpers.fixture is only available in browser environment');
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export const webhelpers = new WebHelpers();
|
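Browser-side usage sketch of the helpers above (only meaningful when the code runs in a browser; in Node the fixture helper throws, as shown):

const markup = webhelpers.html`<button class="demo">${'Click me'}</button>`;
const element = await webhelpers.fixture(markup); // an HTMLElement built from the markup
element.click();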
ts_tapbundle_node/classes.tapnodetools.ts (new file, 98 lines)
@@ -0,0 +1,98 @@
|
||||
import { TestFileProvider } from './classes.testfileprovider.js';
|
||||
import * as plugins from './plugins.js';
|
||||
|
||||
class TapNodeTools {
|
||||
private smartshellInstance: plugins.smartshell.Smartshell;
|
||||
public testFileProvider = new TestFileProvider();
|
||||
|
||||
constructor() {}
|
||||
|
||||
private qenv: plugins.qenv.Qenv;
|
||||
public async getQenv(): Promise<plugins.qenv.Qenv> {
|
||||
this.qenv = this.qenv || new plugins.qenv.Qenv('./', '.nogit/');
|
||||
return this.qenv;
|
||||
}
|
||||
public async getEnvVarOnDemand(envVarNameArg: string): Promise<string> {
|
||||
const qenv = await this.getQenv();
|
||||
return qenv.getEnvVarOnDemand(envVarNameArg);
|
||||
}
|
||||
|
||||
public async runCommand(commandArg: string): Promise<any> {
|
||||
if (!this.smartshellInstance) {
|
||||
this.smartshellInstance = new plugins.smartshell.Smartshell({
|
||||
executor: 'bash',
|
||||
});
|
||||
}
|
||||
const result = await this.smartshellInstance.exec(commandArg);
|
||||
return result;
|
||||
}
|
||||
|
||||
public async createHttpsCert(
|
||||
commonName: string = 'localhost',
|
||||
allowSelfSigned: boolean = true
|
||||
): Promise<{ key: string; cert: string }> {
|
||||
if (allowSelfSigned) {
|
||||
// set node to allow self-signed certificates
|
||||
process.env.NODE_TLS_REJECT_UNAUTHORIZED = '0';
|
||||
}
|
||||
|
||||
// Generate a key pair
|
||||
const keys = plugins.smartcrypto.nodeForge.pki.rsa.generateKeyPair(2048);
|
||||
|
||||
// Create a self-signed certificate
|
||||
const cert = plugins.smartcrypto.nodeForge.pki.createCertificate();
|
||||
cert.publicKey = keys.publicKey;
|
||||
cert.serialNumber = '01';
|
||||
cert.validity.notBefore = new Date();
|
||||
cert.validity.notAfter = new Date();
|
||||
cert.validity.notAfter.setFullYear(cert.validity.notBefore.getFullYear() + 1);
|
||||
|
||||
const attrs = [
|
||||
{ name: 'commonName', value: commonName },
|
||||
{ name: 'countryName', value: 'US' },
|
||||
{ shortName: 'ST', value: 'California' },
|
||||
{ name: 'localityName', value: 'San Francisco' },
|
||||
{ name: 'organizationName', value: 'My Company' },
|
||||
{ shortName: 'OU', value: 'Dev' },
|
||||
];
|
||||
cert.setSubject(attrs);
|
||||
cert.setIssuer(attrs);
|
||||
|
||||
// Sign the certificate with its own private key (self-signed)
|
||||
cert.sign(keys.privateKey, plugins.smartcrypto.nodeForge.md.sha256.create());
|
||||
|
||||
// PEM encode the private key and certificate
|
||||
const pemKey = plugins.smartcrypto.nodeForge.pki.privateKeyToPem(keys.privateKey);
|
||||
const pemCert = plugins.smartcrypto.nodeForge.pki.certificateToPem(cert);
|
||||
|
||||
return {
|
||||
key: pemKey,
|
||||
cert: pemCert,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* create and return a smartmongo instance
|
||||
*/
|
||||
public async createSmartmongo() {
|
||||
const smartmongoMod = await import('@push.rocks/smartmongo');
|
||||
const smartmongoInstance = new smartmongoMod.SmartMongo();
|
||||
await smartmongoInstance.start();
|
||||
return smartmongoInstance;
|
||||
}
|
||||
|
||||
/**
|
||||
* create and return a smarts3 instance
|
||||
*/
|
||||
public async createSmarts3() {
|
||||
const smarts3Mod = await import('@push.rocks/smarts3');
|
||||
const smarts3Instance = new smarts3Mod.Smarts3({
|
||||
port: 3003,
|
||||
cleanSlate: true,
|
||||
});
|
||||
await smarts3Instance.start();
|
||||
return smarts3Instance;
|
||||
}
|
||||
}
|
||||
|
||||
export const tapNodeTools = new TapNodeTools();
|
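Node-only usage sketch of the tools above (the method calls are as defined in the class; the environment variable name is a hypothetical placeholder):

// self-signed HTTPS material for a local server under test
const { key, cert } = await tapNodeTools.createHttpsCert('localhost');

// run a shell command through the shared smartshell instance
const result = await tapNodeTools.runCommand('node --version');

// read a value from ./ or ./.nogit/ via qenv (MY_API_TOKEN is a hypothetical name)
const token = await tapNodeTools.getEnvVarOnDemand('MY_API_TOKEN');

// ephemeral backing services for integration tests
const mongo = await tapNodeTools.createSmartmongo();
const s3 = await tapNodeTools.createSmarts3();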
ts_tapbundle_node/classes.testfileprovider.ts (new file, 20 lines)
@@ -0,0 +1,20 @@
import * as plugins from './plugins.js';
import * as paths from './paths.js';

export const fileUrls = {
  dockerAlpineImage: 'https://code.foss.global/testassets/docker/raw/branch/main/alpine.tar',
};

export class TestFileProvider {
  public async getDockerAlpineImageAsLocalTarball(): Promise<string> {
    const filePath = plugins.path.join(paths.testFilesDir, 'alpine.tar');
    // fetch the docker alpine image
    const response = await plugins.smartrequest.SmartRequest.create()
      .url(fileUrls.dockerAlpineImage)
      .get();
    await plugins.smartfile.fs.ensureDir(paths.testFilesDir);
    const buffer = Buffer.from(await response.arrayBuffer());
    await plugins.smartfile.memory.toFs(buffer, filePath);
    return filePath;
  }
}
ts_tapbundle_node/index.ts (new file, 2 lines)
@@ -0,0 +1,2 @@
export * from './classes.tapnodetools.js';
ts_tapbundle_node/paths.ts (new file, 4 lines)
@@ -0,0 +1,4 @@
import * as plugins from './plugins.js';

export const cwd = process.cwd();
export const testFilesDir = plugins.path.join(cwd, './.nogit/testfiles/');
ts_tapbundle_node/plugins.ts (new file, 16 lines)
@@ -0,0 +1,16 @@
// node native
import * as crypto from 'crypto';
import * as fs from 'fs';
import * as path from 'path';

export { crypto, fs, path };

// @push.rocks scope
import * as qenv from '@push.rocks/qenv';
import * as smartcrypto from '@push.rocks/smartcrypto';
import * as smartfile from '@push.rocks/smartfile';
import * as smartpath from '@push.rocks/smartpath';
import * as smartrequest from '@push.rocks/smartrequest';
import * as smartshell from '@push.rocks/smartshell';

export { qenv, smartcrypto, smartfile, smartpath, smartrequest, smartshell };
Some files were not shown because too many files have changed in this diff.