Compare commits

..

32 Commits

Author SHA1 Message Date
4585801f32 v1.17.2
Some checks failed
Default (tags) / security (push) Successful in 43s
Default (tags) / test (push) Failing after 4m12s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-07 12:34:37 +00:00
3dc75f5cda fix(registry): improve HTTP fetch retry logging, backoff calculation, and token-cache warning 2026-02-07 12:34:37 +00:00
7591e0ed90 v1.17.1
Some checks failed
Default (tags) / security (push) Successful in 37s
Default (tags) / test (push) Failing after 4m0s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-07 12:29:43 +00:00
d2c2a4c4dd fix(registrycopy): add fetchWithRetry wrapper to apply timeouts, retries with exponential backoff, and token cache handling; use it for registry HTTP requests 2026-02-07 12:29:43 +00:00
89cd93cdff v1.17.0
Some checks failed
Default (tags) / security (push) Successful in 40s
Default (tags) / test (push) Failing after 4m0s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-07 12:02:17 +00:00
10aee5d4c5 feat(tsdocker): add Dockerfile filtering, optional skip-build flow, and fallback Docker config credential loading 2026-02-07 12:02:17 +00:00
53b7bd7048 v1.16.0
Some checks failed
Default (tags) / security (push) Successful in 39s
Default (tags) / test (push) Failing after 4m1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-07 10:30:52 +00:00
101c4286c1 feat(core): Introduce per-invocation TsDockerSession and session-aware local registry and build orchestration; stream and parse buildx output for improved logging and visibility; detect Docker topology and add CI-safe cleanup; update README with multi-arch, parallel-build, caching, and local registry usage and new CLI flags. 2026-02-07 10:30:52 +00:00
63078139ec v1.15.1
Some checks failed
Default (tags) / security (push) Successful in 39s
Default (tags) / test (push) Failing after 4m0s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-07 09:41:22 +00:00
0cb5515b93 fix(registry): use persistent local registry and OCI Distribution API image copy for pushes 2026-02-07 09:41:22 +00:00
aa0425f9bc v1.15.0
Some checks failed
Default (tags) / security (push) Successful in 42s
Default (tags) / test (push) Failing after 4m0s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-07 05:17:32 +00:00
2d4d7c671a feat(clean): Make the command interactive: add smartinteract prompts, docker context detection, and selective resource removal with support for --all and -y auto-confirm 2026-02-07 05:17:32 +00:00
3085eb590f v1.14.0
Some checks failed
Default (tags) / security (push) Successful in 34s
Default (tags) / test (push) Failing after 4m0s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-07 04:46:06 +00:00
04b75b42f3 feat(build): add level-based parallel builds with --parallel and configurable concurrency 2026-02-07 04:46:06 +00:00
b04b8c9033 v1.13.0
Some checks failed
Default (tags) / security (push) Successful in 40s
Default (tags) / test (push) Failing after 4m1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-07 04:33:07 +00:00
2130a8a879 feat(docker): add Docker context detection, rootless support, and context-aware buildx registry handling 2026-02-07 04:33:07 +00:00
17de78aed3 v1.12.0
Some checks failed
Default (tags) / security (push) Successful in 40s
Default (tags) / test (push) Failing after 4m0s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-06 16:35:49 +00:00
eddb8cd156 feat(docker): add detailed logging for buildx, build commands, local registry, and local dependency info 2026-02-06 16:35:49 +00:00
cfc7798d49 v1.11.0
Some checks failed
Default (tags) / security (push) Successful in 38s
Default (tags) / test (push) Failing after 3m59s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-06 15:53:32 +00:00
37dfde005e feat(docker): start temporary local registry for buildx dependency resolution and ensure buildx builder uses host network 2026-02-06 15:53:32 +00:00
d1785aab86 v1.10.0
Some checks failed
Default (tags) / security (push) Successful in 33s
Default (tags) / test (push) Failing after 4m0s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-06 15:05:46 +00:00
31fb4aea3c feat(classes.dockerfile): support using a local base image as a build context in buildx commands 2026-02-06 15:05:46 +00:00
907048fa87 v1.9.0
Some checks failed
Default (tags) / security (push) Successful in 50s
Default (tags) / test (push) Failing after 4m0s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-06 14:52:16 +00:00
02b267ee10 feat(build): add verbose build output, progress logging, and timing for builds/tests 2026-02-06 14:52:16 +00:00
16cd0bbd87 v1.8.0
Some checks failed
Default (tags) / security (push) Successful in 39s
Default (tags) / test (push) Failing after 4m0s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-06 14:18:06 +00:00
cc83743f9a feat(build): add optional content-hash based build cache to skip rebuilding unchanged Dockerfiles 2026-02-06 14:18:06 +00:00
7131c16f80 v1.7.0
Some checks failed
Default (tags) / security (push) Successful in 31s
Default (tags) / test (push) Failing after 3m59s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-06 13:39:24 +00:00
02688861f4 feat(cli): add CLI version display using commitinfo 2026-02-06 13:39:24 +00:00
3a8b301b3e v1.6.0
Some checks failed
Default (tags) / security (push) Successful in 39s
Default (tags) / test (push) Failing after 4m0s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-06 13:25:22 +00:00
c09bef33c3 feat(docker): add support for no-cache builds and tag built images for local dependency resolution 2026-02-06 13:25:21 +00:00
32eb0d1d77 v1.5.0
Some checks failed
Default (tags) / security (push) Successful in 39s
Default (tags) / test (push) Failing after 4m0s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-06 11:58:32 +00:00
7cac628975 feat(build): add support for selective builds, platform override and build timeout 2026-02-06 11:58:32 +00:00
16 changed files with 2586 additions and 252 deletions

View File

@@ -1,5 +1,150 @@
# Changelog # Changelog
## 2026-02-07 - 1.17.2 - fix(registry)
improve HTTP fetch retry logging, backoff calculation, and token-cache warning
- Include HTTP method in logs and normalize method to uppercase for consistency
- Log retry attempts with method, URL and calculated exponential backoff delay
- Compute and reuse exponential backoff delay variable instead of inline calculation
- Log error when a 5xx response persists after all retry attempts and when fetch ultimately fails
- Add a warning log when clearing cached token after a 401 response
## 2026-02-07 - 1.17.1 - fix(registrycopy)
add fetchWithRetry wrapper to apply timeouts, retries with exponential backoff, and token cache handling; use it for registry HTTP requests
- Introduces fetchWithRetry(url, options, timeoutMs, maxRetries) to wrap fetch with AbortSignal timeout, exponential backoff retries, and retry behavior only for network errors and 5xx responses
- Replaces direct fetch calls for registry /v2 checks, token requests, and blob uploads with fetchWithRetry (30s for auth/token checks, 300s for blob operations)
- Clears token cache entry when a 401 response is received so the next attempt re-authenticates
- Adds logging on retry attempts and backoff delays to improve robustness and observability
## 2026-02-07 - 1.17.0 - feat(tsdocker)
add Dockerfile filtering, optional skip-build flow, and fallback Docker config credential loading
- Add TsDockerManager.filterDockerfiles(patterns) to filter discovered Dockerfiles by glob-style patterns and warn when no matches are found
- Allow skipping image build with --no-build (argvArg.build === false): discover Dockerfiles and apply filters without performing build
- Fallback to load Docker registry credentials from ~/.docker/config.json via RegistryCopy.getDockerConfigCredentials when env vars do not provide credentials
- Import RegistryCopy and add info/warn logs when credentials are loaded or missing
## 2026-02-07 - 1.16.0 - feat(core)
Introduce per-invocation TsDockerSession and session-aware local registry and build orchestration; stream and parse buildx output for improved logging and visibility; detect Docker topology and add CI-safe cleanup; update README with multi-arch, parallel-build, caching, and local registry usage and new CLI flags.
- Add TsDockerSession to allocate unique ports, container names and builder suffixes for concurrent runs (especially in CI).
- Make local registry session-aware: start/stop/use registry container and persistent storage per session; retry on port conflicts.
- Inject session into Dockerfile instances and TsDockerManager; use session.config.registryHost for tagging/pushing and test container naming.
- Stream and parse buildx/docker build output via createBuildOutputHandler for clearer step/platform/CACHED/DONE logging and --progress=plain usage.
- Detect Docker topology (socket-mount, dind, local) in DockerContext and expose it in context info.
- Add manager.cleanup to remove CI-scoped buildx builders and ensure CLI calls cleanup after build/push/test.
- Update interfaces to include topology and adjust many Dockerfile/manager methods to be session-aware.
- Large README improvements: multi-arch flow, persistent local registry, parallel builds, caching, new CLI and clean flags, and examples for CI integration.
## 2026-02-07 - 1.15.1 - fix(registry)
use persistent local registry and OCI Distribution API image copy for pushes
- Adds RegistryCopy class implementing the OCI Distribution API to copy images (including multi-arch manifest lists) from the local registry to remote registries.
- All builds now go through a persistent local registry at localhost:5234 with volume storage at .nogit/docker-registry/; Dockerfile.startLocalRegistry mounts this directory.
- Dockerfile.push now delegates to RegistryCopy.copyImage; Dockerfile.needsLocalRegistry() always returns true and config.push is now a no-op (kept for backward compat).
- Multi-platform buildx builds are pushed to the local registry (this.localRegistryTag) during buildx --push; code avoids redundant pushes when images are already pushed by buildx.
- Build, cached build, test, push and pull flows now start/stop the local registry automatically to support multi-platform/image resolution.
- Introduces Dockerfile.getDestRepo and support for config.registryRepoMap to control destination repository mapping.
- Breaking change: registry usage and push behavior changed (config.push ignored and local registry mandatory) — flagged as requiring a major bump, though note this was actually released as patch version 1.15.1.
## 2026-02-07 - 1.15.0 - feat(clean)
Make the `clean` command interactive: add smartinteract prompts, docker context detection, and selective resource removal with support for --all and -y auto-confirm
- Adds dependency @push.rocks/smartinteract and exposes it from the plugins module
- Refactors tsdocker.cli.ts clean command to list Docker resources and prompt checkbox selection for running/stopped containers, images, and volumes
- Adds DockerContext detection and logging to determine active Docker context
- Introduces auto-confirm (-y) and --all handling to either auto-accept or allow full-image/volume removal
- Replaces blunt shell commands with safer, interactive selection and adds improved error handling and logging
## 2026-02-07 - 1.14.0 - feat(build)
add level-based parallel builds with --parallel and configurable concurrency
- Introduces --parallel and --parallel=<n> CLI flags to enable level-based parallel Docker builds (default concurrency 4).
- Adds Dockerfile.computeLevels() to group topologically-sorted Dockerfiles into dependency levels.
- Adds Dockerfile.runWithConcurrency() implementing a bounded-concurrency worker-pool (fast-fail via Promise.all).
- Integrates parallel build mode into Dockerfile.buildDockerfiles() and TsDockerManager.build() for both cached and non-cached flows, including tagging and pushing for dependency resolution after each level.
- Adds options.parallel and options.parallelConcurrency to the build interface and wires them through the CLI and manager.
- Updates documentation (readme.hints.md) with usage examples and implementation notes.
## 2026-02-07 - 1.13.0 - feat(docker)
add Docker context detection, rootless support, and context-aware buildx registry handling
- Introduce DockerContext class to detect current Docker context and rootless mode and to log warnings and context info
- Add IDockerContextInfo interface and a new context option on build/config to pass explicit Docker context
- Propagate --context CLI flag into TsDockerManager.prepare so CLI commands can set an explicit Docker context
- Make buildx builder name context-aware (tsdocker-builder-<sanitized-context>) and log builder name/platforms
- Pass isRootless into local registry startup and build pipeline; emit rootless-specific warnings and registry reachability hint
## 2026-02-06 - 1.12.0 - feat(docker)
add detailed logging for buildx, build commands, local registry, and local dependency info
- Log startup of local registry including a note about buildx dependency bridging
- Log constructed build commands and indicate whether buildx or standard docker build is used (including platforms and --push/--load distinctions)
- Emit build mode summary at start of build phase and report local base-image dependency mappings
- Report when --no-cache is enabled and surface buildx setup readiness with configured platforms
- Non-functional change: purely adds informational logging to improve observability during builds
## 2026-02-06 - 1.11.0 - feat(docker)
start temporary local registry for buildx dependency resolution and ensure buildx builder uses host network
- Introduce a temporary local registry (localhost:5234) with start/stop helpers and push support to expose local images for buildx
- Add Dockerfile.needsLocalRegistry to decide when a local registry is required (local base dependencies + multi-platform or platform option)
- Push built images to the local registry and set localRegistryTag on Dockerfile instances for BuildKit build-context usage
- Tag built images in the host daemon for dependent Dockerfiles to resolve local FROM references
- Integrate registry lifecycle into Dockerfile.buildDockerfiles and TsDockerManager build flows (start before builds, stop after)
- Ensure buildx builder is created with --driver-opt network=host and recreate existing builder if it lacks host network to allow registry access from build containers
## 2026-02-06 - 1.10.0 - feat(classes.dockerfile)
support using a local base image as a build context in buildx commands
- Adds --build-context flag mapping base image to docker-image://<localTag> when localBaseImageDependent && localBaseDockerfile are set
- Appends the build context flag to both single-platform and multi-platform docker buildx commands
- Logs an info message indicating the local build context mapping
## 2026-02-06 - 1.9.0 - feat(build)
add verbose build output, progress logging, and timing for builds/tests
- Add 'verbose' option to build/test flows (interfaces, CLI, and method signatures) to allow streaming raw docker build output or run silently
- Log per-item progress for build and test phases (e.g. (1/N) Building/Testing <tag>) and report individual durations
- Return elapsed time from Dockerfile.build() and Dockerfile.test() and aggregate total build/test times in manager
- Introduce formatDuration(ms) helper in logging module to format timings
- Switch from console.log to structured logger calls across cache, manager, dockerfile and push paths
- Use silent exec variants when verbose is false and stream exec when verbose is true
## 2026-02-06 - 1.8.0 - feat(build)
add optional content-hash based build cache to skip rebuilding unchanged Dockerfiles
- Introduce TsDockerCache to compute SHA-256 of Dockerfile content and persist cache to .nogit/tsdocker_support.json
- Add ICacheEntry and ICacheData interfaces and a cached flag to IBuildCommandOptions
- Integrate cached mode in TsDockerManager: skip builds on cache hits, verify image presence, record builds on misses, and still perform dependency tagging
- Expose --cached option in CLI to enable the cached build flow
- Cache records store contentHash, imageId, buildTag and timestamp
## 2026-02-06 - 1.7.0 - feat(cli)
add CLI version display using commitinfo
- Imported commitinfo from './00_commitinfo_data.js' and called tsdockerCli.addVersion(commitinfo.version) to surface package/commit version in the Smartcli instance
- Change made in ts/tsdocker.cli.ts — small user-facing CLI enhancement; no breaking changes
## 2026-02-06 - 1.6.0 - feat(docker)
add support for no-cache builds and tag built images for local dependency resolution
- Introduce IBuildCommandOptions.noCache to control --no-cache behavior
- Propagate noCache from CLI (via cache flag) through TsDockerManager to Dockerfile.build
- Append --no-cache to docker build/buildx commands when noCache is true
- After building an image, tag it with full base image references used by dependent Dockerfiles so their FROM lines resolve to the locally-built image
- Log tagging actions and execute docker tag via smartshellInstance
## 2026-02-06 - 1.5.0 - feat(build)
add support for selective builds, platform override and build timeout
- Introduce IBuildCommandOptions with patterns, platform and timeout to control build behavior
- Allow manager.build() to accept options and build only matching Dockerfiles (including dependencies) preserving topological order
- Add CLI parsing for build/push to accept positional Dockerfile patterns and --platform/--timeout flags
- Support single-platform override via docker buildx and multi-platform buildx detection
- Implement streaming exec with timeout to kill long-running builds and surface timeout errors
## 2026-02-04 - 1.4.3 - fix(dockerfile) ## 2026-02-04 - 1.4.3 - fix(dockerfile)
fix matching of base images to local Dockerfiles by stripping registry prefixes when comparing image references fix matching of base images to local Dockerfiles by stripping registry prefixes when comparing image references

View File

@@ -1,6 +1,6 @@
{ {
"name": "@git.zone/tsdocker", "name": "@git.zone/tsdocker",
"version": "1.4.3", "version": "1.17.2",
"private": false, "private": false,
"description": "develop npm modules cross platform with docker", "description": "develop npm modules cross platform with docker",
"main": "dist_ts/index.js", "main": "dist_ts/index.js",
@@ -47,6 +47,7 @@
"@push.rocks/smartanalytics": "^2.0.15", "@push.rocks/smartanalytics": "^2.0.15",
"@push.rocks/smartcli": "^4.0.20", "@push.rocks/smartcli": "^4.0.20",
"@push.rocks/smartfs": "^1.3.1", "@push.rocks/smartfs": "^1.3.1",
"@push.rocks/smartinteract": "^2.0.16",
"@push.rocks/smartlog": "^3.1.10", "@push.rocks/smartlog": "^3.1.10",
"@push.rocks/smartlog-destination-local": "^9.0.2", "@push.rocks/smartlog-destination-local": "^9.0.2",
"@push.rocks/smartlog-source-ora": "^1.0.9", "@push.rocks/smartlog-source-ora": "^1.0.9",

277
pnpm-lock.yaml generated
View File

@@ -29,6 +29,9 @@ importers:
'@push.rocks/smartfs': '@push.rocks/smartfs':
specifier: ^1.3.1 specifier: ^1.3.1
version: 1.3.1 version: 1.3.1
'@push.rocks/smartinteract':
specifier: ^2.0.16
version: 2.0.16
'@push.rocks/smartlog': '@push.rocks/smartlog':
specifier: ^3.1.10 specifier: ^3.1.10
version: 3.1.10 version: 3.1.10
@@ -618,6 +621,62 @@ packages:
resolution: {integrity: sha512-mfOoUlIw8VBiJYPrl5RZfMzkXC/z7gbSpi2ecycrj/gRWLq2CMV+Q+0G+JPjeOmuNFgg0skEIzkVFzVYFP6URw==} resolution: {integrity: sha512-mfOoUlIw8VBiJYPrl5RZfMzkXC/z7gbSpi2ecycrj/gRWLq2CMV+Q+0G+JPjeOmuNFgg0skEIzkVFzVYFP6URw==}
engines: {node: '>=18.0.0'} engines: {node: '>=18.0.0'}
'@inquirer/checkbox@3.0.1':
resolution: {integrity: sha512-0hm2nrToWUdD6/UHnel/UKGdk1//ke5zGUpHIvk5ZWmaKezlGxZkOJXNSWsdxO/rEqTkbB3lNC2J6nBElV2aAQ==}
engines: {node: '>=18'}
'@inquirer/confirm@4.0.1':
resolution: {integrity: sha512-46yL28o2NJ9doViqOy0VDcoTzng7rAb6yPQKU7VDLqkmbCaH4JqK4yk4XqlzNWy9PVC5pG1ZUXPBQv+VqnYs2w==}
engines: {node: '>=18'}
'@inquirer/core@9.2.1':
resolution: {integrity: sha512-F2VBt7W/mwqEU4bL0RnHNZmC/OxzNx9cOYxHqnXX3MP6ruYvZUZAW9imgN9+h/uBT/oP8Gh888J2OZSbjSeWcg==}
engines: {node: '>=18'}
'@inquirer/editor@3.0.1':
resolution: {integrity: sha512-VA96GPFaSOVudjKFraokEEmUQg/Lub6OXvbIEZU1SDCmBzRkHGhxoFAVaF30nyiB4m5cEbDgiI2QRacXZ2hw9Q==}
engines: {node: '>=18'}
'@inquirer/expand@3.0.1':
resolution: {integrity: sha512-ToG8d6RIbnVpbdPdiN7BCxZGiHOTomOX94C2FaT5KOHupV40tKEDozp12res6cMIfRKrXLJyexAZhWVHgbALSQ==}
engines: {node: '>=18'}
'@inquirer/figures@1.0.15':
resolution: {integrity: sha512-t2IEY+unGHOzAaVM5Xx6DEWKeXlDDcNPeDyUpsRc6CUhBfU3VQOEl+Vssh7VNp1dR8MdUJBWhuObjXCsVpjN5g==}
engines: {node: '>=18'}
'@inquirer/input@3.0.1':
resolution: {integrity: sha512-BDuPBmpvi8eMCxqC5iacloWqv+5tQSJlUafYWUe31ow1BVXjW2a5qe3dh4X/Z25Wp22RwvcaLCc2siHobEOfzg==}
engines: {node: '>=18'}
'@inquirer/number@2.0.1':
resolution: {integrity: sha512-QpR8jPhRjSmlr/mD2cw3IR8HRO7lSVOnqUvQa8scv1Lsr3xoAMMworcYW3J13z3ppjBFBD2ef1Ci6AE5Qn8goQ==}
engines: {node: '>=18'}
'@inquirer/password@3.0.1':
resolution: {integrity: sha512-haoeEPUisD1NeE2IanLOiFr4wcTXGWrBOyAyPZi1FfLJuXOzNmxCJPgUrGYKVh+Y8hfGJenIfz5Wb/DkE9KkMQ==}
engines: {node: '>=18'}
'@inquirer/prompts@6.0.1':
resolution: {integrity: sha512-yl43JD/86CIj3Mz5mvvLJqAOfIup7ncxfJ0Btnl0/v5TouVUyeEdcpknfgc+yMevS/48oH9WAkkw93m7otLb/A==}
engines: {node: '>=18'}
'@inquirer/rawlist@3.0.1':
resolution: {integrity: sha512-VgRtFIwZInUzTiPLSfDXK5jLrnpkuSOh1ctfaoygKAdPqjcjKYmGh6sCY1pb0aGnCGsmhUxoqLDUAU0ud+lGXQ==}
engines: {node: '>=18'}
'@inquirer/search@2.0.1':
resolution: {integrity: sha512-r5hBKZk3g5MkIzLVoSgE4evypGqtOannnB3PKTG9NRZxyFRKcfzrdxXXPcoJQsxJPzvdSU2Rn7pB7lw0GCmGAg==}
engines: {node: '>=18'}
'@inquirer/select@3.0.1':
resolution: {integrity: sha512-lUDGUxPhdWMkN/fHy1Lk7pF3nK1fh/gqeyWXmctefhxLYxlDsc7vsPBEpxrfVGDsVdyYJsiJoD4bJ1b623cV1Q==}
engines: {node: '>=18'}
'@inquirer/type@2.0.0':
resolution: {integrity: sha512-XvJRx+2KR3YXyYtPUUy+qd9i7p+GO9Ko6VIIpWlBrpWwXDv8WLFeHTxz35CfQFUiBMLXlGHhGzys7lqit9gWag==}
engines: {node: '>=18'}
'@isaacs/balanced-match@4.0.1': '@isaacs/balanced-match@4.0.1':
resolution: {integrity: sha512-yzMTt9lEb8Gv7zRioUilSglI0c0smZ9k5D65677DLWLtWJaXIS3CqcGyUFByYKlnUj6TkjLVs54fBl6+TiGQDQ==} resolution: {integrity: sha512-yzMTt9lEb8Gv7zRioUilSglI0c0smZ9k5D65677DLWLtWJaXIS3CqcGyUFByYKlnUj6TkjLVs54fBl6+TiGQDQ==}
engines: {node: 20 || >=22} engines: {node: 20 || >=22}
@@ -842,6 +901,9 @@ packages:
'@push.rocks/smarthash@3.2.6': '@push.rocks/smarthash@3.2.6':
resolution: {integrity: sha512-Mq/WNX0Tjjes3X1gHd/ZBwOOKSrAG/Z3Xoc0OcCm3P20WKpniihkMpsnlE7wGjvpHLi/ZRe/XkB3KC3d5r9X4g==} resolution: {integrity: sha512-Mq/WNX0Tjjes3X1gHd/ZBwOOKSrAG/Z3Xoc0OcCm3P20WKpniihkMpsnlE7wGjvpHLi/ZRe/XkB3KC3d5r9X4g==}
'@push.rocks/smartinteract@2.0.16':
resolution: {integrity: sha512-eltvVRRUKBKd77DSFA4DPY2g4V4teZLNe8A93CDy/WglglYcUjxMoLY/b0DFTWCWKYT+yjk6Fe6p0FRrvX9Yvg==}
'@push.rocks/smartjson@5.2.0': '@push.rocks/smartjson@5.2.0':
resolution: {integrity: sha512-710e8UwovRfPgUtaBHcd6unaODUjV5fjxtGcGCqtaTcmvOV6VpasdVfT66xMDzQmWH2E9ZfHDJeso9HdDQzNQA==} resolution: {integrity: sha512-710e8UwovRfPgUtaBHcd6unaODUjV5fjxtGcGCqtaTcmvOV6VpasdVfT66xMDzQmWH2E9ZfHDJeso9HdDQzNQA==}
@@ -1520,6 +1582,9 @@ packages:
'@types/ms@2.1.0': '@types/ms@2.1.0':
resolution: {integrity: sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==} resolution: {integrity: sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==}
'@types/mute-stream@0.0.4':
resolution: {integrity: sha512-CPM9nzrCPPJHQNA9keH9CVkVI+WR5kMa+7XEs5jcGQ0VoAGnLv242w8lIVgwAEfmE4oufJRaTc9PNLQl0ioAow==}
'@types/node-forge@1.3.14': '@types/node-forge@1.3.14':
resolution: {integrity: sha512-mhVF2BnD4BO+jtOp7z1CdzaK4mbuK0LLQYAvdOLqHTavxFNq4zA1EmYkpnFjP8HOUzedfQkRnp0E2ulSAYSzAw==} resolution: {integrity: sha512-mhVF2BnD4BO+jtOp7z1CdzaK4mbuK0LLQYAvdOLqHTavxFNq4zA1EmYkpnFjP8HOUzedfQkRnp0E2ulSAYSzAw==}
@@ -1589,6 +1654,9 @@ packages:
'@types/which@3.0.4': '@types/which@3.0.4':
resolution: {integrity: sha512-liyfuo/106JdlgSchJzXEQCVArk0CvevqPote8F8HgWgJ3dRCcTHgJIsLDuee0kxk/mhbInzIZk3QWSZJ8R+2w==} resolution: {integrity: sha512-liyfuo/106JdlgSchJzXEQCVArk0CvevqPote8F8HgWgJ3dRCcTHgJIsLDuee0kxk/mhbInzIZk3QWSZJ8R+2w==}
'@types/wrap-ansi@3.0.0':
resolution: {integrity: sha512-ltIpx+kM7g/MLRZfkbL7EsCEjfzCcScLpkg37eXEtx5kmrAKBkTJwd1GIAjDSL8wTpM6Hzn5YO4pSb91BEwu1g==}
'@types/ws@8.18.1': '@types/ws@8.18.1':
resolution: {integrity: sha512-ThVF6DCVhA8kUGy+aazFQ4kXQ7E1Ty7A3ypFOe0IcJV8O/M511G99AW24irKrW56Wt44yG9+ij8FaqoBGkuBXg==} resolution: {integrity: sha512-ThVF6DCVhA8kUGy+aazFQ4kXQ7E1Ty7A3ypFOe0IcJV8O/M511G99AW24irKrW56Wt44yG9+ij8FaqoBGkuBXg==}
@@ -1622,6 +1690,10 @@ packages:
resolution: {integrity: sha1-kQ3lDvzHwJ49gvL4er1rcAwYgYo=} resolution: {integrity: sha1-kQ3lDvzHwJ49gvL4er1rcAwYgYo=}
engines: {node: '>=0.10.0'} engines: {node: '>=0.10.0'}
ansi-escapes@4.3.2:
resolution: {integrity: sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==}
engines: {node: '>=8'}
ansi-regex@5.0.1: ansi-regex@5.0.1:
resolution: {integrity: sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==} resolution: {integrity: sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==}
engines: {node: '>=8'} engines: {node: '>=8'}
@@ -1818,6 +1890,9 @@ packages:
character-entities@2.0.2: character-entities@2.0.2:
resolution: {integrity: sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ==} resolution: {integrity: sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ==}
chardet@0.7.0:
resolution: {integrity: sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA==}
chokidar@4.0.3: chokidar@4.0.3:
resolution: {integrity: sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA==} resolution: {integrity: sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA==}
engines: {node: '>= 14.16.0'} engines: {node: '>= 14.16.0'}
@@ -1843,6 +1918,10 @@ packages:
resolution: {integrity: sha512-ywqV+5MmyL4E7ybXgKys4DugZbX0FC6LnwrhjuykIjnK9k8OQacQ7axGKnjDXWNhns0xot3bZI5h55H8yo9cJg==} resolution: {integrity: sha512-ywqV+5MmyL4E7ybXgKys4DugZbX0FC6LnwrhjuykIjnK9k8OQacQ7axGKnjDXWNhns0xot3bZI5h55H8yo9cJg==}
engines: {node: '>=6'} engines: {node: '>=6'}
cli-width@4.1.0:
resolution: {integrity: sha512-ouuZd4/dm2Sw5Gmqy6bGyNNNe1qt9RpmxveLSO7KcgsTnU7RXfsw+/bukWGo1abgBiMAic068rclZsO4IWmmxQ==}
engines: {node: '>= 12'}
cliui@8.0.1: cliui@8.0.1:
resolution: {integrity: sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==} resolution: {integrity: sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==}
engines: {node: '>=12'} engines: {node: '>=12'}
@@ -2143,6 +2222,10 @@ packages:
extend@3.0.2: extend@3.0.2:
resolution: {integrity: sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==} resolution: {integrity: sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==}
external-editor@3.1.0:
resolution: {integrity: sha512-hMQ4CX1p1izmuLYyZqLMO/qGNw10wSv9QDCPfzXfyFrOaCSSoRfqE1Kf1s5an66J5JZC62NewG+mK49jOCtQew==}
engines: {node: '>=4'}
extract-zip@2.0.1: extract-zip@2.0.1:
resolution: {integrity: sha512-GDhU9ntwuKyGXdZBUgTIe+vXnWj0fppUEtMDL0+idd5Sta8TGpHssn/eusA9mrPr9qNDym6SxAYZjNvCn/9RBg==} resolution: {integrity: sha512-GDhU9ntwuKyGXdZBUgTIe+vXnWj0fppUEtMDL0+idd5Sta8TGpHssn/eusA9mrPr9qNDym6SxAYZjNvCn/9RBg==}
engines: {node: '>= 10.17.0'} engines: {node: '>= 10.17.0'}
@@ -2398,6 +2481,10 @@ packages:
humanize-ms@1.2.1: humanize-ms@1.2.1:
resolution: {integrity: sha1-xG4xWaKT9riW2ikxbYtv6Lt5u+0=} resolution: {integrity: sha1-xG4xWaKT9riW2ikxbYtv6Lt5u+0=}
iconv-lite@0.4.24:
resolution: {integrity: sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==}
engines: {node: '>=0.10.0'}
iconv-lite@0.6.3: iconv-lite@0.6.3:
resolution: {integrity: sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==} resolution: {integrity: sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==}
engines: {node: '>=0.10.0'} engines: {node: '>=0.10.0'}
@@ -2422,6 +2509,10 @@ packages:
ini@1.3.8: ini@1.3.8:
resolution: {integrity: sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==} resolution: {integrity: sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==}
inquirer@11.1.0:
resolution: {integrity: sha512-CmLAZT65GG/v30c+D2Fk8+ceP6pxD6RL+hIUOWAltCmeyEqWYwqu9v76q03OvjyZ3AB0C1Ala2stn1z/rMqGEw==}
engines: {node: '>=18'}
ip-address@10.1.0: ip-address@10.1.0:
resolution: {integrity: sha512-XXADHxXmvT9+CRxhXg56LJovE+bmWnEWB78LB83VZTprKTmaC5QfruXocxzTZ2Kl0DNwKuBdlIhjL8LeY8Sf8Q==} resolution: {integrity: sha512-XXADHxXmvT9+CRxhXg56LJovE+bmWnEWB78LB83VZTprKTmaC5QfruXocxzTZ2Kl0DNwKuBdlIhjL8LeY8Sf8Q==}
engines: {node: '>= 12'} engines: {node: '>= 12'}
@@ -2887,6 +2978,10 @@ packages:
mute-stream@0.0.8: mute-stream@0.0.8:
resolution: {integrity: sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA==} resolution: {integrity: sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA==}
mute-stream@1.0.0:
resolution: {integrity: sha512-avsJQhyd+680gKXyG/sQc0nXaC6rBkPOfyHYcFb9+hdkqQkR9bdnkJ0AMZhke0oesPqIO+mFFJ+IdBc7mst4IA==}
engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0}
nanoid@4.0.2: nanoid@4.0.2:
resolution: {integrity: sha512-7ZtY5KTCNheRGfEFxnedV5zFiORN1+Y1N6zvPTnHQd8ENUvfaDBeuJDZb2bN/oXwXxu3qkTXDzy57W5vAmDTBw==} resolution: {integrity: sha512-7ZtY5KTCNheRGfEFxnedV5zFiORN1+Y1N6zvPTnHQd8ENUvfaDBeuJDZb2bN/oXwXxu3qkTXDzy57W5vAmDTBw==}
engines: {node: ^14 || ^16 || >=18} engines: {node: ^14 || ^16 || >=18}
@@ -2961,6 +3056,10 @@ packages:
resolution: {integrity: sha512-sjYP8QyVWBpBZWD6Vr1M/KwknSw6kJOz41tvGMlwWeClHBtYKTbHMki1PsLZnxKpXMPbTKv9b3pjQu3REib96A==} resolution: {integrity: sha512-sjYP8QyVWBpBZWD6Vr1M/KwknSw6kJOz41tvGMlwWeClHBtYKTbHMki1PsLZnxKpXMPbTKv9b3pjQu3REib96A==}
engines: {node: '>=8'} engines: {node: '>=8'}
os-tmpdir@1.0.2:
resolution: {integrity: sha1-u+Z0BseaqFxc/sdm/lc0VV36EnQ=}
engines: {node: '>=0.10.0'}
p-cancelable@3.0.0: p-cancelable@3.0.0:
resolution: {integrity: sha512-mlVgR3PGuzlo0MmTdk4cXqXWlwQDLnONTAg6sm62XkMJEiRxN3GL3SffkYvqwonbkJBcrI7Uvv5Zh9yjvn2iUw==} resolution: {integrity: sha512-mlVgR3PGuzlo0MmTdk4cXqXWlwQDLnONTAg6sm62XkMJEiRxN3GL3SffkYvqwonbkJBcrI7Uvv5Zh9yjvn2iUw==}
engines: {node: '>=12.20'} engines: {node: '>=12.20'}
@@ -3238,6 +3337,10 @@ packages:
resolution: {integrity: sha512-nLTrUKm2UyiL7rlhapu/Zl45FwNgkZGaCpZbIHajDYgwlJCOzLSk+cIPAnsEqV955GjILJnKbdQC1nVPz+gAYQ==} resolution: {integrity: sha512-nLTrUKm2UyiL7rlhapu/Zl45FwNgkZGaCpZbIHajDYgwlJCOzLSk+cIPAnsEqV955GjILJnKbdQC1nVPz+gAYQ==}
engines: {node: '>= 18'} engines: {node: '>= 18'}
run-async@3.0.0:
resolution: {integrity: sha512-540WwVDOMxA6dN6We19EcT9sc3hkXPw5mzRNGM3FkdN/vtE9NFvj5lFAPNwUDmJjXidm3v7TC1cTE7t17Ulm1Q==}
engines: {node: '>=0.12.0'}
rxjs@7.8.2: rxjs@7.8.2:
resolution: {integrity: sha512-dhKf903U/PQZY6boNNtAGdWbG85WAbjT/1xYoZIC7FAY0yWapOBQVsVrDl58W86//e1VpMNBtRV4MaXfdMySFA==} resolution: {integrity: sha512-dhKf903U/PQZY6boNNtAGdWbG85WAbjT/1xYoZIC7FAY0yWapOBQVsVrDl58W86//e1VpMNBtRV4MaXfdMySFA==}
@@ -3444,6 +3547,10 @@ packages:
tiny-worker@2.3.0: tiny-worker@2.3.0:
resolution: {integrity: sha512-pJ70wq5EAqTAEl9IkGzA+fN0836rycEuz2Cn6yeZ6FRzlVS5IDOkFHpIoEsksPRQV34GDqXm65+OlnZqUSyK2g==} resolution: {integrity: sha512-pJ70wq5EAqTAEl9IkGzA+fN0836rycEuz2Cn6yeZ6FRzlVS5IDOkFHpIoEsksPRQV34GDqXm65+OlnZqUSyK2g==}
tmp@0.0.33:
resolution: {integrity: sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw==}
engines: {node: '>=0.6.0'}
toidentifier@1.0.1: toidentifier@1.0.1:
resolution: {integrity: sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==} resolution: {integrity: sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==}
engines: {node: '>=0.6'} engines: {node: '>=0.6'}
@@ -3487,6 +3594,10 @@ packages:
turndown@7.2.2: turndown@7.2.2:
resolution: {integrity: sha512-1F7db8BiExOKxjSMU2b7if62D/XOyQyZbPKq/nUwopfgnHlqXHqQ0lvfUTeUIr1lZJzOPFn43dODyMSIfvWRKQ==} resolution: {integrity: sha512-1F7db8BiExOKxjSMU2b7if62D/XOyQyZbPKq/nUwopfgnHlqXHqQ0lvfUTeUIr1lZJzOPFn43dODyMSIfvWRKQ==}
type-fest@0.21.3:
resolution: {integrity: sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==}
engines: {node: '>=10'}
type-fest@2.19.0: type-fest@2.19.0:
resolution: {integrity: sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA==} resolution: {integrity: sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA==}
engines: {node: '>=12.20'} engines: {node: '>=12.20'}
@@ -3608,6 +3719,10 @@ packages:
engines: {node: ^18.17.0 || >=20.5.0} engines: {node: ^18.17.0 || >=20.5.0}
hasBin: true hasBin: true
wrap-ansi@6.2.0:
resolution: {integrity: sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==}
engines: {node: '>=8'}
wrap-ansi@7.0.0: wrap-ansi@7.0.0:
resolution: {integrity: sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==} resolution: {integrity: sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==}
engines: {node: '>=10'} engines: {node: '>=10'}
@@ -3675,6 +3790,10 @@ packages:
resolution: {integrity: sha512-Ow9nuGZE+qp1u4JIPvg+uCiUr7xGQWdff7JQSk5VGYTAZMDe2q8lxJ10ygv10qmSj031Ty/6FNJpLO4o1Sgc+w==} resolution: {integrity: sha512-Ow9nuGZE+qp1u4JIPvg+uCiUr7xGQWdff7JQSk5VGYTAZMDe2q8lxJ10ygv10qmSj031Ty/6FNJpLO4o1Sgc+w==}
engines: {node: '>=12'} engines: {node: '>=12'}
yoctocolors-cjs@2.1.3:
resolution: {integrity: sha512-U/PBtDf35ff0D8X8D0jfdzHYEPFxAI7jJlxZXwCSez5M3190m+QobIfh+sWDWSHMCWWJN2AWamkegn6vr6YBTw==}
engines: {node: '>=18'}
zod@3.25.76: zod@3.25.76:
resolution: {integrity: sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==} resolution: {integrity: sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==}
@@ -4602,6 +4721,102 @@ snapshots:
dependencies: dependencies:
happy-dom: 15.11.7 happy-dom: 15.11.7
'@inquirer/checkbox@3.0.1':
dependencies:
'@inquirer/core': 9.2.1
'@inquirer/figures': 1.0.15
'@inquirer/type': 2.0.0
ansi-escapes: 4.3.2
yoctocolors-cjs: 2.1.3
'@inquirer/confirm@4.0.1':
dependencies:
'@inquirer/core': 9.2.1
'@inquirer/type': 2.0.0
'@inquirer/core@9.2.1':
dependencies:
'@inquirer/figures': 1.0.15
'@inquirer/type': 2.0.0
'@types/mute-stream': 0.0.4
'@types/node': 22.19.1
'@types/wrap-ansi': 3.0.0
ansi-escapes: 4.3.2
cli-width: 4.1.0
mute-stream: 1.0.0
signal-exit: 4.1.0
strip-ansi: 6.0.1
wrap-ansi: 6.2.0
yoctocolors-cjs: 2.1.3
'@inquirer/editor@3.0.1':
dependencies:
'@inquirer/core': 9.2.1
'@inquirer/type': 2.0.0
external-editor: 3.1.0
'@inquirer/expand@3.0.1':
dependencies:
'@inquirer/core': 9.2.1
'@inquirer/type': 2.0.0
yoctocolors-cjs: 2.1.3
'@inquirer/figures@1.0.15': {}
'@inquirer/input@3.0.1':
dependencies:
'@inquirer/core': 9.2.1
'@inquirer/type': 2.0.0
'@inquirer/number@2.0.1':
dependencies:
'@inquirer/core': 9.2.1
'@inquirer/type': 2.0.0
'@inquirer/password@3.0.1':
dependencies:
'@inquirer/core': 9.2.1
'@inquirer/type': 2.0.0
ansi-escapes: 4.3.2
'@inquirer/prompts@6.0.1':
dependencies:
'@inquirer/checkbox': 3.0.1
'@inquirer/confirm': 4.0.1
'@inquirer/editor': 3.0.1
'@inquirer/expand': 3.0.1
'@inquirer/input': 3.0.1
'@inquirer/number': 2.0.1
'@inquirer/password': 3.0.1
'@inquirer/rawlist': 3.0.1
'@inquirer/search': 2.0.1
'@inquirer/select': 3.0.1
'@inquirer/rawlist@3.0.1':
dependencies:
'@inquirer/core': 9.2.1
'@inquirer/type': 2.0.0
yoctocolors-cjs: 2.1.3
'@inquirer/search@2.0.1':
dependencies:
'@inquirer/core': 9.2.1
'@inquirer/figures': 1.0.15
'@inquirer/type': 2.0.0
yoctocolors-cjs: 2.1.3
'@inquirer/select@3.0.1':
dependencies:
'@inquirer/core': 9.2.1
'@inquirer/figures': 1.0.15
'@inquirer/type': 2.0.0
ansi-escapes: 4.3.2
yoctocolors-cjs: 2.1.3
'@inquirer/type@2.0.0':
dependencies:
mute-stream: 1.0.0
'@isaacs/balanced-match@4.0.1': {} '@isaacs/balanced-match@4.0.1': {}
'@isaacs/brace-expansion@5.0.0': '@isaacs/brace-expansion@5.0.0':
@@ -5159,6 +5374,13 @@ snapshots:
'@types/through2': 2.0.41 '@types/through2': 2.0.41
through2: 4.0.2 through2: 4.0.2
'@push.rocks/smartinteract@2.0.16':
dependencies:
'@push.rocks/lik': 6.2.2
'@push.rocks/smartobject': 1.0.12
'@push.rocks/smartpromise': 4.2.3
inquirer: 11.1.0
'@push.rocks/smartjson@5.2.0': '@push.rocks/smartjson@5.2.0':
dependencies: dependencies:
'@push.rocks/smartenv': 5.0.13 '@push.rocks/smartenv': 5.0.13
@@ -6200,6 +6422,10 @@ snapshots:
'@types/ms@2.1.0': {} '@types/ms@2.1.0': {}
'@types/mute-stream@0.0.4':
dependencies:
'@types/node': 25.0.9
'@types/node-forge@1.3.14': '@types/node-forge@1.3.14':
dependencies: dependencies:
'@types/node': 22.19.1 '@types/node': 22.19.1
@@ -6269,6 +6495,8 @@ snapshots:
'@types/which@3.0.4': {} '@types/which@3.0.4': {}
'@types/wrap-ansi@3.0.0': {}
'@types/ws@8.18.1': '@types/ws@8.18.1':
dependencies: dependencies:
'@types/node': 22.19.1 '@types/node': 22.19.1
@@ -6308,6 +6536,10 @@ snapshots:
ansi-256-colors@1.1.0: {} ansi-256-colors@1.1.0: {}
ansi-escapes@4.3.2:
dependencies:
type-fest: 0.21.3
ansi-regex@5.0.1: {} ansi-regex@5.0.1: {}
ansi-regex@6.2.2: {} ansi-regex@6.2.2: {}
@@ -6507,6 +6739,8 @@ snapshots:
character-entities@2.0.2: {} character-entities@2.0.2: {}
chardet@0.7.0: {}
chokidar@4.0.3: chokidar@4.0.3:
dependencies: dependencies:
readdirp: 4.1.2 readdirp: 4.1.2
@@ -6529,6 +6763,8 @@ snapshots:
cli-spinners@2.9.2: {} cli-spinners@2.9.2: {}
cli-width@4.1.0: {}
cliui@8.0.1: cliui@8.0.1:
dependencies: dependencies:
string-width: 4.2.3 string-width: 4.2.3
@@ -6881,6 +7117,12 @@ snapshots:
extend@3.0.2: {} extend@3.0.2: {}
external-editor@3.1.0:
dependencies:
chardet: 0.7.0
iconv-lite: 0.4.24
tmp: 0.0.33
extract-zip@2.0.1: extract-zip@2.0.1:
dependencies: dependencies:
debug: 4.4.3 debug: 4.4.3
@@ -7209,6 +7451,10 @@ snapshots:
dependencies: dependencies:
ms: 2.1.3 ms: 2.1.3
iconv-lite@0.4.24:
dependencies:
safer-buffer: 2.1.2
iconv-lite@0.6.3: iconv-lite@0.6.3:
dependencies: dependencies:
safer-buffer: 2.1.2 safer-buffer: 2.1.2
@@ -7233,6 +7479,17 @@ snapshots:
ini@1.3.8: {} ini@1.3.8: {}
inquirer@11.1.0:
dependencies:
'@inquirer/core': 9.2.1
'@inquirer/prompts': 6.0.1
'@inquirer/type': 2.0.0
'@types/mute-stream': 0.0.4
ansi-escapes: 4.3.2
mute-stream: 1.0.0
run-async: 3.0.0
rxjs: 7.8.2
ip-address@10.1.0: {} ip-address@10.1.0: {}
ipaddr.js@1.9.1: {} ipaddr.js@1.9.1: {}
@@ -7844,6 +8101,8 @@ snapshots:
mute-stream@0.0.8: {} mute-stream@0.0.8: {}
mute-stream@1.0.0: {}
nanoid@4.0.2: {} nanoid@4.0.2: {}
negotiator@0.6.3: {} negotiator@0.6.3: {}
@@ -7909,6 +8168,8 @@ snapshots:
strip-ansi: 6.0.1 strip-ansi: 6.0.1
wcwidth: 1.0.1 wcwidth: 1.0.1
os-tmpdir@1.0.2: {}
p-cancelable@3.0.0: {} p-cancelable@3.0.0: {}
p-finally@1.0.0: {} p-finally@1.0.0: {}
@@ -8256,6 +8517,8 @@ snapshots:
transitivePeerDependencies: transitivePeerDependencies:
- supports-color - supports-color
run-async@3.0.0: {}
rxjs@7.8.2: rxjs@7.8.2:
dependencies: dependencies:
tslib: 2.8.1 tslib: 2.8.1
@@ -8542,6 +8805,10 @@ snapshots:
dependencies: dependencies:
esm: 3.2.25 esm: 3.2.25
tmp@0.0.33:
dependencies:
os-tmpdir: 1.0.2
toidentifier@1.0.1: {} toidentifier@1.0.1: {}
token-types@6.1.1: token-types@6.1.1:
@@ -8581,6 +8848,8 @@ snapshots:
dependencies: dependencies:
'@mixmark-io/domino': 2.2.0 '@mixmark-io/domino': 2.2.0
type-fest@0.21.3: {}
type-fest@2.19.0: {} type-fest@2.19.0: {}
type-fest@4.41.0: {} type-fest@4.41.0: {}
@@ -8690,6 +8959,12 @@ snapshots:
dependencies: dependencies:
isexe: 3.1.1 isexe: 3.1.1
wrap-ansi@6.2.0:
dependencies:
ansi-styles: 4.3.0
string-width: 4.2.3
strip-ansi: 6.0.1
wrap-ansi@7.0.0: wrap-ansi@7.0.0:
dependencies: dependencies:
ansi-styles: 4.3.0 ansi-styles: 4.3.0
@@ -8738,6 +9013,8 @@ snapshots:
buffer-crc32: 0.2.13 buffer-crc32: 0.2.13
pend: 1.2.0 pend: 1.2.0
yoctocolors-cjs@2.1.3: {}
zod@3.25.76: {} zod@3.25.76: {}
zwitch@2.0.4: {} zwitch@2.0.4: {}

View File

@@ -96,6 +96,30 @@ ts/
- `@push.rocks/smartcli`: CLI framework - `@push.rocks/smartcli`: CLI framework
- `@push.rocks/projectinfo`: Project metadata - `@push.rocks/projectinfo`: Project metadata
## Parallel Builds
`--parallel` flag enables level-based parallel Docker builds:
```bash
tsdocker build --parallel # parallel, default concurrency (4)
tsdocker build --parallel=8 # parallel, concurrency 8
tsdocker build --parallel --cached # works with both modes
```
Implementation: `Dockerfile.computeLevels()` groups topologically sorted Dockerfiles into dependency levels. `Dockerfile.runWithConcurrency()` provides a worker-pool pattern for bounded concurrency. Both are public static methods on the `Dockerfile` class. The parallel logic exists in both `Dockerfile.buildDockerfiles()` (standard mode) and `TsDockerManager.build()` (cached mode).
## OCI Distribution API Push (v1.16+)
All builds now go through a persistent local registry (`localhost:5234`) with volume storage at `.nogit/docker-registry/`. Pushes use the `RegistryCopy` class (`ts/classes.registrycopy.ts`) which implements the OCI Distribution API to copy images (including multi-arch manifest lists) from the local registry to remote registries. This replaces the old `docker tag + docker push` approach that only worked for single-platform images.
Key classes:
- `RegistryCopy` — HTTP-based OCI image copy (auth, blob transfer, manifest handling)
- `Dockerfile.push()` — Now delegates to `RegistryCopy.copyImage()`
- `Dockerfile.needsLocalRegistry()` — Always returns true
- `Dockerfile.startLocalRegistry()` — Uses persistent volume mount
The `config.push` field is now a no-op (kept for backward compat).
## Build Status ## Build Status
- Build: ✅ Passes - Build: ✅ Passes

301
readme.md
View File

@@ -1,6 +1,6 @@
# @git.zone/tsdocker # @git.zone/tsdocker
> 🐳 The ultimate Docker development toolkit for TypeScript projects — build, test, and ship containerized applications with ease. > 🐳 The ultimate Docker development toolkit for TypeScript projects — build, test, and ship multi-arch containerized applications with zero friction.
## Issue Reporting and Security ## Issue Reporting and Security
@@ -8,15 +8,18 @@ For reporting bugs, issues, or security vulnerabilities, please visit [community
## What is tsdocker? ## What is tsdocker?
**tsdocker** is a comprehensive Docker development and building tool that handles everything from testing npm packages in clean environments to building and pushing multi-architecture Docker images across multiple registries. **tsdocker** is a comprehensive Docker development and build tool that handles everything from testing npm packages in clean environments to building and pushing multi-architecture Docker images across multiple registries — all from a single CLI.
### 🎯 Key Capabilities ### 🎯 Key Capabilities
- 🧪 **Containerized Testing** — Run your tests in pristine Docker environments - 🧪 **Containerized Testing** — Run your tests in pristine Docker environments
- 🏗️ **Smart Docker Builds** — Automatically discover, sort, and build Dockerfiles by dependency - 🏗️ **Smart Docker Builds** — Automatically discover, sort, and build Dockerfiles by dependency
- 🚀 **Multi-Registry Push** — Ship to Docker Hub, GitLab, GitHub Container Registry, and more - 🌍 **True Multi-Architecture** — Build for `amd64` and `arm64` simultaneously with Docker Buildx
- 🔧 **Multi-Architecture** — Build for `amd64` and `arm64` with Docker Buildx - 🚀 **Multi-Registry Push** — Ship to Docker Hub, GitLab, GitHub Container Registry, and more via OCI Distribution API
-**Zero Config Start** — Works out of the box, scales with your needs -**Parallel Builds** — Level-based parallel builds with configurable concurrency
- 🗄️ **Persistent Local Registry** — All images flow through a local OCI registry with persistent storage
- 📦 **Build Caching** — Skip unchanged Dockerfiles with content-hash caching
- 🔧 **Zero Config Start** — Works out of the box, scales with your needs
## Installation ## Installation
@@ -53,6 +56,7 @@ tsdocker will:
2. 📊 Analyze `FROM` dependencies between them 2. 📊 Analyze `FROM` dependencies between them
3. 🔄 Sort them topologically 3. 🔄 Sort them topologically
4. 🏗️ Build each image in the correct order 4. 🏗️ Build each image in the correct order
5. 📦 Push every image to a persistent local registry (`.nogit/docker-registry/`)
### 📤 Push to Registries ### 📤 Push to Registries
@@ -63,33 +67,52 @@ Ship your images to one or all configured registries:
tsdocker push tsdocker push
# Push to a specific registry # Push to a specific registry
tsdocker push registry.gitlab.com tsdocker push --registry=registry.gitlab.com
``` ```
Under the hood, `tsdocker push` uses the **OCI Distribution API** to copy images directly from the local registry to remote registries. This means multi-arch manifest lists are preserved end-to-end — no more single-platform-only pushes.
## CLI Commands ## CLI Commands
| Command | Description | | Command | Description |
|---------|-------------| |---------|-------------|
| `tsdocker` | Run tests in a fresh Docker container | | `tsdocker` | Run tests in a fresh Docker container (legacy mode) |
| `tsdocker build` | Build all Dockerfiles with dependency ordering | | `tsdocker build` | Build all Dockerfiles with dependency ordering |
| `tsdocker push [registry]` | Push images to configured registries | | `tsdocker push` | Build + push images to configured registries |
| `tsdocker pull <registry>` | Pull images from a specific registry | | `tsdocker pull <registry>` | Pull images from a specific registry |
| `tsdocker test` | Run container test scripts (test_*.sh) | | `tsdocker test` | Build + run container test scripts (`test_*.sh`) |
| `tsdocker login` | Authenticate with configured registries | | `tsdocker login` | Authenticate with configured registries |
| `tsdocker list` | Display discovered Dockerfiles and their dependencies | | `tsdocker list` | Display discovered Dockerfiles and their dependencies |
| `tsdocker clean --all` | ⚠️ Aggressively clean Docker environment | | `tsdocker clean` | Interactively clean Docker environment |
| `tsdocker vscode` | Launch containerized VS Code in browser | | `tsdocker vscode` | Launch containerized VS Code in browser |
### Build Flags
| Flag | Description |
|------|-------------|
| `--platform=linux/arm64` | Override build platform for a single architecture |
| `--timeout=600` | Build timeout in seconds |
| `--no-cache` | Force rebuild without Docker layer cache |
| `--cached` | Skip unchanged Dockerfiles (content-hash based) |
| `--verbose` | Stream raw `docker build` output |
| `--parallel` | Enable level-based parallel builds (default concurrency: 4) |
| `--parallel=8` | Parallel builds with custom concurrency |
| `--context=mycontext` | Use a specific Docker context |
### Clean Flags
| Flag | Description |
|------|-------------|
| `--all` | Include all images and volumes (not just dangling) |
| `-y` | Auto-confirm all prompts |
## Configuration ## Configuration
Configure tsdocker in your `package.json` or `npmextra.json`: Configure tsdocker in your `package.json` or `npmextra.json` under the `@git.zone/tsdocker` key:
```json ```json
{ {
"@git.zone/tsdocker": { "@git.zone/tsdocker": {
"baseImage": "node:20",
"command": "npm test",
"dockerSock": false,
"registries": ["registry.gitlab.com", "docker.io"], "registries": ["registry.gitlab.com", "docker.io"],
"registryRepoMap": { "registryRepoMap": {
"registry.gitlab.com": "myorg/myproject" "registry.gitlab.com": "myorg/myproject"
@@ -98,7 +121,6 @@ Configure tsdocker in your `package.json` or `npmextra.json`:
"NODE_VERSION": "NODE_VERSION" "NODE_VERSION": "NODE_VERSION"
}, },
"platforms": ["linux/amd64", "linux/arm64"], "platforms": ["linux/amd64", "linux/arm64"],
"push": false,
"testDir": "./test" "testDir": "./test"
} }
} }
@@ -106,24 +128,73 @@ Configure tsdocker in your `package.json` or `npmextra.json`:
### Configuration Options ### Configuration Options
#### Testing Options (Legacy)
| Option | Type | Description |
|--------|------|-------------|
| `baseImage` | `string` | Docker image for test environment (default: `hosttoday/ht-docker-node:npmdocker`) |
| `command` | `string` | Command to run inside container (default: `npmci npm test`) |
| `dockerSock` | `boolean` | Mount Docker socket for DinD scenarios (default: `false`) |
#### Build & Push Options #### Build & Push Options
| Option | Type | Description | | Option | Type | Default | Description |
|--------|------|-------------| |--------|------|---------|-------------|
| `registries` | `string[]` | Registry URLs to push to | | `registries` | `string[]` | `[]` | Registry URLs to push to |
| `registryRepoMap` | `object` | Map registries to different repository paths | | `registryRepoMap` | `object` | `{}` | Map registries to different repository paths |
| `buildArgEnvMap` | `object` | Map Docker build ARGs to environment variables | | `buildArgEnvMap` | `object` | `{}` | Map Docker build ARGs to environment variables |
| `platforms` | `string[]` | Target architectures (default: `["linux/amd64"]`) | | `platforms` | `string[]` | `["linux/amd64"]` | Target architectures for multi-arch builds |
| `push` | `boolean` | Auto-push after build (default: `false`) | | `testDir` | `string` | `./test` | Directory containing test scripts |
| `testDir` | `string` | Directory containing test scripts |
#### Legacy Testing Options
These options configure the `tsdocker` default command (containerized test runner):
| Option | Type | Default | Description |
|--------|------|---------|-------------|
| `baseImage` | `string` | `hosttoday/ht-docker-node:npmdocker` | Docker image for test environment |
| `command` | `string` | `npmci npm test` | Command to run inside the container |
| `dockerSock` | `boolean` | `false` | Mount Docker socket for DinD scenarios |
## Architecture: How tsdocker Works
tsdocker uses a **local OCI registry** as the canonical store for all built images. This design solves fundamental problems with Docker's local daemon, which cannot hold multi-architecture manifest lists.
### 📐 Build Flow
```
┌─────────────────────────────────────────────────┐
│ tsdocker build │
│ │
│ 1. Start local registry (localhost:5234) │
│ └── Persistent volume: .nogit/docker-registry/
│ │
│ 2. For each Dockerfile (topological order): │
│ ├── Multi-platform: buildx --push → registry │
│ └── Single-platform: docker build → registry │
│ │
│ 3. Stop local registry (data persists on disk) │
└─────────────────────────────────────────────────┘
```
### 📤 Push Flow
```
┌──────────────────────────────────────────────────┐
│ tsdocker push │
│ │
│ 1. Start local registry (loads persisted data) │
│ │
│ 2. For each image × each remote registry: │
│ └── OCI Distribution API copy: │
│ ├── Fetch manifest (single or multi-arch) │
│ ├── Copy blobs (skip if already exist) │
│ └── Push manifest with destination tag │
│ │
│ 3. Stop local registry │
└──────────────────────────────────────────────────┘
```
### 🔑 Why a Local Registry?
| Problem | Solution |
|---------|----------|
| `docker buildx --load` fails for multi-arch images | `buildx --push` to local registry works for any number of platforms |
| `docker push` only pushes single-platform manifests | OCI API copy preserves full manifest lists (multi-arch) |
| Images lost between build and push phases | Persistent storage at `.nogit/docker-registry/` survives restarts |
| Redundant blob uploads on incremental pushes | HEAD checks skip blobs that already exist on the remote |
## Registry Authentication ## Registry Authentication
@@ -140,13 +211,17 @@ export DOCKER_REGISTRY_USER="username"
export DOCKER_REGISTRY_PASSWORD="password" export DOCKER_REGISTRY_PASSWORD="password"
``` ```
### Docker Config Fallback
When pushing, tsdocker will also read credentials from `~/.docker/config.json` if no explicit credentials are provided via environment variables. This means `docker login` credentials work automatically.
### Login Command ### Login Command
```bash ```bash
tsdocker login tsdocker login
``` ```
Authenticates with all configured registries. Authenticates with all configured registries using the provided environment variables.
## Advanced Usage ## Advanced Usage
@@ -162,7 +237,27 @@ Build for multiple platforms using Docker Buildx:
} }
``` ```
tsdocker automatically sets up a Buildx builder when multiple platforms are specified. tsdocker automatically:
- Sets up a Buildx builder with `--driver-opt network=host` (so buildx can reach the local registry)
- Pushes multi-platform images to the local registry via `buildx --push`
- Copies the full manifest list (including all platform variants) to remote registries on `tsdocker push`
### ⚡ Parallel Builds
Speed up builds by building independent images concurrently:
```bash
# Default concurrency (4 workers)
tsdocker build --parallel
# Custom concurrency
tsdocker build --parallel=8
# Works with caching too
tsdocker build --parallel --cached
```
tsdocker groups Dockerfiles into **dependency levels** using topological analysis. Images within the same level have no dependencies on each other and build in parallel. Each level completes before the next begins.
### 📦 Dockerfile Naming Conventions ### 📦 Dockerfile Naming Conventions
@@ -190,7 +285,7 @@ COPY . .
RUN npm run build RUN npm run build
``` ```
tsdocker automatically detects that `Dockerfile_app` depends on `Dockerfile_base` and builds them in the correct order. tsdocker automatically detects that `Dockerfile_app` depends on `Dockerfile_base`, builds them in the correct order, and makes the base image available to dependent builds via the local registry (using `--build-context` for buildx).
### 🧪 Container Test Scripts ### 🧪 Container Test Scripts
@@ -210,6 +305,8 @@ Run with:
tsdocker test tsdocker test
``` ```
This builds all images, starts the local registry (so multi-arch images can be pulled), and runs each matching test script inside a container.
### 🔧 Build Args from Environment ### 🔧 Build Args from Environment
Pass environment variables as Docker build arguments: Pass environment variables as Docker build arguments:
@@ -232,6 +329,24 @@ FROM node:${NODE_VERSION}
RUN echo "//registry.npmjs.org/:_authToken=${NPM_TOKEN}" > ~/.npmrc RUN echo "//registry.npmjs.org/:_authToken=${NPM_TOKEN}" > ~/.npmrc
``` ```
### 🗺️ Registry Repo Mapping
Use different repository names for different registries:
```json
{
"@git.zone/tsdocker": {
"registries": ["registry.gitlab.com", "docker.io"],
"registryRepoMap": {
"registry.gitlab.com": "mygroup/myproject",
"docker.io": "myuser/myproject"
}
}
}
```
When pushing, tsdocker maps the local repo name to the registry-specific path. For example, a locally built `myproject:latest` becomes `registry.gitlab.com/mygroup/myproject:latest` and `docker.io/myuser/myproject:latest`.
### 🐳 Docker-in-Docker Testing ### 🐳 Docker-in-Docker Testing
Test Docker-related tools by mounting the Docker socket: Test Docker-related tools by mounting the Docker socket:
@@ -259,68 +374,40 @@ Output:
Discovered Dockerfiles: Discovered Dockerfiles:
======================== ========================
1. Dockerfile_base 1. /path/to/Dockerfile_base
Tag: myproject:base Tag: myproject:base
Base Image: node:20-alpine Base Image: node:20-alpine
Version: base Version: base
2. Dockerfile_app 2. /path/to/Dockerfile_app
Tag: myproject:app Tag: myproject:app
Base Image: myproject:base Base Image: myproject:base
Version: app Version: app
Depends on: myproject:base Depends on: myproject:base
``` ```
### 🗺️ Registry Repo Mapping
Use different repository names for different registries:
```json
{
"@git.zone/tsdocker": {
"registries": ["registry.gitlab.com", "docker.io"],
"registryRepoMap": {
"registry.gitlab.com": "mygroup/myproject",
"docker.io": "myuser/myproject"
}
}
}
```
## Environment Variables
### qenv Integration
tsdocker automatically loads environment variables from `qenv.yml`:
```yaml
# qenv.yml
API_KEY: your-api-key
DATABASE_URL: postgres://localhost/test
```
These are injected into your test container automatically.
## Examples ## Examples
### Basic Test Configuration ### Minimal Build & Push
```json ```json
{ {
"@git.zone/tsdocker": { "@git.zone/tsdocker": {
"baseImage": "node:20", "registries": ["docker.io"],
"command": "npm test" "platforms": ["linux/amd64"]
} }
} }
``` ```
```bash
tsdocker push
```
### Full Production Setup ### Full Production Setup
```json ```json
{ {
"@git.zone/tsdocker": { "@git.zone/tsdocker": {
"baseImage": "node:20-alpine",
"command": "pnpm test",
"registries": ["registry.gitlab.com", "ghcr.io", "docker.io"], "registries": ["registry.gitlab.com", "ghcr.io", "docker.io"],
"registryRepoMap": { "registryRepoMap": {
"registry.gitlab.com": "myorg/myapp", "registry.gitlab.com": "myorg/myapp",
@@ -338,57 +425,37 @@ These are injected into your test container automatically.
### CI/CD Integration ### CI/CD Integration
**GitLab CI:**
```yaml ```yaml
# .gitlab-ci.yml build-and-push:
build:
stage: build stage: build
script: script:
- npm install -g @git.zone/tsdocker - npm install -g @git.zone/tsdocker
- tsdocker build
- tsdocker push - tsdocker push
variables:
DOCKER_REGISTRY_1: "registry.gitlab.com|$CI_REGISTRY_USER|$CI_REGISTRY_PASSWORD"
```
# GitHub Actions **GitHub Actions:**
```yaml
- name: Build and Push - name: Build and Push
run: | run: |
npm install -g @git.zone/tsdocker npm install -g @git.zone/tsdocker
tsdocker login tsdocker login
tsdocker build
tsdocker push tsdocker push
env: env:
DOCKER_REGISTRY_1: "ghcr.io|${{ github.actor }}|${{ secrets.GITHUB_TOKEN }}" DOCKER_REGISTRY_1: "ghcr.io|${{ github.actor }}|${{ secrets.GITHUB_TOKEN }}"
``` ```
## Requirements
- **Docker** — Docker Engine or Docker Desktop must be installed
- **Node.js** — Version 18 or higher (ESM support required)
- **Docker Buildx** — Required for multi-architecture builds (included in Docker Desktop)
## Why tsdocker?
### 🎯 The Problem
Managing Docker workflows manually is tedious:
- Remembering build order for dependent images
- Pushing to multiple registries with different credentials
- Setting up Buildx for multi-arch builds
- Ensuring consistent test environments
### ✨ The Solution
tsdocker automates the entire workflow:
- **One command** to build all images in dependency order
- **One command** to push to all registries
- **Automatic** Buildx setup for multi-platform builds
- **Consistent** containerized test environments
## TypeScript API ## TypeScript API
tsdocker exposes its types for programmatic use: tsdocker can also be used programmatically:
```typescript ```typescript
import type { ITsDockerConfig } from '@git.zone/tsdocker/dist_ts/interfaces/index.js';
import { TsDockerManager } from '@git.zone/tsdocker/dist_ts/classes.tsdockermanager.js'; import { TsDockerManager } from '@git.zone/tsdocker/dist_ts/classes.tsdockermanager.js';
import type { ITsDockerConfig } from '@git.zone/tsdocker/dist_ts/interfaces/index.js';
const config: ITsDockerConfig = { const config: ITsDockerConfig = {
baseImage: 'node:20', baseImage: 'node:20',
@@ -396,15 +463,21 @@ const config: ITsDockerConfig = {
dockerSock: false, dockerSock: false,
keyValueObject: {}, keyValueObject: {},
registries: ['docker.io'], registries: ['docker.io'],
platforms: ['linux/amd64'], platforms: ['linux/amd64', 'linux/arm64'],
}; };
const manager = new TsDockerManager(config); const manager = new TsDockerManager(config);
await manager.prepare(); await manager.prepare();
await manager.build(); await manager.build({ parallel: true });
await manager.push(); await manager.push();
``` ```
## Requirements
- **Docker** — Docker Engine 20+ or Docker Desktop
- **Node.js** — Version 18 or higher (for native `fetch` and ESM support)
- **Docker Buildx** — Required for multi-architecture builds (included in Docker Desktop)
## Troubleshooting ## Troubleshooting
### "docker not found" ### "docker not found"
@@ -417,11 +490,10 @@ docker --version
### Multi-arch build fails ### Multi-arch build fails
Make sure Docker Buildx is available: Make sure Docker Buildx is available. tsdocker will set up the builder automatically, but you can verify:
```bash ```bash
docker buildx version docker buildx version
docker buildx create --use
``` ```
### Registry authentication fails ### Registry authentication fails
@@ -433,19 +505,22 @@ echo $DOCKER_REGISTRY_1
tsdocker login tsdocker login
``` ```
tsdocker also falls back to `~/.docker/config.json` — ensure you've run `docker login` for your target registries.
### Circular dependency detected ### Circular dependency detected
Review your Dockerfiles' `FROM` statements — you have images depending on each other in a loop. Review your Dockerfiles' `FROM` statements — you have images depending on each other in a loop.
## Performance Tips ### Build context too large
🚀 **Use specific tags**: `node:20-alpine` is smaller and faster than `node:latest` Use a `.dockerignore` file to exclude `node_modules`, `.git`, `.nogit`, and other large directories:
🚀 **Leverage caching**: Docker layers are cached — your builds get faster over time ```
node_modules
🚀 **Prune regularly**: `docker system prune` reclaims disk space .git
.nogit
🚀 **Use .dockerignore**: Exclude `node_modules`, `.git`, etc. from build context dist_ts
```
## Migration from Legacy ## Migration from Legacy

View File

@@ -3,6 +3,6 @@
*/ */
export const commitinfo = { export const commitinfo = {
name: '@git.zone/tsdocker', name: '@git.zone/tsdocker',
version: '1.4.3', version: '1.17.2',
description: 'develop npm modules cross platform with docker' description: 'develop npm modules cross platform with docker'
} }

View File

@@ -0,0 +1,79 @@
import * as plugins from './tsdocker.plugins.js';
import * as fs from 'fs';
import { logger } from './tsdocker.logging.js';
import type { IDockerContextInfo } from './interfaces/index.js';
const smartshellInstance = new plugins.smartshell.Smartshell({ executor: 'bash' });

/**
 * Inspects and reports on the Docker environment the CLI operates in:
 * active context name/endpoint, rootless mode, and container topology.
 */
export class DockerContext {
  /** Result of the last detect() call; null until detection has run. */
  public contextInfo: IDockerContextInfo | null = null;

  /** Sets DOCKER_CONTEXT env var for explicit context selection. */
  public setContext(contextName: string): void {
    process.env.DOCKER_CONTEXT = contextName;
    logger.log('info', `Docker context explicitly set to: ${contextName}`);
  }

  /** Detects current Docker context via `docker context inspect` and rootless via `docker info`. */
  public async detect(): Promise<IDockerContextInfo> {
    let name = 'default';
    let endpoint = 'unknown';
    const inspectRun = await smartshellInstance.execSilent(
      `docker context inspect --format '{{json .}}'`
    );
    if (inspectRun.exitCode === 0 && inspectRun.stdout) {
      try {
        const parsedJson = JSON.parse(inspectRun.stdout.trim());
        // `docker context inspect` may emit a single object or an array of one.
        const record = Array.isArray(parsedJson) ? parsedJson[0] : parsedJson;
        name = record.Name || 'default';
        endpoint = record.Endpoints?.docker?.Host || 'unknown';
      } catch {
        /* fallback to defaults */
      }
    }
    const infoRun = await smartshellInstance.execSilent(
      `docker info --format '{{json .SecurityOptions}}'`
    );
    const isRootless =
      infoRun.exitCode === 0 && !!infoRun.stdout && infoRun.stdout.includes('name=rootless');
    // Topology heuristic: an explicit tcp:// DOCKER_HOST implies docker-in-docker;
    // a /.dockerenv marker implies we run inside a container with a mounted socket.
    let topology: 'socket-mount' | 'dind' | 'local' = 'local';
    const dockerHostEnv = process.env.DOCKER_HOST;
    if (dockerHostEnv && dockerHostEnv.startsWith('tcp://')) {
      topology = 'dind';
    } else if (fs.existsSync('/.dockerenv')) {
      topology = 'socket-mount';
    }
    this.contextInfo = { name, endpoint, isRootless, dockerHost: process.env.DOCKER_HOST, topology };
    return this.contextInfo;
  }

  /** Logs context info prominently. */
  public logContextInfo(): void {
    if (!this.contextInfo) return;
    const { name, endpoint, isRootless, dockerHost, topology } = this.contextInfo;
    logger.log('info', '=== DOCKER CONTEXT ===');
    logger.log('info', `Context: ${name}`);
    logger.log('info', `Endpoint: ${endpoint}`);
    if (dockerHost) logger.log('info', `DOCKER_HOST: ${dockerHost}`);
    logger.log('info', `Rootless: ${isRootless ? 'yes' : 'no'}`);
    logger.log('info', `Topology: ${topology || 'local'}`);
  }

  /** Emits rootless-specific warnings. */
  public logRootlessWarnings(): void {
    if (!this.contextInfo?.isRootless) return;
    logger.log('warn', '[rootless] network=host in buildx is namespaced by rootlesskit');
    logger.log('warn', '[rootless] Local registry may have localhost vs 127.0.0.1 resolution quirks');
  }

  /** Returns context-aware builder name: tsdocker-builder-<context> */
  public getBuilderName(): string {
    const contextName = this.contextInfo?.name || 'default';
    // Builder names must be daemon/shell safe — replace anything exotic with '-'.
    const sanitized = contextName.replace(/[^a-zA-Z0-9_-]/g, '-');
    return `tsdocker-builder-${sanitized}`;
  }
}

View File

@@ -1,8 +1,10 @@
import * as plugins from './tsdocker.plugins.js'; import * as plugins from './tsdocker.plugins.js';
import * as paths from './tsdocker.paths.js'; import * as paths from './tsdocker.paths.js';
import { logger } from './tsdocker.logging.js'; import { logger, formatDuration } from './tsdocker.logging.js';
import { DockerRegistry } from './classes.dockerregistry.js'; import { DockerRegistry } from './classes.dockerregistry.js';
import type { IDockerfileOptions, ITsDockerConfig } from './interfaces/index.js'; import { RegistryCopy } from './classes.registrycopy.js';
import { TsDockerSession } from './classes.tsdockersession.js';
import type { IDockerfileOptions, ITsDockerConfig, IBuildCommandOptions } from './interfaces/index.js';
import type { TsDockerManager } from './classes.tsdockermanager.js'; import type { TsDockerManager } from './classes.tsdockermanager.js';
import * as fs from 'fs'; import * as fs from 'fs';
@@ -10,6 +12,15 @@ const smartshellInstance = new plugins.smartshell.Smartshell({
executor: 'bash', executor: 'bash',
}); });
/**
 * Extracts a platform string (e.g. "linux/amd64" or "linux/arm/v7") from a
 * buildx bracket prefix. The prefix may be like "linux/amd64 ", "linux/arm/v7 stage-1 ",
 * "stage-1 ", or "".
 * Returns null when the prefix carries no platform.
 */
function extractPlatform(prefix: string): string | null {
  // Also capture an optional variant suffix (e.g. "/v7"); the previous pattern
  // (/linux\/\w+/) truncated "linux/arm/v7" to "linux/arm".
  const match = prefix.match(/linux\/\w+(?:\/v\d+)?/);
  return match ? match[0] : null;
}
/** /**
* Class Dockerfile represents a Dockerfile on disk * Class Dockerfile represents a Dockerfile on disk
*/ */
@@ -26,8 +37,10 @@ export class Dockerfile {
.map(entry => plugins.path.join(paths.cwd, entry.name)); .map(entry => plugins.path.join(paths.cwd, entry.name));
const readDockerfilesArray: Dockerfile[] = []; const readDockerfilesArray: Dockerfile[] = [];
logger.log('info', `found ${fileTree.length} Dockerfiles:`); logger.log('info', `found ${fileTree.length} Dockerfile(s):`);
console.log(fileTree); for (const filePath of fileTree) {
logger.log('info', ` ${plugins.path.basename(filePath)}`);
}
for (const dockerfilePath of fileTree) { for (const dockerfilePath of fileTree) {
const myDockerfile = new Dockerfile(managerRef, { const myDockerfile = new Dockerfile(managerRef, {
@@ -133,13 +146,215 @@ export class Dockerfile {
return sortedDockerfileArray; return sortedDockerfileArray;
} }
/**
 * Local registry is always needed — it's the canonical store for all built
 * images. The parameters are accepted (and deliberately ignored, hence the
 * underscore prefixes) so call sites can pass build context without branching.
 */
public static needsLocalRegistry(
  _dockerfiles?: Dockerfile[],
  _options?: { platform?: string },
): boolean {
  return true;
}
/** Starts a persistent registry:2 container with session-unique port and name. */
public static async startLocalRegistry(session: TsDockerSession, isRootless?: boolean): Promise<void> {
  const { registryPort, registryContainerName, isCI, sessionId } = session.config;
  // Persistent storage directory; CI runs get a per-session subdirectory for isolation.
  const dataDirSegments = isCI
    ? ['.nogit', 'docker-registry', sessionId]
    : ['.nogit', 'docker-registry'];
  const registryDataDir = plugins.path.join(paths.cwd, ...dataDirSegments);
  fs.mkdirSync(registryDataDir, { recursive: true });
  // Best-effort removal of any stale container carrying the same name.
  await smartshellInstance.execSilent(
    `docker rm -f ${registryContainerName} 2>/dev/null || true`
  );
  const composeRunCommand = (port: number) =>
    `docker run -d --name ${registryContainerName} -p ${port}:5000 -v "${registryDataDir}:/var/lib/registry" registry:2`;
  let result = await smartshellInstance.execSilent(composeRunCommand(registryPort));
  const combinedOutput = result.stderr || result.stdout || '';
  // Port retry: if the port was stolen between allocation and docker run, reallocate once.
  if (result.exitCode !== 0 && combinedOutput.includes('port is already allocated')) {
    const newPort = await TsDockerSession.allocatePort();
    logger.log('warn', `Port ${registryPort} taken, retrying with ${newPort}`);
    session.config.registryPort = newPort;
    session.config.registryHost = `localhost:${newPort}`;
    result = await smartshellInstance.execSilent(composeRunCommand(newPort));
  }
  if (result.exitCode !== 0) {
    throw new Error(`Failed to start local registry: ${result.stderr || result.stdout}`);
  }
  // registry:2 starts near-instantly; brief wait for readiness
  await new Promise(resolve => setTimeout(resolve, 1000));
  logger.log('info', `Started local registry at ${session.config.registryHost} (container: ${registryContainerName})`);
  if (isRootless) {
    logger.log('warn', `[rootless] Registry on port ${session.config.registryPort} — if buildx cannot reach localhost, try 127.0.0.1`);
  }
}
/** Stops and removes the session-specific local registry container. */
public static async stopLocalRegistry(session: TsDockerSession): Promise<void> {
await smartshellInstance.execSilent(
`docker rm -f ${session.config.registryContainerName} 2>/dev/null || true`
);
logger.log('info', `Stopped local registry (${session.config.registryContainerName})`);
}
/** Pushes a built image to the local registry for buildx consumption. */
public static async pushToLocalRegistry(session: TsDockerSession, dockerfile: Dockerfile): Promise<void> {
  const registryTag = `${session.config.registryHost}/${dockerfile.buildTag}`;
  // Tag the daemon-local image with the registry-qualified name, then push it.
  await smartshellInstance.execSilent(`docker tag ${dockerfile.buildTag} ${registryTag}`);
  const pushRun = await smartshellInstance.execSilent(`docker push ${registryTag}`);
  if (pushRun.exitCode !== 0) {
    throw new Error(`Failed to push to local registry: ${pushRun.stderr || pushRun.stdout}`);
  }
  // Record the registry tag so dependents can reference it as a build context.
  dockerfile.localRegistryTag = registryTag;
  logger.log('info', `Pushed ${dockerfile.buildTag} to local registry as ${registryTag}`);
}
/**
 * Groups topologically sorted Dockerfiles into dependency levels.
 * Level 0 = no local dependencies; level N = depends on something in level N-1.
 * Images within the same level are independent and can build in parallel.
 */
public static computeLevels(sortedDockerfiles: Dockerfile[]): Dockerfile[][] {
  // Assign every Dockerfile a level: one deeper than its local base image, if any.
  // Topological input order guarantees a base is levelled before its dependents.
  const levelOf = new Map<Dockerfile, number>();
  for (const dockerfile of sortedDockerfiles) {
    const base =
      dockerfile.localBaseImageDependent && dockerfile.localBaseDockerfile
        ? dockerfile.localBaseDockerfile
        : null;
    levelOf.set(dockerfile, base ? (levelOf.get(base) ?? 0) + 1 : 0);
  }
  // Bucket Dockerfiles by their assigned level index.
  const deepest = Math.max(0, ...levelOf.values());
  const buckets: Dockerfile[][] = [];
  for (let level = 0; level <= deepest; level++) {
    buckets.push(sortedDockerfiles.filter(d => levelOf.get(d) === level));
  }
  return buckets;
}
/**
 * Runs async tasks with bounded concurrency (worker-pool pattern).
 * Fast-fail: if any task throws, Promise.all rejects immediately.
 */
public static async runWithConcurrency<T>(
  tasks: (() => Promise<T>)[],
  concurrency: number,
): Promise<T[]> {
  const results: T[] = new Array(tasks.length);
  let cursor = 0;
  // Each worker repeatedly claims the next unclaimed index until none remain.
  // The claim (cursor++) is race-free: JS is single-threaded and there is no
  // await between reading and incrementing the counter.
  const drain = async (): Promise<void> => {
    for (;;) {
      const claimed = cursor++;
      if (claimed >= tasks.length) return;
      results[claimed] = await tasks[claimed]();
    }
  };
  const poolSize = Math.min(concurrency, tasks.length);
  await Promise.all(Array.from({ length: poolSize }, () => drain()));
  return results;
}
/** /**
* Builds the corresponding real docker image for each Dockerfile class instance * Builds the corresponding real docker image for each Dockerfile class instance
*/ */
public static async buildDockerfiles(sortedArrayArg: Dockerfile[]): Promise<Dockerfile[]> { public static async buildDockerfiles(
for (const dockerfileArg of sortedArrayArg) { sortedArrayArg: Dockerfile[],
await dockerfileArg.build(); session: TsDockerSession,
options?: { platform?: string; timeout?: number; noCache?: boolean; verbose?: boolean; isRootless?: boolean; parallel?: boolean; parallelConcurrency?: number },
): Promise<Dockerfile[]> {
const total = sortedArrayArg.length;
const overallStart = Date.now();
await Dockerfile.startLocalRegistry(session, options?.isRootless);
try {
if (options?.parallel) {
// === PARALLEL MODE: build independent images concurrently within each level ===
const concurrency = options.parallelConcurrency ?? 4;
const levels = Dockerfile.computeLevels(sortedArrayArg);
logger.log('info', `Parallel build: ${levels.length} level(s), concurrency ${concurrency}`);
for (let l = 0; l < levels.length; l++) {
const level = levels[l];
logger.log('info', ` Level ${l} (${level.length}): ${level.map(df => df.cleanTag).join(', ')}`);
}
let built = 0;
for (let l = 0; l < levels.length; l++) {
const level = levels[l];
logger.log('info', `--- Level ${l}: building ${level.length} image(s) in parallel ---`);
const tasks = level.map((df) => {
const myIndex = ++built;
return async () => {
const progress = `(${myIndex}/${total})`;
logger.log('info', `${progress} Building ${df.cleanTag}...`);
const elapsed = await df.build(options);
logger.log('ok', `${progress} Built ${df.cleanTag} in ${formatDuration(elapsed)}`);
return df;
};
});
await Dockerfile.runWithConcurrency(tasks, concurrency);
// After the entire level completes, push all to local registry + tag for deps
for (const df of level) {
// Tag in host daemon for dependency resolution
const dependentBaseImages = new Set<string>();
for (const other of sortedArrayArg) {
if (other.localBaseDockerfile === df && other.baseImage !== df.buildTag) {
dependentBaseImages.add(other.baseImage);
}
}
for (const fullTag of dependentBaseImages) {
logger.log('info', `Tagging ${df.buildTag} as ${fullTag} for local dependency resolution`);
await smartshellInstance.exec(`docker tag ${df.buildTag} ${fullTag}`);
}
// Push ALL images to local registry (skip if already pushed via buildx)
if (!df.localRegistryTag) {
await Dockerfile.pushToLocalRegistry(session, df);
}
}
}
} else {
// === SEQUENTIAL MODE: build one at a time ===
for (let i = 0; i < total; i++) {
const dockerfileArg = sortedArrayArg[i];
const progress = `(${i + 1}/${total})`;
logger.log('info', `${progress} Building ${dockerfileArg.cleanTag}...`);
const elapsed = await dockerfileArg.build(options);
logger.log('ok', `${progress} Built ${dockerfileArg.cleanTag} in ${formatDuration(elapsed)}`);
// Tag in host daemon for standard docker build compatibility
const dependentBaseImages = new Set<string>();
for (const other of sortedArrayArg) {
if (other.localBaseDockerfile === dockerfileArg && other.baseImage !== dockerfileArg.buildTag) {
dependentBaseImages.add(other.baseImage);
}
}
for (const fullTag of dependentBaseImages) {
logger.log('info', `Tagging ${dockerfileArg.buildTag} as ${fullTag} for local dependency resolution`);
await smartshellInstance.exec(`docker tag ${dockerfileArg.buildTag} ${fullTag}`);
}
// Push ALL images to local registry (skip if already pushed via buildx)
if (!dockerfileArg.localRegistryTag) {
await Dockerfile.pushToLocalRegistry(session, dockerfileArg);
}
}
}
} finally {
await Dockerfile.stopLocalRegistry(session);
} }
logger.log('info', `Total build time: ${formatDuration(Date.now() - overallStart)}`);
return sortedArrayArg; return sortedArrayArg;
} }
@@ -147,9 +362,19 @@ export class Dockerfile {
* Tests all Dockerfiles by calling Dockerfile.test() * Tests all Dockerfiles by calling Dockerfile.test()
*/ */
public static async testDockerfiles(sortedArrayArg: Dockerfile[]): Promise<Dockerfile[]> { public static async testDockerfiles(sortedArrayArg: Dockerfile[]): Promise<Dockerfile[]> {
for (const dockerfileArg of sortedArrayArg) { const total = sortedArrayArg.length;
await dockerfileArg.test(); const overallStart = Date.now();
for (let i = 0; i < total; i++) {
const dockerfileArg = sortedArrayArg[i];
const progress = `(${i + 1}/${total})`;
logger.log('info', `${progress} Testing ${dockerfileArg.cleanTag}...`);
const elapsed = await dockerfileArg.test();
logger.log('ok', `${progress} Tested ${dockerfileArg.cleanTag} in ${formatDuration(elapsed)}`);
} }
logger.log('info', `Total test time: ${formatDuration(Date.now() - overallStart)}`);
return sortedArrayArg; return sortedArrayArg;
} }
@@ -317,6 +542,7 @@ export class Dockerfile {
// INSTANCE PROPERTIES // INSTANCE PROPERTIES
public managerRef: TsDockerManager; public managerRef: TsDockerManager;
public session?: TsDockerSession;
public filePath!: string; public filePath!: string;
public repo: string; public repo: string;
public version: string; public version: string;
@@ -328,6 +554,7 @@ export class Dockerfile {
public baseImage: string; public baseImage: string;
public localBaseImageDependent: boolean; public localBaseImageDependent: boolean;
public localBaseDockerfile!: Dockerfile; public localBaseDockerfile!: Dockerfile;
public localRegistryTag?: string;
constructor(managerRefArg: TsDockerManager, options: IDockerfileOptions) { constructor(managerRefArg: TsDockerManager, options: IDockerfileOptions) {
this.managerRef = managerRefArg; this.managerRef = managerRefArg;
@@ -360,75 +587,191 @@ export class Dockerfile {
} }
/** /**
* Builds the Dockerfile * Creates a line-by-line handler for Docker build output that logs
* recognized layer/step lines in an emphasized format.
*/ */
public async build(): Promise<void> { private createBuildOutputHandler(verbose: boolean): {
logger.log('info', 'now building Dockerfile for ' + this.cleanTag); handleChunk: (chunk: Buffer | string) => void;
const buildArgsString = await Dockerfile.getDockerBuildArgs(this.managerRef); } {
const config = this.managerRef.config; let buffer = '';
const tag = this.cleanTag;
let buildCommand: string; const handleLine = (line: string) => {
// In verbose mode, write raw output prefixed with tag for identification
// Check if multi-platform build is needed if (verbose) {
if (config.platforms && config.platforms.length > 1) { process.stdout.write(`[${tag}] ${line}\n`);
// Multi-platform build using buildx
const platformString = config.platforms.join(',');
buildCommand = `docker buildx build --platform ${platformString} -t ${this.buildTag} -f ${this.filePath} ${buildArgsString} .`;
if (config.push) {
buildCommand += ' --push';
} else {
buildCommand += ' --load';
} }
} else {
// Standard build
const versionLabel = this.managerRef.projectInfo?.npm?.version || 'unknown';
buildCommand = `docker build --label="version=${versionLabel}" -t ${this.buildTag} -f ${this.filePath} ${buildArgsString} .`;
}
const result = await smartshellInstance.exec(buildCommand); // Buildx step: #N [platform step/total] INSTRUCTION
if (result.exitCode !== 0) { const bxStep = line.match(/^#\d+ \[([^\]]+?)(\d+\/\d+)\] (.+)/);
logger.log('error', `Build failed for ${this.cleanTag}`); if (bxStep) {
console.log(result.stdout); const prefix = bxStep[1].trim();
throw new Error(`Build failed for ${this.cleanTag}`); const step = bxStep[2];
} const instruction = bxStep[3];
const platform = extractPlatform(prefix);
const platStr = platform ? `${platform}` : '';
logger.log('note', `[${tag}] ${platStr}[${step}] ${instruction}`);
return;
}
logger.log('ok', `Built ${this.cleanTag}`); // Buildx CACHED: #N CACHED
const bxCached = line.match(/^#(\d+) CACHED/);
if (bxCached) {
logger.log('note', `[${tag}] CACHED`);
return;
}
// Buildx DONE: #N DONE 12.3s
const bxDone = line.match(/^#\d+ DONE (.+)/);
if (bxDone) {
const timing = bxDone[1];
if (!timing.startsWith('0.0')) {
logger.log('note', `[${tag}] DONE ${timing}`);
}
return;
}
// Buildx export phase: #N exporting ...
const bxExport = line.match(/^#\d+ exporting (.+)/);
if (bxExport) {
logger.log('note', `[${tag}] exporting ${bxExport[1]}`);
return;
}
// Standard docker build: Step N/M : INSTRUCTION
const stdStep = line.match(/^Step (\d+\/\d+) : (.+)/);
if (stdStep) {
logger.log('note', `[${tag}] Step ${stdStep[1]}: ${stdStep[2]}`);
return;
}
};
return {
handleChunk: (chunk: Buffer | string) => {
buffer += chunk.toString();
const lines = buffer.split('\n');
buffer = lines.pop() || '';
for (const line of lines) {
const trimmed = line.replace(/\r$/, '').trim();
if (trimmed) handleLine(trimmed);
}
},
};
} }
/** /**
* Pushes the Dockerfile to a registry * Builds the Dockerfile
*/
/**
 * Builds this Dockerfile and returns the elapsed build time in milliseconds.
 *
 * Chooses one of three build strategies:
 *  - `platform` override → buildx single-platform build, `--load` into the daemon
 *  - config.platforms > 1 → buildx multi-platform build, `--push` to the session's
 *    local registry (multi-arch manifests cannot be `--load`ed into the daemon)
 *  - otherwise → standard `docker build` with a version label
 *
 * @param options.platform  single-platform override (e.g. "linux/arm64")
 * @param options.timeout   build timeout in seconds; kills the child on expiry
 * @param options.noCache   adds --no-cache
 * @param options.verbose   stream raw build output instead of parsed step lines
 * @throws Error when the build exits non-zero or times out
 */
public async build(options?: { platform?: string; timeout?: number; noCache?: boolean; verbose?: boolean }): Promise<number> {
  const startTime = Date.now();
  const buildArgsString = await Dockerfile.getDockerBuildArgs(this.managerRef);
  const config = this.managerRef.config;
  const platformOverride = options?.platform;
  const timeout = options?.timeout;
  const noCacheFlag = options?.noCache ? ' --no-cache' : '';
  const verbose = options?.verbose ?? false;
  // If our base image was built locally and already pushed to the local
  // registry, redirect the FROM reference there via --build-context.
  let buildContextFlag = '';
  if (this.localBaseImageDependent && this.localBaseDockerfile) {
    const fromImage = this.baseImage;
    if (this.localBaseDockerfile.localRegistryTag) {
      // BuildKit pulls from the local registry (reachable via host network)
      const registryTag = this.localBaseDockerfile.localRegistryTag;
      buildContextFlag = ` --build-context "${fromImage}=docker-image://${registryTag}"`;
      logger.log('info', `Using local registry build context: ${fromImage} -> docker-image://${registryTag}`);
    }
  }
  let buildCommand: string;
  if (platformOverride) {
    // Single platform override via buildx
    buildCommand = `docker buildx build --progress=plain --platform ${platformOverride}${noCacheFlag}${buildContextFlag} --load -t ${this.buildTag} -f ${this.filePath} ${buildArgsString} .`;
    logger.log('info', `Build: buildx --platform ${platformOverride} --load`);
  } else if (config.platforms && config.platforms.length > 1) {
    // Multi-platform build using buildx — always push to local registry
    const platformString = config.platforms.join(',');
    // Fallback host 5234 covers the case where no session was attached.
    const registryHost = this.session?.config.registryHost || 'localhost:5234';
    const localTag = `${registryHost}/${this.buildTag}`;
    buildCommand = `docker buildx build --progress=plain --platform ${platformString}${noCacheFlag}${buildContextFlag} -t ${localTag} -f ${this.filePath} ${buildArgsString} --push .`;
    this.localRegistryTag = localTag;
    logger.log('info', `Build: buildx --platform ${platformString} --push to local registry`);
  } else {
    // Standard build
    const versionLabel = this.managerRef.projectInfo?.npm?.version || 'unknown';
    buildCommand = `docker build --progress=plain --label="version=${versionLabel}"${noCacheFlag} -t ${this.buildTag} -f ${this.filePath} ${buildArgsString} .`;
    logger.log('info', 'Build: docker build (standard)');
  }
  // Execute build with real-time layer logging
  const handler = this.createBuildOutputHandler(verbose);
  const streaming = await smartshellInstance.execStreamingSilent(buildCommand);
  // Intercept output for layer logging
  streaming.childProcess.stdout?.on('data', handler.handleChunk);
  streaming.childProcess.stderr?.on('data', handler.handleChunk);
  if (timeout) {
    // Race the build against a kill-and-reject timer.
    // NOTE(review): the timer is not cleared when the build finishes first —
    // confirm this cannot keep the process alive for up to `timeout` seconds.
    const timeoutPromise = new Promise<never>((_, reject) => {
      setTimeout(() => {
        streaming.childProcess.kill();
        reject(new Error(`Build timed out after ${timeout}s for ${this.cleanTag}`));
      }, timeout * 1000);
    });
    const result = await Promise.race([streaming.finalPromise, timeoutPromise]);
    if (result.exitCode !== 0) {
      logger.log('error', `Build failed for ${this.cleanTag}`);
      throw new Error(`Build failed for ${this.cleanTag}`);
    }
  } else {
    const result = await streaming.finalPromise;
    if (result.exitCode !== 0) {
      logger.log('error', `Build failed for ${this.cleanTag}`);
      // In non-verbose mode the raw output was suppressed — surface it on failure.
      if (!verbose && result.stdout) {
        logger.log('error', `Build output:\n${result.stdout}`);
      }
      throw new Error(`Build failed for ${this.cleanTag}`);
    }
  }
  return Date.now() - startTime;
}
/**
* Pushes the Dockerfile to a registry using OCI Distribution API copy
* from the local registry to the remote registry.
*/ */
public async push(dockerRegistryArg: DockerRegistry, versionSuffix?: string): Promise<void> { public async push(dockerRegistryArg: DockerRegistry, versionSuffix?: string): Promise<void> {
this.pushTag = Dockerfile.getDockerTagString( const destRepo = this.getDestRepo(dockerRegistryArg.registryUrl);
this.managerRef, const destTag = versionSuffix ? `${this.version}_${versionSuffix}` : this.version;
dockerRegistryArg.registryUrl, const registryCopy = new RegistryCopy();
const registryHost = this.session?.config.registryHost || 'localhost:5234';
this.pushTag = `${dockerRegistryArg.registryUrl}/${destRepo}:${destTag}`;
logger.log('info', `Pushing ${this.pushTag} via OCI copy from local registry...`);
await registryCopy.copyImage(
registryHost,
this.repo, this.repo,
this.version, this.version,
versionSuffix dockerRegistryArg.registryUrl,
destRepo,
destTag,
{ username: dockerRegistryArg.username, password: dockerRegistryArg.password },
); );
await smartshellInstance.exec(`docker tag ${this.buildTag} ${this.pushTag}`);
const pushResult = await smartshellInstance.exec(`docker push ${this.pushTag}`);
if (pushResult.exitCode !== 0) {
logger.log('error', `Push failed for ${this.pushTag}`);
throw new Error(`Push failed for ${this.pushTag}`);
}
// Get image digest
const inspectResult = await smartshellInstance.exec(
`docker inspect --format="{{index .RepoDigests 0}}" ${this.pushTag}`
);
if (inspectResult.exitCode === 0 && inspectResult.stdout.includes('@')) {
const imageDigest = inspectResult.stdout.split('@')[1]?.trim();
console.log(`The image ${this.pushTag} has digest ${imageDigest}`);
}
logger.log('ok', `Pushed ${this.pushTag}`); logger.log('ok', `Pushed ${this.pushTag}`);
} }
/**
* Returns the destination repository for a given registry URL,
* using registryRepoMap if configured, otherwise the default repo.
*/
private getDestRepo(registryUrl: string): string {
const config = this.managerRef.config;
return config.registryRepoMap?.[registryUrl] || this.repo;
}
/** /**
* Pulls the Dockerfile from a registry * Pulls the Dockerfile from a registry
*/ */
@@ -448,40 +791,46 @@ export class Dockerfile {
} }
/** /**
* Tests the Dockerfile by running a test script if it exists * Tests the Dockerfile by running a test script if it exists.
* For multi-platform builds, uses the local registry tag so Docker can auto-pull.
*/ */
public async test(): Promise<void> { public async test(): Promise<number> {
const startTime = Date.now();
const testDir = this.managerRef.config.testDir || plugins.path.join(paths.cwd, 'test'); const testDir = this.managerRef.config.testDir || plugins.path.join(paths.cwd, 'test');
const testFile = plugins.path.join(testDir, 'test_' + this.version + '.sh'); const testFile = plugins.path.join(testDir, 'test_' + this.version + '.sh');
// Use local registry tag for multi-platform images (not in daemon), otherwise buildTag
const imageRef = this.localRegistryTag || this.buildTag;
const sessionId = this.session?.config.sessionId || 'default';
const testContainerName = `tsdocker_test_${sessionId}`;
const testImageName = `tsdocker_test_image_${sessionId}`;
const testFileExists = fs.existsSync(testFile); const testFileExists = fs.existsSync(testFile);
if (testFileExists) { if (testFileExists) {
logger.log('info', `Running tests for ${this.cleanTag}`);
// Run tests in container // Run tests in container
await smartshellInstance.exec( await smartshellInstance.exec(
`docker run --name tsdocker_test_container --entrypoint="bash" ${this.buildTag} -c "mkdir /tsdocker_test"` `docker run --name ${testContainerName} --entrypoint="bash" ${imageRef} -c "mkdir /tsdocker_test"`
); );
await smartshellInstance.exec(`docker cp ${testFile} tsdocker_test_container:/tsdocker_test/test.sh`); await smartshellInstance.exec(`docker cp ${testFile} ${testContainerName}:/tsdocker_test/test.sh`);
await smartshellInstance.exec(`docker commit tsdocker_test_container tsdocker_test_image`); await smartshellInstance.exec(`docker commit ${testContainerName} ${testImageName}`);
const testResult = await smartshellInstance.exec( const testResult = await smartshellInstance.exec(
`docker run --entrypoint="bash" tsdocker_test_image -x /tsdocker_test/test.sh` `docker run --entrypoint="bash" ${testImageName} -x /tsdocker_test/test.sh`
); );
// Cleanup // Cleanup
await smartshellInstance.exec(`docker rm tsdocker_test_container`); await smartshellInstance.exec(`docker rm ${testContainerName}`);
await smartshellInstance.exec(`docker rmi --force tsdocker_test_image`); await smartshellInstance.exec(`docker rmi --force ${testImageName}`);
if (testResult.exitCode !== 0) { if (testResult.exitCode !== 0) {
throw new Error(`Tests failed for ${this.cleanTag}`); throw new Error(`Tests failed for ${this.cleanTag}`);
} }
logger.log('ok', `Tests passed for ${this.cleanTag}`);
} else { } else {
logger.log('warn', `Skipping tests for ${this.cleanTag} because no test file was found at ${testFile}`); logger.log('warn', `Skipping tests for ${this.cleanTag} no test file at ${testFile}`);
} }
return Date.now() - startTime;
} }
/** /**

567
ts/classes.registrycopy.ts Normal file
View File

@@ -0,0 +1,567 @@
import * as fs from 'fs';
import * as os from 'os';
import * as path from 'path';
import { logger } from './tsdocker.logging.js';
/** Username/password pair used to authenticate against a registry. */
interface IRegistryCredentials {
  username: string;
  password: string;
}
/** Bearer-token cache keyed by "<registry>/<scope>"; expiry is an absolute epoch-ms deadline. */
interface ITokenCache {
  [scope: string]: { token: string; expiry: number };
}
/**
* OCI Distribution API client for copying images between registries.
* Supports manifest lists (multi-arch) and single-platform manifests.
* Uses native fetch (Node 18+).
*/
export class RegistryCopy {
private tokenCache: ITokenCache = {};
/**
 * Wraps fetch() with a per-request timeout (AbortSignal) and retry with
 * exponential backoff (1s, 2s, 4s, ...). Retries on network errors and 5xx
 * responses; 4xx client errors are returned to the caller without retrying.
 * Throws the last network error if every attempt fails.
 */
private async fetchWithRetry(
  url: string,
  options: RequestInit & { duplex?: string },
  timeoutMs: number = 300_000,
  maxRetries: number = 3,
): Promise<Response> {
  const method = (options.method || 'GET').toUpperCase();
  let lastError: Error | null = null;
  for (let attempt = 1; attempt <= maxRetries; attempt++) {
    const backoffMs = 1000 * Math.pow(2, attempt - 1);
    try {
      if (attempt > 1) {
        logger.log('info', `Retry ${attempt}/${maxRetries} for ${method} ${url}`);
      }
      const response = await fetch(url, {
        ...options,
        signal: AbortSignal.timeout(timeoutMs),
      });
      // Anything below 500 (success or client error) is handed back as-is.
      if (response.status < 500) {
        return response;
      }
      // 5xx server error: retry unless this was the final attempt.
      if (attempt < maxRetries) {
        logger.log('warn', `${method} ${url} returned ${response.status}, retrying in ${backoffMs}ms (attempt ${attempt}/${maxRetries})...`);
        await new Promise(r => setTimeout(r, backoffMs));
        continue;
      }
      logger.log('error', `${method} ${url} returned ${response.status} after ${maxRetries} attempts, giving up`);
      return response;
    } catch (err) {
      lastError = err as Error;
      if (attempt < maxRetries) {
        logger.log('warn', `${method} ${url} failed (attempt ${attempt}/${maxRetries}): ${lastError.message}, retrying in ${backoffMs}ms...`);
        await new Promise(r => setTimeout(r, backoffMs));
      } else {
        logger.log('error', `${method} ${url} failed after ${maxRetries} attempts: ${lastError.message}`);
      }
    }
  }
  throw lastError!;
}
/**
 * Reads Docker credentials from ~/.docker/config.json for a given registry.
 * Decodes the base64 "auth" field ("user:pass"); returns null when the config
 * is absent, unreadable, or has no usable entry for the registry.
 */
public static getDockerConfigCredentials(registryUrl: string): IRegistryCredentials | null {
  try {
    const configPath = path.join(os.homedir(), '.docker', 'config.json');
    if (!fs.existsSync(configPath)) return null;
    const parsedConfig = JSON.parse(fs.readFileSync(configPath, 'utf-8'));
    const auths = parsedConfig.auths || {};
    // Candidate keys: the exact host plus scheme-prefixed variations.
    const candidates: string[] = [
      registryUrl,
      `https://${registryUrl}`,
      `http://${registryUrl}`,
    ];
    // Docker Hub historically stores credentials under several aliases.
    if (registryUrl === 'docker.io' || registryUrl === 'registry-1.docker.io') {
      candidates.push(
        'https://index.docker.io/v1/',
        'https://index.docker.io/v2/',
        'index.docker.io',
        'docker.io',
        'registry-1.docker.io',
      );
    }
    for (const candidate of candidates) {
      const encoded = auths[candidate]?.auth;
      if (!encoded) continue;
      const pair = Buffer.from(encoded, 'base64').toString('utf-8');
      const separator = pair.indexOf(':');
      if (separator > 0) {
        return {
          username: pair.substring(0, separator),
          password: pair.substring(separator + 1),
        };
      }
    }
    return null;
  } catch {
    // Unreadable/corrupt config is treated the same as "no credentials".
    return null;
  }
}
/**
 * Returns the API base URL for a registry.
 * Docker Hub is served from registry-1.docker.io; localhost/127.0.0.1
 * registries are assumed to speak plain HTTP, everything else HTTPS.
 */
private getRegistryApiBase(registry: string): string {
  const isDockerHub = registry === 'docker.io' || registry === 'index.docker.io';
  if (isDockerHub) {
    return 'https://registry-1.docker.io';
  }
  const isLocal = registry.startsWith('localhost') || registry.startsWith('127.0.0.1');
  return isLocal ? `http://${registry}` : `https://${registry}`;
}
/**
 * Obtains a Bearer token for registry operations.
 * Follows the standard Docker token auth flow:
 *   GET /v2/ → 401 with Www-Authenticate → request token from the realm.
 * Returns null when no auth is needed (or token acquisition fails — the
 * caller then proceeds unauthenticated). Tokens are cached per
 * registry+scope for 5 minutes.
 *
 * @param registry    registry host (e.g. "docker.io", "ghcr.io")
 * @param repo        repository the token is scoped to
 * @param actions     scope actions, e.g. "pull" or "pull,push"
 * @param credentials explicit credentials; falls back to ~/.docker/config.json
 */
private async getToken(
  registry: string,
  repo: string,
  actions: string,
  credentials?: IRegistryCredentials | null,
): Promise<string | null> {
  // Scope string per the Docker token auth spec, e.g. "repository:foo/bar:pull,push".
  const scope = `repository:${repo}:${actions}`;
  const cached = this.tokenCache[`${registry}/${scope}`];
  if (cached && cached.expiry > Date.now()) {
    return cached.token;
  }
  const apiBase = this.getRegistryApiBase(registry);
  // Local registries typically don't need auth
  if (registry.startsWith('localhost') || registry.startsWith('127.0.0.1')) {
    return null;
  }
  try {
    // Probe the registry root; a 401 carries the token realm in Www-Authenticate.
    const checkResp = await this.fetchWithRetry(`${apiBase}/v2/`, { method: 'GET' }, 30_000);
    if (checkResp.ok) return null; // No auth needed
    const wwwAuth = checkResp.headers.get('www-authenticate') || '';
    const realmMatch = wwwAuth.match(/realm="([^"]+)"/);
    const serviceMatch = wwwAuth.match(/service="([^"]+)"/);
    // Without a realm we cannot request a token — proceed unauthenticated.
    if (!realmMatch) return null;
    const realm = realmMatch[1];
    const service = serviceMatch ? serviceMatch[1] : '';
    const tokenUrl = new URL(realm);
    tokenUrl.searchParams.set('scope', scope);
    if (service) tokenUrl.searchParams.set('service', service);
    const headers: Record<string, string> = {};
    // Explicit credentials win; otherwise fall back to ~/.docker/config.json.
    const creds = credentials || RegistryCopy.getDockerConfigCredentials(registry);
    if (creds) {
      headers['Authorization'] = 'Basic ' + Buffer.from(`${creds.username}:${creds.password}`).toString('base64');
    }
    const tokenResp = await this.fetchWithRetry(tokenUrl.toString(), { headers }, 30_000);
    if (!tokenResp.ok) {
      const body = await tokenResp.text();
      throw new Error(`Token request failed (${tokenResp.status}): ${body}`);
    }
    // Some registries return "token", others "access_token" (OAuth2 style).
    const tokenData = await tokenResp.json() as any;
    const token = tokenData.token || tokenData.access_token;
    if (token) {
      // Cache for 5 minutes (conservative)
      this.tokenCache[`${registry}/${scope}`] = {
        token,
        expiry: Date.now() + 5 * 60 * 1000,
      };
    }
    // NOTE(review): if the response had neither field, `token` is undefined
    // here (not null) — callers only truthiness-check it, so this is benign.
    return token;
  } catch (err) {
    // Best-effort: log and proceed unauthenticated rather than aborting the copy.
    logger.log('warn', `Auth for ${registry}: ${(err as Error).message}`);
    return null;
  }
}
/**
 * Performs one HTTP request against a registry, attaching a Bearer token
 * obtained via the standard Docker auth flow when the registry requires it.
 * On a 401 with a cached token, the stale token is evicted so the next
 * call re-authenticates.
 */
private async registryFetch(
  registry: string,
  path: string,
  options: {
    method?: string;
    headers?: Record<string, string>;
    body?: Buffer | ReadableStream | null;
    repo?: string;
    actions?: string;
    credentials?: IRegistryCredentials | null;
  } = {},
): Promise<Response> {
  const targetUrl = `${this.getRegistryApiBase(registry)}${path}`;
  const repo = options.repo || '';
  const actions = options.actions || 'pull';
  // Resolve auth first so the request carries a token when one is needed.
  const bearer = await this.getToken(registry, repo, actions, options.credentials);
  const requestHeaders: Record<string, string> = { ...(options.headers || {}) };
  if (bearer) {
    requestHeaders['Authorization'] = `Bearer ${bearer}`;
  }
  const requestInit: any = {
    method: options.method || 'GET',
    headers: requestHeaders,
  };
  if (options.body) {
    requestInit.body = options.body;
    requestInit.duplex = 'half'; // Required for streaming body in Node
  }
  const response = await this.fetchWithRetry(targetUrl, requestInit, 300_000);
  if (response.status === 401 && bearer) {
    // The cached token was rejected — drop it so a fresh one is fetched next time.
    const cacheKey = `${registry}/repository:${repo}:${actions}`;
    logger.log('warn', `Got 401 for ${registry}${path} — clearing cached token for ${cacheKey}`);
    delete this.tokenCache[cacheKey];
  }
  return response;
}
/**
 * Fetches a manifest (index/list or single image) from a registry.
 * Returns the parsed body, content type, digest, and the raw bytes so the
 * manifest can later be re-pushed byte-for-byte.
 */
private async getManifest(
  registry: string,
  repo: string,
  reference: string,
  credentials?: IRegistryCredentials | null,
): Promise<{ contentType: string; body: any; digest: string; raw: Buffer }> {
  // Advertise support for both OCI and legacy Docker manifest flavors.
  const acceptTypes = [
    'application/vnd.oci.image.index.v1+json',
    'application/vnd.docker.distribution.manifest.list.v2+json',
    'application/vnd.oci.image.manifest.v1+json',
    'application/vnd.docker.distribution.manifest.v2+json',
  ];
  const resp = await this.registryFetch(registry, `/v2/${repo}/manifests/${reference}`, {
    headers: { 'Accept': acceptTypes.join(', ') },
    repo,
    actions: 'pull',
    credentials,
  });
  if (!resp.ok) {
    const body = await resp.text();
    throw new Error(`Failed to get manifest ${registry}/${repo}:${reference} (${resp.status}): ${body}`);
  }
  const raw = Buffer.from(await resp.arrayBuffer());
  // Prefer the server-reported digest; otherwise hash the raw bytes ourselves.
  const digest = resp.headers.get('docker-content-digest') || this.computeDigest(raw);
  return {
    contentType: resp.headers.get('content-type') || '',
    body: JSON.parse(raw.toString('utf-8')),
    digest,
    raw,
  };
}
/**
 * Checks whether a blob with the given digest is already present in a
 * registry repository. Uses a HEAD request; any 2xx counts as present.
 */
private async blobExists(
  registry: string,
  repo: string,
  digest: string,
  credentials?: IRegistryCredentials | null,
): Promise<boolean> {
  const headResp = await this.registryFetch(registry, `/v2/${repo}/blobs/${digest}`, {
    method: 'HEAD',
    repo,
    actions: 'pull,push',
    credentials,
  });
  return headResp.ok;
}
/**
 * Copies one blob from a source to a destination registry.
 * Skips the transfer when the destination already has the digest; otherwise
 * downloads the bytes and performs a monolithic upload: POST to open an
 * upload session, then PUT the data to the returned location with ?digest=.
 */
private async copyBlob(
  srcRegistry: string,
  srcRepo: string,
  destRegistry: string,
  destRepo: string,
  digest: string,
  srcCredentials?: IRegistryCredentials | null,
  destCredentials?: IRegistryCredentials | null,
): Promise<void> {
  // Fast path: destination already holds this blob.
  if (await this.blobExists(destRegistry, destRepo, digest, destCredentials)) {
    logger.log('info', `  Blob ${digest.substring(0, 19)}... already exists, skipping`);
    return;
  }
  // Pull the blob bytes from the source.
  const downloadResp = await this.registryFetch(srcRegistry, `/v2/${srcRepo}/blobs/${digest}`, {
    repo: srcRepo,
    actions: 'pull',
    credentials: srcCredentials,
  });
  if (!downloadResp.ok) {
    throw new Error(`Failed to get blob ${digest} from ${srcRegistry}/${srcRepo}: ${downloadResp.status}`);
  }
  const payload = Buffer.from(await downloadResp.arrayBuffer());
  // Open an upload session at the destination.
  const initResp = await this.registryFetch(destRegistry, `/v2/${destRepo}/blobs/uploads/`, {
    method: 'POST',
    headers: { 'Content-Length': '0' },
    repo: destRepo,
    actions: 'pull,push',
    credentials: destCredentials,
  });
  if (!initResp.ok && initResp.status !== 202) {
    const body = await initResp.text();
    throw new Error(`Failed to initiate upload at ${destRegistry}/${destRepo}: ${initResp.status} ${body}`);
  }
  // The Location header tells us where to PUT the data; it may be relative.
  let uploadLocation = initResp.headers.get('location') || '';
  if (!uploadLocation) {
    throw new Error(`No upload location returned from ${destRegistry}/${destRepo}`);
  }
  if (uploadLocation.startsWith('/')) {
    uploadLocation = `${this.getRegistryApiBase(destRegistry)}${uploadLocation}`;
  }
  // Finish with a monolithic PUT carrying the digest as a query parameter.
  const joinChar = uploadLocation.includes('?') ? '&' : '?';
  const finalizeUrl = `${uploadLocation}${joinChar}digest=${encodeURIComponent(digest)}`;
  // The upload URL bypasses registryFetch, so attach auth explicitly here.
  const bearer = await this.getToken(destRegistry, destRepo, 'pull,push', destCredentials);
  const uploadHeaders: Record<string, string> = {
    'Content-Type': 'application/octet-stream',
    'Content-Length': String(payload.length),
  };
  if (bearer) {
    uploadHeaders['Authorization'] = `Bearer ${bearer}`;
  }
  const finalizeResp = await this.fetchWithRetry(finalizeUrl, {
    method: 'PUT',
    headers: uploadHeaders,
    body: payload,
  }, 300_000);
  if (!finalizeResp.ok) {
    const body = await finalizeResp.text();
    throw new Error(`Failed to upload blob ${digest} to ${destRegistry}/${destRepo}: ${finalizeResp.status} ${body}`);
  }
  const sizeStr = payload.length > 1048576
    ? `${(payload.length / 1048576).toFixed(1)} MB`
    : `${(payload.length / 1024).toFixed(1)} KB`;
  logger.log('info', `  Copied blob ${digest.substring(0, 19)}... (${sizeStr})`);
}
/**
 * Uploads a manifest to a registry under the given reference (tag or digest).
 *
 * @returns The digest reported by the registry, or a locally computed one
 *          when the Docker-Content-Digest header is absent.
 */
private async putManifest(
  registry: string,
  repo: string,
  reference: string,
  manifest: Buffer,
  contentType: string,
  credentials?: IRegistryCredentials | null,
): Promise<string> {
  const putResp = await this.registryFetch(registry, `/v2/${repo}/manifests/${reference}`, {
    method: 'PUT',
    headers: {
      'Content-Type': contentType,
      'Content-Length': String(manifest.length),
    },
    body: manifest,
    repo,
    actions: 'pull,push',
    credentials,
  });
  if (!putResp.ok) {
    const errText = await putResp.text();
    throw new Error(`Failed to put manifest ${registry}/${repo}:${reference} (${putResp.status}): ${errText}`);
  }
  return putResp.headers.get('docker-content-digest') || this.computeDigest(manifest);
}
/**
* Copies a single-platform manifest and all its blobs from source to destination.
*/
private async copySingleManifest(
srcRegistry: string,
srcRepo: string,
destRegistry: string,
destRepo: string,
manifestDigest: string,
srcCredentials?: IRegistryCredentials | null,
destCredentials?: IRegistryCredentials | null,
): Promise<void> {
// Get the platform manifest
const { body: manifest, contentType, raw } = await this.getManifest(
srcRegistry, srcRepo, manifestDigest, srcCredentials,
);
// Copy config blob
if (manifest.config?.digest) {
logger.log('info', ` Copying config blob...`);
await this.copyBlob(
srcRegistry, srcRepo, destRegistry, destRepo,
manifest.config.digest, srcCredentials, destCredentials,
);
}
// Copy layer blobs
const layers = manifest.layers || [];
for (let i = 0; i < layers.length; i++) {
const layer = layers[i];
logger.log('info', ` Copying layer ${i + 1}/${layers.length}...`);
await this.copyBlob(
srcRegistry, srcRepo, destRegistry, destRepo,
layer.digest, srcCredentials, destCredentials,
);
}
// Push the platform manifest by digest
await this.putManifest(
destRegistry, destRepo, manifestDigest, raw, contentType, destCredentials,
);
}
/**
 * Copies a complete image (single- or multi-arch) between registries using
 * the registry HTTP API directly — no docker pull/push involved.
 *
 * @param srcRegistry - Source registry host (e.g., "localhost:5234")
 * @param srcRepo - Source repository (e.g., "myapp")
 * @param srcTag - Source tag (e.g., "v1.0.0")
 * @param destRegistry - Destination registry host (e.g., "registry.gitlab.com")
 * @param destRepo - Destination repository (e.g., "org/myapp")
 * @param destTag - Destination tag (e.g., "v1.0.0" or "v1.0.0_arm64")
 * @param credentials - Optional credentials for destination registry
 */
public async copyImage(
  srcRegistry: string,
  srcRepo: string,
  srcTag: string,
  destRegistry: string,
  destRepo: string,
  destTag: string,
  credentials?: IRegistryCredentials | null,
): Promise<void> {
  logger.log('info', `Copying ${srcRegistry}/${srcRepo}:${srcTag} -> ${destRegistry}/${destRepo}:${destTag}`);
  // The source is always the local registry, which needs no credentials.
  const srcCredentials: IRegistryCredentials | null = null;
  const destCredentials = credentials || RegistryCopy.getDockerConfigCredentials(destRegistry);
  // Resolve whatever the tag points at: an index/list or a single manifest.
  const { body, contentType, raw } = await this.getManifest(srcRegistry, srcRepo, srcTag, srcCredentials);
  const multiArch =
    body.manifests !== undefined ||
    contentType.includes('manifest.list') ||
    contentType.includes('image.index');
  if (!multiArch) {
    // Single platform: copy config + layers, then push the manifest under the tag.
    logger.log('info', 'Single-platform manifest');
    if (body.config?.digest) {
      logger.log('info', '  Copying config blob...');
      await this.copyBlob(
        srcRegistry, srcRepo, destRegistry, destRepo,
        body.config.digest, srcCredentials, destCredentials,
      );
    }
    const layers = body.layers || [];
    for (let i = 0; i < layers.length; i++) {
      logger.log('info', `  Copying layer ${i + 1}/${layers.length}...`);
      await this.copyBlob(
        srcRegistry, srcRepo, destRegistry, destRepo,
        layers[i].digest, srcCredentials, destCredentials,
      );
    }
    const digest = await this.putManifest(
      destRegistry, destRepo, destTag, raw, contentType, destCredentials,
    );
    logger.log('ok', `Pushed manifest to ${destRegistry}/${destRepo}:${destTag} (${digest.substring(0, 19)}...)`);
    return;
  }
  // Multi-arch: copy every referenced platform manifest, then the index itself.
  const platformEntries = (body.manifests || []) as any[];
  logger.log('info', `Multi-arch manifest with ${platformEntries.length} platform(s)`);
  for (const entry of platformEntries) {
    const label = entry.platform
      ? `${entry.platform.os}/${entry.platform.architecture}`
      : entry.digest;
    logger.log('info', `Copying platform: ${label}`);
    await this.copySingleManifest(
      srcRegistry, srcRepo, destRegistry, destRepo,
      entry.digest, srcCredentials, destCredentials,
    );
  }
  // Push the manifest list/index with the destination tag.
  const digest = await this.putManifest(
    destRegistry, destRepo, destTag, raw, contentType, destCredentials,
  );
  logger.log('ok', `Pushed manifest list to ${destRegistry}/${destRepo}:${destTag} (${digest.substring(0, 19)}...)`);
}
/**
 * Computes the sha256 digest of a buffer in OCI "sha256:<hex>" form.
 * Used as a fallback when a registry response lacks Docker-Content-Digest.
 */
private computeDigest(data: Buffer): string {
  // NOTE(review): require() only works in CommonJS output; if this package is
  // built as ESM (the file uses `import` elsewhere), this will throw at runtime
  // — confirm the build target, or hoist to a top-level `import * as crypto`.
  const crypto = require('crypto');
  const hash = crypto.createHash('sha256').update(data).digest('hex');
  return `sha256:${hash}`;
}
}

108
ts/classes.tsdockercache.ts Normal file
View File

@@ -0,0 +1,108 @@
import * as crypto from 'crypto';
import * as fs from 'fs';
import * as path from 'path';
import * as plugins from './tsdocker.plugins.js';
import * as paths from './tsdocker.paths.js';
import { logger } from './tsdocker.logging.js';
import type { ICacheData, ICacheEntry } from './interfaces/index.js';
// Shared shell executor used to invoke the docker CLI (bash so that
// redirections like `> /dev/null 2>&1` work as written).
const smartshellInstance = new plugins.smartshell.Smartshell({
  executor: 'bash',
});
/**
 * Manages content-hash-based build caching for Dockerfiles.
 * Cache is stored in .nogit/tsdocker_support.json.
 *
 * A build may be skipped when (a) the Dockerfile content hash matches the
 * recorded entry and (b) the previously built image id still exists locally.
 */
export class TsDockerCache {
  // Absolute path of the cache file: <cwd>/.nogit/tsdocker_support.json
  private cacheFilePath: string;
  // In-memory cache state; persisted explicitly via save()
  private data: ICacheData;

  constructor() {
    this.cacheFilePath = path.join(paths.cwd, '.nogit', 'tsdocker_support.json');
    this.data = { version: 1, entries: {} };
  }

  /**
   * Loads cache data from disk. Falls back to empty cache on missing/corrupt file.
   */
  public load(): void {
    try {
      const raw = fs.readFileSync(this.cacheFilePath, 'utf-8');
      const parsed = JSON.parse(raw);
      // Only accept the schema this class writes (version 1 with entries map).
      if (parsed && parsed.version === 1 && parsed.entries) {
        this.data = parsed;
      } else {
        logger.log('warn', '[cache] Cache file has unexpected format, starting fresh');
        this.data = { version: 1, entries: {} };
      }
    } catch {
      // Missing or corrupt file — start fresh
      this.data = { version: 1, entries: {} };
    }
  }

  /**
   * Saves cache data to disk. Creates the .nogit directory if needed.
   * Writes to a temp file first and renames it into place so that a crash
   * mid-write cannot leave a truncated/corrupt cache file behind.
   */
  public save(): void {
    const dir = path.dirname(this.cacheFilePath);
    fs.mkdirSync(dir, { recursive: true });
    const tmpPath = `${this.cacheFilePath}.tmp`;
    fs.writeFileSync(tmpPath, JSON.stringify(this.data, null, 2), 'utf-8');
    fs.renameSync(tmpPath, this.cacheFilePath); // atomic replace on POSIX
  }

  /**
   * Computes the SHA-256 hash (hex) of Dockerfile content.
   */
  public computeContentHash(content: string): string {
    return crypto.createHash('sha256').update(content).digest('hex');
  }

  /**
   * Checks whether a build can be skipped for the given Dockerfile.
   * Logs detailed diagnostics and returns true if the build should be skipped.
   *
   * @param cleanTag - Cache key for the image (entries are keyed by this tag)
   * @param content - Current Dockerfile content to hash and compare
   */
  public async shouldSkipBuild(cleanTag: string, content: string): Promise<boolean> {
    const contentHash = this.computeContentHash(content);
    const entry = this.data.entries[cleanTag];
    if (!entry) {
      logger.log('info', `[cache] ${cleanTag}: no cached entry, will build`);
      return false;
    }
    const hashMatch = entry.contentHash === contentHash;
    logger.log('info', `[cache] ${cleanTag}: hash ${hashMatch ? 'matches' : 'changed'}`);
    if (!hashMatch) {
      logger.log('info', `[cache] ${cleanTag}: content changed, will build`);
      return false;
    }
    // Hash matches — verify the image still exists locally (it may have been
    // pruned since the last run, in which case we must rebuild).
    const inspectResult = await smartshellInstance.exec(
      `docker image inspect ${entry.imageId} > /dev/null 2>&1`
    );
    const available = inspectResult.exitCode === 0;
    if (available) {
      logger.log('info', `[cache] ${cleanTag}: cache hit, skipping build`);
      return true;
    }
    logger.log('info', `[cache] ${cleanTag}: image no longer available, will build`);
    return false;
  }

  /**
   * Records a successful build in the cache (in memory only — call save()
   * to persist).
   */
  public recordBuild(cleanTag: string, content: string, imageId: string, buildTag: string): void {
    this.data.entries[cleanTag] = {
      contentHash: this.computeContentHash(content),
      imageId,
      buildTag,
      timestamp: Date.now(),
    };
  }
}

View File

@@ -1,10 +1,14 @@
import * as plugins from './tsdocker.plugins.js'; import * as plugins from './tsdocker.plugins.js';
import * as paths from './tsdocker.paths.js'; import * as paths from './tsdocker.paths.js';
import { logger } from './tsdocker.logging.js'; import { logger, formatDuration } from './tsdocker.logging.js';
import { Dockerfile } from './classes.dockerfile.js'; import { Dockerfile } from './classes.dockerfile.js';
import { DockerRegistry } from './classes.dockerregistry.js'; import { DockerRegistry } from './classes.dockerregistry.js';
import { RegistryStorage } from './classes.registrystorage.js'; import { RegistryStorage } from './classes.registrystorage.js';
import type { ITsDockerConfig } from './interfaces/index.js'; import { TsDockerCache } from './classes.tsdockercache.js';
import { DockerContext } from './classes.dockercontext.js';
import { TsDockerSession } from './classes.tsdockersession.js';
import { RegistryCopy } from './classes.registrycopy.js';
import type { ITsDockerConfig, IBuildCommandOptions } from './interfaces/index.js';
const smartshellInstance = new plugins.smartshell.Smartshell({ const smartshellInstance = new plugins.smartshell.Smartshell({
executor: 'bash', executor: 'bash',
@@ -17,17 +21,28 @@ export class TsDockerManager {
public registryStorage: RegistryStorage; public registryStorage: RegistryStorage;
public config: ITsDockerConfig; public config: ITsDockerConfig;
public projectInfo: any; public projectInfo: any;
public dockerContext: DockerContext;
public session!: TsDockerSession;
private dockerfiles: Dockerfile[] = []; private dockerfiles: Dockerfile[] = [];
constructor(config: ITsDockerConfig) { constructor(config: ITsDockerConfig) {
this.config = config; this.config = config;
this.registryStorage = new RegistryStorage(); this.registryStorage = new RegistryStorage();
this.dockerContext = new DockerContext();
} }
/** /**
* Prepares the manager by loading project info and registries * Prepares the manager by loading project info and registries
*/ */
public async prepare(): Promise<void> { public async prepare(contextArg?: string): Promise<void> {
// Detect Docker context
if (contextArg) {
this.dockerContext.setContext(contextArg);
}
await this.dockerContext.detect();
this.dockerContext.logContextInfo();
this.dockerContext.logRootlessWarnings();
// Load project info // Load project info
try { try {
const projectinfoInstance = new plugins.projectinfo.ProjectInfo(paths.cwd); const projectinfoInstance = new plugins.projectinfo.ProjectInfo(paths.cwd);
@@ -62,9 +77,28 @@ export class TsDockerManager {
} }
} }
} }
// Fallback: check ~/.docker/config.json if env vars didn't provide credentials
if (!this.registryStorage.getRegistryByUrl(registryUrl)) {
const dockerConfigCreds = RegistryCopy.getDockerConfigCredentials(registryUrl);
if (dockerConfigCreds) {
const registry = new DockerRegistry({
registryUrl,
username: dockerConfigCreds.username,
password: dockerConfigCreds.password,
});
this.registryStorage.addRegistry(registry);
logger.log('info', `Loaded credentials for ${registryUrl} from ~/.docker/config.json`);
} else {
logger.log('warn', `No credentials found for ${registryUrl} (checked env vars and ~/.docker/config.json)`);
}
}
} }
} }
// Create session identity (unique ports, names for CI concurrency)
this.session = await TsDockerSession.create();
logger.log('info', `Prepared TsDockerManager with ${this.registryStorage.getAllRegistries().length} registries`); logger.log('info', `Prepared TsDockerManager with ${this.registryStorage.getAllRegistries().length} registries`);
} }
@@ -86,13 +120,39 @@ export class TsDockerManager {
this.dockerfiles = await Dockerfile.readDockerfiles(this); this.dockerfiles = await Dockerfile.readDockerfiles(this);
this.dockerfiles = await Dockerfile.sortDockerfiles(this.dockerfiles); this.dockerfiles = await Dockerfile.sortDockerfiles(this.dockerfiles);
this.dockerfiles = await Dockerfile.mapDockerfiles(this.dockerfiles); this.dockerfiles = await Dockerfile.mapDockerfiles(this.dockerfiles);
// Inject session into each Dockerfile
for (const df of this.dockerfiles) {
df.session = this.session;
}
return this.dockerfiles; return this.dockerfiles;
} }
/** /**
* Builds all discovered Dockerfiles in dependency order * Filters discovered Dockerfiles by name patterns (glob-style).
* Mutates this.dockerfiles in place.
*/ */
public async build(): Promise<Dockerfile[]> { public filterDockerfiles(patterns: string[]): void {
const matched = this.dockerfiles.filter((df) => {
const basename = plugins.path.basename(df.filePath);
return patterns.some((pattern) => {
if (pattern.includes('*') || pattern.includes('?')) {
const regexStr = '^' + pattern.replace(/\*/g, '.*').replace(/\?/g, '.') + '$';
return new RegExp(regexStr).test(basename);
}
return basename === pattern;
});
});
if (matched.length === 0) {
logger.log('warn', `No Dockerfiles matched patterns: ${patterns.join(', ')}`);
}
this.dockerfiles = matched;
}
/**
* Builds discovered Dockerfiles in dependency order.
* When options.patterns is provided, only matching Dockerfiles (and their dependencies) are built.
*/
public async build(options?: IBuildCommandOptions): Promise<Dockerfile[]> {
if (this.dockerfiles.length === 0) { if (this.dockerfiles.length === 0) {
await this.discoverDockerfiles(); await this.discoverDockerfiles();
} }
@@ -102,38 +162,246 @@ export class TsDockerManager {
return []; return [];
} }
// Determine which Dockerfiles to build
let toBuild = this.dockerfiles;
if (options?.patterns && options.patterns.length > 0) {
// Filter to matching Dockerfiles
const matched = this.dockerfiles.filter((df) => {
const basename = plugins.path.basename(df.filePath);
return options.patterns!.some((pattern) => {
if (pattern.includes('*') || pattern.includes('?')) {
// Convert glob pattern to regex
const regexStr = '^' + pattern.replace(/\*/g, '.*').replace(/\?/g, '.') + '$';
return new RegExp(regexStr).test(basename);
}
return basename === pattern;
});
});
if (matched.length === 0) {
logger.log('warn', `No Dockerfiles matched patterns: ${options.patterns.join(', ')}`);
return [];
}
// Resolve dependency chain and preserve topological order
toBuild = this.resolveWithDependencies(matched, this.dockerfiles);
logger.log('info', `Matched ${matched.length} Dockerfile(s), building ${toBuild.length} (including dependencies)`);
}
// Check if buildx is needed // Check if buildx is needed
if (this.config.platforms && this.config.platforms.length > 1) { const useBuildx = !!(options?.platform || (this.config.platforms && this.config.platforms.length > 1));
if (useBuildx) {
await this.ensureBuildx(); await this.ensureBuildx();
} }
logger.log('info', `Building ${this.dockerfiles.length} Dockerfiles...`); logger.log('info', '');
await Dockerfile.buildDockerfiles(this.dockerfiles); logger.log('info', '=== BUILD PHASE ===');
if (useBuildx) {
const platforms = options?.platform || this.config.platforms!.join(', ');
logger.log('info', `Build mode: buildx multi-platform [${platforms}]`);
} else {
logger.log('info', 'Build mode: standard docker build');
}
const localDeps = toBuild.filter(df => df.localBaseImageDependent);
if (localDeps.length > 0) {
logger.log('info', `Local dependencies: ${localDeps.map(df => `${df.cleanTag} -> ${df.localBaseDockerfile?.cleanTag}`).join(', ')}`);
}
if (options?.noCache) {
logger.log('info', 'Cache: disabled (--no-cache)');
}
if (options?.parallel) {
const concurrency = options.parallelConcurrency ?? 4;
const levels = Dockerfile.computeLevels(toBuild);
logger.log('info', `Parallel build: ${levels.length} level(s), concurrency ${concurrency}`);
for (let l = 0; l < levels.length; l++) {
const level = levels[l];
logger.log('info', ` Level ${l} (${level.length}): ${level.map(df => df.cleanTag).join(', ')}`);
}
}
logger.log('info', `Building ${toBuild.length} Dockerfile(s)...`);
if (options?.cached) {
// === CACHED MODE: skip builds for unchanged Dockerfiles ===
logger.log('info', '(cached mode active)');
const cache = new TsDockerCache();
cache.load();
const total = toBuild.length;
const overallStart = Date.now();
await Dockerfile.startLocalRegistry(this.session, this.dockerContext.contextInfo?.isRootless);
try {
if (options?.parallel) {
// === PARALLEL CACHED MODE ===
const concurrency = options.parallelConcurrency ?? 4;
const levels = Dockerfile.computeLevels(toBuild);
let built = 0;
for (let l = 0; l < levels.length; l++) {
const level = levels[l];
logger.log('info', `--- Level ${l}: building ${level.length} image(s) in parallel ---`);
const tasks = level.map((df) => {
const myIndex = ++built;
return async () => {
const progress = `(${myIndex}/${total})`;
const skip = await cache.shouldSkipBuild(df.cleanTag, df.content);
if (skip) {
logger.log('ok', `${progress} Skipped ${df.cleanTag} (cached)`);
} else {
logger.log('info', `${progress} Building ${df.cleanTag}...`);
const elapsed = await df.build({
platform: options?.platform,
timeout: options?.timeout,
noCache: options?.noCache,
verbose: options?.verbose,
});
logger.log('ok', `${progress} Built ${df.cleanTag} in ${formatDuration(elapsed)}`);
const imageId = await df.getId();
cache.recordBuild(df.cleanTag, df.content, imageId, df.buildTag);
}
return df;
};
});
await Dockerfile.runWithConcurrency(tasks, concurrency);
// After the entire level completes, push all to local registry + tag for deps
for (const df of level) {
const dependentBaseImages = new Set<string>();
for (const other of toBuild) {
if (other.localBaseDockerfile === df && other.baseImage !== df.buildTag) {
dependentBaseImages.add(other.baseImage);
}
}
for (const fullTag of dependentBaseImages) {
logger.log('info', `Tagging ${df.buildTag} as ${fullTag} for local dependency resolution`);
await smartshellInstance.exec(`docker tag ${df.buildTag} ${fullTag}`);
}
// Push ALL images to local registry (skip if already pushed via buildx)
if (!df.localRegistryTag) {
await Dockerfile.pushToLocalRegistry(this.session, df);
}
}
}
} else {
// === SEQUENTIAL CACHED MODE ===
for (let i = 0; i < total; i++) {
const dockerfileArg = toBuild[i];
const progress = `(${i + 1}/${total})`;
const skip = await cache.shouldSkipBuild(dockerfileArg.cleanTag, dockerfileArg.content);
if (skip) {
logger.log('ok', `${progress} Skipped ${dockerfileArg.cleanTag} (cached)`);
} else {
logger.log('info', `${progress} Building ${dockerfileArg.cleanTag}...`);
const elapsed = await dockerfileArg.build({
platform: options?.platform,
timeout: options?.timeout,
noCache: options?.noCache,
verbose: options?.verbose,
});
logger.log('ok', `${progress} Built ${dockerfileArg.cleanTag} in ${formatDuration(elapsed)}`);
const imageId = await dockerfileArg.getId();
cache.recordBuild(dockerfileArg.cleanTag, dockerfileArg.content, imageId, dockerfileArg.buildTag);
}
// Tag for dependents IMMEDIATELY (not after all builds)
const dependentBaseImages = new Set<string>();
for (const other of toBuild) {
if (other.localBaseDockerfile === dockerfileArg && other.baseImage !== dockerfileArg.buildTag) {
dependentBaseImages.add(other.baseImage);
}
}
for (const fullTag of dependentBaseImages) {
logger.log('info', `Tagging ${dockerfileArg.buildTag} as ${fullTag} for local dependency resolution`);
await smartshellInstance.exec(`docker tag ${dockerfileArg.buildTag} ${fullTag}`);
}
// Push ALL images to local registry (skip if already pushed via buildx)
if (!dockerfileArg.localRegistryTag) {
await Dockerfile.pushToLocalRegistry(this.session, dockerfileArg);
}
}
}
} finally {
await Dockerfile.stopLocalRegistry(this.session);
}
logger.log('info', `Total build time: ${formatDuration(Date.now() - overallStart)}`);
cache.save();
} else {
// === STANDARD MODE: build all via static helper ===
await Dockerfile.buildDockerfiles(toBuild, this.session, {
platform: options?.platform,
timeout: options?.timeout,
noCache: options?.noCache,
verbose: options?.verbose,
isRootless: this.dockerContext.contextInfo?.isRootless,
parallel: options?.parallel,
parallelConcurrency: options?.parallelConcurrency,
});
}
logger.log('success', 'All Dockerfiles built successfully'); logger.log('success', 'All Dockerfiles built successfully');
return this.dockerfiles; return toBuild;
}
/**
* Resolves a set of target Dockerfiles to include all their local base image dependencies,
* preserving the original topological build order.
*/
private resolveWithDependencies(targets: Dockerfile[], allSorted: Dockerfile[]): Dockerfile[] {
const needed = new Set<Dockerfile>();
const addWithDeps = (df: Dockerfile) => {
if (needed.has(df)) return;
needed.add(df);
if (df.localBaseImageDependent && df.localBaseDockerfile) {
addWithDeps(df.localBaseDockerfile);
}
};
for (const df of targets) addWithDeps(df);
return allSorted.filter((df) => needed.has(df));
} }
/** /**
* Ensures Docker buildx is set up for multi-architecture builds * Ensures Docker buildx is set up for multi-architecture builds
*/ */
private async ensureBuildx(): Promise<void> { private async ensureBuildx(): Promise<void> {
logger.log('info', 'Setting up Docker buildx for multi-platform builds...'); const builderName = this.dockerContext.getBuilderName() + (this.session?.config.builderSuffix || '');
const platforms = this.config.platforms?.join(', ') || 'default';
// Check if a buildx builder exists logger.log('info', `Setting up Docker buildx [${platforms}]...`);
const inspectResult = await smartshellInstance.exec('docker buildx inspect tsdocker-builder 2>/dev/null'); logger.log('info', `Builder: ${builderName}`);
const inspectResult = await smartshellInstance.exec(`docker buildx inspect ${builderName} 2>/dev/null`);
if (inspectResult.exitCode !== 0) { if (inspectResult.exitCode !== 0) {
// Create a new buildx builder logger.log('info', 'Creating new buildx builder with host network...');
logger.log('info', 'Creating new buildx builder...'); await smartshellInstance.exec(
await smartshellInstance.exec('docker buildx create --name tsdocker-builder --use'); `docker buildx create --name ${builderName} --driver docker-container --driver-opt network=host --use`
);
await smartshellInstance.exec('docker buildx inspect --bootstrap'); await smartshellInstance.exec('docker buildx inspect --bootstrap');
} else { } else {
// Use existing builder const inspectOutput = inspectResult.stdout || '';
await smartshellInstance.exec('docker buildx use tsdocker-builder'); if (!inspectOutput.includes('network=host')) {
logger.log('info', 'Recreating buildx builder with host network (migration)...');
await smartshellInstance.exec(`docker buildx rm ${builderName} 2>/dev/null`);
await smartshellInstance.exec(
`docker buildx create --name ${builderName} --driver docker-container --driver-opt network=host --use`
);
await smartshellInstance.exec('docker buildx inspect --bootstrap');
} else {
await smartshellInstance.exec(`docker buildx use ${builderName}`);
}
} }
logger.log('ok', `Docker buildx ready (builder: ${builderName}, platforms: ${platforms})`);
logger.log('ok', 'Docker buildx ready');
} }
/** /**
@@ -172,11 +440,17 @@ export class TsDockerManager {
return; return;
} }
// Push each Dockerfile to each registry // Start local registry (reads from persistent .nogit/docker-registry/)
for (const dockerfile of this.dockerfiles) { await Dockerfile.startLocalRegistry(this.session, this.dockerContext.contextInfo?.isRootless);
for (const registry of registriesToPush) { try {
await dockerfile.push(registry); // Push each Dockerfile to each registry via OCI copy
for (const dockerfile of this.dockerfiles) {
for (const registry of registriesToPush) {
await dockerfile.push(registry);
}
} }
} finally {
await Dockerfile.stopLocalRegistry(this.session);
} }
logger.log('success', 'All images pushed successfully'); logger.log('success', 'All images pushed successfully');
@@ -203,7 +477,8 @@ export class TsDockerManager {
} }
/** /**
* Runs tests for all Dockerfiles * Runs tests for all Dockerfiles.
* Starts the local registry so multi-platform images can be auto-pulled.
*/ */
public async test(): Promise<void> { public async test(): Promise<void> {
if (this.dockerfiles.length === 0) { if (this.dockerfiles.length === 0) {
@@ -215,7 +490,16 @@ export class TsDockerManager {
return; return;
} }
await Dockerfile.testDockerfiles(this.dockerfiles); logger.log('info', '');
logger.log('info', '=== TEST PHASE ===');
await Dockerfile.startLocalRegistry(this.session, this.dockerContext.contextInfo?.isRootless);
try {
await Dockerfile.testDockerfiles(this.dockerfiles);
} finally {
await Dockerfile.stopLocalRegistry(this.session);
}
logger.log('success', 'All tests completed'); logger.log('success', 'All tests completed');
} }
@@ -227,19 +511,21 @@ export class TsDockerManager {
await this.discoverDockerfiles(); await this.discoverDockerfiles();
} }
console.log('\nDiscovered Dockerfiles:'); logger.log('info', '');
console.log('========================\n'); logger.log('info', 'Discovered Dockerfiles:');
logger.log('info', '========================');
logger.log('info', '');
for (let i = 0; i < this.dockerfiles.length; i++) { for (let i = 0; i < this.dockerfiles.length; i++) {
const df = this.dockerfiles[i]; const df = this.dockerfiles[i];
console.log(`${i + 1}. ${df.filePath}`); logger.log('info', `${i + 1}. ${df.filePath}`);
console.log(` Tag: ${df.cleanTag}`); logger.log('info', ` Tag: ${df.cleanTag}`);
console.log(` Base Image: ${df.baseImage}`); logger.log('info', ` Base Image: ${df.baseImage}`);
console.log(` Version: ${df.version}`); logger.log('info', ` Version: ${df.version}`);
if (df.localBaseImageDependent) { if (df.localBaseImageDependent) {
console.log(` Depends on: ${df.localBaseDockerfile?.cleanTag}`); logger.log('info', ` Depends on: ${df.localBaseDockerfile?.cleanTag}`);
} }
console.log(''); logger.log('info', '');
} }
return this.dockerfiles; return this.dockerfiles;
@@ -251,4 +537,16 @@ export class TsDockerManager {
public getDockerfiles(): Dockerfile[] { public getDockerfiles(): Dockerfile[] {
return this.dockerfiles; return this.dockerfiles;
} }
/**
* Cleans up session-specific resources.
* In CI, removes the session-specific buildx builder to avoid accumulation.
*/
public async cleanup(): Promise<void> {
if (this.session?.config.isCI && this.session.config.builderSuffix) {
const builderName = this.dockerContext.getBuilderName() + this.session.config.builderSuffix;
logger.log('info', `CI cleanup: removing buildx builder ${builderName}`);
await smartshellInstance.execSilent(`docker buildx rm ${builderName} 2>/dev/null || true`);
}
}
} }

View File

@@ -0,0 +1,107 @@
import * as crypto from 'crypto';
import * as net from 'net';
import { logger } from './tsdocker.logging.js';
/**
 * Immutable identity and settings for a single tsdocker invocation.
 * Produced once by `TsDockerSession.create()` and read wherever a
 * session-scoped resource name or port is needed.
 */
export interface ISessionConfig {
  /** Unique id for this invocation (`TSDOCKER_SESSION_ID` override or 8 random hex chars). */
  sessionId: string;
  /** TCP port the session-local registry listens on. */
  registryPort: number;
  /** Host:port string for the local registry, e.g. `localhost:49152`. */
  registryHost: string;
  /** Docker container name for this session's registry instance. */
  registryContainerName: string;
  /** True when a CI environment was detected from env vars. */
  isCI: boolean;
  /** Which CI system was detected, or null outside CI. */
  ciSystem: string | null;
  /** Suffix appended to the buildx builder name; empty outside CI. */
  builderSuffix: string;
}
/**
 * Per-invocation session identity for tsdocker.
 * Generates unique ports, container names, and builder names so that
 * concurrent CI jobs on the same Docker host don't collide.
 *
 * In local (non-CI) dev the builder suffix is empty, preserving the
 * persistent builder behavior.
 */
export class TsDockerSession {
  /** Resolved, immutable configuration for this session. */
  public config: ISessionConfig;

  private constructor(config: ISessionConfig) {
    this.config = config;
  }

  /**
   * Creates a new session. Allocates a dynamic registry port unless
   * overridden via `TSDOCKER_REGISTRY_PORT`; the session id can likewise
   * be pinned via `TSDOCKER_SESSION_ID`.
   */
  public static async create(): Promise<TsDockerSession> {
    const sessionId =
      process.env.TSDOCKER_SESSION_ID || crypto.randomBytes(4).toString('hex');
    const registryPort = await TsDockerSession.allocatePort();
    const registryHost = `localhost:${registryPort}`;
    const registryContainerName = `tsdocker-registry-${sessionId}`;
    const { isCI, ciSystem } = TsDockerSession.detectCI();
    // Only CI gets a per-session builder; local dev keeps the shared one.
    const builderSuffix = isCI ? `-${sessionId}` : '';
    const config: ISessionConfig = {
      sessionId,
      registryPort,
      registryHost,
      registryContainerName,
      isCI,
      ciSystem,
      builderSuffix,
    };
    const session = new TsDockerSession(config);
    session.logInfo();
    return session;
  }

  /**
   * Allocates a free TCP port.
   * Respects a `TSDOCKER_REGISTRY_PORT` override when it parses to a
   * valid port (1-65535); a malformed or out-of-range override is
   * ignored and a dynamic port is used instead.
   */
  public static async allocatePort(): Promise<number> {
    const envPort = process.env.TSDOCKER_REGISTRY_PORT;
    if (envPort) {
      const parsed = parseInt(envPort, 10);
      // Fix: also reject values above 65535 — previously any positive
      // integer was accepted and handed to the registry as a bind port.
      if (!isNaN(parsed) && parsed > 0 && parsed <= 65535) {
        return parsed;
      }
    }
    // Ask the OS for an ephemeral port by listening on port 0, then
    // release it immediately. NOTE(review): there is a small race window
    // between close() and the registry binding the port — acceptable here.
    return new Promise<number>((resolve, reject) => {
      const srv = net.createServer();
      srv.listen(0, '127.0.0.1', () => {
        const addr = srv.address() as net.AddressInfo;
        const port = addr.port;
        srv.close((err) => {
          if (err) reject(err);
          else resolve(port);
        });
      });
      srv.on('error', reject);
    });
  }

  /**
   * Detects whether we're running inside a CI system, checking the most
   * specific environment variables first and the generic `CI` flag last.
   */
  private static detectCI(): { isCI: boolean; ciSystem: string | null } {
    if (process.env.GITEA_ACTIONS) return { isCI: true, ciSystem: 'gitea-actions' };
    if (process.env.GITHUB_ACTIONS) return { isCI: true, ciSystem: 'github-actions' };
    if (process.env.GITLAB_CI) return { isCI: true, ciSystem: 'gitlab-ci' };
    if (process.env.CI) return { isCI: true, ciSystem: 'generic' };
    return { isCI: false, ciSystem: null };
  }

  /** Logs a short banner describing this session's identity. */
  private logInfo(): void {
    const c = this.config;
    logger.log('info', '=== TSDOCKER SESSION ===');
    logger.log('info', `Session ID: ${c.sessionId}`);
    logger.log('info', `Registry: ${c.registryHost} (container: ${c.registryContainerName})`);
    if (c.isCI) {
      logger.log('info', `CI detected: ${c.ciSystem}`);
      logger.log('info', `Builder suffix: ${c.builderSuffix}`);
    }
  }
}

View File

@@ -68,3 +68,38 @@ export interface IPushResult {
digest?: string; digest?: string;
error?: string; error?: string;
} }
/**
 * Options for the build command (also assembled by the push and test
 * CLI flows before they trigger a build).
 */
export interface IBuildCommandOptions {
  patterns?: string[]; // Dockerfile name patterns (e.g., ['Dockerfile_base', 'Dockerfile_*'])
  platform?: string; // Single platform override (e.g., 'linux/arm64')
  timeout?: number; // Build timeout in seconds
  noCache?: boolean; // Force rebuild without Docker layer cache (--no-cache)
  cached?: boolean; // Skip builds when Dockerfile content hasn't changed
  verbose?: boolean; // Stream raw docker build output (default: silent)
  context?: string; // Explicit Docker context name (--context flag)
  parallel?: boolean; // Enable parallel builds within dependency levels
  parallelConcurrency?: number; // Max concurrent builds per level (default 4)
}

/** One build-cache record, keyed in ICacheData by the Dockerfile's clean tag. */
export interface ICacheEntry {
  contentHash: string; // SHA-256 hex of Dockerfile content
  imageId: string; // Docker image ID (sha256:...)
  buildTag: string; // Tag the image was built under
  timestamp: number; // Unix ms
}

/** On-disk shape of the build cache file. */
export interface ICacheData {
  version: 1; // Schema version; literal 1 until the layout changes
  entries: { [cleanTag: string]: ICacheEntry };
}

/** Describes the active Docker context/endpoint. */
export interface IDockerContextInfo {
  name: string; // 'default', 'rootless', 'colima', etc.
  endpoint: string; // 'unix:///var/run/docker.sock'
  isRootless: boolean; // True when the daemon runs rootless
  dockerHost?: string; // value of DOCKER_HOST env var, if set
  topology?: 'socket-mount' | 'dind' | 'local'; // how the daemon is reached
}

View File

@@ -7,8 +7,12 @@ import * as DockerModule from './tsdocker.docker.js';
import { logger, ora } from './tsdocker.logging.js'; import { logger, ora } from './tsdocker.logging.js';
import { TsDockerManager } from './classes.tsdockermanager.js'; import { TsDockerManager } from './classes.tsdockermanager.js';
import { DockerContext } from './classes.dockercontext.js';
import type { IBuildCommandOptions } from './interfaces/index.js';
import { commitinfo } from './00_commitinfo_data.js';
const tsdockerCli = new plugins.smartcli.Smartcli(); const tsdockerCli = new plugins.smartcli.Smartcli();
tsdockerCli.addVersion(commitinfo.version);
export let run = () => { export let run = () => {
// Default command: run tests in container (legacy behavior) // Default command: run tests in container (legacy behavior)
@@ -23,14 +27,44 @@ export let run = () => {
}); });
/** /**
* Build all Dockerfiles in dependency order * Build Dockerfiles in dependency order
* Usage: tsdocker build [Dockerfile_patterns...] [--platform=linux/arm64] [--timeout=600]
*/ */
tsdockerCli.addCommand('build').subscribe(async argvArg => { tsdockerCli.addCommand('build').subscribe(async argvArg => {
try { try {
const config = await ConfigModule.run(); const config = await ConfigModule.run();
const manager = new TsDockerManager(config); const manager = new TsDockerManager(config);
await manager.prepare(); await manager.prepare(argvArg.context as string | undefined);
await manager.build();
const buildOptions: IBuildCommandOptions = {};
const patterns = argvArg._.slice(1) as string[];
if (patterns.length > 0) {
buildOptions.patterns = patterns;
}
if (argvArg.platform) {
buildOptions.platform = argvArg.platform as string;
}
if (argvArg.timeout) {
buildOptions.timeout = Number(argvArg.timeout);
}
if (argvArg.cache === false) {
buildOptions.noCache = true;
}
if (argvArg.cached) {
buildOptions.cached = true;
}
if (argvArg.verbose) {
buildOptions.verbose = true;
}
if (argvArg.parallel) {
buildOptions.parallel = true;
if (typeof argvArg.parallel === 'number') {
buildOptions.parallelConcurrency = argvArg.parallel;
}
}
await manager.build(buildOptions);
await manager.cleanup();
logger.log('success', 'Build completed successfully'); logger.log('success', 'Build completed successfully');
} catch (err) { } catch (err) {
logger.log('error', `Build failed: ${(err as Error).message}`); logger.log('error', `Build failed: ${(err as Error).message}`);
@@ -40,24 +74,58 @@ export let run = () => {
/** /**
* Push built images to configured registries * Push built images to configured registries
* Usage: tsdocker push [Dockerfile_patterns...] [--platform=linux/arm64] [--timeout=600] [--registry=url]
*/ */
tsdockerCli.addCommand('push').subscribe(async argvArg => { tsdockerCli.addCommand('push').subscribe(async argvArg => {
try { try {
const config = await ConfigModule.run(); const config = await ConfigModule.run();
const manager = new TsDockerManager(config); const manager = new TsDockerManager(config);
await manager.prepare(); await manager.prepare(argvArg.context as string | undefined);
// Login first // Login first
await manager.login(); await manager.login();
// Build images first (if not already built) // Parse build options from positional args and flags
await manager.build(); const buildOptions: IBuildCommandOptions = {};
const patterns = argvArg._.slice(1) as string[];
if (patterns.length > 0) {
buildOptions.patterns = patterns;
}
if (argvArg.platform) {
buildOptions.platform = argvArg.platform as string;
}
if (argvArg.timeout) {
buildOptions.timeout = Number(argvArg.timeout);
}
if (argvArg.cache === false) {
buildOptions.noCache = true;
}
if (argvArg.verbose) {
buildOptions.verbose = true;
}
if (argvArg.parallel) {
buildOptions.parallel = true;
if (typeof argvArg.parallel === 'number') {
buildOptions.parallelConcurrency = argvArg.parallel;
}
}
// Get registry from arguments if specified // Build images first, unless --no-build is set
const registryArg = argvArg._[1]; // e.g., tsdocker push registry.gitlab.com if (argvArg.build === false) {
await manager.discoverDockerfiles();
if (buildOptions.patterns?.length) {
manager.filterDockerfiles(buildOptions.patterns);
}
} else {
await manager.build(buildOptions);
}
// Get registry from --registry flag
const registryArg = argvArg.registry as string | undefined;
const registries = registryArg ? [registryArg] : undefined; const registries = registryArg ? [registryArg] : undefined;
await manager.push(registries); await manager.push(registries);
await manager.cleanup();
logger.log('success', 'Push completed successfully'); logger.log('success', 'Push completed successfully');
} catch (err) { } catch (err) {
logger.log('error', `Push failed: ${(err as Error).message}`); logger.log('error', `Push failed: ${(err as Error).message}`);
@@ -78,7 +146,7 @@ export let run = () => {
const config = await ConfigModule.run(); const config = await ConfigModule.run();
const manager = new TsDockerManager(config); const manager = new TsDockerManager(config);
await manager.prepare(); await manager.prepare(argvArg.context as string | undefined);
// Login first // Login first
await manager.login(); await manager.login();
@@ -98,13 +166,30 @@ export let run = () => {
try { try {
const config = await ConfigModule.run(); const config = await ConfigModule.run();
const manager = new TsDockerManager(config); const manager = new TsDockerManager(config);
await manager.prepare(); await manager.prepare(argvArg.context as string | undefined);
// Build images first // Build images first
await manager.build(); const buildOptions: IBuildCommandOptions = {};
if (argvArg.cache === false) {
buildOptions.noCache = true;
}
if (argvArg.cached) {
buildOptions.cached = true;
}
if (argvArg.verbose) {
buildOptions.verbose = true;
}
if (argvArg.parallel) {
buildOptions.parallel = true;
if (typeof argvArg.parallel === 'number') {
buildOptions.parallelConcurrency = argvArg.parallel;
}
}
await manager.build(buildOptions);
// Run tests // Run tests
await manager.test(); await manager.test();
await manager.cleanup();
logger.log('success', 'Tests completed successfully'); logger.log('success', 'Tests completed successfully');
} catch (err) { } catch (err) {
logger.log('error', `Tests failed: ${(err as Error).message}`); logger.log('error', `Tests failed: ${(err as Error).message}`);
@@ -119,7 +204,7 @@ export let run = () => {
try { try {
const config = await ConfigModule.run(); const config = await ConfigModule.run();
const manager = new TsDockerManager(config); const manager = new TsDockerManager(config);
await manager.prepare(); await manager.prepare(argvArg.context as string | undefined);
await manager.login(); await manager.login();
logger.log('success', 'Login completed successfully'); logger.log('success', 'Login completed successfully');
} catch (err) { } catch (err) {
@@ -135,7 +220,7 @@ export let run = () => {
try { try {
const config = await ConfigModule.run(); const config = await ConfigModule.run();
const manager = new TsDockerManager(config); const manager = new TsDockerManager(config);
await manager.prepare(); await manager.prepare(argvArg.context as string | undefined);
await manager.list(); await manager.list();
} catch (err) { } catch (err) {
logger.log('error', `List failed: ${(err as Error).message}`); logger.log('error', `List failed: ${(err as Error).message}`);
@@ -162,27 +247,200 @@ export let run = () => {
}); });
tsdockerCli.addCommand('clean').subscribe(async argvArg => { tsdockerCli.addCommand('clean').subscribe(async argvArg => {
ora.text('cleaning up docker env...'); try {
if (argvArg.all) { const autoYes = !!argvArg.y;
const smartshellInstance = new plugins.smartshell.Smartshell({ const includeAll = !!argvArg.all;
executor: 'bash'
});
ora.text('killing any running docker containers...');
await smartshellInstance.exec(`docker kill $(docker ps -q)`);
ora.text('removing stopped containers...'); const smartshellInstance = new plugins.smartshell.Smartshell({ executor: 'bash' });
await smartshellInstance.exec(`docker rm $(docker ps -a -q)`); const interact = new plugins.smartinteract.SmartInteract();
ora.text('removing images...'); // --- Docker context detection ---
await smartshellInstance.exec(`docker rmi -f $(docker images -q -f dangling=true)`); ora.text('detecting docker context...');
const dockerContext = new DockerContext();
if (argvArg.context) {
dockerContext.setContext(argvArg.context as string);
}
await dockerContext.detect();
ora.stop();
dockerContext.logContextInfo();
ora.text('removing all other images...'); // --- Helper: parse docker output into resource list ---
await smartshellInstance.exec(`docker rmi $(docker images -a -q)`); interface IDockerResource {
id: string;
display: string;
}
ora.text('removing all volumes...'); const listResources = async (command: string): Promise<IDockerResource[]> => {
await smartshellInstance.exec(`docker volume rm $(docker volume ls -f dangling=true -q)`); const result = await smartshellInstance.execSilent(command);
if (result.exitCode !== 0 || !result.stdout.trim()) {
return [];
}
return result.stdout.trim().split('\n').filter(Boolean).map((line) => {
const parts = line.split('\t');
return {
id: parts[0],
display: parts.join(' | '),
};
});
};
// --- Helper: checkbox selection ---
const selectResources = async (
name: string,
message: string,
resources: IDockerResource[],
): Promise<string[]> => {
if (autoYes) {
return resources.map((r) => r.id);
}
const answer = await interact.askQuestion({
name,
type: 'checkbox',
message,
default: [],
choices: resources.map((r) => ({ name: r.display, value: r.id })),
});
return answer.value as string[];
};
// --- Helper: confirm action ---
const confirmAction = async (
name: string,
message: string,
): Promise<boolean> => {
if (autoYes) {
return true;
}
const answer = await interact.askQuestion({
name,
type: 'confirm',
message,
default: false,
});
return answer.value as boolean;
};
// === RUNNING CONTAINERS ===
const runningContainers = await listResources(
`docker ps --format '{{.ID}}\t{{.Names}}\t{{.Image}}\t{{.Status}}'`
);
if (runningContainers.length > 0) {
logger.log('info', `Found ${runningContainers.length} running container(s)`);
const selectedIds = await selectResources(
'runningContainers',
'Select running containers to kill:',
runningContainers,
);
if (selectedIds.length > 0) {
logger.log('info', `Killing ${selectedIds.length} container(s)...`);
await smartshellInstance.exec(`docker kill ${selectedIds.join(' ')}`);
}
} else {
logger.log('info', 'No running containers found');
}
// === STOPPED CONTAINERS ===
const stoppedContainers = await listResources(
`docker ps -a --filter status=exited --filter status=created --format '{{.ID}}\t{{.Names}}\t{{.Image}}\t{{.Status}}'`
);
if (stoppedContainers.length > 0) {
logger.log('info', `Found ${stoppedContainers.length} stopped container(s)`);
const selectedIds = await selectResources(
'stoppedContainers',
'Select stopped containers to remove:',
stoppedContainers,
);
if (selectedIds.length > 0) {
logger.log('info', `Removing ${selectedIds.length} container(s)...`);
await smartshellInstance.exec(`docker rm ${selectedIds.join(' ')}`);
}
} else {
logger.log('info', 'No stopped containers found');
}
// === DANGLING IMAGES ===
const danglingImages = await listResources(
`docker images -f dangling=true --format '{{.ID}}\t{{.Repository}}:{{.Tag}}\t{{.Size}}'`
);
if (danglingImages.length > 0) {
const confirmed = await confirmAction(
'removeDanglingImages',
`Remove ${danglingImages.length} dangling image(s)?`,
);
if (confirmed) {
logger.log('info', `Removing ${danglingImages.length} dangling image(s)...`);
const ids = danglingImages.map((r) => r.id).join(' ');
await smartshellInstance.exec(`docker rmi ${ids}`);
}
} else {
logger.log('info', 'No dangling images found');
}
// === ALL IMAGES (only with --all) ===
if (includeAll) {
const allImages = await listResources(
`docker images --format '{{.ID}}\t{{.Repository}}:{{.Tag}}\t{{.Size}}'`
);
if (allImages.length > 0) {
logger.log('info', `Found ${allImages.length} image(s) total`);
const selectedIds = await selectResources(
'allImages',
'Select images to remove:',
allImages,
);
if (selectedIds.length > 0) {
logger.log('info', `Removing ${selectedIds.length} image(s)...`);
await smartshellInstance.exec(`docker rmi -f ${selectedIds.join(' ')}`);
}
} else {
logger.log('info', 'No images found');
}
}
// === DANGLING VOLUMES ===
const danglingVolumes = await listResources(
`docker volume ls -f dangling=true --format '{{.Name}}\t{{.Driver}}'`
);
if (danglingVolumes.length > 0) {
const confirmed = await confirmAction(
'removeDanglingVolumes',
`Remove ${danglingVolumes.length} dangling volume(s)?`,
);
if (confirmed) {
logger.log('info', `Removing ${danglingVolumes.length} dangling volume(s)...`);
const names = danglingVolumes.map((r) => r.id).join(' ');
await smartshellInstance.exec(`docker volume rm ${names}`);
}
} else {
logger.log('info', 'No dangling volumes found');
}
// === ALL VOLUMES (only with --all) ===
if (includeAll) {
const allVolumes = await listResources(
`docker volume ls --format '{{.Name}}\t{{.Driver}}'`
);
if (allVolumes.length > 0) {
logger.log('info', `Found ${allVolumes.length} volume(s) total`);
const selectedIds = await selectResources(
'allVolumes',
'Select volumes to remove:',
allVolumes,
);
if (selectedIds.length > 0) {
logger.log('info', `Removing ${selectedIds.length} volume(s)...`);
await smartshellInstance.exec(`docker volume rm ${selectedIds.join(' ')}`);
}
} else {
logger.log('info', 'No volumes found');
}
}
logger.log('success', 'Docker cleanup completed!');
} catch (err) {
logger.log('error', `Clean failed: ${(err as Error).message}`);
process.exit(1);
} }
ora.finishSuccess('docker environment now is clean!');
}); });
tsdockerCli.addCommand('vscode').subscribe(async argvArg => { tsdockerCli.addCommand('vscode').subscribe(async argvArg => {

View File

@@ -15,3 +15,12 @@ export const logger = new plugins.smartlog.Smartlog({
logger.addLogDestination(new plugins.smartlogDestinationLocal.DestinationLocal()); logger.addLogDestination(new plugins.smartlogDestinationLocal.DestinationLocal());
export const ora = new plugins.smartlogSouceOra.SmartlogSourceOra(); export const ora = new plugins.smartlogSouceOra.SmartlogSourceOra();
/**
 * Formats a millisecond duration for log output.
 *
 * @param ms - Duration in milliseconds (non-negative).
 * @returns `"<n>ms"` under one second, `"<s.s>s"` under one minute,
 *          otherwise `"<m>m <s>s"`.
 */
export function formatDuration(ms: number): string {
  if (ms < 1000) return `${ms}ms`;
  const totalSeconds = ms / 1000;
  if (totalSeconds < 60) {
    const secondsText = totalSeconds.toFixed(1);
    // Fix: toFixed rounds 59.95s+ up to "60.0"; promote to minute form
    // instead of printing the nonsensical "60.0s".
    if (secondsText !== '60.0') return `${secondsText}s`;
  }
  let minutes = Math.floor(totalSeconds / 60);
  let seconds = Math.round(totalSeconds % 60);
  // Fix: carry when the remainder rounds up to a full minute
  // (e.g. 119600ms), which previously produced "1m 60s".
  if (seconds === 60) {
    minutes += 1;
    seconds = 0;
  }
  return `${minutes}m ${seconds}s`;
}

View File

@@ -11,6 +11,7 @@ import * as smartlog from '@push.rocks/smartlog';
import * as smartlogDestinationLocal from '@push.rocks/smartlog-destination-local'; import * as smartlogDestinationLocal from '@push.rocks/smartlog-destination-local';
import * as smartlogSouceOra from '@push.rocks/smartlog-source-ora'; import * as smartlogSouceOra from '@push.rocks/smartlog-source-ora';
import * as smartopen from '@push.rocks/smartopen'; import * as smartopen from '@push.rocks/smartopen';
import * as smartinteract from '@push.rocks/smartinteract';
import * as smartshell from '@push.rocks/smartshell'; import * as smartshell from '@push.rocks/smartshell';
import * as smartstring from '@push.rocks/smartstring'; import * as smartstring from '@push.rocks/smartstring';
@@ -25,6 +26,7 @@ export {
smartpromise, smartpromise,
qenv, qenv,
smartcli, smartcli,
smartinteract,
smartlog, smartlog,
smartlogDestinationLocal, smartlogDestinationLocal,
smartlogSouceOra, smartlogSouceOra,