Compare commits

...

24 Commits

Author SHA1 Message Date
63078139ec v1.15.1
Some checks failed
Default (tags) / security (push) Successful in 39s
Default (tags) / test (push) Failing after 4m0s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-07 09:41:22 +00:00
0cb5515b93 fix(registry): use persistent local registry and OCI Distribution API image copy for pushes 2026-02-07 09:41:22 +00:00
aa0425f9bc v1.15.0
Some checks failed
Default (tags) / security (push) Successful in 42s
Default (tags) / test (push) Failing after 4m0s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-07 05:17:32 +00:00
2d4d7c671a feat(clean): Make the command interactive: add smartinteract prompts, docker context detection, and selective resource removal with support for --all and -y auto-confirm 2026-02-07 05:17:32 +00:00
3085eb590f v1.14.0
Some checks failed
Default (tags) / security (push) Successful in 34s
Default (tags) / test (push) Failing after 4m0s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-07 04:46:06 +00:00
04b75b42f3 feat(build): add level-based parallel builds with --parallel and configurable concurrency 2026-02-07 04:46:06 +00:00
b04b8c9033 v1.13.0
Some checks failed
Default (tags) / security (push) Successful in 40s
Default (tags) / test (push) Failing after 4m1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-07 04:33:07 +00:00
2130a8a879 feat(docker): add Docker context detection, rootless support, and context-aware buildx registry handling 2026-02-07 04:33:07 +00:00
17de78aed3 v1.12.0
Some checks failed
Default (tags) / security (push) Successful in 40s
Default (tags) / test (push) Failing after 4m0s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-06 16:35:49 +00:00
eddb8cd156 feat(docker): add detailed logging for buildx, build commands, local registry, and local dependency info 2026-02-06 16:35:49 +00:00
cfc7798d49 v1.11.0
Some checks failed
Default (tags) / security (push) Successful in 38s
Default (tags) / test (push) Failing after 3m59s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-06 15:53:32 +00:00
37dfde005e feat(docker): start temporary local registry for buildx dependency resolution and ensure buildx builder uses host network 2026-02-06 15:53:32 +00:00
d1785aab86 v1.10.0
Some checks failed
Default (tags) / security (push) Successful in 33s
Default (tags) / test (push) Failing after 4m0s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-06 15:05:46 +00:00
31fb4aea3c feat(classes.dockerfile): support using a local base image as a build context in buildx commands 2026-02-06 15:05:46 +00:00
907048fa87 v1.9.0
Some checks failed
Default (tags) / security (push) Successful in 50s
Default (tags) / test (push) Failing after 4m0s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-06 14:52:16 +00:00
02b267ee10 feat(build): add verbose build output, progress logging, and timing for builds/tests 2026-02-06 14:52:16 +00:00
16cd0bbd87 v1.8.0
Some checks failed
Default (tags) / security (push) Successful in 39s
Default (tags) / test (push) Failing after 4m0s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-06 14:18:06 +00:00
cc83743f9a feat(build): add optional content-hash based build cache to skip rebuilding unchanged Dockerfiles 2026-02-06 14:18:06 +00:00
7131c16f80 v1.7.0
Some checks failed
Default (tags) / security (push) Successful in 31s
Default (tags) / test (push) Failing after 3m59s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-06 13:39:24 +00:00
02688861f4 feat(cli): add CLI version display using commitinfo 2026-02-06 13:39:24 +00:00
3a8b301b3e v1.6.0
Some checks failed
Default (tags) / security (push) Successful in 39s
Default (tags) / test (push) Failing after 4m0s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-06 13:25:22 +00:00
c09bef33c3 feat(docker): add support for no-cache builds and tag built images for local dependency resolution 2026-02-06 13:25:21 +00:00
32eb0d1d77 v1.5.0
Some checks failed
Default (tags) / security (push) Successful in 39s
Default (tags) / test (push) Failing after 4m0s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-06 11:58:32 +00:00
7cac628975 feat(build): add support for selective builds, platform override and build timeout 2026-02-06 11:58:32 +00:00
14 changed files with 1999 additions and 124 deletions

View File

@@ -1,5 +1,113 @@
# Changelog # Changelog
## 2026-02-07 - 1.15.1 - fix(registry)
use persistent local registry and OCI Distribution API image copy for pushes
- Adds RegistryCopy class implementing the OCI Distribution API to copy images (including multi-arch manifest lists) from the local registry to remote registries.
- All builds now go through a persistent local registry at localhost:5234 with volume storage at .nogit/docker-registry/; Dockerfile.startLocalRegistry mounts this directory.
- Dockerfile.push now delegates to RegistryCopy.copyImage; Dockerfile.needsLocalRegistry() always returns true and config.push is now a no-op (kept for backward compat).
- Multi-platform buildx builds are pushed to the local registry (this.localRegistryTag) during buildx --push; code avoids redundant pushes when images are already pushed by buildx.
- Build, cached build, test, push and pull flows now start/stop the local registry automatically to support multi-platform/image resolution.
- Introduces Dockerfile.getDestRepo and support for config.registryRepoMap to control destination repository mapping.
- Breaking change: registry usage and push behavior changed (config.push ignored and local registry mandatory) — bump major version.
## 2026-02-07 - 1.15.0 - feat(clean)
Make the `clean` command interactive: add smartinteract prompts, docker context detection, and selective resource removal with support for --all and -y auto-confirm
- Adds dependency @push.rocks/smartinteract and exposes it from the plugins module
- Refactors tsdocker.cli.ts clean command to list Docker resources and prompt checkbox selection for running/stopped containers, images, and volumes
- Adds DockerContext detection and logging to determine active Docker context
- Introduces auto-confirm (-y) and --all handling to either auto-accept or allow full-image/volume removal
- Replaces blunt shell commands with safer, interactive selection and adds improved error handling and logging
## 2026-02-07 - 1.14.0 - feat(build)
add level-based parallel builds with --parallel and configurable concurrency
- Introduces --parallel and --parallel=<n> CLI flags to enable level-based parallel Docker builds (default concurrency 4).
- Adds Dockerfile.computeLevels() to group topologically-sorted Dockerfiles into dependency levels.
- Adds Dockerfile.runWithConcurrency() implementing a bounded-concurrency worker-pool (fast-fail via Promise.all).
- Integrates parallel build mode into Dockerfile.buildDockerfiles() and TsDockerManager.build() for both cached and non-cached flows, including tagging and pushing for dependency resolution after each level.
- Adds options.parallel and options.parallelConcurrency to the build interface and wires them through the CLI and manager.
- Updates documentation (readme.hints.md) with usage examples and implementation notes.
## 2026-02-07 - 1.13.0 - feat(docker)
add Docker context detection, rootless support, and context-aware buildx registry handling
- Introduce DockerContext class to detect current Docker context and rootless mode and to log warnings and context info
- Add IDockerContextInfo interface and a new context option on build/config to pass explicit Docker context
- Propagate --context CLI flag into TsDockerManager.prepare so CLI commands can set an explicit Docker context
- Make buildx builder name context-aware (tsdocker-builder-<sanitized-context>) and log builder name/platforms
- Pass isRootless into local registry startup and build pipeline; emit rootless-specific warnings and registry reachability hint
## 2026-02-06 - 1.12.0 - feat(docker)
add detailed logging for buildx, build commands, local registry, and local dependency info
- Log startup of local registry including a note about buildx dependency bridging
- Log constructed build commands and indicate whether buildx or standard docker build is used (including platforms and --push/--load distinctions)
- Emit build mode summary at start of build phase and report local base-image dependency mappings
- Report when --no-cache is enabled and surface buildx setup readiness with configured platforms
- Non-functional change: purely adds informational logging to improve observability during builds
## 2026-02-06 - 1.11.0 - feat(docker)
start temporary local registry for buildx dependency resolution and ensure buildx builder uses host network
- Introduce a temporary local registry (localhost:5234) with start/stop helpers and push support to expose local images for buildx
- Add Dockerfile.needsLocalRegistry to decide when a local registry is required (local base dependencies + multi-platform or platform option)
- Push built images to the local registry and set localRegistryTag on Dockerfile instances for BuildKit build-context usage
- Tag built images in the host daemon for dependent Dockerfiles to resolve local FROM references
- Integrate registry lifecycle into Dockerfile.buildDockerfiles and TsDockerManager build flows (start before builds, stop after)
- Ensure buildx builder is created with --driver-opt network=host and recreate existing builder if it lacks host network to allow registry access from build containers
## 2026-02-06 - 1.10.0 - feat(classes.dockerfile)
support using a local base image as a build context in buildx commands
- Adds --build-context flag mapping base image to docker-image://<localTag> when localBaseImageDependent && localBaseDockerfile are set
- Appends the build context flag to both single-platform and multi-platform docker buildx commands
- Logs an info message indicating the local build context mapping
## 2026-02-06 - 1.9.0 - feat(build)
add verbose build output, progress logging, and timing for builds/tests
- Add 'verbose' option to build/test flows (interfaces, CLI, and method signatures) to allow streaming raw docker build output or run silently
- Log per-item progress for build and test phases (e.g. (1/N) Building/Testing <tag>) and report individual durations
- Return elapsed time from Dockerfile.build() and Dockerfile.test() and aggregate total build/test times in manager
- Introduce formatDuration(ms) helper in logging module to format timings
- Switch from console.log to structured logger calls across cache, manager, dockerfile and push paths
- Use silent exec variants when verbose is false and stream exec when verbose is true
## 2026-02-06 - 1.8.0 - feat(build)
add optional content-hash based build cache to skip rebuilding unchanged Dockerfiles
- Introduce TsDockerCache to compute SHA-256 of Dockerfile content and persist cache to .nogit/tsdocker_support.json
- Add ICacheEntry and ICacheData interfaces and a cached flag to IBuildCommandOptions
- Integrate cached mode in TsDockerManager: skip builds on cache hits, verify image presence, record builds on misses, and still perform dependency tagging
- Expose --cached option in CLI to enable the cached build flow
- Cache records store contentHash, imageId, buildTag and timestamp
## 2026-02-06 - 1.7.0 - feat(cli)
add CLI version display using commitinfo
- Imported commitinfo from './00_commitinfo_data.js' and called tsdockerCli.addVersion(commitinfo.version) to surface package/commit version in the Smartcli instance
- Change made in ts/tsdocker.cli.ts — small user-facing CLI enhancement; no breaking changes
## 2026-02-06 - 1.6.0 - feat(docker)
add support for no-cache builds and tag built images for local dependency resolution
- Introduce IBuildCommandOptions.noCache to control --no-cache behavior
- Propagate noCache from CLI (via cache flag) through TsDockerManager to Dockerfile.build
- Append --no-cache to docker build/buildx commands when noCache is true
- After building an image, tag it with full base image references used by dependent Dockerfiles so their FROM lines resolve to the locally-built image
- Log tagging actions and execute docker tag via smartshellInstance
## 2026-02-06 - 1.5.0 - feat(build)
add support for selective builds, platform override and build timeout
- Introduce IBuildCommandOptions with patterns, platform and timeout to control build behavior
- Allow manager.build() to accept options and build only matching Dockerfiles (including dependencies) preserving topological order
- Add CLI parsing for build/push to accept positional Dockerfile patterns and --platform/--timeout flags
- Support single-platform override via docker buildx and multi-platform buildx detection
- Implement streaming exec with timeout to kill long-running builds and surface timeout errors
## 2026-02-04 - 1.4.3 - fix(dockerfile) ## 2026-02-04 - 1.4.3 - fix(dockerfile)
fix matching of base images to local Dockerfiles by stripping registry prefixes when comparing image references fix matching of base images to local Dockerfiles by stripping registry prefixes when comparing image references

View File

@@ -1,6 +1,6 @@
{ {
"name": "@git.zone/tsdocker", "name": "@git.zone/tsdocker",
"version": "1.4.3", "version": "1.15.1",
"private": false, "private": false,
"description": "develop npm modules cross platform with docker", "description": "develop npm modules cross platform with docker",
"main": "dist_ts/index.js", "main": "dist_ts/index.js",
@@ -47,6 +47,7 @@
"@push.rocks/smartanalytics": "^2.0.15", "@push.rocks/smartanalytics": "^2.0.15",
"@push.rocks/smartcli": "^4.0.20", "@push.rocks/smartcli": "^4.0.20",
"@push.rocks/smartfs": "^1.3.1", "@push.rocks/smartfs": "^1.3.1",
"@push.rocks/smartinteract": "^2.0.16",
"@push.rocks/smartlog": "^3.1.10", "@push.rocks/smartlog": "^3.1.10",
"@push.rocks/smartlog-destination-local": "^9.0.2", "@push.rocks/smartlog-destination-local": "^9.0.2",
"@push.rocks/smartlog-source-ora": "^1.0.9", "@push.rocks/smartlog-source-ora": "^1.0.9",

277
pnpm-lock.yaml generated
View File

@@ -29,6 +29,9 @@ importers:
'@push.rocks/smartfs': '@push.rocks/smartfs':
specifier: ^1.3.1 specifier: ^1.3.1
version: 1.3.1 version: 1.3.1
'@push.rocks/smartinteract':
specifier: ^2.0.16
version: 2.0.16
'@push.rocks/smartlog': '@push.rocks/smartlog':
specifier: ^3.1.10 specifier: ^3.1.10
version: 3.1.10 version: 3.1.10
@@ -618,6 +621,62 @@ packages:
resolution: {integrity: sha512-mfOoUlIw8VBiJYPrl5RZfMzkXC/z7gbSpi2ecycrj/gRWLq2CMV+Q+0G+JPjeOmuNFgg0skEIzkVFzVYFP6URw==} resolution: {integrity: sha512-mfOoUlIw8VBiJYPrl5RZfMzkXC/z7gbSpi2ecycrj/gRWLq2CMV+Q+0G+JPjeOmuNFgg0skEIzkVFzVYFP6URw==}
engines: {node: '>=18.0.0'} engines: {node: '>=18.0.0'}
'@inquirer/checkbox@3.0.1':
resolution: {integrity: sha512-0hm2nrToWUdD6/UHnel/UKGdk1//ke5zGUpHIvk5ZWmaKezlGxZkOJXNSWsdxO/rEqTkbB3lNC2J6nBElV2aAQ==}
engines: {node: '>=18'}
'@inquirer/confirm@4.0.1':
resolution: {integrity: sha512-46yL28o2NJ9doViqOy0VDcoTzng7rAb6yPQKU7VDLqkmbCaH4JqK4yk4XqlzNWy9PVC5pG1ZUXPBQv+VqnYs2w==}
engines: {node: '>=18'}
'@inquirer/core@9.2.1':
resolution: {integrity: sha512-F2VBt7W/mwqEU4bL0RnHNZmC/OxzNx9cOYxHqnXX3MP6ruYvZUZAW9imgN9+h/uBT/oP8Gh888J2OZSbjSeWcg==}
engines: {node: '>=18'}
'@inquirer/editor@3.0.1':
resolution: {integrity: sha512-VA96GPFaSOVudjKFraokEEmUQg/Lub6OXvbIEZU1SDCmBzRkHGhxoFAVaF30nyiB4m5cEbDgiI2QRacXZ2hw9Q==}
engines: {node: '>=18'}
'@inquirer/expand@3.0.1':
resolution: {integrity: sha512-ToG8d6RIbnVpbdPdiN7BCxZGiHOTomOX94C2FaT5KOHupV40tKEDozp12res6cMIfRKrXLJyexAZhWVHgbALSQ==}
engines: {node: '>=18'}
'@inquirer/figures@1.0.15':
resolution: {integrity: sha512-t2IEY+unGHOzAaVM5Xx6DEWKeXlDDcNPeDyUpsRc6CUhBfU3VQOEl+Vssh7VNp1dR8MdUJBWhuObjXCsVpjN5g==}
engines: {node: '>=18'}
'@inquirer/input@3.0.1':
resolution: {integrity: sha512-BDuPBmpvi8eMCxqC5iacloWqv+5tQSJlUafYWUe31ow1BVXjW2a5qe3dh4X/Z25Wp22RwvcaLCc2siHobEOfzg==}
engines: {node: '>=18'}
'@inquirer/number@2.0.1':
resolution: {integrity: sha512-QpR8jPhRjSmlr/mD2cw3IR8HRO7lSVOnqUvQa8scv1Lsr3xoAMMworcYW3J13z3ppjBFBD2ef1Ci6AE5Qn8goQ==}
engines: {node: '>=18'}
'@inquirer/password@3.0.1':
resolution: {integrity: sha512-haoeEPUisD1NeE2IanLOiFr4wcTXGWrBOyAyPZi1FfLJuXOzNmxCJPgUrGYKVh+Y8hfGJenIfz5Wb/DkE9KkMQ==}
engines: {node: '>=18'}
'@inquirer/prompts@6.0.1':
resolution: {integrity: sha512-yl43JD/86CIj3Mz5mvvLJqAOfIup7ncxfJ0Btnl0/v5TouVUyeEdcpknfgc+yMevS/48oH9WAkkw93m7otLb/A==}
engines: {node: '>=18'}
'@inquirer/rawlist@3.0.1':
resolution: {integrity: sha512-VgRtFIwZInUzTiPLSfDXK5jLrnpkuSOh1ctfaoygKAdPqjcjKYmGh6sCY1pb0aGnCGsmhUxoqLDUAU0ud+lGXQ==}
engines: {node: '>=18'}
'@inquirer/search@2.0.1':
resolution: {integrity: sha512-r5hBKZk3g5MkIzLVoSgE4evypGqtOannnB3PKTG9NRZxyFRKcfzrdxXXPcoJQsxJPzvdSU2Rn7pB7lw0GCmGAg==}
engines: {node: '>=18'}
'@inquirer/select@3.0.1':
resolution: {integrity: sha512-lUDGUxPhdWMkN/fHy1Lk7pF3nK1fh/gqeyWXmctefhxLYxlDsc7vsPBEpxrfVGDsVdyYJsiJoD4bJ1b623cV1Q==}
engines: {node: '>=18'}
'@inquirer/type@2.0.0':
resolution: {integrity: sha512-XvJRx+2KR3YXyYtPUUy+qd9i7p+GO9Ko6VIIpWlBrpWwXDv8WLFeHTxz35CfQFUiBMLXlGHhGzys7lqit9gWag==}
engines: {node: '>=18'}
'@isaacs/balanced-match@4.0.1': '@isaacs/balanced-match@4.0.1':
resolution: {integrity: sha512-yzMTt9lEb8Gv7zRioUilSglI0c0smZ9k5D65677DLWLtWJaXIS3CqcGyUFByYKlnUj6TkjLVs54fBl6+TiGQDQ==} resolution: {integrity: sha512-yzMTt9lEb8Gv7zRioUilSglI0c0smZ9k5D65677DLWLtWJaXIS3CqcGyUFByYKlnUj6TkjLVs54fBl6+TiGQDQ==}
engines: {node: 20 || >=22} engines: {node: 20 || >=22}
@@ -842,6 +901,9 @@ packages:
'@push.rocks/smarthash@3.2.6': '@push.rocks/smarthash@3.2.6':
resolution: {integrity: sha512-Mq/WNX0Tjjes3X1gHd/ZBwOOKSrAG/Z3Xoc0OcCm3P20WKpniihkMpsnlE7wGjvpHLi/ZRe/XkB3KC3d5r9X4g==} resolution: {integrity: sha512-Mq/WNX0Tjjes3X1gHd/ZBwOOKSrAG/Z3Xoc0OcCm3P20WKpniihkMpsnlE7wGjvpHLi/ZRe/XkB3KC3d5r9X4g==}
'@push.rocks/smartinteract@2.0.16':
resolution: {integrity: sha512-eltvVRRUKBKd77DSFA4DPY2g4V4teZLNe8A93CDy/WglglYcUjxMoLY/b0DFTWCWKYT+yjk6Fe6p0FRrvX9Yvg==}
'@push.rocks/smartjson@5.2.0': '@push.rocks/smartjson@5.2.0':
resolution: {integrity: sha512-710e8UwovRfPgUtaBHcd6unaODUjV5fjxtGcGCqtaTcmvOV6VpasdVfT66xMDzQmWH2E9ZfHDJeso9HdDQzNQA==} resolution: {integrity: sha512-710e8UwovRfPgUtaBHcd6unaODUjV5fjxtGcGCqtaTcmvOV6VpasdVfT66xMDzQmWH2E9ZfHDJeso9HdDQzNQA==}
@@ -1520,6 +1582,9 @@ packages:
'@types/ms@2.1.0': '@types/ms@2.1.0':
resolution: {integrity: sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==} resolution: {integrity: sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==}
'@types/mute-stream@0.0.4':
resolution: {integrity: sha512-CPM9nzrCPPJHQNA9keH9CVkVI+WR5kMa+7XEs5jcGQ0VoAGnLv242w8lIVgwAEfmE4oufJRaTc9PNLQl0ioAow==}
'@types/node-forge@1.3.14': '@types/node-forge@1.3.14':
resolution: {integrity: sha512-mhVF2BnD4BO+jtOp7z1CdzaK4mbuK0LLQYAvdOLqHTavxFNq4zA1EmYkpnFjP8HOUzedfQkRnp0E2ulSAYSzAw==} resolution: {integrity: sha512-mhVF2BnD4BO+jtOp7z1CdzaK4mbuK0LLQYAvdOLqHTavxFNq4zA1EmYkpnFjP8HOUzedfQkRnp0E2ulSAYSzAw==}
@@ -1589,6 +1654,9 @@ packages:
'@types/which@3.0.4': '@types/which@3.0.4':
resolution: {integrity: sha512-liyfuo/106JdlgSchJzXEQCVArk0CvevqPote8F8HgWgJ3dRCcTHgJIsLDuee0kxk/mhbInzIZk3QWSZJ8R+2w==} resolution: {integrity: sha512-liyfuo/106JdlgSchJzXEQCVArk0CvevqPote8F8HgWgJ3dRCcTHgJIsLDuee0kxk/mhbInzIZk3QWSZJ8R+2w==}
'@types/wrap-ansi@3.0.0':
resolution: {integrity: sha512-ltIpx+kM7g/MLRZfkbL7EsCEjfzCcScLpkg37eXEtx5kmrAKBkTJwd1GIAjDSL8wTpM6Hzn5YO4pSb91BEwu1g==}
'@types/ws@8.18.1': '@types/ws@8.18.1':
resolution: {integrity: sha512-ThVF6DCVhA8kUGy+aazFQ4kXQ7E1Ty7A3ypFOe0IcJV8O/M511G99AW24irKrW56Wt44yG9+ij8FaqoBGkuBXg==} resolution: {integrity: sha512-ThVF6DCVhA8kUGy+aazFQ4kXQ7E1Ty7A3ypFOe0IcJV8O/M511G99AW24irKrW56Wt44yG9+ij8FaqoBGkuBXg==}
@@ -1622,6 +1690,10 @@ packages:
resolution: {integrity: sha1-kQ3lDvzHwJ49gvL4er1rcAwYgYo=} resolution: {integrity: sha1-kQ3lDvzHwJ49gvL4er1rcAwYgYo=}
engines: {node: '>=0.10.0'} engines: {node: '>=0.10.0'}
ansi-escapes@4.3.2:
resolution: {integrity: sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==}
engines: {node: '>=8'}
ansi-regex@5.0.1: ansi-regex@5.0.1:
resolution: {integrity: sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==} resolution: {integrity: sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==}
engines: {node: '>=8'} engines: {node: '>=8'}
@@ -1818,6 +1890,9 @@ packages:
character-entities@2.0.2: character-entities@2.0.2:
resolution: {integrity: sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ==} resolution: {integrity: sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ==}
chardet@0.7.0:
resolution: {integrity: sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA==}
chokidar@4.0.3: chokidar@4.0.3:
resolution: {integrity: sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA==} resolution: {integrity: sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA==}
engines: {node: '>= 14.16.0'} engines: {node: '>= 14.16.0'}
@@ -1843,6 +1918,10 @@ packages:
resolution: {integrity: sha512-ywqV+5MmyL4E7ybXgKys4DugZbX0FC6LnwrhjuykIjnK9k8OQacQ7axGKnjDXWNhns0xot3bZI5h55H8yo9cJg==} resolution: {integrity: sha512-ywqV+5MmyL4E7ybXgKys4DugZbX0FC6LnwrhjuykIjnK9k8OQacQ7axGKnjDXWNhns0xot3bZI5h55H8yo9cJg==}
engines: {node: '>=6'} engines: {node: '>=6'}
cli-width@4.1.0:
resolution: {integrity: sha512-ouuZd4/dm2Sw5Gmqy6bGyNNNe1qt9RpmxveLSO7KcgsTnU7RXfsw+/bukWGo1abgBiMAic068rclZsO4IWmmxQ==}
engines: {node: '>= 12'}
cliui@8.0.1: cliui@8.0.1:
resolution: {integrity: sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==} resolution: {integrity: sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==}
engines: {node: '>=12'} engines: {node: '>=12'}
@@ -2143,6 +2222,10 @@ packages:
extend@3.0.2: extend@3.0.2:
resolution: {integrity: sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==} resolution: {integrity: sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==}
external-editor@3.1.0:
resolution: {integrity: sha512-hMQ4CX1p1izmuLYyZqLMO/qGNw10wSv9QDCPfzXfyFrOaCSSoRfqE1Kf1s5an66J5JZC62NewG+mK49jOCtQew==}
engines: {node: '>=4'}
extract-zip@2.0.1: extract-zip@2.0.1:
resolution: {integrity: sha512-GDhU9ntwuKyGXdZBUgTIe+vXnWj0fppUEtMDL0+idd5Sta8TGpHssn/eusA9mrPr9qNDym6SxAYZjNvCn/9RBg==} resolution: {integrity: sha512-GDhU9ntwuKyGXdZBUgTIe+vXnWj0fppUEtMDL0+idd5Sta8TGpHssn/eusA9mrPr9qNDym6SxAYZjNvCn/9RBg==}
engines: {node: '>= 10.17.0'} engines: {node: '>= 10.17.0'}
@@ -2398,6 +2481,10 @@ packages:
humanize-ms@1.2.1: humanize-ms@1.2.1:
resolution: {integrity: sha1-xG4xWaKT9riW2ikxbYtv6Lt5u+0=} resolution: {integrity: sha1-xG4xWaKT9riW2ikxbYtv6Lt5u+0=}
iconv-lite@0.4.24:
resolution: {integrity: sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==}
engines: {node: '>=0.10.0'}
iconv-lite@0.6.3: iconv-lite@0.6.3:
resolution: {integrity: sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==} resolution: {integrity: sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==}
engines: {node: '>=0.10.0'} engines: {node: '>=0.10.0'}
@@ -2422,6 +2509,10 @@ packages:
ini@1.3.8: ini@1.3.8:
resolution: {integrity: sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==} resolution: {integrity: sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==}
inquirer@11.1.0:
resolution: {integrity: sha512-CmLAZT65GG/v30c+D2Fk8+ceP6pxD6RL+hIUOWAltCmeyEqWYwqu9v76q03OvjyZ3AB0C1Ala2stn1z/rMqGEw==}
engines: {node: '>=18'}
ip-address@10.1.0: ip-address@10.1.0:
resolution: {integrity: sha512-XXADHxXmvT9+CRxhXg56LJovE+bmWnEWB78LB83VZTprKTmaC5QfruXocxzTZ2Kl0DNwKuBdlIhjL8LeY8Sf8Q==} resolution: {integrity: sha512-XXADHxXmvT9+CRxhXg56LJovE+bmWnEWB78LB83VZTprKTmaC5QfruXocxzTZ2Kl0DNwKuBdlIhjL8LeY8Sf8Q==}
engines: {node: '>= 12'} engines: {node: '>= 12'}
@@ -2887,6 +2978,10 @@ packages:
mute-stream@0.0.8: mute-stream@0.0.8:
resolution: {integrity: sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA==} resolution: {integrity: sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA==}
mute-stream@1.0.0:
resolution: {integrity: sha512-avsJQhyd+680gKXyG/sQc0nXaC6rBkPOfyHYcFb9+hdkqQkR9bdnkJ0AMZhke0oesPqIO+mFFJ+IdBc7mst4IA==}
engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0}
nanoid@4.0.2: nanoid@4.0.2:
resolution: {integrity: sha512-7ZtY5KTCNheRGfEFxnedV5zFiORN1+Y1N6zvPTnHQd8ENUvfaDBeuJDZb2bN/oXwXxu3qkTXDzy57W5vAmDTBw==} resolution: {integrity: sha512-7ZtY5KTCNheRGfEFxnedV5zFiORN1+Y1N6zvPTnHQd8ENUvfaDBeuJDZb2bN/oXwXxu3qkTXDzy57W5vAmDTBw==}
engines: {node: ^14 || ^16 || >=18} engines: {node: ^14 || ^16 || >=18}
@@ -2961,6 +3056,10 @@ packages:
resolution: {integrity: sha512-sjYP8QyVWBpBZWD6Vr1M/KwknSw6kJOz41tvGMlwWeClHBtYKTbHMki1PsLZnxKpXMPbTKv9b3pjQu3REib96A==} resolution: {integrity: sha512-sjYP8QyVWBpBZWD6Vr1M/KwknSw6kJOz41tvGMlwWeClHBtYKTbHMki1PsLZnxKpXMPbTKv9b3pjQu3REib96A==}
engines: {node: '>=8'} engines: {node: '>=8'}
os-tmpdir@1.0.2:
resolution: {integrity: sha1-u+Z0BseaqFxc/sdm/lc0VV36EnQ=}
engines: {node: '>=0.10.0'}
p-cancelable@3.0.0: p-cancelable@3.0.0:
resolution: {integrity: sha512-mlVgR3PGuzlo0MmTdk4cXqXWlwQDLnONTAg6sm62XkMJEiRxN3GL3SffkYvqwonbkJBcrI7Uvv5Zh9yjvn2iUw==} resolution: {integrity: sha512-mlVgR3PGuzlo0MmTdk4cXqXWlwQDLnONTAg6sm62XkMJEiRxN3GL3SffkYvqwonbkJBcrI7Uvv5Zh9yjvn2iUw==}
engines: {node: '>=12.20'} engines: {node: '>=12.20'}
@@ -3238,6 +3337,10 @@ packages:
resolution: {integrity: sha512-nLTrUKm2UyiL7rlhapu/Zl45FwNgkZGaCpZbIHajDYgwlJCOzLSk+cIPAnsEqV955GjILJnKbdQC1nVPz+gAYQ==} resolution: {integrity: sha512-nLTrUKm2UyiL7rlhapu/Zl45FwNgkZGaCpZbIHajDYgwlJCOzLSk+cIPAnsEqV955GjILJnKbdQC1nVPz+gAYQ==}
engines: {node: '>= 18'} engines: {node: '>= 18'}
run-async@3.0.0:
resolution: {integrity: sha512-540WwVDOMxA6dN6We19EcT9sc3hkXPw5mzRNGM3FkdN/vtE9NFvj5lFAPNwUDmJjXidm3v7TC1cTE7t17Ulm1Q==}
engines: {node: '>=0.12.0'}
rxjs@7.8.2: rxjs@7.8.2:
resolution: {integrity: sha512-dhKf903U/PQZY6boNNtAGdWbG85WAbjT/1xYoZIC7FAY0yWapOBQVsVrDl58W86//e1VpMNBtRV4MaXfdMySFA==} resolution: {integrity: sha512-dhKf903U/PQZY6boNNtAGdWbG85WAbjT/1xYoZIC7FAY0yWapOBQVsVrDl58W86//e1VpMNBtRV4MaXfdMySFA==}
@@ -3444,6 +3547,10 @@ packages:
tiny-worker@2.3.0: tiny-worker@2.3.0:
resolution: {integrity: sha512-pJ70wq5EAqTAEl9IkGzA+fN0836rycEuz2Cn6yeZ6FRzlVS5IDOkFHpIoEsksPRQV34GDqXm65+OlnZqUSyK2g==} resolution: {integrity: sha512-pJ70wq5EAqTAEl9IkGzA+fN0836rycEuz2Cn6yeZ6FRzlVS5IDOkFHpIoEsksPRQV34GDqXm65+OlnZqUSyK2g==}
tmp@0.0.33:
resolution: {integrity: sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw==}
engines: {node: '>=0.6.0'}
toidentifier@1.0.1: toidentifier@1.0.1:
resolution: {integrity: sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==} resolution: {integrity: sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==}
engines: {node: '>=0.6'} engines: {node: '>=0.6'}
@@ -3487,6 +3594,10 @@ packages:
turndown@7.2.2: turndown@7.2.2:
resolution: {integrity: sha512-1F7db8BiExOKxjSMU2b7if62D/XOyQyZbPKq/nUwopfgnHlqXHqQ0lvfUTeUIr1lZJzOPFn43dODyMSIfvWRKQ==} resolution: {integrity: sha512-1F7db8BiExOKxjSMU2b7if62D/XOyQyZbPKq/nUwopfgnHlqXHqQ0lvfUTeUIr1lZJzOPFn43dODyMSIfvWRKQ==}
type-fest@0.21.3:
resolution: {integrity: sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==}
engines: {node: '>=10'}
type-fest@2.19.0: type-fest@2.19.0:
resolution: {integrity: sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA==} resolution: {integrity: sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA==}
engines: {node: '>=12.20'} engines: {node: '>=12.20'}
@@ -3608,6 +3719,10 @@ packages:
engines: {node: ^18.17.0 || >=20.5.0} engines: {node: ^18.17.0 || >=20.5.0}
hasBin: true hasBin: true
wrap-ansi@6.2.0:
resolution: {integrity: sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==}
engines: {node: '>=8'}
wrap-ansi@7.0.0: wrap-ansi@7.0.0:
resolution: {integrity: sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==} resolution: {integrity: sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==}
engines: {node: '>=10'} engines: {node: '>=10'}
@@ -3675,6 +3790,10 @@ packages:
resolution: {integrity: sha512-Ow9nuGZE+qp1u4JIPvg+uCiUr7xGQWdff7JQSk5VGYTAZMDe2q8lxJ10ygv10qmSj031Ty/6FNJpLO4o1Sgc+w==} resolution: {integrity: sha512-Ow9nuGZE+qp1u4JIPvg+uCiUr7xGQWdff7JQSk5VGYTAZMDe2q8lxJ10ygv10qmSj031Ty/6FNJpLO4o1Sgc+w==}
engines: {node: '>=12'} engines: {node: '>=12'}
yoctocolors-cjs@2.1.3:
resolution: {integrity: sha512-U/PBtDf35ff0D8X8D0jfdzHYEPFxAI7jJlxZXwCSez5M3190m+QobIfh+sWDWSHMCWWJN2AWamkegn6vr6YBTw==}
engines: {node: '>=18'}
zod@3.25.76: zod@3.25.76:
resolution: {integrity: sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==} resolution: {integrity: sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==}
@@ -4602,6 +4721,102 @@ snapshots:
dependencies: dependencies:
happy-dom: 15.11.7 happy-dom: 15.11.7
'@inquirer/checkbox@3.0.1':
dependencies:
'@inquirer/core': 9.2.1
'@inquirer/figures': 1.0.15
'@inquirer/type': 2.0.0
ansi-escapes: 4.3.2
yoctocolors-cjs: 2.1.3
'@inquirer/confirm@4.0.1':
dependencies:
'@inquirer/core': 9.2.1
'@inquirer/type': 2.0.0
'@inquirer/core@9.2.1':
dependencies:
'@inquirer/figures': 1.0.15
'@inquirer/type': 2.0.0
'@types/mute-stream': 0.0.4
'@types/node': 22.19.1
'@types/wrap-ansi': 3.0.0
ansi-escapes: 4.3.2
cli-width: 4.1.0
mute-stream: 1.0.0
signal-exit: 4.1.0
strip-ansi: 6.0.1
wrap-ansi: 6.2.0
yoctocolors-cjs: 2.1.3
'@inquirer/editor@3.0.1':
dependencies:
'@inquirer/core': 9.2.1
'@inquirer/type': 2.0.0
external-editor: 3.1.0
'@inquirer/expand@3.0.1':
dependencies:
'@inquirer/core': 9.2.1
'@inquirer/type': 2.0.0
yoctocolors-cjs: 2.1.3
'@inquirer/figures@1.0.15': {}
'@inquirer/input@3.0.1':
dependencies:
'@inquirer/core': 9.2.1
'@inquirer/type': 2.0.0
'@inquirer/number@2.0.1':
dependencies:
'@inquirer/core': 9.2.1
'@inquirer/type': 2.0.0
'@inquirer/password@3.0.1':
dependencies:
'@inquirer/core': 9.2.1
'@inquirer/type': 2.0.0
ansi-escapes: 4.3.2
'@inquirer/prompts@6.0.1':
dependencies:
'@inquirer/checkbox': 3.0.1
'@inquirer/confirm': 4.0.1
'@inquirer/editor': 3.0.1
'@inquirer/expand': 3.0.1
'@inquirer/input': 3.0.1
'@inquirer/number': 2.0.1
'@inquirer/password': 3.0.1
'@inquirer/rawlist': 3.0.1
'@inquirer/search': 2.0.1
'@inquirer/select': 3.0.1
'@inquirer/rawlist@3.0.1':
dependencies:
'@inquirer/core': 9.2.1
'@inquirer/type': 2.0.0
yoctocolors-cjs: 2.1.3
'@inquirer/search@2.0.1':
dependencies:
'@inquirer/core': 9.2.1
'@inquirer/figures': 1.0.15
'@inquirer/type': 2.0.0
yoctocolors-cjs: 2.1.3
'@inquirer/select@3.0.1':
dependencies:
'@inquirer/core': 9.2.1
'@inquirer/figures': 1.0.15
'@inquirer/type': 2.0.0
ansi-escapes: 4.3.2
yoctocolors-cjs: 2.1.3
'@inquirer/type@2.0.0':
dependencies:
mute-stream: 1.0.0
'@isaacs/balanced-match@4.0.1': {} '@isaacs/balanced-match@4.0.1': {}
'@isaacs/brace-expansion@5.0.0': '@isaacs/brace-expansion@5.0.0':
@@ -5159,6 +5374,13 @@ snapshots:
'@types/through2': 2.0.41 '@types/through2': 2.0.41
through2: 4.0.2 through2: 4.0.2
'@push.rocks/smartinteract@2.0.16':
dependencies:
'@push.rocks/lik': 6.2.2
'@push.rocks/smartobject': 1.0.12
'@push.rocks/smartpromise': 4.2.3
inquirer: 11.1.0
'@push.rocks/smartjson@5.2.0': '@push.rocks/smartjson@5.2.0':
dependencies: dependencies:
'@push.rocks/smartenv': 5.0.13 '@push.rocks/smartenv': 5.0.13
@@ -6200,6 +6422,10 @@ snapshots:
'@types/ms@2.1.0': {} '@types/ms@2.1.0': {}
'@types/mute-stream@0.0.4':
dependencies:
'@types/node': 25.0.9
'@types/node-forge@1.3.14': '@types/node-forge@1.3.14':
dependencies: dependencies:
'@types/node': 22.19.1 '@types/node': 22.19.1
@@ -6269,6 +6495,8 @@ snapshots:
'@types/which@3.0.4': {} '@types/which@3.0.4': {}
'@types/wrap-ansi@3.0.0': {}
'@types/ws@8.18.1': '@types/ws@8.18.1':
dependencies: dependencies:
'@types/node': 22.19.1 '@types/node': 22.19.1
@@ -6308,6 +6536,10 @@ snapshots:
ansi-256-colors@1.1.0: {} ansi-256-colors@1.1.0: {}
ansi-escapes@4.3.2:
dependencies:
type-fest: 0.21.3
ansi-regex@5.0.1: {} ansi-regex@5.0.1: {}
ansi-regex@6.2.2: {} ansi-regex@6.2.2: {}
@@ -6507,6 +6739,8 @@ snapshots:
character-entities@2.0.2: {} character-entities@2.0.2: {}
chardet@0.7.0: {}
chokidar@4.0.3: chokidar@4.0.3:
dependencies: dependencies:
readdirp: 4.1.2 readdirp: 4.1.2
@@ -6529,6 +6763,8 @@ snapshots:
cli-spinners@2.9.2: {} cli-spinners@2.9.2: {}
cli-width@4.1.0: {}
cliui@8.0.1: cliui@8.0.1:
dependencies: dependencies:
string-width: 4.2.3 string-width: 4.2.3
@@ -6881,6 +7117,12 @@ snapshots:
extend@3.0.2: {} extend@3.0.2: {}
external-editor@3.1.0:
dependencies:
chardet: 0.7.0
iconv-lite: 0.4.24
tmp: 0.0.33
extract-zip@2.0.1: extract-zip@2.0.1:
dependencies: dependencies:
debug: 4.4.3 debug: 4.4.3
@@ -7209,6 +7451,10 @@ snapshots:
dependencies: dependencies:
ms: 2.1.3 ms: 2.1.3
iconv-lite@0.4.24:
dependencies:
safer-buffer: 2.1.2
iconv-lite@0.6.3: iconv-lite@0.6.3:
dependencies: dependencies:
safer-buffer: 2.1.2 safer-buffer: 2.1.2
@@ -7233,6 +7479,17 @@ snapshots:
ini@1.3.8: {} ini@1.3.8: {}
inquirer@11.1.0:
dependencies:
'@inquirer/core': 9.2.1
'@inquirer/prompts': 6.0.1
'@inquirer/type': 2.0.0
'@types/mute-stream': 0.0.4
ansi-escapes: 4.3.2
mute-stream: 1.0.0
run-async: 3.0.0
rxjs: 7.8.2
ip-address@10.1.0: {} ip-address@10.1.0: {}
ipaddr.js@1.9.1: {} ipaddr.js@1.9.1: {}
@@ -7844,6 +8101,8 @@ snapshots:
mute-stream@0.0.8: {} mute-stream@0.0.8: {}
mute-stream@1.0.0: {}
nanoid@4.0.2: {} nanoid@4.0.2: {}
negotiator@0.6.3: {} negotiator@0.6.3: {}
@@ -7909,6 +8168,8 @@ snapshots:
strip-ansi: 6.0.1 strip-ansi: 6.0.1
wcwidth: 1.0.1 wcwidth: 1.0.1
os-tmpdir@1.0.2: {}
p-cancelable@3.0.0: {} p-cancelable@3.0.0: {}
p-finally@1.0.0: {} p-finally@1.0.0: {}
@@ -8256,6 +8517,8 @@ snapshots:
transitivePeerDependencies: transitivePeerDependencies:
- supports-color - supports-color
run-async@3.0.0: {}
rxjs@7.8.2: rxjs@7.8.2:
dependencies: dependencies:
tslib: 2.8.1 tslib: 2.8.1
@@ -8542,6 +8805,10 @@ snapshots:
dependencies: dependencies:
esm: 3.2.25 esm: 3.2.25
tmp@0.0.33:
dependencies:
os-tmpdir: 1.0.2
toidentifier@1.0.1: {} toidentifier@1.0.1: {}
token-types@6.1.1: token-types@6.1.1:
@@ -8581,6 +8848,8 @@ snapshots:
dependencies: dependencies:
'@mixmark-io/domino': 2.2.0 '@mixmark-io/domino': 2.2.0
type-fest@0.21.3: {}
type-fest@2.19.0: {} type-fest@2.19.0: {}
type-fest@4.41.0: {} type-fest@4.41.0: {}
@@ -8690,6 +8959,12 @@ snapshots:
dependencies: dependencies:
isexe: 3.1.1 isexe: 3.1.1
wrap-ansi@6.2.0:
dependencies:
ansi-styles: 4.3.0
string-width: 4.2.3
strip-ansi: 6.0.1
wrap-ansi@7.0.0: wrap-ansi@7.0.0:
dependencies: dependencies:
ansi-styles: 4.3.0 ansi-styles: 4.3.0
@@ -8738,6 +9013,8 @@ snapshots:
buffer-crc32: 0.2.13 buffer-crc32: 0.2.13
pend: 1.2.0 pend: 1.2.0
yoctocolors-cjs@2.1.3: {}
zod@3.25.76: {} zod@3.25.76: {}
zwitch@2.0.4: {} zwitch@2.0.4: {}

View File

@@ -96,6 +96,30 @@ ts/
- `@push.rocks/smartcli`: CLI framework - `@push.rocks/smartcli`: CLI framework
- `@push.rocks/projectinfo`: Project metadata - `@push.rocks/projectinfo`: Project metadata
## Parallel Builds
`--parallel` flag enables level-based parallel Docker builds:
```bash
tsdocker build --parallel # parallel, default concurrency (4)
tsdocker build --parallel=8 # parallel, concurrency 8
tsdocker build --parallel --cached # works with both modes
```
Implementation: `Dockerfile.computeLevels()` groups topologically sorted Dockerfiles into dependency levels. `Dockerfile.runWithConcurrency()` provides a worker-pool pattern for bounded concurrency. Both are public static methods on the `Dockerfile` class. The parallel logic exists in both `Dockerfile.buildDockerfiles()` (standard mode) and `TsDockerManager.build()` (cached mode).
## OCI Distribution API Push (v1.15.1+)
All builds now go through a persistent local registry (`localhost:5234`) with volume storage at `.nogit/docker-registry/`. Pushes use the `RegistryCopy` class (`ts/classes.registrycopy.ts`) which implements the OCI Distribution API to copy images (including multi-arch manifest lists) from the local registry to remote registries. This replaces the old `docker tag + docker push` approach that only worked for single-platform images.
Key classes:
- `RegistryCopy` — HTTP-based OCI image copy (auth, blob transfer, manifest handling)
- `Dockerfile.push()` — Now delegates to `RegistryCopy.copyImage()`
- `Dockerfile.needsLocalRegistry()` — Always returns true
- `Dockerfile.startLocalRegistry()` — Uses persistent volume mount
The `config.push` field is now a no-op (kept for backward compat).
## Build Status ## Build Status
- Build: ✅ Passes - Build: ✅ Passes

View File

@@ -3,6 +3,6 @@
*/ */
export const commitinfo = { export const commitinfo = {
name: '@git.zone/tsdocker', name: '@git.zone/tsdocker',
version: '1.4.3', version: '1.15.1',
description: 'develop npm modules cross platform with docker' description: 'develop npm modules cross platform with docker'
} }

View File

@@ -0,0 +1,69 @@
import * as plugins from './tsdocker.plugins.js';
import { logger } from './tsdocker.logging.js';
import type { IDockerContextInfo } from './interfaces/index.js';
const smartshellInstance = new plugins.smartshell.Smartshell({ executor: 'bash' });
export class DockerContext {
  /** Most recently detected context info; stays null until detect() has run. */
  public contextInfo: IDockerContextInfo | null = null;

  /** Sets DOCKER_CONTEXT env var for explicit context selection. */
  public setContext(contextName: string): void {
    process.env.DOCKER_CONTEXT = contextName;
    logger.log('info', `Docker context explicitly set to: ${contextName}`);
  }

  /**
   * Detects current Docker context via `docker context inspect` and rootless
   * mode via `docker info`. Falls back to name "default" / endpoint "unknown"
   * when either command fails or its JSON output cannot be parsed.
   */
  public async detect(): Promise<IDockerContextInfo> {
    let ctxName = 'default';
    let ctxEndpoint = 'unknown';
    const inspectRes = await smartshellInstance.execSilent(
      `docker context inspect --format '{{json .}}'`
    );
    if (inspectRes.exitCode === 0 && inspectRes.stdout) {
      try {
        const decoded = JSON.parse(inspectRes.stdout.trim());
        // `docker context inspect` may emit a one-element array
        const record = Array.isArray(decoded) ? decoded[0] : decoded;
        ctxName = record.Name || 'default';
        ctxEndpoint = record.Endpoints?.docker?.Host || 'unknown';
      } catch {
        /* fallback to defaults */
      }
    }
    let rootless = false;
    const infoRes = await smartshellInstance.execSilent(
      `docker info --format '{{json .SecurityOptions}}'`
    );
    if (infoRes.exitCode === 0 && infoRes.stdout) {
      rootless = infoRes.stdout.includes('name=rootless');
    }
    this.contextInfo = {
      name: ctxName,
      endpoint: ctxEndpoint,
      isRootless: rootless,
      dockerHost: process.env.DOCKER_HOST,
    };
    return this.contextInfo;
  }

  /** Logs context info prominently. No-op before detect() has populated contextInfo. */
  public logContextInfo(): void {
    if (!this.contextInfo) return;
    const { name, endpoint, isRootless, dockerHost } = this.contextInfo;
    logger.log('info', '=== DOCKER CONTEXT ===');
    logger.log('info', `Context: ${name}`);
    logger.log('info', `Endpoint: ${endpoint}`);
    if (dockerHost) logger.log('info', `DOCKER_HOST: ${dockerHost}`);
    logger.log('info', `Rootless: ${isRootless ? 'yes' : 'no'}`);
  }

  /** Emits rootless-specific warnings; silent when the daemon is not rootless. */
  public logRootlessWarnings(): void {
    if (!this.contextInfo?.isRootless) return;
    logger.log('warn', '[rootless] network=host in buildx is namespaced by rootlesskit');
    logger.log('warn', '[rootless] Local registry may have localhost vs 127.0.0.1 resolution quirks');
  }

  /** Returns context-aware builder name: tsdocker-builder-<context> */
  public getBuilderName(): string {
    // Sanitize so the name is a valid buildx builder identifier
    const safeName = (this.contextInfo?.name || 'default').replace(/[^a-zA-Z0-9_-]/g, '-');
    return `tsdocker-builder-${safeName}`;
  }
}

View File

@@ -1,8 +1,9 @@
import * as plugins from './tsdocker.plugins.js'; import * as plugins from './tsdocker.plugins.js';
import * as paths from './tsdocker.paths.js'; import * as paths from './tsdocker.paths.js';
import { logger } from './tsdocker.logging.js'; import { logger, formatDuration } from './tsdocker.logging.js';
import { DockerRegistry } from './classes.dockerregistry.js'; import { DockerRegistry } from './classes.dockerregistry.js';
import type { IDockerfileOptions, ITsDockerConfig } from './interfaces/index.js'; import { RegistryCopy } from './classes.registrycopy.js';
import type { IDockerfileOptions, ITsDockerConfig, IBuildCommandOptions } from './interfaces/index.js';
import type { TsDockerManager } from './classes.tsdockermanager.js'; import type { TsDockerManager } from './classes.tsdockermanager.js';
import * as fs from 'fs'; import * as fs from 'fs';
@@ -10,6 +11,10 @@ const smartshellInstance = new plugins.smartshell.Smartshell({
executor: 'bash', executor: 'bash',
}); });
const LOCAL_REGISTRY_PORT = 5234;
const LOCAL_REGISTRY_HOST = `localhost:${LOCAL_REGISTRY_PORT}`;
const LOCAL_REGISTRY_CONTAINER = 'tsdocker-local-registry';
/** /**
* Class Dockerfile represents a Dockerfile on disk * Class Dockerfile represents a Dockerfile on disk
*/ */
@@ -26,8 +31,10 @@ export class Dockerfile {
.map(entry => plugins.path.join(paths.cwd, entry.name)); .map(entry => plugins.path.join(paths.cwd, entry.name));
const readDockerfilesArray: Dockerfile[] = []; const readDockerfilesArray: Dockerfile[] = [];
logger.log('info', `found ${fileTree.length} Dockerfiles:`); logger.log('info', `found ${fileTree.length} Dockerfile(s):`);
console.log(fileTree); for (const filePath of fileTree) {
logger.log('info', ` ${plugins.path.basename(filePath)}`);
}
for (const dockerfilePath of fileTree) { for (const dockerfilePath of fileTree) {
const myDockerfile = new Dockerfile(managerRef, { const myDockerfile = new Dockerfile(managerRef, {
@@ -133,13 +140,199 @@ export class Dockerfile {
return sortedDockerfileArray; return sortedDockerfileArray;
} }
/** Local registry is always needed — it's the canonical store for all built images. */
public static needsLocalRegistry(
_dockerfiles?: Dockerfile[],
_options?: { platform?: string },
): boolean {
return true;
}
/** Starts a persistent registry:2 container on port 5234 with volume storage. */
public static async startLocalRegistry(isRootless?: boolean): Promise<void> {
// Ensure persistent storage directory exists
const registryDataDir = plugins.path.join(paths.cwd, '.nogit', 'docker-registry');
fs.mkdirSync(registryDataDir, { recursive: true });
await smartshellInstance.execSilent(
`docker rm -f ${LOCAL_REGISTRY_CONTAINER} 2>/dev/null || true`
);
const result = await smartshellInstance.execSilent(
`docker run -d --name ${LOCAL_REGISTRY_CONTAINER} -p ${LOCAL_REGISTRY_PORT}:5000 -v "${registryDataDir}:/var/lib/registry" registry:2`
);
if (result.exitCode !== 0) {
throw new Error(`Failed to start local registry: ${result.stderr || result.stdout}`);
}
// registry:2 starts near-instantly; brief wait for readiness
await new Promise(resolve => setTimeout(resolve, 1000));
logger.log('info', `Started local registry at ${LOCAL_REGISTRY_HOST} (persistent storage at .nogit/docker-registry/)`);
if (isRootless) {
logger.log('warn', `[rootless] Registry on port ${LOCAL_REGISTRY_PORT} — if buildx cannot reach localhost:${LOCAL_REGISTRY_PORT}, try 127.0.0.1:${LOCAL_REGISTRY_PORT}`);
}
}
/** Stops and removes the temporary local registry container. */
public static async stopLocalRegistry(): Promise<void> {
await smartshellInstance.execSilent(
`docker rm -f ${LOCAL_REGISTRY_CONTAINER} 2>/dev/null || true`
);
logger.log('info', 'Stopped local registry');
}
/** Pushes a built image to the local registry for buildx consumption. */
public static async pushToLocalRegistry(dockerfile: Dockerfile): Promise<void> {
const registryTag = `${LOCAL_REGISTRY_HOST}/${dockerfile.buildTag}`;
await smartshellInstance.execSilent(`docker tag ${dockerfile.buildTag} ${registryTag}`);
const result = await smartshellInstance.execSilent(`docker push ${registryTag}`);
if (result.exitCode !== 0) {
throw new Error(`Failed to push to local registry: ${result.stderr || result.stdout}`);
}
dockerfile.localRegistryTag = registryTag;
logger.log('info', `Pushed ${dockerfile.buildTag} to local registry as ${registryTag}`);
}
/**
* Groups topologically sorted Dockerfiles into dependency levels.
* Level 0 = no local dependencies; level N = depends on something in level N-1.
* Images within the same level are independent and can build in parallel.
*/
public static computeLevels(sortedDockerfiles: Dockerfile[]): Dockerfile[][] {
const levelMap = new Map<Dockerfile, number>();
for (const df of sortedDockerfiles) {
if (!df.localBaseImageDependent || !df.localBaseDockerfile) {
levelMap.set(df, 0);
} else {
const depLevel = levelMap.get(df.localBaseDockerfile) ?? 0;
levelMap.set(df, depLevel + 1);
}
}
const maxLevel = Math.max(...Array.from(levelMap.values()), 0);
const levels: Dockerfile[][] = [];
for (let l = 0; l <= maxLevel; l++) {
levels.push(sortedDockerfiles.filter(df => levelMap.get(df) === l));
}
return levels;
}
/**
* Runs async tasks with bounded concurrency (worker-pool pattern).
* Fast-fail: if any task throws, Promise.all rejects immediately.
*/
public static async runWithConcurrency<T>(
tasks: (() => Promise<T>)[],
concurrency: number,
): Promise<T[]> {
const results: T[] = new Array(tasks.length);
let nextIndex = 0;
async function worker(): Promise<void> {
while (true) {
const idx = nextIndex++;
if (idx >= tasks.length) break;
results[idx] = await tasks[idx]();
}
}
const workers = Array.from(
{ length: Math.min(concurrency, tasks.length) },
() => worker(),
);
await Promise.all(workers);
return results;
}
/** /**
* Builds the corresponding real docker image for each Dockerfile class instance * Builds the corresponding real docker image for each Dockerfile class instance
*/ */
public static async buildDockerfiles(sortedArrayArg: Dockerfile[]): Promise<Dockerfile[]> { public static async buildDockerfiles(
for (const dockerfileArg of sortedArrayArg) { sortedArrayArg: Dockerfile[],
await dockerfileArg.build(); options?: { platform?: string; timeout?: number; noCache?: boolean; verbose?: boolean; isRootless?: boolean; parallel?: boolean; parallelConcurrency?: number },
): Promise<Dockerfile[]> {
const total = sortedArrayArg.length;
const overallStart = Date.now();
await Dockerfile.startLocalRegistry(options?.isRootless);
try {
if (options?.parallel) {
// === PARALLEL MODE: build independent images concurrently within each level ===
const concurrency = options.parallelConcurrency ?? 4;
const levels = Dockerfile.computeLevels(sortedArrayArg);
logger.log('info', `Parallel build: ${levels.length} level(s), concurrency ${concurrency}`);
for (let l = 0; l < levels.length; l++) {
const level = levels[l];
logger.log('info', ` Level ${l} (${level.length}): ${level.map(df => df.cleanTag).join(', ')}`);
} }
let built = 0;
for (let l = 0; l < levels.length; l++) {
const level = levels[l];
logger.log('info', `--- Level ${l}: building ${level.length} image(s) in parallel ---`);
const tasks = level.map((df) => {
const myIndex = ++built;
return async () => {
const progress = `(${myIndex}/${total})`;
logger.log('info', `${progress} Building ${df.cleanTag}...`);
const elapsed = await df.build(options);
logger.log('ok', `${progress} Built ${df.cleanTag} in ${formatDuration(elapsed)}`);
return df;
};
});
await Dockerfile.runWithConcurrency(tasks, concurrency);
// After the entire level completes, push all to local registry + tag for deps
for (const df of level) {
// Tag in host daemon for dependency resolution
const dependentBaseImages = new Set<string>();
for (const other of sortedArrayArg) {
if (other.localBaseDockerfile === df && other.baseImage !== df.buildTag) {
dependentBaseImages.add(other.baseImage);
}
}
for (const fullTag of dependentBaseImages) {
logger.log('info', `Tagging ${df.buildTag} as ${fullTag} for local dependency resolution`);
await smartshellInstance.exec(`docker tag ${df.buildTag} ${fullTag}`);
}
// Push ALL images to local registry (skip if already pushed via buildx)
if (!df.localRegistryTag) {
await Dockerfile.pushToLocalRegistry(df);
}
}
}
} else {
// === SEQUENTIAL MODE: build one at a time ===
for (let i = 0; i < total; i++) {
const dockerfileArg = sortedArrayArg[i];
const progress = `(${i + 1}/${total})`;
logger.log('info', `${progress} Building ${dockerfileArg.cleanTag}...`);
const elapsed = await dockerfileArg.build(options);
logger.log('ok', `${progress} Built ${dockerfileArg.cleanTag} in ${formatDuration(elapsed)}`);
// Tag in host daemon for standard docker build compatibility
const dependentBaseImages = new Set<string>();
for (const other of sortedArrayArg) {
if (other.localBaseDockerfile === dockerfileArg && other.baseImage !== dockerfileArg.buildTag) {
dependentBaseImages.add(other.baseImage);
}
}
for (const fullTag of dependentBaseImages) {
logger.log('info', `Tagging ${dockerfileArg.buildTag} as ${fullTag} for local dependency resolution`);
await smartshellInstance.exec(`docker tag ${dockerfileArg.buildTag} ${fullTag}`);
}
// Push ALL images to local registry (skip if already pushed via buildx)
if (!dockerfileArg.localRegistryTag) {
await Dockerfile.pushToLocalRegistry(dockerfileArg);
}
}
}
} finally {
await Dockerfile.stopLocalRegistry();
}
logger.log('info', `Total build time: ${formatDuration(Date.now() - overallStart)}`);
return sortedArrayArg; return sortedArrayArg;
} }
@@ -147,9 +340,19 @@ export class Dockerfile {
* Tests all Dockerfiles by calling Dockerfile.test() * Tests all Dockerfiles by calling Dockerfile.test()
*/ */
public static async testDockerfiles(sortedArrayArg: Dockerfile[]): Promise<Dockerfile[]> { public static async testDockerfiles(sortedArrayArg: Dockerfile[]): Promise<Dockerfile[]> {
for (const dockerfileArg of sortedArrayArg) { const total = sortedArrayArg.length;
await dockerfileArg.test(); const overallStart = Date.now();
for (let i = 0; i < total; i++) {
const dockerfileArg = sortedArrayArg[i];
const progress = `(${i + 1}/${total})`;
logger.log('info', `${progress} Testing ${dockerfileArg.cleanTag}...`);
const elapsed = await dockerfileArg.test();
logger.log('ok', `${progress} Tested ${dockerfileArg.cleanTag} in ${formatDuration(elapsed)}`);
} }
logger.log('info', `Total test time: ${formatDuration(Date.now() - overallStart)}`);
return sortedArrayArg; return sortedArrayArg;
} }
@@ -328,6 +531,7 @@ export class Dockerfile {
public baseImage: string; public baseImage: string;
public localBaseImageDependent: boolean; public localBaseImageDependent: boolean;
public localBaseDockerfile!: Dockerfile; public localBaseDockerfile!: Dockerfile;
public localRegistryTag?: string;
constructor(managerRefArg: TsDockerManager, options: IDockerfileOptions) { constructor(managerRefArg: TsDockerManager, options: IDockerfileOptions) {
this.managerRef = managerRefArg; this.managerRef = managerRefArg;
@@ -362,73 +566,112 @@ export class Dockerfile {
/** /**
* Builds the Dockerfile * Builds the Dockerfile
*/ */
public async build(): Promise<void> { public async build(options?: { platform?: string; timeout?: number; noCache?: boolean; verbose?: boolean }): Promise<number> {
logger.log('info', 'now building Dockerfile for ' + this.cleanTag); const startTime = Date.now();
const buildArgsString = await Dockerfile.getDockerBuildArgs(this.managerRef); const buildArgsString = await Dockerfile.getDockerBuildArgs(this.managerRef);
const config = this.managerRef.config; const config = this.managerRef.config;
const platformOverride = options?.platform;
const timeout = options?.timeout;
const noCacheFlag = options?.noCache ? ' --no-cache' : '';
const verbose = options?.verbose ?? false;
let buildContextFlag = '';
if (this.localBaseImageDependent && this.localBaseDockerfile) {
const fromImage = this.baseImage;
if (this.localBaseDockerfile.localRegistryTag) {
// BuildKit pulls from the local registry (reachable via host network)
const registryTag = this.localBaseDockerfile.localRegistryTag;
buildContextFlag = ` --build-context "${fromImage}=docker-image://${registryTag}"`;
logger.log('info', `Using local registry build context: ${fromImage} -> docker-image://${registryTag}`);
}
}
let buildCommand: string; let buildCommand: string;
// Check if multi-platform build is needed if (platformOverride) {
if (config.platforms && config.platforms.length > 1) { // Single platform override via buildx
// Multi-platform build using buildx buildCommand = `docker buildx build --platform ${platformOverride}${noCacheFlag}${buildContextFlag} --load -t ${this.buildTag} -f ${this.filePath} ${buildArgsString} .`;
logger.log('info', `Build: buildx --platform ${platformOverride} --load`);
} else if (config.platforms && config.platforms.length > 1) {
// Multi-platform build using buildx — always push to local registry
const platformString = config.platforms.join(','); const platformString = config.platforms.join(',');
buildCommand = `docker buildx build --platform ${platformString} -t ${this.buildTag} -f ${this.filePath} ${buildArgsString} .`; const localTag = `${LOCAL_REGISTRY_HOST}/${this.buildTag}`;
buildCommand = `docker buildx build --platform ${platformString}${noCacheFlag}${buildContextFlag} -t ${localTag} -f ${this.filePath} ${buildArgsString} --push .`;
if (config.push) { this.localRegistryTag = localTag;
buildCommand += ' --push'; logger.log('info', `Build: buildx --platform ${platformString} --push to local registry`);
} else {
buildCommand += ' --load';
}
} else { } else {
// Standard build // Standard build
const versionLabel = this.managerRef.projectInfo?.npm?.version || 'unknown'; const versionLabel = this.managerRef.projectInfo?.npm?.version || 'unknown';
buildCommand = `docker build --label="version=${versionLabel}" -t ${this.buildTag} -f ${this.filePath} ${buildArgsString} .`; buildCommand = `docker build --label="version=${versionLabel}"${noCacheFlag} -t ${this.buildTag} -f ${this.filePath} ${buildArgsString} .`;
logger.log('info', 'Build: docker build (standard)');
} }
const result = await smartshellInstance.exec(buildCommand); if (timeout) {
// Use streaming execution with timeout
const streaming = verbose
? await smartshellInstance.execStreaming(buildCommand)
: await smartshellInstance.execStreamingSilent(buildCommand);
const timeoutPromise = new Promise<never>((_, reject) => {
setTimeout(() => {
streaming.childProcess.kill();
reject(new Error(`Build timed out after ${timeout}s for ${this.cleanTag}`));
}, timeout * 1000);
});
const result = await Promise.race([streaming.finalPromise, timeoutPromise]);
if (result.exitCode !== 0) { if (result.exitCode !== 0) {
logger.log('error', `Build failed for ${this.cleanTag}`); logger.log('error', `Build failed for ${this.cleanTag}`);
console.log(result.stdout);
throw new Error(`Build failed for ${this.cleanTag}`); throw new Error(`Build failed for ${this.cleanTag}`);
} }
} else {
const result = verbose
? await smartshellInstance.exec(buildCommand)
: await smartshellInstance.execSilent(buildCommand);
if (result.exitCode !== 0) {
logger.log('error', `Build failed for ${this.cleanTag}`);
if (!verbose && result.stdout) {
logger.log('error', `Build output:\n${result.stdout}`);
}
throw new Error(`Build failed for ${this.cleanTag}`);
}
}
logger.log('ok', `Built ${this.cleanTag}`); return Date.now() - startTime;
} }
/** /**
* Pushes the Dockerfile to a registry * Pushes the Dockerfile to a registry using OCI Distribution API copy
* from the local registry to the remote registry.
*/ */
public async push(dockerRegistryArg: DockerRegistry, versionSuffix?: string): Promise<void> { public async push(dockerRegistryArg: DockerRegistry, versionSuffix?: string): Promise<void> {
this.pushTag = Dockerfile.getDockerTagString( const destRepo = this.getDestRepo(dockerRegistryArg.registryUrl);
this.managerRef, const destTag = versionSuffix ? `${this.version}_${versionSuffix}` : this.version;
dockerRegistryArg.registryUrl, const registryCopy = new RegistryCopy();
this.pushTag = `${dockerRegistryArg.registryUrl}/${destRepo}:${destTag}`;
logger.log('info', `Pushing ${this.pushTag} via OCI copy from local registry...`);
await registryCopy.copyImage(
LOCAL_REGISTRY_HOST,
this.repo, this.repo,
this.version, this.version,
versionSuffix dockerRegistryArg.registryUrl,
destRepo,
destTag,
{ username: dockerRegistryArg.username, password: dockerRegistryArg.password },
); );
await smartshellInstance.exec(`docker tag ${this.buildTag} ${this.pushTag}`);
const pushResult = await smartshellInstance.exec(`docker push ${this.pushTag}`);
if (pushResult.exitCode !== 0) {
logger.log('error', `Push failed for ${this.pushTag}`);
throw new Error(`Push failed for ${this.pushTag}`);
}
// Get image digest
const inspectResult = await smartshellInstance.exec(
`docker inspect --format="{{index .RepoDigests 0}}" ${this.pushTag}`
);
if (inspectResult.exitCode === 0 && inspectResult.stdout.includes('@')) {
const imageDigest = inspectResult.stdout.split('@')[1]?.trim();
console.log(`The image ${this.pushTag} has digest ${imageDigest}`);
}
logger.log('ok', `Pushed ${this.pushTag}`); logger.log('ok', `Pushed ${this.pushTag}`);
} }
/**
* Returns the destination repository for a given registry URL,
* using registryRepoMap if configured, otherwise the default repo.
*/
private getDestRepo(registryUrl: string): string {
const config = this.managerRef.config;
return config.registryRepoMap?.[registryUrl] || this.repo;
}
/** /**
* Pulls the Dockerfile from a registry * Pulls the Dockerfile from a registry
*/ */
@@ -448,20 +691,22 @@ export class Dockerfile {
} }
/** /**
* Tests the Dockerfile by running a test script if it exists * Tests the Dockerfile by running a test script if it exists.
* For multi-platform builds, uses the local registry tag so Docker can auto-pull.
*/ */
public async test(): Promise<void> { public async test(): Promise<number> {
const startTime = Date.now();
const testDir = this.managerRef.config.testDir || plugins.path.join(paths.cwd, 'test'); const testDir = this.managerRef.config.testDir || plugins.path.join(paths.cwd, 'test');
const testFile = plugins.path.join(testDir, 'test_' + this.version + '.sh'); const testFile = plugins.path.join(testDir, 'test_' + this.version + '.sh');
// Use local registry tag for multi-platform images (not in daemon), otherwise buildTag
const imageRef = this.localRegistryTag || this.buildTag;
const testFileExists = fs.existsSync(testFile); const testFileExists = fs.existsSync(testFile);
if (testFileExists) { if (testFileExists) {
logger.log('info', `Running tests for ${this.cleanTag}`);
// Run tests in container // Run tests in container
await smartshellInstance.exec( await smartshellInstance.exec(
`docker run --name tsdocker_test_container --entrypoint="bash" ${this.buildTag} -c "mkdir /tsdocker_test"` `docker run --name tsdocker_test_container --entrypoint="bash" ${imageRef} -c "mkdir /tsdocker_test"`
); );
await smartshellInstance.exec(`docker cp ${testFile} tsdocker_test_container:/tsdocker_test/test.sh`); await smartshellInstance.exec(`docker cp ${testFile} tsdocker_test_container:/tsdocker_test/test.sh`);
await smartshellInstance.exec(`docker commit tsdocker_test_container tsdocker_test_image`); await smartshellInstance.exec(`docker commit tsdocker_test_container tsdocker_test_image`);
@@ -477,11 +722,11 @@ export class Dockerfile {
if (testResult.exitCode !== 0) { if (testResult.exitCode !== 0) {
throw new Error(`Tests failed for ${this.cleanTag}`); throw new Error(`Tests failed for ${this.cleanTag}`);
} }
logger.log('ok', `Tests passed for ${this.cleanTag}`);
} else { } else {
logger.log('warn', `Skipping tests for ${this.cleanTag} because no test file was found at ${testFile}`); logger.log('warn', `Skipping tests for ${this.cleanTag} no test file at ${testFile}`);
} }
return Date.now() - startTime;
} }
/** /**

511
ts/classes.registrycopy.ts Normal file
View File

@@ -0,0 +1,511 @@
import * as fs from 'fs';
import * as os from 'os';
import * as path from 'path';
import { logger } from './tsdocker.logging.js';
/** Username/password pair used for registry Basic auth and token requests. */
interface IRegistryCredentials {
  username: string;
  password: string;
}

/** Maps "<registry>/<scope>" keys to cached Bearer tokens with absolute expiry (ms since epoch). */
interface ITokenCache {
  [scope: string]: { token: string; expiry: number };
}
/**
* OCI Distribution API client for copying images between registries.
* Supports manifest lists (multi-arch) and single-platform manifests.
* Uses native fetch (Node 18+).
*/
export class RegistryCopy {
private tokenCache: ITokenCache = {};
/**
* Reads Docker credentials from ~/.docker/config.json for a given registry.
* Supports base64-encoded "auth" field in the config.
*/
public static getDockerConfigCredentials(registryUrl: string): IRegistryCredentials | null {
try {
const configPath = path.join(os.homedir(), '.docker', 'config.json');
if (!fs.existsSync(configPath)) return null;
const config = JSON.parse(fs.readFileSync(configPath, 'utf-8'));
const auths = config.auths || {};
// Try exact match first, then common variations
const keys = [
registryUrl,
`https://${registryUrl}`,
`http://${registryUrl}`,
];
// Docker Hub special cases
if (registryUrl === 'docker.io' || registryUrl === 'registry-1.docker.io') {
keys.push(
'https://index.docker.io/v1/',
'https://index.docker.io/v2/',
'index.docker.io',
'docker.io',
'registry-1.docker.io',
);
}
for (const key of keys) {
if (auths[key]?.auth) {
const decoded = Buffer.from(auths[key].auth, 'base64').toString('utf-8');
const colonIndex = decoded.indexOf(':');
if (colonIndex > 0) {
return {
username: decoded.substring(0, colonIndex),
password: decoded.substring(colonIndex + 1),
};
}
}
}
return null;
} catch {
return null;
}
}
/**
* Returns the API base URL for a registry.
* Docker Hub uses registry-1.docker.io as API endpoint.
*/
private getRegistryApiBase(registry: string): string {
if (registry === 'docker.io' || registry === 'index.docker.io') {
return 'https://registry-1.docker.io';
}
// Local registries (localhost) use HTTP
if (registry.startsWith('localhost') || registry.startsWith('127.0.0.1')) {
return `http://${registry}`;
}
return `https://${registry}`;
}
  /**
   * Obtains a Bearer token for registry operations.
   * Follows the standard Docker auth flow:
   * GET /v2/ → 401 with Www-Authenticate → request token
   *
   * Tokens are cached per registry+scope for 5 minutes. Local registries
   * (localhost/127.0.0.1) and registries whose /v2/ probe succeeds or lacks
   * a realm are treated as auth-free and resolve to null.
   * Any network/auth error is logged as a warning and resolves to null.
   */
  private async getToken(
    registry: string,
    repo: string,
    actions: string,
    credentials?: IRegistryCredentials | null,
  ): Promise<string | null> {
    // Scope string per Docker token auth, e.g. "repository:library/nginx:pull,push"
    const scope = `repository:${repo}:${actions}`;
    // Serve from cache while the token is still fresh
    const cached = this.tokenCache[`${registry}/${scope}`];
    if (cached && cached.expiry > Date.now()) {
      return cached.token;
    }
    const apiBase = this.getRegistryApiBase(registry);
    // Local registries typically don't need auth
    if (registry.startsWith('localhost') || registry.startsWith('127.0.0.1')) {
      return null;
    }
    try {
      // Probe the registry root; a 401 carries the auth realm/service in Www-Authenticate
      const checkResp = await fetch(`${apiBase}/v2/`, { method: 'GET' });
      if (checkResp.ok) return null; // No auth needed
      const wwwAuth = checkResp.headers.get('www-authenticate') || '';
      const realmMatch = wwwAuth.match(/realm="([^"]+)"/);
      const serviceMatch = wwwAuth.match(/service="([^"]+)"/);
      if (!realmMatch) return null;
      const realm = realmMatch[1];
      const service = serviceMatch ? serviceMatch[1] : '';
      // Build the token endpoint URL: realm?scope=...&service=...
      const tokenUrl = new URL(realm);
      tokenUrl.searchParams.set('scope', scope);
      if (service) tokenUrl.searchParams.set('service', service);
      const headers: Record<string, string> = {};
      // Explicit credentials win; otherwise fall back to ~/.docker/config.json
      const creds = credentials || RegistryCopy.getDockerConfigCredentials(registry);
      if (creds) {
        headers['Authorization'] = 'Basic ' + Buffer.from(`${creds.username}:${creds.password}`).toString('base64');
      }
      const tokenResp = await fetch(tokenUrl.toString(), { headers });
      if (!tokenResp.ok) {
        const body = await tokenResp.text();
        throw new Error(`Token request failed (${tokenResp.status}): ${body}`);
      }
      const tokenData = await tokenResp.json() as any;
      // Some auth servers use "token", others "access_token"
      const token = tokenData.token || tokenData.access_token;
      if (token) {
        // Cache for 5 minutes (conservative)
        this.tokenCache[`${registry}/${scope}`] = {
          token,
          expiry: Date.now() + 5 * 60 * 1000,
        };
      }
      // NOTE(review): if the auth server returns neither field, this resolves to
      // undefined rather than null — callers appear to treat it as falsy; confirm.
      return token;
    } catch (err) {
      // Auth failures degrade to anonymous access rather than aborting the copy
      logger.log('warn', `Auth for ${registry}: ${(err as Error).message}`);
      return null;
    }
  }
/**
 * Makes an authenticated request to a registry.
 *
 * Obtains (or reuses) a Bearer token for the given repo/actions scope
 * and issues the request against the registry's API base URL.
 */
private async registryFetch(
  registry: string,
  path: string,
  options: {
    method?: string;
    headers?: Record<string, string>;
    body?: Buffer | ReadableStream | null;
    repo?: string;
    actions?: string;
    credentials?: IRegistryCredentials | null;
  } = {},
): Promise<Response> {
  const requestHeaders: Record<string, string> = { ...(options.headers || {}) };
  const token = await this.getToken(
    registry,
    options.repo || '',
    options.actions || 'pull',
    options.credentials,
  );
  if (token) {
    requestHeaders['Authorization'] = `Bearer ${token}`;
  }
  const fetchOptions: any = {
    method: options.method || 'GET',
    headers: requestHeaders,
  };
  if (options.body) {
    fetchOptions.body = options.body;
    // Node's fetch requires duplex for streaming request bodies
    fetchOptions.duplex = 'half';
  }
  const apiBase = this.getRegistryApiBase(registry);
  return fetch(`${apiBase}${path}`, fetchOptions);
}
/**
 * Gets a manifest from a registry (supports both manifest lists and single manifests).
 *
 * Returns the parsed body, raw bytes, content type, and digest
 * (registry-reported Docker-Content-Digest, or locally computed).
 */
private async getManifest(
  registry: string,
  repo: string,
  reference: string,
  credentials?: IRegistryCredentials | null,
): Promise<{ contentType: string; body: any; digest: string; raw: Buffer }> {
  // Accept both OCI and Docker media types, index/list variants first
  const acceptTypes = [
    'application/vnd.oci.image.index.v1+json',
    'application/vnd.docker.distribution.manifest.list.v2+json',
    'application/vnd.oci.image.manifest.v1+json',
    'application/vnd.docker.distribution.manifest.v2+json',
  ];
  const resp = await this.registryFetch(registry, `/v2/${repo}/manifests/${reference}`, {
    headers: { 'Accept': acceptTypes.join(', ') },
    repo,
    actions: 'pull',
    credentials,
  });
  if (!resp.ok) {
    const body = await resp.text();
    throw new Error(`Failed to get manifest ${registry}/${repo}:${reference} (${resp.status}): ${body}`);
  }
  const raw = Buffer.from(await resp.arrayBuffer());
  return {
    contentType: resp.headers.get('content-type') || '',
    body: JSON.parse(raw.toString('utf-8')),
    digest: resp.headers.get('docker-content-digest') || this.computeDigest(raw),
    raw,
  };
}
/**
 * Checks if a blob exists in the destination registry.
 * Uses a HEAD request: existence is signalled purely by the status code.
 */
private async blobExists(
  registry: string,
  repo: string,
  digest: string,
  credentials?: IRegistryCredentials | null,
): Promise<boolean> {
  const headResp = await this.registryFetch(registry, `/v2/${repo}/blobs/${digest}`, {
    method: 'HEAD',
    repo,
    // push scope too, since callers check right before uploading
    actions: 'pull,push',
    credentials,
  });
  return headResp.ok;
}
/**
 * Copies a single blob from source to destination registry.
 * Uses monolithic upload (POST initiate + PUT complete).
 *
 * Flow: HEAD destination (skip if present) → GET blob from source into
 * memory → POST to open an upload session → PUT the bytes to the session
 * URL with a `digest` query parameter.
 *
 * NOTE(review): the blob is fully buffered in memory before upload;
 * acceptable for typical layers, but very large layers may warrant
 * chunked/streaming upload — confirm expected image sizes.
 */
private async copyBlob(
  srcRegistry: string,
  srcRepo: string,
  destRegistry: string,
  destRepo: string,
  digest: string,
  srcCredentials?: IRegistryCredentials | null,
  destCredentials?: IRegistryCredentials | null,
): Promise<void> {
  // Check if blob already exists at destination (content-addressed:
  // a digest match means the bytes are identical, so nothing to do)
  const exists = await this.blobExists(destRegistry, destRepo, digest, destCredentials);
  if (exists) {
    logger.log('info', `  Blob ${digest.substring(0, 19)}... already exists, skipping`);
    return;
  }
  // Download blob from source
  const getResp = await this.registryFetch(srcRegistry, `/v2/${srcRepo}/blobs/${digest}`, {
    repo: srcRepo,
    actions: 'pull',
    credentials: srcCredentials,
  });
  if (!getResp.ok) {
    throw new Error(`Failed to get blob ${digest} from ${srcRegistry}/${srcRepo}: ${getResp.status}`);
  }
  const blobData = Buffer.from(await getResp.arrayBuffer());
  const blobSize = blobData.length;
  // Initiate upload at destination
  const postResp = await this.registryFetch(destRegistry, `/v2/${destRepo}/blobs/uploads/`, {
    method: 'POST',
    headers: { 'Content-Length': '0' },
    repo: destRepo,
    actions: 'pull,push',
    credentials: destCredentials,
  });
  // 202 Accepted is the spec response; resp.ok already covers it, the
  // extra status check is kept as belt-and-braces
  if (!postResp.ok && postResp.status !== 202) {
    const body = await postResp.text();
    throw new Error(`Failed to initiate upload at ${destRegistry}/${destRepo}: ${postResp.status} ${body}`);
  }
  // Get upload URL from Location header
  let uploadUrl = postResp.headers.get('location') || '';
  if (!uploadUrl) {
    throw new Error(`No upload location returned from ${destRegistry}/${destRepo}`);
  }
  // Make upload URL absolute if relative
  if (uploadUrl.startsWith('/')) {
    const apiBase = this.getRegistryApiBase(destRegistry);
    uploadUrl = `${apiBase}${uploadUrl}`;
  }
  // Complete upload with PUT (monolithic); the session URL may already
  // carry query params, so pick the right separator for the digest
  const separator = uploadUrl.includes('?') ? '&' : '?';
  const putUrl = `${uploadUrl}${separator}digest=${encodeURIComponent(digest)}`;
  // For PUT to the upload URL, we need auth — plain fetch is used here
  // because the session URL is absolute and must not be re-based through
  // registryFetch
  const token = await this.getToken(destRegistry, destRepo, 'pull,push', destCredentials);
  const putHeaders: Record<string, string> = {
    'Content-Type': 'application/octet-stream',
    'Content-Length': String(blobSize),
  };
  if (token) {
    putHeaders['Authorization'] = `Bearer ${token}`;
  }
  const putResp = await fetch(putUrl, {
    method: 'PUT',
    headers: putHeaders,
    body: blobData,
  });
  if (!putResp.ok) {
    const body = await putResp.text();
    throw new Error(`Failed to upload blob ${digest} to ${destRegistry}/${destRepo}: ${putResp.status} ${body}`);
  }
  // Human-readable size for the log line (MB above 1 MiB, else KB)
  const sizeStr = blobSize > 1048576
    ? `${(blobSize / 1048576).toFixed(1)} MB`
    : `${(blobSize / 1024).toFixed(1)} KB`;
  logger.log('info', `  Copied blob ${digest.substring(0, 19)}... (${sizeStr})`);
}
/**
 * Pushes a manifest to a registry.
 * Returns the manifest digest (registry-reported, or locally computed).
 */
private async putManifest(
  registry: string,
  repo: string,
  reference: string,
  manifest: Buffer,
  contentType: string,
  credentials?: IRegistryCredentials | null,
): Promise<string> {
  const putResp = await this.registryFetch(registry, `/v2/${repo}/manifests/${reference}`, {
    method: 'PUT',
    headers: {
      'Content-Type': contentType,
      'Content-Length': String(manifest.length),
    },
    body: manifest,
    repo,
    actions: 'pull,push',
    credentials,
  });
  if (!putResp.ok) {
    const errorBody = await putResp.text();
    throw new Error(`Failed to put manifest ${registry}/${repo}:${reference} (${putResp.status}): ${errorBody}`);
  }
  // Prefer the registry's Docker-Content-Digest; fall back to computing it
  return putResp.headers.get('docker-content-digest') || this.computeDigest(manifest);
}
/**
* Copies a single-platform manifest and all its blobs from source to destination.
*/
private async copySingleManifest(
srcRegistry: string,
srcRepo: string,
destRegistry: string,
destRepo: string,
manifestDigest: string,
srcCredentials?: IRegistryCredentials | null,
destCredentials?: IRegistryCredentials | null,
): Promise<void> {
// Get the platform manifest
const { body: manifest, contentType, raw } = await this.getManifest(
srcRegistry, srcRepo, manifestDigest, srcCredentials,
);
// Copy config blob
if (manifest.config?.digest) {
logger.log('info', ` Copying config blob...`);
await this.copyBlob(
srcRegistry, srcRepo, destRegistry, destRepo,
manifest.config.digest, srcCredentials, destCredentials,
);
}
// Copy layer blobs
const layers = manifest.layers || [];
for (let i = 0; i < layers.length; i++) {
const layer = layers[i];
logger.log('info', ` Copying layer ${i + 1}/${layers.length}...`);
await this.copyBlob(
srcRegistry, srcRepo, destRegistry, destRepo,
layer.digest, srcCredentials, destCredentials,
);
}
// Push the platform manifest by digest
await this.putManifest(
destRegistry, destRepo, manifestDigest, raw, contentType, destCredentials,
);
}
/**
 * Copies a complete image (single or multi-arch) from source to destination registry.
 *
 * @param srcRegistry - Source registry host (e.g., "localhost:5234")
 * @param srcRepo - Source repository (e.g., "myapp")
 * @param srcTag - Source tag (e.g., "v1.0.0")
 * @param destRegistry - Destination registry host (e.g., "registry.gitlab.com")
 * @param destRepo - Destination repository (e.g., "org/myapp")
 * @param destTag - Destination tag (e.g., "v1.0.0" or "v1.0.0_arm64")
 * @param credentials - Optional credentials for destination registry
 */
public async copyImage(
  srcRegistry: string,
  srcRepo: string,
  srcTag: string,
  destRegistry: string,
  destRepo: string,
  destTag: string,
  credentials?: IRegistryCredentials | null,
): Promise<void> {
  logger.log('info', `Copying ${srcRegistry}/${srcRepo}:${srcTag} -> ${destRegistry}/${destRepo}:${destTag}`);
  // Source is always the local registry (no credentials needed)
  const srcCredentials: IRegistryCredentials | null = null;
  const destCredentials = credentials || RegistryCopy.getDockerConfigCredentials(destRegistry);
  // Get the top-level manifest
  const top = await this.getManifest(srcRegistry, srcRepo, srcTag, srcCredentials);
  // An index/list media type (or a "manifests" array) marks a multi-arch image
  const isManifestList =
    top.contentType.includes('manifest.list') ||
    top.contentType.includes('image.index') ||
    top.body.manifests !== undefined;
  if (isManifestList) {
    // Multi-arch: copy each platform manifest + blobs, then push the list
    const platforms = (top.body.manifests || []) as any[];
    logger.log('info', `Multi-arch manifest with ${platforms.length} platform(s)`);
    for (const platformEntry of platforms) {
      const platDesc = platformEntry.platform
        ? `${platformEntry.platform.os}/${platformEntry.platform.architecture}`
        : platformEntry.digest;
      logger.log('info', `Copying platform: ${platDesc}`);
      await this.copySingleManifest(
        srcRegistry, srcRepo, destRegistry, destRepo,
        platformEntry.digest, srcCredentials, destCredentials,
      );
    }
    // Push the manifest list/index with the destination tag
    const digest = await this.putManifest(
      destRegistry, destRepo, destTag, top.raw, top.contentType, destCredentials,
    );
    logger.log('ok', `Pushed manifest list to ${destRegistry}/${destRepo}:${destTag} (${digest.substring(0, 19)}...)`);
    return;
  }
  // Single-platform manifest: copy config + layers, then push with the tag
  logger.log('info', 'Single-platform manifest');
  if (top.body.config?.digest) {
    logger.log('info', ' Copying config blob...');
    await this.copyBlob(
      srcRegistry, srcRepo, destRegistry, destRepo,
      top.body.config.digest, srcCredentials, destCredentials,
    );
  }
  const layers = top.body.layers || [];
  for (let i = 0; i < layers.length; i++) {
    logger.log('info', ` Copying layer ${i + 1}/${layers.length}...`);
    await this.copyBlob(
      srcRegistry, srcRepo, destRegistry, destRepo,
      layers[i].digest, srcCredentials, destCredentials,
    );
  }
  const digest = await this.putManifest(
    destRegistry, destRepo, destTag, top.raw, top.contentType, destCredentials,
  );
  logger.log('ok', `Pushed manifest to ${destRegistry}/${destRepo}:${destTag} (${digest.substring(0, 19)}...)`);
}
/**
 * Computes sha256 digest of a buffer.
 *
 * Returns the digest in OCI "sha256:<hex>" form, used as a fallback
 * wherever the registry omits the Docker-Content-Digest header.
 */
private computeDigest(data: Buffer): string {
  // NOTE(review): CommonJS require() in what looks like an ESM build
  // (sibling files import with '.js' specifiers) — confirm this resolves
  // at runtime, or hoist to a top-level `import * as crypto from 'crypto'`.
  const crypto = require('crypto');
  const hash = crypto.createHash('sha256').update(data).digest('hex');
  return `sha256:${hash}`;
}
}

108
ts/classes.tsdockercache.ts Normal file
View File

@@ -0,0 +1,108 @@
import * as crypto from 'crypto';
import * as fs from 'fs';
import * as path from 'path';
import * as plugins from './tsdocker.plugins.js';
import * as paths from './tsdocker.paths.js';
import { logger } from './tsdocker.logging.js';
import type { ICacheData, ICacheEntry } from './interfaces/index.js';
// Shared shell runner (bash executor) for docker CLI invocations.
const smartshellInstance = new plugins.smartshell.Smartshell({
  executor: 'bash',
});
/**
* Manages content-hash-based build caching for Dockerfiles.
* Cache is stored in .nogit/tsdocker_support.json.
*/
export class TsDockerCache {
private cacheFilePath: string;
private data: ICacheData;
constructor() {
this.cacheFilePath = path.join(paths.cwd, '.nogit', 'tsdocker_support.json');
this.data = { version: 1, entries: {} };
}
/**
* Loads cache data from disk. Falls back to empty cache on missing/corrupt file.
*/
public load(): void {
try {
const raw = fs.readFileSync(this.cacheFilePath, 'utf-8');
const parsed = JSON.parse(raw);
if (parsed && parsed.version === 1 && parsed.entries) {
this.data = parsed;
} else {
logger.log('warn', '[cache] Cache file has unexpected format, starting fresh');
this.data = { version: 1, entries: {} };
}
} catch {
// Missing or corrupt file — start fresh
this.data = { version: 1, entries: {} };
}
}
/**
* Saves cache data to disk. Creates .nogit directory if needed.
*/
public save(): void {
const dir = path.dirname(this.cacheFilePath);
fs.mkdirSync(dir, { recursive: true });
fs.writeFileSync(this.cacheFilePath, JSON.stringify(this.data, null, 2), 'utf-8');
}
/**
* Computes SHA-256 hash of Dockerfile content.
*/
public computeContentHash(content: string): string {
return crypto.createHash('sha256').update(content).digest('hex');
}
/**
* Checks whether a build can be skipped for the given Dockerfile.
* Logs detailed diagnostics and returns true if the build should be skipped.
*/
public async shouldSkipBuild(cleanTag: string, content: string): Promise<boolean> {
const contentHash = this.computeContentHash(content);
const entry = this.data.entries[cleanTag];
if (!entry) {
logger.log('info', `[cache] ${cleanTag}: no cached entry, will build`);
return false;
}
const hashMatch = entry.contentHash === contentHash;
logger.log('info', `[cache] ${cleanTag}: hash ${hashMatch ? 'matches' : 'changed'}`);
if (!hashMatch) {
logger.log('info', `[cache] ${cleanTag}: content changed, will build`);
return false;
}
// Hash matches — verify the image still exists locally
const inspectResult = await smartshellInstance.exec(
`docker image inspect ${entry.imageId} > /dev/null 2>&1`
);
const available = inspectResult.exitCode === 0;
if (available) {
logger.log('info', `[cache] ${cleanTag}: cache hit, skipping build`);
return true;
}
logger.log('info', `[cache] ${cleanTag}: image no longer available, will build`);
return false;
}
/**
* Records a successful build in the cache.
*/
public recordBuild(cleanTag: string, content: string, imageId: string, buildTag: string): void {
this.data.entries[cleanTag] = {
contentHash: this.computeContentHash(content),
imageId,
buildTag,
timestamp: Date.now(),
};
}
}

View File

@@ -1,10 +1,12 @@
import * as plugins from './tsdocker.plugins.js'; import * as plugins from './tsdocker.plugins.js';
import * as paths from './tsdocker.paths.js'; import * as paths from './tsdocker.paths.js';
import { logger } from './tsdocker.logging.js'; import { logger, formatDuration } from './tsdocker.logging.js';
import { Dockerfile } from './classes.dockerfile.js'; import { Dockerfile } from './classes.dockerfile.js';
import { DockerRegistry } from './classes.dockerregistry.js'; import { DockerRegistry } from './classes.dockerregistry.js';
import { RegistryStorage } from './classes.registrystorage.js'; import { RegistryStorage } from './classes.registrystorage.js';
import type { ITsDockerConfig } from './interfaces/index.js'; import { TsDockerCache } from './classes.tsdockercache.js';
import { DockerContext } from './classes.dockercontext.js';
import type { ITsDockerConfig, IBuildCommandOptions } from './interfaces/index.js';
const smartshellInstance = new plugins.smartshell.Smartshell({ const smartshellInstance = new plugins.smartshell.Smartshell({
executor: 'bash', executor: 'bash',
@@ -17,17 +19,27 @@ export class TsDockerManager {
public registryStorage: RegistryStorage; public registryStorage: RegistryStorage;
public config: ITsDockerConfig; public config: ITsDockerConfig;
public projectInfo: any; public projectInfo: any;
public dockerContext: DockerContext;
private dockerfiles: Dockerfile[] = []; private dockerfiles: Dockerfile[] = [];
constructor(config: ITsDockerConfig) { constructor(config: ITsDockerConfig) {
this.config = config; this.config = config;
this.registryStorage = new RegistryStorage(); this.registryStorage = new RegistryStorage();
this.dockerContext = new DockerContext();
} }
/** /**
* Prepares the manager by loading project info and registries * Prepares the manager by loading project info and registries
*/ */
public async prepare(): Promise<void> { public async prepare(contextArg?: string): Promise<void> {
// Detect Docker context
if (contextArg) {
this.dockerContext.setContext(contextArg);
}
await this.dockerContext.detect();
this.dockerContext.logContextInfo();
this.dockerContext.logRootlessWarnings();
// Load project info // Load project info
try { try {
const projectinfoInstance = new plugins.projectinfo.ProjectInfo(paths.cwd); const projectinfoInstance = new plugins.projectinfo.ProjectInfo(paths.cwd);
@@ -90,9 +102,10 @@ export class TsDockerManager {
} }
/** /**
* Builds all discovered Dockerfiles in dependency order * Builds discovered Dockerfiles in dependency order.
* When options.patterns is provided, only matching Dockerfiles (and their dependencies) are built.
*/ */
public async build(): Promise<Dockerfile[]> { public async build(options?: IBuildCommandOptions): Promise<Dockerfile[]> {
if (this.dockerfiles.length === 0) { if (this.dockerfiles.length === 0) {
await this.discoverDockerfiles(); await this.discoverDockerfiles();
} }
@@ -102,38 +115,246 @@ export class TsDockerManager {
return []; return [];
} }
// Determine which Dockerfiles to build
let toBuild = this.dockerfiles;
if (options?.patterns && options.patterns.length > 0) {
// Filter to matching Dockerfiles
const matched = this.dockerfiles.filter((df) => {
const basename = plugins.path.basename(df.filePath);
return options.patterns!.some((pattern) => {
if (pattern.includes('*') || pattern.includes('?')) {
// Convert glob pattern to regex
const regexStr = '^' + pattern.replace(/\*/g, '.*').replace(/\?/g, '.') + '$';
return new RegExp(regexStr).test(basename);
}
return basename === pattern;
});
});
if (matched.length === 0) {
logger.log('warn', `No Dockerfiles matched patterns: ${options.patterns.join(', ')}`);
return [];
}
// Resolve dependency chain and preserve topological order
toBuild = this.resolveWithDependencies(matched, this.dockerfiles);
logger.log('info', `Matched ${matched.length} Dockerfile(s), building ${toBuild.length} (including dependencies)`);
}
// Check if buildx is needed // Check if buildx is needed
if (this.config.platforms && this.config.platforms.length > 1) { const useBuildx = !!(options?.platform || (this.config.platforms && this.config.platforms.length > 1));
if (useBuildx) {
await this.ensureBuildx(); await this.ensureBuildx();
} }
logger.log('info', `Building ${this.dockerfiles.length} Dockerfiles...`); logger.log('info', '');
await Dockerfile.buildDockerfiles(this.dockerfiles); logger.log('info', '=== BUILD PHASE ===');
if (useBuildx) {
const platforms = options?.platform || this.config.platforms!.join(', ');
logger.log('info', `Build mode: buildx multi-platform [${platforms}]`);
} else {
logger.log('info', 'Build mode: standard docker build');
}
const localDeps = toBuild.filter(df => df.localBaseImageDependent);
if (localDeps.length > 0) {
logger.log('info', `Local dependencies: ${localDeps.map(df => `${df.cleanTag} -> ${df.localBaseDockerfile?.cleanTag}`).join(', ')}`);
}
if (options?.noCache) {
logger.log('info', 'Cache: disabled (--no-cache)');
}
if (options?.parallel) {
const concurrency = options.parallelConcurrency ?? 4;
const levels = Dockerfile.computeLevels(toBuild);
logger.log('info', `Parallel build: ${levels.length} level(s), concurrency ${concurrency}`);
for (let l = 0; l < levels.length; l++) {
const level = levels[l];
logger.log('info', ` Level ${l} (${level.length}): ${level.map(df => df.cleanTag).join(', ')}`);
}
}
logger.log('info', `Building ${toBuild.length} Dockerfile(s)...`);
if (options?.cached) {
// === CACHED MODE: skip builds for unchanged Dockerfiles ===
logger.log('info', '(cached mode active)');
const cache = new TsDockerCache();
cache.load();
const total = toBuild.length;
const overallStart = Date.now();
await Dockerfile.startLocalRegistry(this.dockerContext.contextInfo?.isRootless);
try {
if (options?.parallel) {
// === PARALLEL CACHED MODE ===
const concurrency = options.parallelConcurrency ?? 4;
const levels = Dockerfile.computeLevels(toBuild);
let built = 0;
for (let l = 0; l < levels.length; l++) {
const level = levels[l];
logger.log('info', `--- Level ${l}: building ${level.length} image(s) in parallel ---`);
const tasks = level.map((df) => {
const myIndex = ++built;
return async () => {
const progress = `(${myIndex}/${total})`;
const skip = await cache.shouldSkipBuild(df.cleanTag, df.content);
if (skip) {
logger.log('ok', `${progress} Skipped ${df.cleanTag} (cached)`);
} else {
logger.log('info', `${progress} Building ${df.cleanTag}...`);
const elapsed = await df.build({
platform: options?.platform,
timeout: options?.timeout,
noCache: options?.noCache,
verbose: options?.verbose,
});
logger.log('ok', `${progress} Built ${df.cleanTag} in ${formatDuration(elapsed)}`);
const imageId = await df.getId();
cache.recordBuild(df.cleanTag, df.content, imageId, df.buildTag);
}
return df;
};
});
await Dockerfile.runWithConcurrency(tasks, concurrency);
// After the entire level completes, push all to local registry + tag for deps
for (const df of level) {
const dependentBaseImages = new Set<string>();
for (const other of toBuild) {
if (other.localBaseDockerfile === df && other.baseImage !== df.buildTag) {
dependentBaseImages.add(other.baseImage);
}
}
for (const fullTag of dependentBaseImages) {
logger.log('info', `Tagging ${df.buildTag} as ${fullTag} for local dependency resolution`);
await smartshellInstance.exec(`docker tag ${df.buildTag} ${fullTag}`);
}
// Push ALL images to local registry (skip if already pushed via buildx)
if (!df.localRegistryTag) {
await Dockerfile.pushToLocalRegistry(df);
}
}
}
} else {
// === SEQUENTIAL CACHED MODE ===
for (let i = 0; i < total; i++) {
const dockerfileArg = toBuild[i];
const progress = `(${i + 1}/${total})`;
const skip = await cache.shouldSkipBuild(dockerfileArg.cleanTag, dockerfileArg.content);
if (skip) {
logger.log('ok', `${progress} Skipped ${dockerfileArg.cleanTag} (cached)`);
} else {
logger.log('info', `${progress} Building ${dockerfileArg.cleanTag}...`);
const elapsed = await dockerfileArg.build({
platform: options?.platform,
timeout: options?.timeout,
noCache: options?.noCache,
verbose: options?.verbose,
});
logger.log('ok', `${progress} Built ${dockerfileArg.cleanTag} in ${formatDuration(elapsed)}`);
const imageId = await dockerfileArg.getId();
cache.recordBuild(dockerfileArg.cleanTag, dockerfileArg.content, imageId, dockerfileArg.buildTag);
}
// Tag for dependents IMMEDIATELY (not after all builds)
const dependentBaseImages = new Set<string>();
for (const other of toBuild) {
if (other.localBaseDockerfile === dockerfileArg && other.baseImage !== dockerfileArg.buildTag) {
dependentBaseImages.add(other.baseImage);
}
}
for (const fullTag of dependentBaseImages) {
logger.log('info', `Tagging ${dockerfileArg.buildTag} as ${fullTag} for local dependency resolution`);
await smartshellInstance.exec(`docker tag ${dockerfileArg.buildTag} ${fullTag}`);
}
// Push ALL images to local registry (skip if already pushed via buildx)
if (!dockerfileArg.localRegistryTag) {
await Dockerfile.pushToLocalRegistry(dockerfileArg);
}
}
}
} finally {
await Dockerfile.stopLocalRegistry();
}
logger.log('info', `Total build time: ${formatDuration(Date.now() - overallStart)}`);
cache.save();
} else {
// === STANDARD MODE: build all via static helper ===
await Dockerfile.buildDockerfiles(toBuild, {
platform: options?.platform,
timeout: options?.timeout,
noCache: options?.noCache,
verbose: options?.verbose,
isRootless: this.dockerContext.contextInfo?.isRootless,
parallel: options?.parallel,
parallelConcurrency: options?.parallelConcurrency,
});
}
logger.log('success', 'All Dockerfiles built successfully'); logger.log('success', 'All Dockerfiles built successfully');
return this.dockerfiles; return toBuild;
}
/**
* Resolves a set of target Dockerfiles to include all their local base image dependencies,
* preserving the original topological build order.
*/
private resolveWithDependencies(targets: Dockerfile[], allSorted: Dockerfile[]): Dockerfile[] {
const needed = new Set<Dockerfile>();
const addWithDeps = (df: Dockerfile) => {
if (needed.has(df)) return;
needed.add(df);
if (df.localBaseImageDependent && df.localBaseDockerfile) {
addWithDeps(df.localBaseDockerfile);
}
};
for (const df of targets) addWithDeps(df);
return allSorted.filter((df) => needed.has(df));
} }
/** /**
* Ensures Docker buildx is set up for multi-architecture builds * Ensures Docker buildx is set up for multi-architecture builds
*/ */
private async ensureBuildx(): Promise<void> { private async ensureBuildx(): Promise<void> {
logger.log('info', 'Setting up Docker buildx for multi-platform builds...'); const builderName = this.dockerContext.getBuilderName();
const platforms = this.config.platforms?.join(', ') || 'default';
// Check if a buildx builder exists logger.log('info', `Setting up Docker buildx [${platforms}]...`);
const inspectResult = await smartshellInstance.exec('docker buildx inspect tsdocker-builder 2>/dev/null'); logger.log('info', `Builder: ${builderName}`);
const inspectResult = await smartshellInstance.exec(`docker buildx inspect ${builderName} 2>/dev/null`);
if (inspectResult.exitCode !== 0) { if (inspectResult.exitCode !== 0) {
// Create a new buildx builder logger.log('info', 'Creating new buildx builder with host network...');
logger.log('info', 'Creating new buildx builder...'); await smartshellInstance.exec(
await smartshellInstance.exec('docker buildx create --name tsdocker-builder --use'); `docker buildx create --name ${builderName} --driver docker-container --driver-opt network=host --use`
);
await smartshellInstance.exec('docker buildx inspect --bootstrap'); await smartshellInstance.exec('docker buildx inspect --bootstrap');
} else { } else {
// Use existing builder const inspectOutput = inspectResult.stdout || '';
await smartshellInstance.exec('docker buildx use tsdocker-builder'); if (!inspectOutput.includes('network=host')) {
logger.log('info', 'Recreating buildx builder with host network (migration)...');
await smartshellInstance.exec(`docker buildx rm ${builderName} 2>/dev/null`);
await smartshellInstance.exec(
`docker buildx create --name ${builderName} --driver docker-container --driver-opt network=host --use`
);
await smartshellInstance.exec('docker buildx inspect --bootstrap');
} else {
await smartshellInstance.exec(`docker buildx use ${builderName}`);
} }
}
logger.log('ok', 'Docker buildx ready'); logger.log('ok', `Docker buildx ready (builder: ${builderName}, platforms: ${platforms})`);
} }
/** /**
@@ -172,12 +393,18 @@ export class TsDockerManager {
return; return;
} }
// Push each Dockerfile to each registry // Start local registry (reads from persistent .nogit/docker-registry/)
await Dockerfile.startLocalRegistry(this.dockerContext.contextInfo?.isRootless);
try {
// Push each Dockerfile to each registry via OCI copy
for (const dockerfile of this.dockerfiles) { for (const dockerfile of this.dockerfiles) {
for (const registry of registriesToPush) { for (const registry of registriesToPush) {
await dockerfile.push(registry); await dockerfile.push(registry);
} }
} }
} finally {
await Dockerfile.stopLocalRegistry();
}
logger.log('success', 'All images pushed successfully'); logger.log('success', 'All images pushed successfully');
} }
@@ -203,7 +430,8 @@ export class TsDockerManager {
} }
/** /**
* Runs tests for all Dockerfiles * Runs tests for all Dockerfiles.
* Starts the local registry so multi-platform images can be auto-pulled.
*/ */
public async test(): Promise<void> { public async test(): Promise<void> {
if (this.dockerfiles.length === 0) { if (this.dockerfiles.length === 0) {
@@ -215,7 +443,16 @@ export class TsDockerManager {
return; return;
} }
logger.log('info', '');
logger.log('info', '=== TEST PHASE ===');
await Dockerfile.startLocalRegistry(this.dockerContext.contextInfo?.isRootless);
try {
await Dockerfile.testDockerfiles(this.dockerfiles); await Dockerfile.testDockerfiles(this.dockerfiles);
} finally {
await Dockerfile.stopLocalRegistry();
}
logger.log('success', 'All tests completed'); logger.log('success', 'All tests completed');
} }
@@ -227,19 +464,21 @@ export class TsDockerManager {
await this.discoverDockerfiles(); await this.discoverDockerfiles();
} }
console.log('\nDiscovered Dockerfiles:'); logger.log('info', '');
console.log('========================\n'); logger.log('info', 'Discovered Dockerfiles:');
logger.log('info', '========================');
logger.log('info', '');
for (let i = 0; i < this.dockerfiles.length; i++) { for (let i = 0; i < this.dockerfiles.length; i++) {
const df = this.dockerfiles[i]; const df = this.dockerfiles[i];
console.log(`${i + 1}. ${df.filePath}`); logger.log('info', `${i + 1}. ${df.filePath}`);
console.log(` Tag: ${df.cleanTag}`); logger.log('info', ` Tag: ${df.cleanTag}`);
console.log(` Base Image: ${df.baseImage}`); logger.log('info', ` Base Image: ${df.baseImage}`);
console.log(` Version: ${df.version}`); logger.log('info', ` Version: ${df.version}`);
if (df.localBaseImageDependent) { if (df.localBaseImageDependent) {
console.log(` Depends on: ${df.localBaseDockerfile?.cleanTag}`); logger.log('info', ` Depends on: ${df.localBaseDockerfile?.cleanTag}`);
} }
console.log(''); logger.log('info', '');
} }
return this.dockerfiles; return this.dockerfiles;

View File

@@ -68,3 +68,37 @@ export interface IPushResult {
digest?: string; digest?: string;
error?: string; error?: string;
} }
/**
* Options for the build command
*/
export interface IBuildCommandOptions {
patterns?: string[]; // Dockerfile name patterns (e.g., ['Dockerfile_base', 'Dockerfile_*'])
platform?: string; // Single platform override (e.g., 'linux/arm64')
timeout?: number; // Build timeout in seconds
noCache?: boolean; // Force rebuild without Docker layer cache (--no-cache)
cached?: boolean; // Skip builds when Dockerfile content hasn't changed
verbose?: boolean; // Stream raw docker build output (default: silent)
context?: string; // Explicit Docker context name (--context flag)
parallel?: boolean; // Enable parallel builds within dependency levels
parallelConcurrency?: number; // Max concurrent builds per level (default 4)
}
export interface ICacheEntry {
contentHash: string; // SHA-256 hex of Dockerfile content
imageId: string; // Docker image ID (sha256:...)
buildTag: string;
timestamp: number; // Unix ms
}
export interface ICacheData {
version: 1;
entries: { [cleanTag: string]: ICacheEntry };
}
export interface IDockerContextInfo {
name: string; // 'default', 'rootless', 'colima', etc.
endpoint: string; // 'unix:///var/run/docker.sock'
isRootless: boolean;
dockerHost?: string; // value of DOCKER_HOST env var, if set
}

View File

@@ -7,8 +7,12 @@ import * as DockerModule from './tsdocker.docker.js';
import { logger, ora } from './tsdocker.logging.js'; import { logger, ora } from './tsdocker.logging.js';
import { TsDockerManager } from './classes.tsdockermanager.js'; import { TsDockerManager } from './classes.tsdockermanager.js';
import { DockerContext } from './classes.dockercontext.js';
import type { IBuildCommandOptions } from './interfaces/index.js';
import { commitinfo } from './00_commitinfo_data.js';
const tsdockerCli = new plugins.smartcli.Smartcli(); const tsdockerCli = new plugins.smartcli.Smartcli();
tsdockerCli.addVersion(commitinfo.version);
export let run = () => { export let run = () => {
// Default command: run tests in container (legacy behavior) // Default command: run tests in container (legacy behavior)
@@ -23,14 +27,43 @@ export let run = () => {
}); });
/** /**
* Build all Dockerfiles in dependency order * Build Dockerfiles in dependency order
* Usage: tsdocker build [Dockerfile_patterns...] [--platform=linux/arm64] [--timeout=600]
*/ */
tsdockerCli.addCommand('build').subscribe(async argvArg => { tsdockerCli.addCommand('build').subscribe(async argvArg => {
try { try {
const config = await ConfigModule.run(); const config = await ConfigModule.run();
const manager = new TsDockerManager(config); const manager = new TsDockerManager(config);
await manager.prepare(); await manager.prepare(argvArg.context as string | undefined);
await manager.build();
const buildOptions: IBuildCommandOptions = {};
const patterns = argvArg._.slice(1) as string[];
if (patterns.length > 0) {
buildOptions.patterns = patterns;
}
if (argvArg.platform) {
buildOptions.platform = argvArg.platform as string;
}
if (argvArg.timeout) {
buildOptions.timeout = Number(argvArg.timeout);
}
if (argvArg.cache === false) {
buildOptions.noCache = true;
}
if (argvArg.cached) {
buildOptions.cached = true;
}
if (argvArg.verbose) {
buildOptions.verbose = true;
}
if (argvArg.parallel) {
buildOptions.parallel = true;
if (typeof argvArg.parallel === 'number') {
buildOptions.parallelConcurrency = argvArg.parallel;
}
}
await manager.build(buildOptions);
logger.log('success', 'Build completed successfully'); logger.log('success', 'Build completed successfully');
} catch (err) { } catch (err) {
logger.log('error', `Build failed: ${(err as Error).message}`); logger.log('error', `Build failed: ${(err as Error).message}`);
@@ -40,21 +73,47 @@ export let run = () => {
/** /**
* Push built images to configured registries * Push built images to configured registries
* Usage: tsdocker push [Dockerfile_patterns...] [--platform=linux/arm64] [--timeout=600] [--registry=url]
*/ */
tsdockerCli.addCommand('push').subscribe(async argvArg => { tsdockerCli.addCommand('push').subscribe(async argvArg => {
try { try {
const config = await ConfigModule.run(); const config = await ConfigModule.run();
const manager = new TsDockerManager(config); const manager = new TsDockerManager(config);
await manager.prepare(); await manager.prepare(argvArg.context as string | undefined);
// Login first // Login first
await manager.login(); await manager.login();
// Build images first (if not already built) // Parse build options from positional args and flags
await manager.build(); const buildOptions: IBuildCommandOptions = {};
const patterns = argvArg._.slice(1) as string[];
if (patterns.length > 0) {
buildOptions.patterns = patterns;
}
if (argvArg.platform) {
buildOptions.platform = argvArg.platform as string;
}
if (argvArg.timeout) {
buildOptions.timeout = Number(argvArg.timeout);
}
if (argvArg.cache === false) {
buildOptions.noCache = true;
}
if (argvArg.verbose) {
buildOptions.verbose = true;
}
if (argvArg.parallel) {
buildOptions.parallel = true;
if (typeof argvArg.parallel === 'number') {
buildOptions.parallelConcurrency = argvArg.parallel;
}
}
// Get registry from arguments if specified // Build images first (if not already built)
const registryArg = argvArg._[1]; // e.g., tsdocker push registry.gitlab.com await manager.build(buildOptions);
// Get registry from --registry flag
const registryArg = argvArg.registry as string | undefined;
const registries = registryArg ? [registryArg] : undefined; const registries = registryArg ? [registryArg] : undefined;
await manager.push(registries); await manager.push(registries);
@@ -78,7 +137,7 @@ export let run = () => {
const config = await ConfigModule.run(); const config = await ConfigModule.run();
const manager = new TsDockerManager(config); const manager = new TsDockerManager(config);
await manager.prepare(); await manager.prepare(argvArg.context as string | undefined);
// Login first // Login first
await manager.login(); await manager.login();
@@ -98,10 +157,26 @@ export let run = () => {
try { try {
const config = await ConfigModule.run(); const config = await ConfigModule.run();
const manager = new TsDockerManager(config); const manager = new TsDockerManager(config);
await manager.prepare(); await manager.prepare(argvArg.context as string | undefined);
// Build images first // Build images first
await manager.build(); const buildOptions: IBuildCommandOptions = {};
if (argvArg.cache === false) {
buildOptions.noCache = true;
}
if (argvArg.cached) {
buildOptions.cached = true;
}
if (argvArg.verbose) {
buildOptions.verbose = true;
}
if (argvArg.parallel) {
buildOptions.parallel = true;
if (typeof argvArg.parallel === 'number') {
buildOptions.parallelConcurrency = argvArg.parallel;
}
}
await manager.build(buildOptions);
// Run tests // Run tests
await manager.test(); await manager.test();
@@ -119,7 +194,7 @@ export let run = () => {
try { try {
const config = await ConfigModule.run(); const config = await ConfigModule.run();
const manager = new TsDockerManager(config); const manager = new TsDockerManager(config);
await manager.prepare(); await manager.prepare(argvArg.context as string | undefined);
await manager.login(); await manager.login();
logger.log('success', 'Login completed successfully'); logger.log('success', 'Login completed successfully');
} catch (err) { } catch (err) {
@@ -135,7 +210,7 @@ export let run = () => {
try { try {
const config = await ConfigModule.run(); const config = await ConfigModule.run();
const manager = new TsDockerManager(config); const manager = new TsDockerManager(config);
await manager.prepare(); await manager.prepare(argvArg.context as string | undefined);
await manager.list(); await manager.list();
} catch (err) { } catch (err) {
logger.log('error', `List failed: ${(err as Error).message}`); logger.log('error', `List failed: ${(err as Error).message}`);
@@ -162,27 +237,200 @@ export let run = () => {
}); });
tsdockerCli.addCommand('clean').subscribe(async argvArg => { tsdockerCli.addCommand('clean').subscribe(async argvArg => {
ora.text('cleaning up docker env...'); try {
if (argvArg.all) { const autoYes = !!argvArg.y;
const smartshellInstance = new plugins.smartshell.Smartshell({ const includeAll = !!argvArg.all;
executor: 'bash'
});
ora.text('killing any running docker containers...');
await smartshellInstance.exec(`docker kill $(docker ps -q)`);
ora.text('removing stopped containers...'); const smartshellInstance = new plugins.smartshell.Smartshell({ executor: 'bash' });
await smartshellInstance.exec(`docker rm $(docker ps -a -q)`); const interact = new plugins.smartinteract.SmartInteract();
ora.text('removing images...'); // --- Docker context detection ---
await smartshellInstance.exec(`docker rmi -f $(docker images -q -f dangling=true)`); ora.text('detecting docker context...');
const dockerContext = new DockerContext();
ora.text('removing all other images...'); if (argvArg.context) {
await smartshellInstance.exec(`docker rmi $(docker images -a -q)`); dockerContext.setContext(argvArg.context as string);
}
ora.text('removing all volumes...'); await dockerContext.detect();
await smartshellInstance.exec(`docker volume rm $(docker volume ls -f dangling=true -q)`); ora.stop();
dockerContext.logContextInfo();
// --- Helper: parse docker output into resource list ---
// Shape of one parsed row from a docker listing command.
interface IDockerResource {
  id: string; // first tab-separated field: container/image ID, or volume name
  display: string; // all fields joined with ' | ' for display in prompts
}
// Runs a docker listing command and parses its tab-separated output rows.
// A non-zero exit code or empty output yields an empty list.
const listResources = async (command: string): Promise<IDockerResource[]> => {
  const execResult = await smartshellInstance.execSilent(command);
  const output = execResult.stdout.trim();
  if (execResult.exitCode !== 0 || output === '') {
    return [];
  }
  const parsed: IDockerResource[] = [];
  for (const row of output.split('\n')) {
    if (!row) continue;
    const fields = row.split('\t');
    parsed.push({
      id: fields[0],
      display: fields.join(' | '),
    });
  }
  return parsed;
};
// --- Helper: checkbox selection ---
// Presents a checkbox prompt for the given resources and returns the chosen IDs.
// With -y auto-confirm, every listed resource is selected without prompting.
const selectResources = async (
  name: string,
  message: string,
  resources: IDockerResource[],
): Promise<string[]> => {
  if (autoYes) {
    const allIds: string[] = [];
    for (const resource of resources) {
      allIds.push(resource.id);
    }
    return allIds;
  }
  const choices = resources.map((resource) => ({
    name: resource.display,
    value: resource.id,
  }));
  const answer = await interact.askQuestion({
    name,
    type: 'checkbox',
    message,
    default: [],
    choices,
  });
  return answer.value as string[];
};
// --- Helper: confirm action ---
// Asks a yes/no confirmation; the -y auto-confirm flag short-circuits to true.
// Defaults to false so accidental Enter presses do not destroy resources.
const confirmAction = async (
  name: string,
  message: string,
): Promise<boolean> => {
  if (autoYes) return true;
  const answer = await interact.askQuestion({
    name,
    type: 'confirm',
    message,
    default: false,
  });
  return answer.value as boolean;
};
// === RUNNING CONTAINERS ===
// Tab-separated --format output matches the listResources parser (ID is the first field).
const runningContainers = await listResources(
  `docker ps --format '{{.ID}}\t{{.Names}}\t{{.Image}}\t{{.Status}}'`
);
if (runningContainers.length > 0) {
  logger.log('info', `Found ${runningContainers.length} running container(s)`);
  // Interactive multi-select (or everything, when -y auto-confirm is active).
  const selectedIds = await selectResources(
    'runningContainers',
    'Select running containers to kill:',
    runningContainers,
  );
  if (selectedIds.length > 0) {
    logger.log('info', `Killing ${selectedIds.length} container(s)...`);
    await smartshellInstance.exec(`docker kill ${selectedIds.join(' ')}`);
  }
} else {
  logger.log('info', 'No running containers found');
}
// === STOPPED CONTAINERS ===
// Only exited/created containers are offered, so running ones cannot be removed here.
const stoppedContainers = await listResources(
  `docker ps -a --filter status=exited --filter status=created --format '{{.ID}}\t{{.Names}}\t{{.Image}}\t{{.Status}}'`
);
if (stoppedContainers.length > 0) {
  logger.log('info', `Found ${stoppedContainers.length} stopped container(s)`);
  const selectedIds = await selectResources(
    'stoppedContainers',
    'Select stopped containers to remove:',
    stoppedContainers,
  );
  if (selectedIds.length > 0) {
    logger.log('info', `Removing ${selectedIds.length} container(s)...`);
    await smartshellInstance.exec(`docker rm ${selectedIds.join(' ')}`);
  }
} else {
  logger.log('info', 'No stopped containers found');
}
// === DANGLING IMAGES ===
// Dangling images are a bulk yes/no confirmation rather than per-item selection.
const danglingImages = await listResources(
  `docker images -f dangling=true --format '{{.ID}}\t{{.Repository}}:{{.Tag}}\t{{.Size}}'`
);
if (danglingImages.length > 0) {
  const confirmed = await confirmAction(
    'removeDanglingImages',
    `Remove ${danglingImages.length} dangling image(s)?`,
  );
  if (confirmed) {
    logger.log('info', `Removing ${danglingImages.length} dangling image(s)...`);
    const ids = danglingImages.map((r) => r.id).join(' ');
    await smartshellInstance.exec(`docker rmi ${ids}`);
  }
} else {
  logger.log('info', 'No dangling images found');
}
// === ALL IMAGES (only with --all) ===
// Destructive full-image cleanup is gated behind the --all flag.
if (includeAll) {
  const allImages = await listResources(
    `docker images --format '{{.ID}}\t{{.Repository}}:{{.Tag}}\t{{.Size}}'`
  );
  if (allImages.length > 0) {
    logger.log('info', `Found ${allImages.length} image(s) total`);
    const selectedIds = await selectResources(
      'allImages',
      'Select images to remove:',
      allImages,
    );
    if (selectedIds.length > 0) {
      logger.log('info', `Removing ${selectedIds.length} image(s)...`);
      // -f: force removal of selected images (unlike the dangling-image pass above)
      await smartshellInstance.exec(`docker rmi -f ${selectedIds.join(' ')}`);
    }
  } else {
    logger.log('info', 'No images found');
  }
}
// === DANGLING VOLUMES ===
const danglingVolumes = await listResources(
  `docker volume ls -f dangling=true --format '{{.Name}}\t{{.Driver}}'`
);
if (danglingVolumes.length > 0) {
  // Bulk yes/no confirmation, mirroring the dangling-image handling.
  const confirmed = await confirmAction(
    'removeDanglingVolumes',
    `Remove ${danglingVolumes.length} dangling volume(s)?`,
  );
  if (confirmed) {
    logger.log('info', `Removing ${danglingVolumes.length} dangling volume(s)...`);
    // For volumes the parsed "id" field holds the volume name ({{.Name}} is the first column).
    const names = danglingVolumes.map((r) => r.id).join(' ');
    await smartshellInstance.exec(`docker volume rm ${names}`);
  }
} else {
  logger.log('info', 'No dangling volumes found');
}
// === ALL VOLUMES (only with --all) ===
// Like images, removal of non-dangling volumes requires the --all flag.
if (includeAll) {
  const allVolumes = await listResources(
    `docker volume ls --format '{{.Name}}\t{{.Driver}}'`
  );
  if (allVolumes.length > 0) {
    logger.log('info', `Found ${allVolumes.length} volume(s) total`);
    const selectedIds = await selectResources(
      'allVolumes',
      'Select volumes to remove:',
      allVolumes,
    );
    if (selectedIds.length > 0) {
      logger.log('info', `Removing ${selectedIds.length} volume(s)...`);
      await smartshellInstance.exec(`docker volume rm ${selectedIds.join(' ')}`);
    }
  } else {
    logger.log('info', 'No volumes found');
  }
}
logger.log('success', 'Docker cleanup completed!');
} catch (err) {
logger.log('error', `Clean failed: ${(err as Error).message}`);
process.exit(1);
} }
ora.finishSuccess('docker environment now is clean!');
}); });
tsdockerCli.addCommand('vscode').subscribe(async argvArg => { tsdockerCli.addCommand('vscode').subscribe(async argvArg => {

View File

@@ -15,3 +15,12 @@ export const logger = new plugins.smartlog.Smartlog({
logger.addLogDestination(new plugins.smartlogDestinationLocal.DestinationLocal()); logger.addLogDestination(new plugins.smartlogDestinationLocal.DestinationLocal());
export const ora = new plugins.smartlogSouceOra.SmartlogSourceOra(); export const ora = new plugins.smartlogSouceOra.SmartlogSourceOra();
/**
 * Format a millisecond duration as a short human-readable string.
 *
 * - < 1s   -> "500ms"
 * - < 60s  -> "12.3s" (one decimal place)
 * - >= 60s -> "2m 5s"
 *
 * @param ms duration in milliseconds (expected non-negative)
 * @returns formatted duration string
 */
export function formatDuration(ms: number): string {
  if (ms < 1000) return `${ms}ms`;
  const totalSeconds = ms / 1000;
  if (totalSeconds < 60) return `${totalSeconds.toFixed(1)}s`;
  let minutes = Math.floor(totalSeconds / 60);
  let seconds = Math.round(totalSeconds % 60);
  // Math.round can yield 60 (e.g. 119.6s would format as "1m 60s");
  // carry the overflow into the minutes instead.
  if (seconds === 60) {
    minutes += 1;
    seconds = 0;
  }
  return `${minutes}m ${seconds}s`;
}

View File

@@ -11,6 +11,7 @@ import * as smartlog from '@push.rocks/smartlog';
import * as smartlogDestinationLocal from '@push.rocks/smartlog-destination-local'; import * as smartlogDestinationLocal from '@push.rocks/smartlog-destination-local';
import * as smartlogSouceOra from '@push.rocks/smartlog-source-ora'; import * as smartlogSouceOra from '@push.rocks/smartlog-source-ora';
import * as smartopen from '@push.rocks/smartopen'; import * as smartopen from '@push.rocks/smartopen';
import * as smartinteract from '@push.rocks/smartinteract';
import * as smartshell from '@push.rocks/smartshell'; import * as smartshell from '@push.rocks/smartshell';
import * as smartstring from '@push.rocks/smartstring'; import * as smartstring from '@push.rocks/smartstring';
@@ -25,6 +26,7 @@ export {
smartpromise, smartpromise,
qenv, qenv,
smartcli, smartcli,
smartinteract,
smartlog, smartlog,
smartlogDestinationLocal, smartlogDestinationLocal,
smartlogSouceOra, smartlogSouceOra,