Compare commits
48 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 93cf2ee7bf | |
| | 8cf8e43e59 | |
| | 3e4558abc5 | |
| | 3e0eb5e198 | |
| | 732e9e5cac | |
| | 5bf1779243 | |
| | 4908c21b84 | |
| | b1e2f0d8ea | |
| | d815915135 | |
| | 0f445b4c86 | |
| | 8f0514d10e | |
| | e1cf1768da | |
| | 4d32d5e71e | |
| | a4552498ac | |
| | 4585801f32 | |
| | 3dc75f5cda | |
| | 7591e0ed90 | |
| | d2c2a4c4dd | |
| | 89cd93cdff | |
| | 10aee5d4c5 | |
| | 53b7bd7048 | |
| | 101c4286c1 | |
| | 63078139ec | |
| | 0cb5515b93 | |
| | aa0425f9bc | |
| | 2d4d7c671a | |
| | 3085eb590f | |
| | 04b75b42f3 | |
| | b04b8c9033 | |
| | 2130a8a879 | |
| | 17de78aed3 | |
| | eddb8cd156 | |
| | cfc7798d49 | |
| | 37dfde005e | |
| | d1785aab86 | |
| | 31fb4aea3c | |
| | 907048fa87 | |
| | 02b267ee10 | |
| | 16cd0bbd87 | |
| | cc83743f9a | |
| | 7131c16f80 | |
| | 02688861f4 | |
| | 3a8b301b3e | |
| | c09bef33c3 | |
| | 32eb0d1d77 | |
| | 7cac628975 | |
| | c279dbd55e | |
| | 7b7064864e | |
```
@@ -1,6 +0,0 @@
FROM hosttoday/ht-docker-node:npmci
RUN yarn global add @git.zone/tsdocker
COPY ./ /workspace
WORKDIR /workspace
ENV CI=true
CMD ["tsdocker","runinside"];
```
changelog.md (194 lines changed):

@@ -1,5 +1,199 @@
# Changelog

## 2026-03-19 - 2.2.0 - feat(cli/buildx)
add pull control for builds and isolate buildx builders per project

- adds a new pull build option with --no-pull CLI support and defaults builds to refreshing base images with --pull
- passes the selected buildx builder explicitly into build commands instead of relying on global docker buildx use state
- generates project-hashed builder suffixes so concurrent runs from different project directories do not share the same local builder
- updates session logging to include project hash and builder suffix for easier build diagnostics

## 2026-03-15 - 2.1.0 - feat(cli)
add global remote builder configuration and native SSH buildx nodes for multi-platform builds

- adds a new `tsdocker config` command with subcommands to add, remove, list, and show remote builder definitions
- introduces global config support for remote builders stored under `~/.git.zone/tsdocker/config.json`
- builds can now create multi-node buildx setups with remote SSH builders and open reverse SSH tunnels so remote nodes can push to the local staging registry
- updates the README and CLI help to document remote builder configuration and native cross-platform build workflows

## 2026-03-12 - 2.0.2 - fix(repo)
no changes to commit

## 2026-03-12 - 2.0.1 - fix(repository)
no changes to commit

## 2026-03-12 - 2.0.0 - BREAKING CHANGE(cli)
remove legacy container test runner and make the default command show the man page

- Removes legacy testing and VS Code commands, including `runinside`, `vscode`, generated Dockerfile assets, and related configuration fields (`baseImage`, `command`, `dockerSock`, `keyValueObject`)
- Simplifies configuration and dependencies by dropping qenv-based env loading and unused legacy packages
- Updates CLI and documentation to reflect the default help output and the current build/push-focused workflow

## 2026-02-07 - 1.17.4 - fix()
no changes

## 2026-02-07 - 1.17.3 - fix(registry)
increase default maxRetries in fetchWithRetry from 3 to 6 to improve resilience when fetching registry resources

- Changed the default maxRetries from 3 to 6 in ts/classes.registrycopy.ts
- Reduces failures from transient network or registry errors by allowing more retry attempts
- No API or behavior changes besides the increased default retry count

## 2026-02-07 - 1.17.2 - fix(registry)
improve HTTP fetch retry logging, backoff calculation, and token-cache warning

- Include the HTTP method in logs and normalize the method to uppercase for consistency
- Log retry attempts with method, URL, and the calculated exponential backoff delay
- Compute and reuse an exponential backoff delay variable instead of calculating it inline
- Log an error when a 5xx response persists after all retry attempts and when fetch ultimately fails
- Add a warning log when clearing a cached token after a 401 response

## 2026-02-07 - 1.17.1 - fix(registrycopy)
add fetchWithRetry wrapper to apply timeouts, retries with exponential backoff, and token cache handling; use it for registry HTTP requests

- Introduces fetchWithRetry(url, options, timeoutMs, maxRetries) to wrap fetch with an AbortSignal timeout, exponential backoff retries, and retry behavior only for network errors and 5xx responses
- Replaces direct fetch calls for registry /v2 checks, token requests, and blob uploads with fetchWithRetry (30s for auth/token checks, 300s for blob operations)
- Clears the token cache entry when a 401 response is received so the next attempt re-authenticates
- Adds logging on retry attempts and backoff delays to improve robustness and observability
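The wrapper is small enough to sketch. A minimal illustration of the shape described above (timeout via `AbortSignal.timeout()`, doubling backoff, retries only for network errors and 5xx); names and logging are illustrative rather than the exact `ts/classes.registrycopy.ts` source:

```typescript
// Sketch only: mirrors the documented behavior, not the actual tsdocker source.
async function fetchWithRetry(
  url: string,
  options: RequestInit = {},
  timeoutMs = 30_000,
  maxRetries = 6,
): Promise<Response> {
  for (let attempt = 0; attempt < maxRetries; attempt++) {
    try {
      const response = await fetch(url, {
        ...options,
        signal: AbortSignal.timeout(timeoutMs), // abort requests that hang
      });
      // Retry only 5xx server errors; 4xx client errors surface immediately.
      if (response.status < 500 || attempt === maxRetries - 1) {
        return response;
      }
    } catch (err) {
      // Network errors (ECONNRESET, 'fetch failed', timeouts) are retryable.
      if (attempt === maxRetries - 1) throw err;
    }
    const delayMs = 1000 * 2 ** attempt; // 1s, 2s, 4s, 8s, 16s, ...
    console.warn(`fetch failed (attempt ${attempt + 1}/${maxRetries}), retrying in ${delayMs}ms`);
    await new Promise((resolve) => setTimeout(resolve, delayMs));
  }
  throw new Error(`fetch failed after ${maxRetries} attempts: ${url}`);
}
```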
## 2026-02-07 - 1.17.0 - feat(tsdocker)
add Dockerfile filtering, optional skip-build flow, and fallback Docker config credential loading

- Add TsDockerManager.filterDockerfiles(patterns) to filter discovered Dockerfiles by glob-style patterns and warn when no matches are found
- Allow skipping the image build with --no-build (argvArg.build === false): discover Dockerfiles and apply filters without performing a build
- Fall back to loading Docker registry credentials from ~/.docker/config.json via RegistryCopy.getDockerConfigCredentials when env vars do not provide credentials
- Import RegistryCopy and add info/warn logs when credentials are loaded or missing

## 2026-02-07 - 1.16.0 - feat(core)
Introduce per-invocation TsDockerSession and session-aware local registry and build orchestration; stream and parse buildx output for improved logging and visibility; detect Docker topology and add CI-safe cleanup; update README with multi-arch, parallel-build, caching, and local registry usage and new CLI flags.

- Add TsDockerSession to allocate unique ports, container names, and builder suffixes for concurrent runs (especially in CI).
- Make the local registry session-aware: start/stop/use the registry container and persistent storage per session; retry on port conflicts.
- Inject the session into Dockerfile instances and TsDockerManager; use session.config.registryHost for tagging/pushing and test container naming.
- Stream and parse buildx/docker build output via createBuildOutputHandler for clearer step/platform/CACHED/DONE logging and --progress=plain usage.
- Detect Docker topology (socket-mount, dind, local) in DockerContext and expose it in context info.
- Add manager.cleanup to remove CI-scoped buildx builders and ensure the CLI calls cleanup after build/push/test.
- Update interfaces to include topology and adjust many Dockerfile/manager methods to be session-aware.
- Large README improvements: multi-arch flow, persistent local registry, parallel builds, caching, new CLI and clean flags, and examples for CI integration.

## 2026-02-07 - 1.15.1 - fix(registry)
use persistent local registry and OCI Distribution API image copy for pushes

- Adds a RegistryCopy class implementing the OCI Distribution API to copy images (including multi-arch manifest lists) from the local registry to remote registries.
- All builds now go through a persistent local registry at localhost:5234 with volume storage at .nogit/docker-registry/; Dockerfile.startLocalRegistry mounts this directory.
- Dockerfile.push now delegates to RegistryCopy.copyImage; Dockerfile.needsLocalRegistry() always returns true, and config.push is now a no-op (kept for backward compat).
- Multi-platform buildx builds are pushed to the local registry (this.localRegistryTag) during buildx --push; the code avoids redundant pushes when images were already pushed by buildx.
- Build, cached build, test, push, and pull flows now start/stop the local registry automatically to support multi-platform image resolution.
- Introduces Dockerfile.getDestRepo and support for config.registryRepoMap to control destination repository mapping.
- Breaking change: registry usage and push behavior changed (config.push ignored and local registry mandatory); bump major version.

## 2026-02-07 - 1.15.0 - feat(clean)
Make the `clean` command interactive: add smartinteract prompts, Docker context detection, and selective resource removal with support for --all and -y auto-confirm

- Adds the dependency @push.rocks/smartinteract and exposes it from the plugins module
- Refactors the tsdocker.cli.ts clean command to list Docker resources and prompt checkbox selection for running/stopped containers, images, and volumes
- Adds DockerContext detection and logging to determine the active Docker context
- Introduces auto-confirm (-y) and --all handling to either auto-accept or allow full image/volume removal
- Replaces blunt shell commands with safer, interactive selection and adds improved error handling and logging

## 2026-02-07 - 1.14.0 - feat(build)
add level-based parallel builds with --parallel and configurable concurrency

- Introduces --parallel and --parallel=<n> CLI flags to enable level-based parallel Docker builds (default concurrency 4).
- Adds Dockerfile.computeLevels() to group topologically sorted Dockerfiles into dependency levels.
- Adds Dockerfile.runWithConcurrency() implementing a bounded-concurrency worker pool (fast-fail via Promise.all).
- Integrates parallel build mode into Dockerfile.buildDockerfiles() and TsDockerManager.build() for both cached and non-cached flows, including tagging and pushing for dependency resolution after each level.
- Adds options.parallel and options.parallelConcurrency to the build interface and wires them through the CLI and manager.
- Updates documentation (readme.hints.md) with usage examples and implementation notes.

## 2026-02-06 - 1.13.0 - feat(docker)
add Docker context detection, rootless support, and context-aware buildx registry handling

- Introduce a DockerContext class to detect the current Docker context and rootless mode and to log warnings and context info
- Add an IDockerContextInfo interface and a new context option on build/config to pass an explicit Docker context
- Propagate the --context CLI flag into TsDockerManager.prepare so CLI commands can set an explicit Docker context
- Make the buildx builder name context-aware (tsdocker-builder-<sanitized-context>) and log the builder name/platforms
- Pass isRootless into local registry startup and the build pipeline; emit rootless-specific warnings and a registry reachability hint

## 2026-02-06 - 1.12.0 - feat(docker)
add detailed logging for buildx, build commands, the local registry, and local dependency info

- Log startup of the local registry, including a note about buildx dependency bridging
- Log constructed build commands and indicate whether buildx or a standard docker build is used (including platforms and --push/--load distinctions)
- Emit a build mode summary at the start of the build phase and report local base-image dependency mappings
- Report when --no-cache is enabled and surface buildx setup readiness with the configured platforms
- Non-functional change: purely adds informational logging to improve observability during builds

## 2026-02-06 - 1.11.0 - feat(docker)
start a temporary local registry for buildx dependency resolution and ensure the buildx builder uses host networking

- Introduce a temporary local registry (localhost:5234) with start/stop helpers and push support to expose local images for buildx
- Add Dockerfile.needsLocalRegistry to decide when a local registry is required (local base dependencies plus multi-platform or a platform option)
- Push built images to the local registry and set localRegistryTag on Dockerfile instances for BuildKit build-context usage
- Tag built images in the host daemon for dependent Dockerfiles to resolve local FROM references
- Integrate the registry lifecycle into Dockerfile.buildDockerfiles and TsDockerManager build flows (start before builds, stop after)
- Ensure the buildx builder is created with --driver-opt network=host and recreate an existing builder if it lacks host networking, to allow registry access from build containers

## 2026-02-06 - 1.10.0 - feat(classes.dockerfile)
support using a local base image as a build context in buildx commands

- Adds a --build-context flag mapping the base image to docker-image://<localTag> when localBaseImageDependent && localBaseDockerfile are set
- Appends the build context flag to both single-platform and multi-platform docker buildx commands
- Logs an info message indicating the local build context mapping

## 2026-02-06 - 1.9.0 - feat(build)
add verbose build output, progress logging, and timing for builds/tests

- Add a 'verbose' option to build/test flows (interfaces, CLI, and method signatures) to allow streaming raw docker build output or running silently
- Log per-item progress for the build and test phases (e.g. (1/N) Building/Testing <tag>) and report individual durations
- Return the elapsed time from Dockerfile.build() and Dockerfile.test() and aggregate total build/test times in the manager
- Introduce a formatDuration(ms) helper in the logging module to format timings
- Switch from console.log to structured logger calls across cache, manager, dockerfile, and push paths
- Use silent exec variants when verbose is false and streaming exec when verbose is true

## 2026-02-06 - 1.8.0 - feat(build)
add an optional content-hash based build cache to skip rebuilding unchanged Dockerfiles

- Introduce TsDockerCache to compute the SHA-256 of Dockerfile content and persist the cache to .nogit/tsdocker_support.json
- Add ICacheEntry and ICacheData interfaces and a cached flag to IBuildCommandOptions
- Integrate cached mode in TsDockerManager: skip builds on cache hits, verify image presence, record builds on misses, and still perform dependency tagging
- Expose the --cached option in the CLI to enable the cached build flow
- Cache records store contentHash, imageId, buildTag, and timestamp
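A compact sketch of that cache check, assuming the cache file maps Dockerfile paths to entries (the real TsDockerCache also verifies the built image is still present and records new builds on misses):

```typescript
import { createHash } from 'crypto';
import { readFileSync, existsSync } from 'fs';

// Sketch only: field names follow the changelog entry above.
interface ICacheEntry {
  contentHash: string;
  imageId: string;
  buildTag: string;
  timestamp: number;
}

const cachePath = '.nogit/tsdocker_support.json';

function loadCache(): Record<string, ICacheEntry> {
  return existsSync(cachePath) ? JSON.parse(readFileSync(cachePath, 'utf8')) : {};
}

function shouldSkipBuild(dockerfilePath: string): boolean {
  const contentHash = createHash('sha256')
    .update(readFileSync(dockerfilePath))
    .digest('hex');
  const entry = loadCache()[dockerfilePath];
  // A cache hit means the Dockerfile content is unchanged since the last build.
  return entry !== undefined && entry.contentHash === contentHash;
}
```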
## 2026-02-06 - 1.7.0 - feat(cli)
add CLI version display using commitinfo

- Imported commitinfo from './00_commitinfo_data.js' and called tsdockerCli.addVersion(commitinfo.version) to surface the package/commit version in the Smartcli instance
- Change made in ts/tsdocker.cli.ts; a small user-facing CLI enhancement with no breaking changes

## 2026-02-06 - 1.6.0 - feat(docker)
add support for no-cache builds and tag built images for local dependency resolution

- Introduce IBuildCommandOptions.noCache to control --no-cache behavior
- Propagate noCache from the CLI (via the cache flag) through TsDockerManager to Dockerfile.build
- Append --no-cache to docker build/buildx commands when noCache is true
- After building an image, tag it with the full base image references used by dependent Dockerfiles so their FROM lines resolve to the locally built image
- Log tagging actions and execute docker tag via smartshellInstance

## 2026-02-06 - 1.5.0 - feat(build)
add support for selective builds, platform override, and build timeout

- Introduce IBuildCommandOptions with patterns, platform, and timeout to control build behavior
- Allow manager.build() to accept options and build only matching Dockerfiles (including dependencies) while preserving topological order
- Add CLI parsing for build/push to accept positional Dockerfile patterns and --platform/--timeout flags
- Support single-platform override via docker buildx and multi-platform buildx detection
- Implement streaming exec with a timeout to kill long-running builds and surface timeout errors

## 2026-02-04 - 1.4.3 - fix(dockerfile)
fix matching of base images to local Dockerfiles by stripping registry prefixes when comparing image references

- Added Dockerfile.extractRepoVersion(imageRef) to normalize image references by removing registry prefixes (detects registries containing '.' or ':' or 'localhost').
- Use extractRepoVersion when checking tagToDockerfile and when mapping local base dockerfiles to ensure comparisons use repo:tag keys rather than full registry-prefixed references.
- Prevents mismatches when baseImage includes a registry (e.g. "host.today/repo:version") so it correctly matches a local cleanTag like "repo:version".
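The normalization rule stated above is easy to sketch; this is an illustration, not the exact source:

```typescript
// Sketch: treat the leading path segment as a registry host if it contains
// '.' or ':' or is 'localhost'; otherwise the reference is already repo:tag.
function extractRepoVersion(imageRef: string): string {
  const firstSlash = imageRef.indexOf('/');
  if (firstSlash === -1) return imageRef;
  const firstSegment = imageRef.slice(0, firstSlash);
  const isRegistry =
    firstSegment.includes('.') ||
    firstSegment.includes(':') ||
    firstSegment === 'localhost';
  return isRegistry ? imageRef.slice(firstSlash + 1) : imageRef;
}

// extractRepoVersion('host.today/repo:version') === 'repo:version'
// extractRepoVersion('repo:version')            === 'repo:version'
```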
## 2026-01-21 - 1.4.2 - fix(classes.dockerfile)
use a single top-level fs import instead of requiring fs inside methods
package.json (29 lines changed):

```
@@ -1,6 +1,6 @@
{
  "name": "@git.zone/tsdocker",
  "version": "1.4.2",
  "version": "2.2.0",
  "private": false,
  "description": "develop npm modules cross platform with docker",
  "main": "dist_ts/index.js",
@@ -11,13 +11,6 @@
  "scripts": {
    "test": "(npm run build)",
    "build": "(tsbuild)",
    "testIntegration": "(npm run clean && npm run setupCheck && npm run testStandard)",
    "testStandard": "(cd test/ && tsx ../ts/index.ts)",
    "testClean": "(cd test/ && tsx ../ts/index.ts clean --all)",
    "testVscode": "(cd test/ && tsx ../ts/index.ts vscode)",
    "clean": "(rm -rf test/)",
    "compile": "(npmts --notest)",
    "setupCheck": "(git clone https://gitlab.com/sandboxzone/sandbox-npmts.git test/)",
    "buildDocs": "tsdoc"
  },
  "repository": {
@@ -34,26 +27,22 @@
  },
  "homepage": "https://gitlab.com/gitzone/tsdocker#readme",
  "devDependencies": {
    "@git.zone/tsbuild": "^4.1.2",
    "@git.zone/tsbuild": "^4.3.0",
    "@git.zone/tsrun": "^2.0.1",
    "@git.zone/tstest": "^3.1.6",
    "@types/node": "^25.0.9"
    "@git.zone/tstest": "^3.3.2",
    "@types/node": "^25.5.0"
  },
  "dependencies": {
    "@push.rocks/lik": "^6.2.2",
    "@push.rocks/lik": "^6.3.1",
    "@push.rocks/npmextra": "^5.3.3",
    "@push.rocks/projectinfo": "^5.0.2",
    "@push.rocks/qenv": "^6.1.3",
    "@push.rocks/smartanalytics": "^2.0.15",
    "@push.rocks/smartcli": "^4.0.20",
    "@push.rocks/smartfs": "^1.3.1",
    "@push.rocks/smartlog": "^3.1.10",
    "@push.rocks/smartfs": "^1.5.0",
    "@push.rocks/smartinteract": "^2.0.16",
    "@push.rocks/smartlog": "^3.2.1",
    "@push.rocks/smartlog-destination-local": "^9.0.2",
    "@push.rocks/smartlog-source-ora": "^1.0.9",
    "@push.rocks/smartopen": "^2.0.0",
    "@push.rocks/smartpromise": "^4.2.3",
    "@push.rocks/smartshell": "^3.3.0",
    "@push.rocks/smartstring": "^4.1.0"
    "@push.rocks/smartshell": "^3.3.7"
  },
  "packageManager": "pnpm@10.18.1+sha512.77a884a165cbba2d8d1c19e3b4880eee6d2fcabd0d879121e282196b80042351d5eb3ca0935fa599da1dc51265cc68816ad2bddd2a2de5ea9fdf92adbec7cd34",
  "type": "module",
```
pnpm-lock.yaml (5840 lines changed, generated): diff suppressed because it is too large.
readme.hints.md:

@@ -3,7 +3,6 @@

## Module Purpose

tsdocker is a comprehensive Docker development and building tool. It provides:
- Testing npm modules in clean Docker environments (legacy feature)
- Building Dockerfiles with dependency ordering
- Multi-registry push/pull support
- Multi-architecture builds (amd64/arm64)

@@ -12,7 +11,7 @@ tsdocker is a comprehensive Docker development and building tool. It provides:

| Command | Description |
|---------|-------------|
| `tsdocker` | Run tests in container (legacy default behavior) |
| `tsdocker` | Show usage / man page |
| `tsdocker build` | Build all Dockerfiles with dependency ordering |
| `tsdocker push [registry]` | Push images to configured registries |
| `tsdocker pull <registry>` | Pull images from registry |

@@ -20,7 +19,6 @@ tsdocker is a comprehensive Docker development and building tool. It provides:

| `tsdocker login` | Login to configured registries |
| `tsdocker list` | List discovered Dockerfiles and dependencies |
| `tsdocker clean --all` | Clean up Docker environment |
| `tsdocker vscode` | Start VS Code in Docker |

## Configuration

@@ -45,9 +43,6 @@ Configure in `package.json` under `@git.zone/tsdocker`:

### Configuration Options

- `baseImage`: Base Docker image for testing (legacy)
- `command`: Command to run in container (legacy)
- `dockerSock`: Mount Docker socket (legacy)
- `registries`: Array of registry URLs to push to
- `registryRepoMap`: Map registry URLs to different repo paths
- `buildArgEnvMap`: Map Docker build ARGs to environment variables

@@ -78,8 +73,6 @@ ts/

├── tsdocker.cli.ts (CLI commands)
├── tsdocker.config.ts (configuration)
├── tsdocker.plugins.ts (plugin imports)
├── tsdocker.docker.ts (legacy test runner)
├── tsdocker.snippets.ts (Dockerfile generation)
├── classes.dockerfile.ts (Dockerfile management)
├── classes.dockerregistry.ts (registry authentication)
├── classes.registrystorage.ts (registry storage)

@@ -96,11 +89,33 @@ ts/

- `@push.rocks/smartcli`: CLI framework
- `@push.rocks/projectinfo`: Project metadata

## Parallel Builds

The `--parallel` flag enables level-based parallel Docker builds:

```bash
tsdocker build --parallel            # parallel, default concurrency (4)
tsdocker build --parallel=8          # parallel, concurrency 8
tsdocker build --parallel --cached   # works with both modes
```

Implementation: `Dockerfile.computeLevels()` groups topologically sorted Dockerfiles into dependency levels. `Dockerfile.runWithConcurrency()` provides a worker-pool pattern for bounded concurrency. Both are public static methods on the `Dockerfile` class. The parallel logic exists in both `Dockerfile.buildDockerfiles()` (standard mode) and `TsDockerManager.build()` (cached mode).
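A compact sketch of those two helpers, using a generic node type in place of Dockerfile instances (illustrative; the real versions are the static methods named above):

```typescript
// Sketch only: level grouping plus a bounded worker pool.
interface INode {
  name: string;
  dependencies: INode[];
}

// Group topologically sorted nodes into levels: a node's level is one more
// than the deepest level among its dependencies.
function computeLevels(sorted: INode[]): INode[][] {
  const levelOf = new Map<INode, number>();
  const levels: INode[][] = [];
  for (const node of sorted) {
    const level = node.dependencies.length
      ? Math.max(...node.dependencies.map((dep) => levelOf.get(dep) ?? 0)) + 1
      : 0;
    levelOf.set(node, level);
    (levels[level] ??= []).push(node);
  }
  return levels;
}

// Bounded-concurrency worker pool: N workers pull tasks from a shared queue.
// Promise.all rejects as soon as any task fails (fast-fail).
async function runWithConcurrency<T>(
  tasks: Array<() => Promise<T>>,
  concurrency = 4,
): Promise<T[]> {
  const results: T[] = new Array(tasks.length);
  let next = 0;
  const worker = async () => {
    while (next < tasks.length) {
      const index = next++;
      results[index] = await tasks[index]();
    }
  };
  await Promise.all(Array.from({ length: Math.min(concurrency, tasks.length) }, worker));
  return results;
}
```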
## OCI Distribution API Push (v1.16+)

All builds now go through a persistent local registry (`localhost:5234`) with volume storage at `.nogit/docker-registry/`. Pushes use the `RegistryCopy` class (`ts/classes.registrycopy.ts`), which implements the OCI Distribution API to copy images (including multi-arch manifest lists) from the local registry to remote registries. This replaces the old `docker tag + docker push` approach, which only worked for single-platform images.

Key classes:
- `RegistryCopy` — HTTP-based OCI image copy (auth, blob transfer, manifest handling)
- `Dockerfile.push()` — Now delegates to `RegistryCopy.copyImage()`
- `Dockerfile.needsLocalRegistry()` — Always returns true
- `Dockerfile.startLocalRegistry()` — Uses persistent volume mount

The `config.push` field is now a no-op (kept for backward compat).
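At a high level, an OCI Distribution API copy follows a fetch-manifest, copy-blobs, put-manifest flow. The sketch below uses the standard OCI endpoint paths but omits authentication, manifest-list recursion, and error handling; it is an outline of the approach, not the `RegistryCopy` source:

```typescript
// Outline of an OCI Distribution API image copy between two registries.
async function copyImage(srcBase: string, destBase: string, repo: string, tag: string) {
  // 1. Fetch the manifest (may be an index/manifest list for multi-arch images).
  const manifestRes = await fetch(`${srcBase}/v2/${repo}/manifests/${tag}`, {
    headers: {
      Accept:
        'application/vnd.oci.image.index.v1+json, application/vnd.docker.distribution.manifest.v2+json',
    },
  });
  const mediaType =
    manifestRes.headers.get('content-type') ?? 'application/vnd.oci.image.manifest.v1+json';
  const manifest = await manifestRes.json();

  // 2. Copy config + layer blobs, skipping any the destination already has (HEAD check).
  const blobs = [...(manifest.layers ?? []), manifest.config].filter(Boolean);
  for (const blob of blobs) {
    const head = await fetch(`${destBase}/v2/${repo}/blobs/${blob.digest}`, { method: 'HEAD' });
    if (head.ok) continue; // blob already exists remotely: skip the upload

    const data = await (await fetch(`${srcBase}/v2/${repo}/blobs/${blob.digest}`)).arrayBuffer();
    const upload = await fetch(`${destBase}/v2/${repo}/blobs/uploads/`, { method: 'POST' });
    // Note: a real client resolves a relative Location header against destBase.
    const location = upload.headers.get('location')!;
    const sep = location.includes('?') ? '&' : '?';
    await fetch(`${location}${sep}digest=${encodeURIComponent(blob.digest)}`, {
      method: 'PUT',
      headers: { 'content-type': 'application/octet-stream' },
      body: data,
    });
  }

  // 3. Put the manifest under the destination tag, preserving its media type.
  await fetch(`${destBase}/v2/${repo}/manifests/${tag}`, {
    method: 'PUT',
    headers: { 'content-type': mediaType },
    body: JSON.stringify(manifest),
  });
}
```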
## Build Status

- Build: ✅ Passes
- Legacy test functionality preserved
- New Docker build functionality added

## Previous Upgrades (2025-11-22)
readme.md (526 lines changed):

@@ -1,6 +1,6 @@

# @git.zone/tsdocker

> 🐳 The ultimate Docker development toolkit for TypeScript projects — build, test, and ship containerized applications with ease.
> 🐳 The ultimate Docker development toolkit for TypeScript projects — build, test, and ship multi-arch containerized applications with zero friction.

## Issue Reporting and Security

@@ -8,15 +8,20 @@ For reporting bugs, issues, or security vulnerabilities, please visit [community

## What is tsdocker?

**tsdocker** is a comprehensive Docker development and building tool that handles everything from testing npm packages in clean environments to building and pushing multi-architecture Docker images across multiple registries.
**tsdocker** is a comprehensive Docker development and build tool that handles everything from testing npm packages in clean environments to building and pushing multi-architecture Docker images across multiple registries — all from a single CLI.

### 🎯 Key Capabilities

- 🧪 **Containerized Testing** — Run your tests in pristine Docker environments
- 🏗️ **Smart Docker Builds** — Automatically discover, sort, and build Dockerfiles by dependency
- 🚀 **Multi-Registry Push** — Ship to Docker Hub, GitLab, GitHub Container Registry, and more
- 🔧 **Multi-Architecture** — Build for `amd64` and `arm64` with Docker Buildx
- ⚡ **Zero Config Start** — Works out of the box, scales with your needs
- 🌍 **True Multi-Architecture** — Build for `amd64` and `arm64` simultaneously with Docker Buildx
- 🚀 **Multi-Registry Push** — Ship to Docker Hub, GitLab, GitHub Container Registry, and more via OCI Distribution API
- ⚡ **Parallel Builds** — Level-based parallel builds with configurable concurrency
- 🗄️ **Persistent Local Registry** — All images flow through a local OCI registry with persistent storage
- 📦 **Build Caching** — Skip unchanged Dockerfiles with content-hash caching
- 🎯 **Dockerfile Filtering** — Build or push only specific Dockerfiles using glob patterns
- 🔁 **Resilient Push** — Automatic retry with exponential backoff, timeouts, and token refresh for rock-solid pushes
- 🏭 **CI-Safe Isolation** — Unique sessions per invocation prevent collisions in parallel CI pipelines
- 🔧 **Zero Config Start** — Works out of the box, scales with your needs

## Installation

@@ -30,16 +35,6 @@ pnpm install --save-dev @git.zone/tsdocker

## Quick Start

### 🧪 Run Tests in Docker

The simplest use case — run your tests in a clean container:

```bash
tsdocker
```

This pulls your configured base image, mounts your project, and executes your test command in isolation.

### 🏗️ Build Docker Images

Got `Dockerfile` files? Build them all with automatic dependency ordering:

@@ -53,6 +48,7 @@ tsdocker will:

2. 📊 Analyze `FROM` dependencies between them
3. 🔄 Sort them topologically
4. 🏗️ Build each image in the correct order
5. 📦 Push every image to a persistent local registry (`.nogit/docker-registry/`)

### 📤 Push to Registries

@@ -63,33 +59,97 @@ Ship your images to one or all configured registries:

```bash
tsdocker push

# Push to a specific registry
tsdocker push registry.gitlab.com
tsdocker push --registry=registry.gitlab.com

# Push without rebuilding (use existing images in local registry)
tsdocker push --no-build
```

Under the hood, `tsdocker push` uses the **OCI Distribution API** to copy images directly from the local registry to remote registries. This means multi-arch manifest lists are preserved end-to-end — no more single-platform-only pushes. Every request is protected with **automatic retry** (up to 6 attempts with exponential backoff) and **5-minute timeouts**, so transient network issues don't kill your push mid-transfer.

### 🎯 Build Only Specific Dockerfiles

Target specific Dockerfiles by name pattern — dependencies are resolved automatically:

```bash
# Build only the base image
tsdocker build Dockerfile_base

# Build anything matching a glob pattern
tsdocker build Dockerfile_app*

# Push specific images only (skip build phase)
tsdocker push --no-build Dockerfile_api Dockerfile_web
```

## CLI Commands

| Command | Description |
|---------|-------------|
| `tsdocker` | Run tests in a fresh Docker container |
| `tsdocker` | Show usage / man page |
| `tsdocker build` | Build all Dockerfiles with dependency ordering |
| `tsdocker push [registry]` | Push images to configured registries |
| `tsdocker push` | Build + push images to configured registries |
| `tsdocker pull <registry>` | Pull images from a specific registry |
| `tsdocker test` | Run container test scripts (test_*.sh) |
| `tsdocker test` | Build + run container test scripts (`test_*.sh`) |
| `tsdocker login` | Authenticate with configured registries |
| `tsdocker list` | Display discovered Dockerfiles and their dependencies |
| `tsdocker clean --all` | ⚠️ Aggressively clean Docker environment |
| `tsdocker vscode` | Launch containerized VS Code in browser |
| `tsdocker config` | Manage global tsdocker configuration (remote builders, etc.) |
| `tsdocker clean` | Interactively clean Docker environment |
### Build Flags

| Flag | Description |
|------|-------------|
| `<patterns>` | Positional Dockerfile name patterns (e.g. `Dockerfile_base`, `Dockerfile_app*`) |
| `--platform=linux/arm64` | Override build platform for a single architecture |
| `--timeout=600` | Build timeout in seconds |
| `--no-cache` | Force rebuild without Docker layer cache |
| `--cached` | Skip unchanged Dockerfiles (content-hash based) |
| `--verbose` | Stream raw `docker build` output |
| `--parallel` | Enable level-based parallel builds (default concurrency: 4) |
| `--parallel=8` | Parallel builds with custom concurrency |
| `--context=mycontext` | Use a specific Docker context |

### Push Flags

| Flag | Description |
|------|-------------|
| `<patterns>` | Positional Dockerfile name patterns to select which images to push |
| `--registry=<url>` | Push to a single specific registry instead of all configured |
| `--no-build` | Skip the build phase; only push existing images from local registry |

### Config Subcommands

| Subcommand | Description |
|------------|-------------|
| `add-builder` | Add or update a remote builder node |
| `remove-builder` | Remove a remote builder by name |
| `list-builders` | List all configured remote builders |
| `show` | Show the full global configuration |

**`add-builder` flags:**

| Flag | Description |
|------|-------------|
| `--name=<name>` | Builder name (e.g. `arm64-builder`) |
| `--host=<user@ip>` | SSH host (e.g. `armbuilder@192.168.1.100`) |
| `--platform=<p>` | Target platform (e.g. `linux/arm64`) |
| `--ssh-key=<path>` | SSH key path (optional, uses SSH agent/config by default) |

### Clean Flags

| Flag | Description |
|------|-------------|
| `--all` | Include all images and volumes (not just dangling) |
| `-y` | Auto-confirm all prompts |

## Configuration

Configure tsdocker in your `package.json` or `npmextra.json`:
Configure tsdocker in your `package.json` or `npmextra.json` under the `@git.zone/tsdocker` key:

```json
{
  "@git.zone/tsdocker": {
    "baseImage": "node:20",
    "command": "npm test",
    "dockerSock": false,
    "registries": ["registry.gitlab.com", "docker.io"],
    "registryRepoMap": {
      "registry.gitlab.com": "myorg/myproject"
```

@@ -98,7 +158,6 @@ Configure tsdocker in your `package.json` or `npmextra.json`:

```json
      "NODE_VERSION": "NODE_VERSION"
    },
    "platforms": ["linux/amd64", "linux/arm64"],
    "push": false,
    "testDir": "./test"
  }
}
```

@@ -106,24 +165,107 @@ Configure tsdocker in your `package.json` or `npmextra.json`:

### Configuration Options

#### Testing Options (Legacy)

| Option | Type | Description |
|--------|------|-------------|
| `baseImage` | `string` | Docker image for test environment (default: `hosttoday/ht-docker-node:npmdocker`) |
| `command` | `string` | Command to run inside container (default: `npmci npm test`) |
| `dockerSock` | `boolean` | Mount Docker socket for DinD scenarios (default: `false`) |

#### Build & Push Options

| Option | Type | Description |
|--------|------|-------------|
| `registries` | `string[]` | Registry URLs to push to |
| `registryRepoMap` | `object` | Map registries to different repository paths |
| `buildArgEnvMap` | `object` | Map Docker build ARGs to environment variables |
| `platforms` | `string[]` | Target architectures (default: `["linux/amd64"]`) |
| `push` | `boolean` | Auto-push after build (default: `false`) |
| `testDir` | `string` | Directory containing test scripts |

| Option | Type | Default | Description |
|--------|------|---------|-------------|
| `registries` | `string[]` | `[]` | Registry URLs to push to |
| `registryRepoMap` | `object` | `{}` | Map registries to different repository paths |
| `buildArgEnvMap` | `object` | `{}` | Map Docker build ARGs to environment variables |
| `platforms` | `string[]` | `["linux/amd64"]` | Target architectures for multi-arch builds |
| `testDir` | `string` | `./test` | Directory containing test scripts |
## Architecture: How tsdocker Works

tsdocker uses a **local OCI registry** as the canonical store for all built images. This design solves fundamental problems with Docker's local daemon, which cannot hold multi-architecture manifest lists.

### 📐 Build Flow

```
┌─────────────────────────────────────────────────────┐
│ tsdocker build                                      │
│                                                     │
│ 1. Start local registry (localhost:<dynamic-port>)  │
│    └── Persistent volume: .nogit/docker-registry/   │
│                                                     │
│ 2. For each Dockerfile (topological order):         │
│    ├── Multi-platform: buildx --push → registry     │
│    └── Single-platform: docker build → registry     │
│                                                     │
│ 3. Stop local registry (data persists on disk)      │
└─────────────────────────────────────────────────────┘
```

### 📤 Push Flow

```
┌────────────────────────────────────────────────────────┐
│ tsdocker push                                          │
│                                                        │
│ 1. Start local registry (loads persisted data)         │
│                                                        │
│ 2. For each image × each remote registry:              │
│    └── OCI Distribution API copy (with retry):         │
│        ├── Fetch manifest (single or multi-arch)       │
│        ├── Copy blobs (skip if already exist)          │
│        ├── Retry up to 6× with exponential backoff     │
│        └── Push manifest with destination tag          │
│                                                        │
│ 3. Stop local registry                                 │
└────────────────────────────────────────────────────────┘
```

### 🔑 Why a Local Registry?

| Problem | Solution |
|---------|----------|
| `docker buildx --load` fails for multi-arch images | `buildx --push` to local registry works for any number of platforms |
| `docker push` only pushes single-platform manifests | OCI API copy preserves full manifest lists (multi-arch) |
| Images lost between build and push phases | Persistent storage at `.nogit/docker-registry/` survives restarts |
| Redundant blob uploads on incremental pushes | HEAD checks skip blobs that already exist on the remote |

### 🔁 Resilient Push

The OCI Distribution API client wraps every HTTP request with:

- **Timeouts** — 5-minute timeout for blob operations, 30-second timeout for auth/metadata calls via `AbortSignal.timeout()`
- **Automatic Retry** — Up to 6 attempts with exponential backoff (1s → 2s → 4s → 8s → 16s → 32s)
- **Smart Retry Logic** — Retries on network errors (`ECONNRESET`, `fetch failed`) and 5xx server errors; does NOT retry 4xx client errors
- **Token Refresh** — On 401 responses, the cached auth token is cleared so the next retry re-authenticates automatically

This means transient issues like stale connection pools, brief network blips, or token expiry during long multi-arch pushes (56+ blob operations) are handled gracefully instead of killing the entire transfer.

### 🏭 CI-Safe Session Isolation

Every tsdocker invocation gets its own **session** with unique:

- **Session ID** — Random 8-char hex (override with `TSDOCKER_SESSION_ID`)
- **Registry port** — Dynamically allocated (override with `TSDOCKER_REGISTRY_PORT`)
- **Registry container** — Named `tsdocker-registry-<sessionId>`
- **Builder suffix** — In CI, the buildx builder gets a `-<sessionId>` suffix to prevent collisions

This prevents resource conflicts when multiple CI jobs run tsdocker in parallel. Auto-detected CI systems:

| Environment Variable | CI System |
|---------------------|-----------|
| `GITEA_ACTIONS` | Gitea Actions |
| `GITHUB_ACTIONS` | GitHub Actions |
| `GITLAB_CI` | GitLab CI |
| `CI` | Generic CI |

In local dev, no suffix is added — keeping a persistent builder for faster rebuilds.
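A sketch of how such per-invocation values can be derived (illustrative; the real logic lives in `TsDockerSession`):

```typescript
import { randomBytes } from 'crypto';

// Sketch of per-invocation session values.
const isCi = ['GITEA_ACTIONS', 'GITHUB_ACTIONS', 'GITLAB_CI', 'CI'].some(
  (name) => !!process.env[name],
);

// Random 8-char hex ID, overridable for reproducible CI runs.
const sessionId = process.env.TSDOCKER_SESSION_ID ?? randomBytes(4).toString('hex');

const registryContainerName = `tsdocker-registry-${sessionId}`;

// In CI the buildx builder gets a session suffix to avoid collisions between
// parallel jobs; local dev keeps a stable builder for faster rebuilds.
const builderSuffix = isCi ? `-${sessionId}` : '';
```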
### 🔍 Docker Context & Topology Detection

tsdocker automatically detects your Docker environment topology:

| Topology | Detection | Meaning |
|----------|-----------|---------|
| `local` | Default | Standard Docker installation on the host |
| `socket-mount` | `/.dockerenv` exists | Running inside a container with Docker socket mounted |
| `dind` | `DOCKER_HOST` starts with `tcp://` | Docker-in-Docker setup |

Context-aware builder names (`tsdocker-builder-<context>`) prevent conflicts across Docker contexts. Rootless Docker configurations trigger appropriate warnings.

## Registry Authentication

@@ -140,13 +282,17 @@ export DOCKER_REGISTRY_USER="username"

```bash
export DOCKER_REGISTRY_PASSWORD="password"
```

### Docker Config Fallback

When pushing, tsdocker will also read credentials from `~/.docker/config.json` if no explicit credentials are provided via environment variables. This means `docker login` credentials work automatically. Docker Hub special cases (`docker.io`, `index.docker.io`, `registry-1.docker.io`) are all recognized.
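For reference, reading `docker login` credentials out of `~/.docker/config.json` looks roughly like this; each `auths` entry stores base64-encoded `user:password`, and while the helper name echoes `RegistryCopy.getDockerConfigCredentials`, the body here is illustrative:

```typescript
import { readFileSync } from 'fs';
import { homedir } from 'os';
import { join } from 'path';

// Sketch: read `docker login` credentials from ~/.docker/config.json.
function getDockerConfigCredentials(
  registry: string,
): { username: string; password: string } | null {
  const configPath = join(homedir(), '.docker', 'config.json');
  const config = JSON.parse(readFileSync(configPath, 'utf8'));
  // Docker Hub appears under several equivalent keys.
  const aliases =
    registry === 'docker.io'
      ? ['docker.io', 'index.docker.io', 'registry-1.docker.io']
      : [registry];
  for (const [key, value] of Object.entries<any>(config.auths ?? {})) {
    if (!aliases.some((alias) => key.includes(alias))) continue;
    const decoded = Buffer.from(value.auth, 'base64').toString('utf8');
    const sep = decoded.indexOf(':'); // password may itself contain ':'
    return { username: decoded.slice(0, sep), password: decoded.slice(sep + 1) };
  }
  return null;
}
```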
### Login Command

```bash
tsdocker login
```

Authenticates with all configured registries.
Authenticates with all configured registries using the provided environment variables.

## Advanced Usage

@@ -162,7 +308,72 @@ Build for multiple platforms using Docker Buildx:

```json
}
```

tsdocker automatically sets up a Buildx builder when multiple platforms are specified.
tsdocker automatically:
- Sets up a Buildx builder with `--driver-opt network=host` (so buildx can reach the local registry)
- Pushes multi-platform images to the local registry via `buildx --push`
- Copies the full manifest list (including all platform variants) to remote registries on `tsdocker push`
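The builder setup boils down to a couple of shell commands; a sketch of how they can be assembled (the bootstrap step is an assumption about warming the builder, and the real logic also recreates a builder that lacks host networking):

```typescript
// Sketch: assemble the buildx setup commands described above.
function buildxSetupCommands(contextName: string): string[] {
  const sanitized = contextName.replace(/[^a-zA-Z0-9_-]/g, '-');
  const builderName = `tsdocker-builder-${sanitized}`;
  return [
    // network=host lets build containers reach the local registry on localhost
    `docker buildx create --name ${builderName} --driver-opt network=host`,
    // assumption: bootstrap the builder so the first build does not pay startup cost
    `docker buildx inspect ${builderName} --bootstrap`,
  ];
}
```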
### 🖥️ Native Remote Builders

Instead of relying on slow QEMU emulation for cross-platform builds, tsdocker can use **native remote machines** via SSH as build nodes. For example, use a real arm64 machine for `linux/arm64` builds:

```bash
# Add a remote arm64 builder
tsdocker config add-builder \
  --name=arm64-builder \
  --host=armbuilder@192.168.1.100 \
  --platform=linux/arm64 \
  --ssh-key=~/.ssh/id_ed25519

# List configured builders
tsdocker config list-builders

# Remove a builder
tsdocker config remove-builder --name=arm64-builder

# Show full global config
tsdocker config show
```

Global configuration is stored at `~/.git.zone/tsdocker/config.json`.

**How it works:**

When remote builders are configured and the project's `platforms` includes a matching platform, tsdocker automatically:

1. Creates a **multi-node buildx builder** — local node for `linux/amd64`, remote SSH node for `linux/arm64`
2. Opens **SSH reverse tunnels** so the remote builder can push to the local staging registry
3. Builds natively on each platform's hardware — no QEMU overhead
4. Tears down tunnels after the build completes

```
[Local machine]                                  [Remote arm64 machine]
registry:2 on localhost:PORT <── SSH reverse tunnel ── localhost:PORT
BuildKit (amd64) ──push──>                       BuildKit (arm64) ──push──>
  localhost:PORT                                   localhost:PORT (tunneled)
```

**Prerequisites for the remote machine:**
- Docker installed and running
- A user with Docker group access (no sudo needed)
- SSH key access configured
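The reverse tunnel itself is plain OpenSSH. A sketch of opening one from Node, with the host and port taken from the examples above:

```typescript
import { spawn } from 'child_process';

// Sketch: expose the local staging registry's port on the remote builder's
// localhost via an OpenSSH reverse tunnel.
function openReverseTunnel(sshHost: string, registryPort: number) {
  return spawn('ssh', [
    '-N', // no remote command; tunnel only
    '-R', `${registryPort}:localhost:${registryPort}`, // remote port -> local registry
    sshHost, // e.g. armbuilder@192.168.1.100
  ]);
}

const tunnel = openReverseTunnel('armbuilder@192.168.1.100', 5234);
// ...run the multi-node buildx build...
tunnel.kill(); // torn down after the build completes
```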
### ⚡ Parallel Builds

Speed up builds by building independent images concurrently:

```bash
# Default concurrency (4 workers)
tsdocker build --parallel

# Custom concurrency
tsdocker build --parallel=8

# Works with caching too
tsdocker build --parallel --cached
```

tsdocker groups Dockerfiles into **dependency levels** using topological analysis. Images within the same level have no dependencies on each other and build in parallel. Each level completes before the next begins.

### 📦 Dockerfile Naming Conventions

@@ -175,6 +386,26 @@ tsdocker discovers files matching `Dockerfile*`:

| `Dockerfile_alpine` | `alpine` |
| `Dockerfile_##version##` | Uses `package.json` version |

### 🎯 Dockerfile Filtering

Build or push only the Dockerfiles you need. Positional arguments are matched against Dockerfile basenames as glob patterns:

```bash
# Build a single Dockerfile
tsdocker build Dockerfile_base

# Glob patterns with * and ? wildcards
tsdocker build Dockerfile_app*

# Multiple patterns
tsdocker build Dockerfile_base Dockerfile_web

# Push specific images without rebuilding
tsdocker push --no-build Dockerfile_api
```

When filtering for `build`, **dependencies are auto-resolved**: if `Dockerfile_app` depends on `Dockerfile_base`, specifying only `Dockerfile_app` will automatically include `Dockerfile_base` in the build order.
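Pattern matching of this kind reduces to a small glob-to-regex translation; a sketch (illustrative, not the exact `filterDockerfiles` implementation):

```typescript
// Sketch: '*' matches any run of characters, '?' matches a single character.
function matchesPattern(basename: string, pattern: string): boolean {
  const escaped = pattern.replace(/[.+^${}()|[\]\\]/g, '\\$&'); // escape regex chars
  const regex = new RegExp(`^${escaped.replace(/\*/g, '.*').replace(/\?/g, '.')}$`);
  return regex.test(basename);
}

matchesPattern('Dockerfile_app1', 'Dockerfile_app*'); // true
matchesPattern('Dockerfile_base', 'Dockerfile_app*'); // false
```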
### 🔗 Dependency-Aware Builds

If you have multiple Dockerfiles that depend on each other:

@@ -190,7 +421,7 @@ COPY . .

```dockerfile
RUN npm run build
```

tsdocker automatically detects that `Dockerfile_app` depends on `Dockerfile_base` and builds them in the correct order.
tsdocker automatically detects that `Dockerfile_app` depends on `Dockerfile_base`, builds them in the correct order, and makes the base image available to dependent builds via the local registry (using `--build-context` for buildx).

### 🧪 Container Test Scripts

@@ -210,6 +441,8 @@ Run with:

```bash
tsdocker test
```

This builds all images, starts the local registry (so multi-arch images can be pulled), and runs each matching test script inside a container.

### 🔧 Build Args from Environment

Pass environment variables as Docker build arguments:

@@ -232,45 +465,6 @@ FROM node:${NODE_VERSION}

```dockerfile
RUN echo "//registry.npmjs.org/:_authToken=${NPM_TOKEN}" > ~/.npmrc
```

### 🐳 Docker-in-Docker Testing

Test Docker-related tools by mounting the Docker socket:

```json
{
  "@git.zone/tsdocker": {
    "baseImage": "docker:latest",
    "command": "docker version && docker ps",
    "dockerSock": true
  }
}
```

### 📋 Listing Dockerfiles

Inspect your project's Dockerfiles and their relationships:

```bash
tsdocker list
```

Output:
```
Discovered Dockerfiles:
========================

1. Dockerfile_base
   Tag: myproject:base
   Base Image: node:20-alpine
   Version: base

2. Dockerfile_app
   Tag: myproject:app
   Base Image: myproject:base
   Version: app
   Depends on: myproject:base
```

### 🗺️ Registry Repo Mapping

Use different repository names for different registries:

@@ -287,40 +481,55 @@ Use different repository names for different registries:

```json
}
```

## Environment Variables
When pushing, tsdocker maps the local repo name to the registry-specific path. For example, a locally built `myproject:latest` becomes `registry.gitlab.com/mygroup/myproject:latest` and `docker.io/myuser/myproject:latest`.

### qenv Integration
### 📋 Listing Dockerfiles

tsdocker automatically loads environment variables from `qenv.yml`:
Inspect your project's Dockerfiles and their relationships:

```yaml
# qenv.yml
API_KEY: your-api-key
DATABASE_URL: postgres://localhost/test
```

```bash
tsdocker list
```

These are injected into your test container automatically.
Output:
```
Discovered Dockerfiles:
========================

1. /path/to/Dockerfile_base
   Tag: myproject:base
   Base Image: node:20-alpine
   Version: base

2. /path/to/Dockerfile_app
   Tag: myproject:app
   Base Image: myproject:base
   Version: app
   Depends on: myproject:base
```
## Examples

### Basic Test Configuration
### Minimal Build & Push

```json
{
  "@git.zone/tsdocker": {
    "baseImage": "node:20",
    "command": "npm test"
    "registries": ["docker.io"],
    "platforms": ["linux/amd64"]
  }
}
```

```bash
tsdocker push
```

### Full Production Setup

```json
{
  "@git.zone/tsdocker": {
    "baseImage": "node:20-alpine",
    "command": "pnpm test",
    "registries": ["registry.gitlab.com", "ghcr.io", "docker.io"],
    "registryRepoMap": {
      "registry.gitlab.com": "myorg/myapp",
```

@@ -338,73 +547,87 @@ These are injected into your test container automatically.

### CI/CD Integration

**GitLab CI:**

```yaml
# .gitlab-ci.yml
build:
build-and-push:
  stage: build
  script:
    - npm install -g @git.zone/tsdocker
    - tsdocker build
    - tsdocker push
  variables:
    DOCKER_REGISTRY_1: "registry.gitlab.com|$CI_REGISTRY_USER|$CI_REGISTRY_PASSWORD"
```

# GitHub Actions
**GitHub Actions:**

```yaml
- name: Build and Push
  run: |
    npm install -g @git.zone/tsdocker
    tsdocker login
    tsdocker build
    tsdocker push
  env:
    DOCKER_REGISTRY_1: "ghcr.io|${{ github.actor }}|${{ secrets.GITHUB_TOKEN }}"
```
## Requirements
**Gitea Actions:**

- **Docker** — Docker Engine or Docker Desktop must be installed
- **Node.js** — Version 18 or higher (ESM support required)
- **Docker Buildx** — Required for multi-architecture builds (included in Docker Desktop)

```yaml
- name: Build and Push
  run: |
    npm install -g @git.zone/tsdocker
    tsdocker push
  env:
    DOCKER_REGISTRY_1: "gitea.example.com|${{ secrets.REGISTRY_USER }}|${{ secrets.REGISTRY_PASSWORD }}"
```

## Why tsdocker?

### 🎯 The Problem

Managing Docker workflows manually is tedious:
- Remembering build order for dependent images
- Pushing to multiple registries with different credentials
- Setting up Buildx for multi-arch builds
- Ensuring consistent test environments

### ✨ The Solution

tsdocker automates the entire workflow:
- **One command** to build all images in dependency order
- **One command** to push to all registries
- **Automatic** Buildx setup for multi-platform builds
- **Consistent** containerized test environments
tsdocker auto-detects all three CI systems and enables session isolation automatically — no extra configuration needed.

## TypeScript API

tsdocker exposes its types for programmatic use:
tsdocker can also be used programmatically:

```typescript
import type { ITsDockerConfig } from '@git.zone/tsdocker/dist_ts/interfaces/index.js';
import { TsDockerManager } from '@git.zone/tsdocker/dist_ts/classes.tsdockermanager.js';
import type { ITsDockerConfig } from '@git.zone/tsdocker/dist_ts/interfaces/index.js';

const config: ITsDockerConfig = {
  baseImage: 'node:20',
  command: 'npm test',
  dockerSock: false,
  keyValueObject: {},
  registries: ['docker.io'],
  platforms: ['linux/amd64'],
  platforms: ['linux/amd64', 'linux/arm64'],
};

const manager = new TsDockerManager(config);
await manager.prepare();
await manager.build();
await manager.build({ parallel: true });
await manager.push();
```

## Environment Variables

### CI & Session Control

| Variable | Description |
|----------|-------------|
| `TSDOCKER_SESSION_ID` | Override the auto-generated session ID (default: random 8-char hex) |
| `TSDOCKER_REGISTRY_PORT` | Override the dynamically allocated local registry port |
| `CI` | Generic CI detection (also `GITHUB_ACTIONS`, `GITLAB_CI`, `GITEA_ACTIONS`) |

### Registry Credentials

| Variable | Description |
|----------|-------------|
| `DOCKER_REGISTRY_1` through `DOCKER_REGISTRY_10` | Pipe-delimited: `registry\|username\|password` |
| `DOCKER_REGISTRY_URL` | Registry URL for single-registry setup |
| `DOCKER_REGISTRY_USER` | Username for single-registry setup |
| `DOCKER_REGISTRY_PASSWORD` | Password for single-registry setup |
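Parsing the pipe-delimited variables is a one-liner per entry; a sketch of the scheme described above (illustrative):

```typescript
// Sketch: read DOCKER_REGISTRY_1 .. DOCKER_REGISTRY_10 ("registry|user|password").
function readRegistryCredentials() {
  const credentials: Array<{ registry: string; username: string; password: string }> = [];
  for (let i = 1; i <= 10; i++) {
    const raw = process.env[`DOCKER_REGISTRY_${i}`];
    if (!raw) continue;
    const [registry, username, password] = raw.split('|');
    credentials.push({ registry, username, password });
  }
  return credentials;
}
```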
## Requirements

- **Docker** — Docker Engine 20+ or Docker Desktop
- **Node.js** — Version 18 or higher (for native `fetch` and ESM support)
- **Docker Buildx** — Required for multi-architecture builds (included in Docker Desktop)

## Troubleshooting

### "docker not found"

@@ -417,11 +640,10 @@ docker --version

### Multi-arch build fails

Make sure Docker Buildx is available:
Make sure Docker Buildx is available. tsdocker will set up the builder automatically, but you can verify:

```bash
docker buildx version
docker buildx create --use
```

### Registry authentication fails

@@ -433,29 +655,31 @@ echo $DOCKER_REGISTRY_1

```bash
tsdocker login
```

tsdocker also falls back to `~/.docker/config.json` — ensure you've run `docker login` for your target registries.

### Push fails with "fetch failed"

tsdocker automatically retries failed requests up to 6 times with exponential backoff. If pushes still fail:

- Check network connectivity to the target registry
- Verify your credentials haven't expired
- Look for retry log messages (`fetch failed (attempt X/6)`) to diagnose the pattern
- Large layers may need longer timeouts — the default 5-minute timeout per request should cover most cases

### Circular dependency detected

Review your Dockerfiles' `FROM` statements — you have images depending on each other in a loop.

## Performance Tips
### Build context too large

🚀 **Use specific tags**: `node:20-alpine` is smaller and faster than `node:latest`
Use a `.dockerignore` file to exclude `node_modules`, `.git`, `.nogit`, and other large directories:

🚀 **Leverage caching**: Docker layers are cached — your builds get faster over time

🚀 **Prune regularly**: `docker system prune` reclaims disk space

🚀 **Use .dockerignore**: Exclude `node_modules`, `.git`, etc. from build context

## Migration from Legacy

Previously published as `npmdocker`, now `@git.zone/tsdocker`:

| Old | New |
|-----|-----|
| `npmdocker` command | `tsdocker` command |
| `"npmdocker"` config key | `"@git.zone/tsdocker"` config key |
| CommonJS | ESM with `.js` imports |

```
node_modules
.git
.nogit
dist_ts
```

## License and Legal Information
ts/00_commitinfo_data.ts:

```
@@ -3,6 +3,6 @@
 */
export const commitinfo = {
  name: '@git.zone/tsdocker',
  version: '1.4.2',
  version: '2.2.0',
  description: 'develop npm modules cross platform with docker'
}
```
79
ts/classes.dockercontext.ts
Normal file
79
ts/classes.dockercontext.ts
Normal file
@@ -0,0 +1,79 @@
|
||||
import * as plugins from './tsdocker.plugins.js';
|
||||
import * as fs from 'fs';
|
||||
import { logger } from './tsdocker.logging.js';
|
||||
import type { IDockerContextInfo } from './interfaces/index.js';
|
||||
|
||||
const smartshellInstance = new plugins.smartshell.Smartshell({ executor: 'bash' });
|
||||
|
||||
export class DockerContext {
|
||||
public contextInfo: IDockerContextInfo | null = null;
|
||||
|
||||
/** Sets DOCKER_CONTEXT env var for explicit context selection. */
|
||||
public setContext(contextName: string): void {
|
||||
process.env.DOCKER_CONTEXT = contextName;
|
||||
logger.log('info', `Docker context explicitly set to: ${contextName}`);
|
||||
}
|
||||
|
||||
/** Detects current Docker context via `docker context inspect` and rootless via `docker info`. */
|
||||
public async detect(): Promise<IDockerContextInfo> {
|
||||
let name = 'default';
|
||||
let endpoint = 'unknown';
|
||||
|
||||
const contextResult = await smartshellInstance.execSilent(
|
||||
`docker context inspect --format '{{json .}}'`
|
||||
);
|
||||
if (contextResult.exitCode === 0 && contextResult.stdout) {
|
||||
try {
|
||||
const parsed = JSON.parse(contextResult.stdout.trim());
|
||||
const data = Array.isArray(parsed) ? parsed[0] : parsed;
|
||||
name = data.Name || 'default';
|
||||
endpoint = data.Endpoints?.docker?.Host || 'unknown';
|
||||
} catch { /* fallback to defaults */ }
|
||||
}
|
||||
|
||||
let isRootless = false;
|
||||
const infoResult = await smartshellInstance.execSilent(
|
||||
`docker info --format '{{json .SecurityOptions}}'`
|
||||
);
|
||||
if (infoResult.exitCode === 0 && infoResult.stdout) {
|
||||
isRootless = infoResult.stdout.includes('name=rootless');
|
||||
}
|
||||
|
||||
// Detect topology
|
||||
let topology: 'socket-mount' | 'dind' | 'local' = 'local';
|
||||
if (process.env.DOCKER_HOST && process.env.DOCKER_HOST.startsWith('tcp://')) {
|
||||
topology = 'dind';
|
||||
} else if (fs.existsSync('/.dockerenv')) {
|
||||
topology = 'socket-mount';
|
||||
}
|
||||
|
||||
this.contextInfo = { name, endpoint, isRootless, dockerHost: process.env.DOCKER_HOST, topology };
|
||||
return this.contextInfo;
|
||||
}
|
||||
|
||||
/** Logs context info prominently. */
|
||||
public logContextInfo(): void {
|
||||
if (!this.contextInfo) return;
|
||||
const { name, endpoint, isRootless, dockerHost, topology } = this.contextInfo;
|
||||
logger.log('info', '=== DOCKER CONTEXT ===');
|
||||
logger.log('info', `Context: ${name}`);
|
||||
logger.log('info', `Endpoint: ${endpoint}`);
|
||||
if (dockerHost) logger.log('info', `DOCKER_HOST: ${dockerHost}`);
|
||||
logger.log('info', `Rootless: ${isRootless ? 'yes' : 'no'}`);
|
||||
logger.log('info', `Topology: ${topology || 'local'}`);
|
||||
}
|
||||
|
||||
/** Emits rootless-specific warnings. */
|
||||
public logRootlessWarnings(): void {
|
||||
if (!this.contextInfo?.isRootless) return;
|
||||
logger.log('warn', '[rootless] network=host in buildx is namespaced by rootlesskit');
|
||||
logger.log('warn', '[rootless] Local registry may have localhost vs 127.0.0.1 resolution quirks');
|
||||
}
|
||||
|
||||
/** Returns context-aware builder name: tsdocker-builder-<context> */
|
||||
public getBuilderName(): string {
|
||||
const contextName = this.contextInfo?.name || 'default';
|
||||
const sanitized = contextName.replace(/[^a-zA-Z0-9_-]/g, '-');
|
||||
return `tsdocker-builder-${sanitized}`;
|
||||
}
|
||||
}
|
||||
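The `DockerContext` class above is meant to be driven by the build flow; a minimal usage sketch (not part of the diff, assumed to run inside an async ESM context with the file's own exports) could look like this:

```typescript
import { DockerContext } from './classes.dockercontext.js';

// Hypothetical caller: detect the active Docker context before starting a build.
const dockerContext = new DockerContext();
const info = await dockerContext.detect();           // { name, endpoint, isRootless, dockerHost, topology }
dockerContext.logContextInfo();                       // prints the "=== DOCKER CONTEXT ===" block
dockerContext.logRootlessWarnings();                  // warns only when rootless Docker was detected
const builderName = dockerContext.getBuilderName();   // e.g. "tsdocker-builder-default"
```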
@@ -1,8 +1,10 @@
|
||||
import * as plugins from './tsdocker.plugins.js';
|
||||
import * as paths from './tsdocker.paths.js';
|
||||
import { logger } from './tsdocker.logging.js';
|
||||
import { logger, formatDuration } from './tsdocker.logging.js';
|
||||
import { DockerRegistry } from './classes.dockerregistry.js';
|
||||
import type { IDockerfileOptions, ITsDockerConfig } from './interfaces/index.js';
|
||||
import { RegistryCopy } from './classes.registrycopy.js';
|
||||
import { TsDockerSession } from './classes.tsdockersession.js';
|
||||
import type { IDockerfileOptions, ITsDockerConfig, IBuildCommandOptions } from './interfaces/index.js';
|
||||
import type { TsDockerManager } from './classes.tsdockermanager.js';
|
||||
import * as fs from 'fs';
|
||||
|
||||
@@ -10,6 +12,15 @@ const smartshellInstance = new plugins.smartshell.Smartshell({
|
||||
executor: 'bash',
|
||||
});
|
||||
|
||||
/**
|
||||
* Extracts a platform string (e.g. "linux/amd64") from a buildx bracket prefix.
|
||||
* The prefix may be like "linux/amd64 ", "linux/amd64 stage-1 ", "stage-1 ", or "".
|
||||
*/
|
||||
function extractPlatform(prefix: string): string | null {
|
||||
const match = prefix.match(/linux\/\w+/);
|
||||
return match ? match[0] : null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Class Dockerfile represents a Dockerfile on disk
|
||||
*/
|
||||
@@ -26,8 +37,10 @@ export class Dockerfile {
|
||||
.map(entry => plugins.path.join(paths.cwd, entry.name));
|
||||
|
||||
const readDockerfilesArray: Dockerfile[] = [];
|
||||
logger.log('info', `found ${fileTree.length} Dockerfiles:`);
|
||||
console.log(fileTree);
|
||||
logger.log('info', `found ${fileTree.length} Dockerfile(s):`);
|
||||
for (const filePath of fileTree) {
|
||||
logger.log('info', ` ${plugins.path.basename(filePath)}`);
|
||||
}
|
||||
|
||||
for (const dockerfilePath of fileTree) {
|
||||
const myDockerfile = new Dockerfile(managerRef, {
|
||||
@@ -58,9 +71,14 @@ export class Dockerfile {
|
||||
const dependencies: Dockerfile[] = [];
|
||||
const baseImage = dockerfile.baseImage;
|
||||
|
||||
// Extract repo:version from baseImage for comparison with cleanTag
|
||||
// baseImage may include a registry prefix (e.g., "host.today/repo:version")
|
||||
// but cleanTag is just "repo:version", so we strip the registry prefix
|
||||
const baseImageKey = Dockerfile.extractRepoVersion(baseImage);
|
||||
|
||||
// Check if the baseImage is among the local Dockerfiles
|
||||
if (tagToDockerfile.has(baseImage)) {
|
||||
const baseDockerfile = tagToDockerfile.get(baseImage)!;
|
||||
if (tagToDockerfile.has(baseImageKey)) {
|
||||
const baseDockerfile = tagToDockerfile.get(baseImageKey)!;
|
||||
dependencies.push(baseDockerfile);
|
||||
dockerfile.localBaseImageDependent = true;
|
||||
dockerfile.localBaseDockerfile = baseDockerfile;
|
||||
@@ -116,8 +134,10 @@ export class Dockerfile {
|
||||
public static async mapDockerfiles(sortedDockerfileArray: Dockerfile[]): Promise<Dockerfile[]> {
|
||||
sortedDockerfileArray.forEach((dockerfileArg) => {
|
||||
if (dockerfileArg.localBaseImageDependent) {
|
||||
// Extract repo:version from baseImage for comparison with cleanTag
|
||||
const baseImageKey = Dockerfile.extractRepoVersion(dockerfileArg.baseImage);
|
||||
sortedDockerfileArray.forEach((dockfile2: Dockerfile) => {
|
||||
if (dockfile2.cleanTag === dockerfileArg.baseImage) {
|
||||
if (dockfile2.cleanTag === baseImageKey) {
|
||||
dockerfileArg.localBaseDockerfile = dockfile2;
|
||||
}
|
||||
});
|
||||
@@ -126,13 +146,221 @@ export class Dockerfile {
|
||||
return sortedDockerfileArray;
|
||||
}
|
||||
|
||||
/** Local registry is always needed — it's the canonical store for all built images. */
|
||||
public static needsLocalRegistry(
|
||||
_dockerfiles?: Dockerfile[],
|
||||
_options?: { platform?: string },
|
||||
): boolean {
|
||||
return true;
|
||||
}
|
||||
|
||||
/** Starts a persistent registry:2 container with session-unique port and name. */
|
||||
public static async startLocalRegistry(session: TsDockerSession, isRootless?: boolean): Promise<void> {
|
||||
const { registryPort, registryHost, registryContainerName, isCI, sessionId } = session.config;
|
||||
|
||||
// Ensure persistent storage directory exists — isolate per session in CI
|
||||
const registryDataDir = isCI
|
||||
? plugins.path.join(paths.cwd, '.nogit', 'docker-registry', sessionId)
|
||||
: plugins.path.join(paths.cwd, '.nogit', 'docker-registry');
|
||||
fs.mkdirSync(registryDataDir, { recursive: true });
|
||||
|
||||
await smartshellInstance.execSilent(
|
||||
`docker rm -f ${registryContainerName} 2>/dev/null || true`
|
||||
);
|
||||
|
||||
const runCmd = `docker run -d --name ${registryContainerName} -p ${registryPort}:5000 -v "${registryDataDir}:/var/lib/registry" registry:2`;
|
||||
let result = await smartshellInstance.execSilent(runCmd);
|
||||
|
||||
// Port retry: if port was stolen between allocation and docker run, reallocate once
|
||||
if (result.exitCode !== 0 && (result.stderr || result.stdout || '').includes('port is already allocated')) {
|
||||
const newPort = await TsDockerSession.allocatePort();
|
||||
logger.log('warn', `Port ${registryPort} taken, retrying with ${newPort}`);
|
||||
session.config.registryPort = newPort;
|
||||
session.config.registryHost = `localhost:${newPort}`;
|
||||
const retryCmd = `docker run -d --name ${registryContainerName} -p ${newPort}:5000 -v "${registryDataDir}:/var/lib/registry" registry:2`;
|
||||
result = await smartshellInstance.execSilent(retryCmd);
|
||||
}
|
||||
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Failed to start local registry: ${result.stderr || result.stdout}`);
|
||||
}
|
||||
// registry:2 starts near-instantly; brief wait for readiness
|
||||
await new Promise(resolve => setTimeout(resolve, 1000));
|
||||
logger.log('info', `Started local registry at ${session.config.registryHost} (container: ${registryContainerName})`);
|
||||
if (isRootless) {
|
||||
logger.log('warn', `[rootless] Registry on port ${session.config.registryPort} — if buildx cannot reach localhost, try 127.0.0.1`);
|
||||
}
|
||||
}
|
||||
|
||||
/** Stops and removes the session-specific local registry container. */
|
||||
public static async stopLocalRegistry(session: TsDockerSession): Promise<void> {
|
||||
await smartshellInstance.execSilent(
|
||||
`docker rm -f ${session.config.registryContainerName} 2>/dev/null || true`
|
||||
);
|
||||
logger.log('info', `Stopped local registry (${session.config.registryContainerName})`);
|
||||
}
|
||||
|
||||
/** Pushes a built image to the local registry for buildx consumption. */
|
||||
public static async pushToLocalRegistry(session: TsDockerSession, dockerfile: Dockerfile): Promise<void> {
|
||||
const registryTag = `${session.config.registryHost}/${dockerfile.buildTag}`;
|
||||
await smartshellInstance.execSilent(`docker tag ${dockerfile.buildTag} ${registryTag}`);
|
||||
const result = await smartshellInstance.execSilent(`docker push ${registryTag}`);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Failed to push to local registry: ${result.stderr || result.stdout}`);
|
||||
}
|
||||
dockerfile.localRegistryTag = registryTag;
|
||||
logger.log('info', `Pushed ${dockerfile.buildTag} to local registry as ${registryTag}`);
|
||||
}
|
||||
|
||||
/**
|
||||
* Groups topologically sorted Dockerfiles into dependency levels.
|
||||
* Level 0 = no local dependencies; level N = depends on something in level N-1.
|
||||
* Images within the same level are independent and can build in parallel.
|
||||
*/
|
||||
public static computeLevels(sortedDockerfiles: Dockerfile[]): Dockerfile[][] {
|
||||
const levelMap = new Map<Dockerfile, number>();
|
||||
for (const df of sortedDockerfiles) {
|
||||
if (!df.localBaseImageDependent || !df.localBaseDockerfile) {
|
||||
levelMap.set(df, 0);
|
||||
} else {
|
||||
const depLevel = levelMap.get(df.localBaseDockerfile) ?? 0;
|
||||
levelMap.set(df, depLevel + 1);
|
||||
}
|
||||
}
|
||||
const maxLevel = Math.max(...Array.from(levelMap.values()), 0);
|
||||
const levels: Dockerfile[][] = [];
|
||||
for (let l = 0; l <= maxLevel; l++) {
|
||||
levels.push(sortedDockerfiles.filter(df => levelMap.get(df) === l));
|
||||
}
|
||||
return levels;
|
||||
}
|
||||
|
||||
/**
|
||||
* Runs async tasks with bounded concurrency (worker-pool pattern).
|
||||
* Fast-fail: if any task throws, Promise.all rejects immediately.
|
||||
*/
|
||||
public static async runWithConcurrency<T>(
|
||||
tasks: (() => Promise<T>)[],
|
||||
concurrency: number,
|
||||
): Promise<T[]> {
|
||||
const results: T[] = new Array(tasks.length);
|
||||
let nextIndex = 0;
|
||||
async function worker(): Promise<void> {
|
||||
while (true) {
|
||||
const idx = nextIndex++;
|
||||
if (idx >= tasks.length) break;
|
||||
results[idx] = await tasks[idx]();
|
||||
}
|
||||
}
|
||||
const workers = Array.from(
|
||||
{ length: Math.min(concurrency, tasks.length) },
|
||||
() => worker(),
|
||||
);
|
||||
await Promise.all(workers);
|
||||
return results;
|
||||
}
|
||||
|
||||
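To make the interplay of `computeLevels` and `runWithConcurrency` concrete, here is a hedged sketch; the `base`, `app`, and `worker` Dockerfile instances are hypothetical and stand for an already topologically sorted array:

```typescript
// Suppose three Dockerfiles: base (no local deps), app (FROM base), worker (independent).
// computeLevels returns [[base, worker], [app]]: level 0 images have no local dependencies,
// level 1 images depend on something built in level 0.
const levels = Dockerfile.computeLevels([base, app, worker]);

// Each level becomes a batch of tasks; the next level only starts once the
// previous one has completed, while images inside a level build concurrently.
for (const level of levels) {
  const tasks = level.map((df) => () => df.build({ pull: true }));
  await Dockerfile.runWithConcurrency(tasks, 4);
}
```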
/**
|
||||
* Builds the corresponding real docker image for each Dockerfile class instance
|
||||
*/
|
||||
public static async buildDockerfiles(sortedArrayArg: Dockerfile[]): Promise<Dockerfile[]> {
|
||||
for (const dockerfileArg of sortedArrayArg) {
|
||||
await dockerfileArg.build();
|
||||
public static async buildDockerfiles(
|
||||
sortedArrayArg: Dockerfile[],
|
||||
session: TsDockerSession,
|
||||
options?: { platform?: string; timeout?: number; noCache?: boolean; pull?: boolean; verbose?: boolean; isRootless?: boolean; parallel?: boolean; parallelConcurrency?: number; onRegistryStarted?: () => Promise<void>; onBeforeRegistryStop?: () => Promise<void> },
|
||||
): Promise<Dockerfile[]> {
|
||||
const total = sortedArrayArg.length;
|
||||
const overallStart = Date.now();
|
||||
|
||||
await Dockerfile.startLocalRegistry(session, options?.isRootless);
|
||||
if (options?.onRegistryStarted) {
|
||||
await options.onRegistryStarted();
|
||||
}
|
||||
|
||||
try {
|
||||
if (options?.parallel) {
|
||||
// === PARALLEL MODE: build independent images concurrently within each level ===
|
||||
const concurrency = options.parallelConcurrency ?? 4;
|
||||
const levels = Dockerfile.computeLevels(sortedArrayArg);
|
||||
|
||||
logger.log('info', `Parallel build: ${levels.length} level(s), concurrency ${concurrency}`);
|
||||
for (let l = 0; l < levels.length; l++) {
|
||||
const level = levels[l];
|
||||
logger.log('info', ` Level ${l} (${level.length}): ${level.map(df => df.cleanTag).join(', ')}`);
|
||||
}
|
||||
|
||||
let built = 0;
|
||||
for (let l = 0; l < levels.length; l++) {
|
||||
const level = levels[l];
|
||||
logger.log('info', `--- Level ${l}: building ${level.length} image(s) in parallel ---`);
|
||||
|
||||
const tasks = level.map((df) => {
|
||||
const myIndex = ++built;
|
||||
return async () => {
|
||||
const progress = `(${myIndex}/${total})`;
|
||||
logger.log('info', `${progress} Building ${df.cleanTag}...`);
|
||||
const elapsed = await df.build(options);
|
||||
logger.log('ok', `${progress} Built ${df.cleanTag} in ${formatDuration(elapsed)}`);
|
||||
return df;
|
||||
};
|
||||
});
|
||||
|
||||
await Dockerfile.runWithConcurrency(tasks, concurrency);
|
||||
|
||||
// After the entire level completes, push all to local registry + tag for deps
|
||||
for (const df of level) {
|
||||
// Tag in host daemon for dependency resolution
|
||||
const dependentBaseImages = new Set<string>();
|
||||
for (const other of sortedArrayArg) {
|
||||
if (other.localBaseDockerfile === df && other.baseImage !== df.buildTag) {
|
||||
dependentBaseImages.add(other.baseImage);
|
||||
}
|
||||
}
|
||||
for (const fullTag of dependentBaseImages) {
|
||||
logger.log('info', `Tagging ${df.buildTag} as ${fullTag} for local dependency resolution`);
|
||||
await smartshellInstance.exec(`docker tag ${df.buildTag} ${fullTag}`);
|
||||
}
|
||||
// Push ALL images to local registry (skip if already pushed via buildx)
|
||||
if (!df.localRegistryTag) {
|
||||
await Dockerfile.pushToLocalRegistry(session, df);
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// === SEQUENTIAL MODE: build one at a time ===
|
||||
for (let i = 0; i < total; i++) {
|
||||
const dockerfileArg = sortedArrayArg[i];
|
||||
const progress = `(${i + 1}/${total})`;
|
||||
logger.log('info', `${progress} Building ${dockerfileArg.cleanTag}...`);
|
||||
|
||||
const elapsed = await dockerfileArg.build(options);
|
||||
logger.log('ok', `${progress} Built ${dockerfileArg.cleanTag} in ${formatDuration(elapsed)}`);
|
||||
|
||||
// Tag in host daemon for standard docker build compatibility
|
||||
const dependentBaseImages = new Set<string>();
|
||||
for (const other of sortedArrayArg) {
|
||||
if (other.localBaseDockerfile === dockerfileArg && other.baseImage !== dockerfileArg.buildTag) {
|
||||
dependentBaseImages.add(other.baseImage);
|
||||
}
|
||||
}
|
||||
for (const fullTag of dependentBaseImages) {
|
||||
logger.log('info', `Tagging ${dockerfileArg.buildTag} as ${fullTag} for local dependency resolution`);
|
||||
await smartshellInstance.exec(`docker tag ${dockerfileArg.buildTag} ${fullTag}`);
|
||||
}
|
||||
|
||||
// Push ALL images to local registry (skip if already pushed via buildx)
|
||||
if (!dockerfileArg.localRegistryTag) {
|
||||
await Dockerfile.pushToLocalRegistry(session, dockerfileArg);
|
||||
}
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
if (options?.onBeforeRegistryStop) {
|
||||
await options.onBeforeRegistryStop();
|
||||
}
|
||||
await Dockerfile.stopLocalRegistry(session);
|
||||
}
|
||||
|
||||
logger.log('info', `Total build time: ${formatDuration(Date.now() - overallStart)}`);
|
||||
return sortedArrayArg;
|
||||
}
|
||||
|
||||
@@ -140,9 +368,19 @@ export class Dockerfile {
|
||||
* Tests all Dockerfiles by calling Dockerfile.test()
|
||||
*/
|
||||
public static async testDockerfiles(sortedArrayArg: Dockerfile[]): Promise<Dockerfile[]> {
|
||||
for (const dockerfileArg of sortedArrayArg) {
|
||||
await dockerfileArg.test();
|
||||
const total = sortedArrayArg.length;
|
||||
const overallStart = Date.now();
|
||||
|
||||
for (let i = 0; i < total; i++) {
|
||||
const dockerfileArg = sortedArrayArg[i];
|
||||
const progress = `(${i + 1}/${total})`;
|
||||
logger.log('info', `${progress} Testing ${dockerfileArg.cleanTag}...`);
|
||||
|
||||
const elapsed = await dockerfileArg.test();
|
||||
logger.log('ok', `${progress} Tested ${dockerfileArg.cleanTag} in ${formatDuration(elapsed)}`);
|
||||
}
|
||||
|
||||
logger.log('info', `Total test time: ${formatDuration(Date.now() - overallStart)}`);
|
||||
return sortedArrayArg;
|
||||
}
|
||||
|
||||
@@ -231,6 +469,34 @@ export class Dockerfile {
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Extracts the repo:version part from a full image reference, stripping any registry prefix.
|
||||
* Examples:
|
||||
* "registry.example.com/repo:version" -> "repo:version"
|
||||
* "repo:version" -> "repo:version"
|
||||
* "host.today/ht-docker-node:npmci" -> "ht-docker-node:npmci"
|
||||
*/
|
||||
private static extractRepoVersion(imageRef: string): string {
|
||||
const parts = imageRef.split('/');
|
||||
if (parts.length === 1) {
|
||||
// No registry prefix: "repo:version"
|
||||
return imageRef;
|
||||
}
|
||||
|
||||
// Check if first part looks like a registry (contains '.' or ':' or is 'localhost')
|
||||
const firstPart = parts[0];
|
||||
const looksLikeRegistry =
|
||||
firstPart.includes('.') || firstPart.includes(':') || firstPart === 'localhost';
|
||||
|
||||
if (looksLikeRegistry) {
|
||||
// Strip registry: "registry.example.com/repo:version" -> "repo:version"
|
||||
return parts.slice(1).join('/');
|
||||
}
|
||||
|
||||
// No registry prefix, could be "org/repo:version"
|
||||
return imageRef;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the docker tag string for a given registry and repo
|
||||
*/
|
||||
@@ -282,6 +548,7 @@ export class Dockerfile {
|
||||
|
||||
// INSTANCE PROPERTIES
|
||||
public managerRef: TsDockerManager;
|
||||
public session?: TsDockerSession;
|
||||
public filePath!: string;
|
||||
public repo: string;
|
||||
public version: string;
|
||||
@@ -293,6 +560,7 @@ export class Dockerfile {
|
||||
public baseImage: string;
|
||||
public localBaseImageDependent: boolean;
|
||||
public localBaseDockerfile!: Dockerfile;
|
||||
public localRegistryTag?: string;
|
||||
|
||||
constructor(managerRefArg: TsDockerManager, options: IDockerfileOptions) {
|
||||
this.managerRef = managerRefArg;
|
||||
@@ -325,75 +593,193 @@ export class Dockerfile {
|
||||
}
|
||||
|
||||
/**
|
||||
* Builds the Dockerfile
|
||||
* Creates a line-by-line handler for Docker build output that logs
|
||||
* recognized layer/step lines in an emphasized format.
|
||||
*/
|
||||
public async build(): Promise<void> {
|
||||
logger.log('info', 'now building Dockerfile for ' + this.cleanTag);
|
||||
const buildArgsString = await Dockerfile.getDockerBuildArgs(this.managerRef);
|
||||
const config = this.managerRef.config;
|
||||
private createBuildOutputHandler(verbose: boolean): {
|
||||
handleChunk: (chunk: Buffer | string) => void;
|
||||
} {
|
||||
let buffer = '';
|
||||
const tag = this.cleanTag;
|
||||
|
||||
let buildCommand: string;
|
||||
|
||||
// Check if multi-platform build is needed
|
||||
if (config.platforms && config.platforms.length > 1) {
|
||||
// Multi-platform build using buildx
|
||||
const platformString = config.platforms.join(',');
|
||||
buildCommand = `docker buildx build --platform ${platformString} -t ${this.buildTag} -f ${this.filePath} ${buildArgsString} .`;
|
||||
|
||||
if (config.push) {
|
||||
buildCommand += ' --push';
|
||||
} else {
|
||||
buildCommand += ' --load';
|
||||
const handleLine = (line: string) => {
|
||||
// In verbose mode, write raw output prefixed with tag for identification
|
||||
if (verbose) {
|
||||
process.stdout.write(`[${tag}] ${line}\n`);
|
||||
}
|
||||
} else {
|
||||
// Standard build
|
||||
const versionLabel = this.managerRef.projectInfo?.npm?.version || 'unknown';
|
||||
buildCommand = `docker build --label="version=${versionLabel}" -t ${this.buildTag} -f ${this.filePath} ${buildArgsString} .`;
|
||||
}
|
||||
|
||||
const result = await smartshellInstance.exec(buildCommand);
|
||||
if (result.exitCode !== 0) {
|
||||
logger.log('error', `Build failed for ${this.cleanTag}`);
|
||||
console.log(result.stdout);
|
||||
throw new Error(`Build failed for ${this.cleanTag}`);
|
||||
}
|
||||
// Buildx step: #N [platform step/total] INSTRUCTION
|
||||
const bxStep = line.match(/^#\d+ \[([^\]]+?)(\d+\/\d+)\] (.+)/);
|
||||
if (bxStep) {
|
||||
const prefix = bxStep[1].trim();
|
||||
const step = bxStep[2];
|
||||
const instruction = bxStep[3];
|
||||
const platform = extractPlatform(prefix);
|
||||
const platStr = platform ? `${platform} ▸ ` : '';
|
||||
logger.log('note', `[${tag}] ${platStr}[${step}] ${instruction}`);
|
||||
return;
|
||||
}
|
||||
|
||||
logger.log('ok', `Built ${this.cleanTag}`);
|
||||
// Buildx CACHED: #N CACHED
|
||||
const bxCached = line.match(/^#(\d+) CACHED/);
|
||||
if (bxCached) {
|
||||
logger.log('note', `[${tag}] CACHED`);
|
||||
return;
|
||||
}
|
||||
|
||||
// Buildx DONE: #N DONE 12.3s
|
||||
const bxDone = line.match(/^#\d+ DONE (.+)/);
|
||||
if (bxDone) {
|
||||
const timing = bxDone[1];
|
||||
if (!timing.startsWith('0.0')) {
|
||||
logger.log('note', `[${tag}] DONE ${timing}`);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
// Buildx export phase: #N exporting ...
|
||||
const bxExport = line.match(/^#\d+ exporting (.+)/);
|
||||
if (bxExport) {
|
||||
logger.log('note', `[${tag}] exporting ${bxExport[1]}`);
|
||||
return;
|
||||
}
|
||||
|
||||
// Standard docker build: Step N/M : INSTRUCTION
|
||||
const stdStep = line.match(/^Step (\d+\/\d+) : (.+)/);
|
||||
if (stdStep) {
|
||||
logger.log('note', `[${tag}] Step ${stdStep[1]}: ${stdStep[2]}`);
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
return {
|
||||
handleChunk: (chunk: Buffer | string) => {
|
||||
buffer += chunk.toString();
|
||||
const lines = buffer.split('\n');
|
||||
buffer = lines.pop() || '';
|
||||
for (const line of lines) {
|
||||
const trimmed = line.replace(/\r$/, '').trim();
|
||||
if (trimmed) handleLine(trimmed);
|
||||
}
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
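An illustrative sketch of how the handler reacts to streamed output; the chunk contents are invented, and `handler` stands for the value returned by `createBuildOutputHandler(false)`:

```typescript
// The handler buffers partial lines across chunks and only emphasizes lines it recognizes:
// buildx steps ("#5 [linux/amd64 2/6] RUN npm ci"), "#N CACHED", "#N DONE <time>",
// "#N exporting ...", and classic "Step N/M : INSTRUCTION" lines; everything else is
// dropped unless verbose mode echoes every raw line prefixed with the image tag.
handler.handleChunk('#5 [linux/amd64 2/6] RUN npm ci\n#5 CACHED\n');
handler.handleChunk('#6 DONE 12.3s\nStep 3/7 : COPY . .\nunrecognized noise\n');
```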
/**
|
||||
* Pushes the Dockerfile to a registry
|
||||
* Builds the Dockerfile
|
||||
*/
|
||||
public async build(options?: { platform?: string; timeout?: number; noCache?: boolean; pull?: boolean; verbose?: boolean }): Promise<number> {
|
||||
const startTime = Date.now();
|
||||
const buildArgsString = await Dockerfile.getDockerBuildArgs(this.managerRef);
|
||||
const config = this.managerRef.config;
|
||||
const platformOverride = options?.platform;
|
||||
const timeout = options?.timeout;
|
||||
const noCacheFlag = options?.noCache ? ' --no-cache' : '';
|
||||
const pullFlag = options?.pull !== false ? ' --pull' : '';
|
||||
const verbose = options?.verbose ?? false;
|
||||
|
||||
let buildContextFlag = '';
|
||||
if (this.localBaseImageDependent && this.localBaseDockerfile) {
|
||||
const fromImage = this.baseImage;
|
||||
if (this.localBaseDockerfile.localRegistryTag) {
|
||||
// BuildKit pulls from the local registry (reachable via host network)
|
||||
const registryTag = this.localBaseDockerfile.localRegistryTag;
|
||||
buildContextFlag = ` --build-context "${fromImage}=docker-image://${registryTag}"`;
|
||||
logger.log('info', `Using local registry build context: ${fromImage} -> docker-image://${registryTag}`);
|
||||
}
|
||||
}
|
||||
|
||||
let buildCommand: string;
|
||||
const builderFlag = this.managerRef.currentBuilderName ? ` --builder ${this.managerRef.currentBuilderName}` : '';
|
||||
|
||||
if (platformOverride) {
|
||||
// Single platform override via buildx
|
||||
buildCommand = `docker buildx build${builderFlag} --progress=plain --platform ${platformOverride}${noCacheFlag}${pullFlag}${buildContextFlag} --load -t ${this.buildTag} -f ${this.filePath} ${buildArgsString} .`;
|
||||
logger.log('info', `Build: buildx --platform ${platformOverride} --load`);
|
||||
} else if (config.platforms && config.platforms.length > 1) {
|
||||
// Multi-platform build using buildx — always push to local registry
|
||||
const platformString = config.platforms.join(',');
|
||||
const registryHost = this.session?.config.registryHost || 'localhost:5234';
|
||||
const localTag = `${registryHost}/${this.buildTag}`;
|
||||
buildCommand = `docker buildx build${builderFlag} --progress=plain --platform ${platformString}${noCacheFlag}${pullFlag}${buildContextFlag} -t ${localTag} -f ${this.filePath} ${buildArgsString} --push .`;
|
||||
this.localRegistryTag = localTag;
|
||||
logger.log('info', `Build: buildx --platform ${platformString} --push to local registry`);
|
||||
} else {
|
||||
// Standard build
|
||||
const versionLabel = this.managerRef.projectInfo?.npm?.version || 'unknown';
|
||||
buildCommand = `docker build --progress=plain --label="version=${versionLabel}"${noCacheFlag}${pullFlag} -t ${this.buildTag} -f ${this.filePath} ${buildArgsString} .`;
|
||||
logger.log('info', 'Build: docker build (standard)');
|
||||
}
|
||||
|
||||
// Execute build with real-time layer logging
|
||||
const handler = this.createBuildOutputHandler(verbose);
|
||||
const streaming = await smartshellInstance.execStreamingSilent(buildCommand);
|
||||
|
||||
// Intercept output for layer logging
|
||||
streaming.childProcess.stdout?.on('data', handler.handleChunk);
|
||||
streaming.childProcess.stderr?.on('data', handler.handleChunk);
|
||||
|
||||
if (timeout) {
|
||||
const timeoutPromise = new Promise<never>((_, reject) => {
|
||||
setTimeout(() => {
|
||||
streaming.childProcess.kill();
|
||||
reject(new Error(`Build timed out after ${timeout}s for ${this.cleanTag}`));
|
||||
}, timeout * 1000);
|
||||
});
|
||||
const result = await Promise.race([streaming.finalPromise, timeoutPromise]);
|
||||
if (result.exitCode !== 0) {
|
||||
logger.log('error', `Build failed for ${this.cleanTag}`);
|
||||
throw new Error(`Build failed for ${this.cleanTag}`);
|
||||
}
|
||||
} else {
|
||||
const result = await streaming.finalPromise;
|
||||
if (result.exitCode !== 0) {
|
||||
logger.log('error', `Build failed for ${this.cleanTag}`);
|
||||
if (!verbose && result.stdout) {
|
||||
logger.log('error', `Build output:\n${result.stdout}`);
|
||||
}
|
||||
throw new Error(`Build failed for ${this.cleanTag}`);
|
||||
}
|
||||
}
|
||||
|
||||
return Date.now() - startTime;
|
||||
}
|
||||
|
||||
/**
|
||||
* Pushes the Dockerfile to a registry using OCI Distribution API copy
|
||||
* from the local registry to the remote registry.
|
||||
*/
|
||||
public async push(dockerRegistryArg: DockerRegistry, versionSuffix?: string): Promise<void> {
|
||||
this.pushTag = Dockerfile.getDockerTagString(
|
||||
this.managerRef,
|
||||
dockerRegistryArg.registryUrl,
|
||||
const destRepo = this.getDestRepo(dockerRegistryArg.registryUrl);
|
||||
const destTag = versionSuffix ? `${this.version}_${versionSuffix}` : this.version;
|
||||
const registryCopy = new RegistryCopy();
|
||||
const registryHost = this.session?.config.registryHost || 'localhost:5234';
|
||||
|
||||
this.pushTag = `${dockerRegistryArg.registryUrl}/${destRepo}:${destTag}`;
|
||||
logger.log('info', `Pushing ${this.pushTag} via OCI copy from local registry...`);
|
||||
|
||||
await registryCopy.copyImage(
|
||||
registryHost,
|
||||
this.repo,
|
||||
this.version,
|
||||
versionSuffix
|
||||
dockerRegistryArg.registryUrl,
|
||||
destRepo,
|
||||
destTag,
|
||||
{ username: dockerRegistryArg.username, password: dockerRegistryArg.password },
|
||||
);
|
||||
|
||||
await smartshellInstance.exec(`docker tag ${this.buildTag} ${this.pushTag}`);
|
||||
const pushResult = await smartshellInstance.exec(`docker push ${this.pushTag}`);
|
||||
|
||||
if (pushResult.exitCode !== 0) {
|
||||
logger.log('error', `Push failed for ${this.pushTag}`);
|
||||
throw new Error(`Push failed for ${this.pushTag}`);
|
||||
}
|
||||
|
||||
// Get image digest
|
||||
const inspectResult = await smartshellInstance.exec(
|
||||
`docker inspect --format="{{index .RepoDigests 0}}" ${this.pushTag}`
|
||||
);
|
||||
|
||||
if (inspectResult.exitCode === 0 && inspectResult.stdout.includes('@')) {
|
||||
const imageDigest = inspectResult.stdout.split('@')[1]?.trim();
|
||||
console.log(`The image ${this.pushTag} has digest ${imageDigest}`);
|
||||
}
|
||||
|
||||
logger.log('ok', `Pushed ${this.pushTag}`);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the destination repository for a given registry URL,
|
||||
* using registryRepoMap if configured, otherwise the default repo.
|
||||
*/
|
||||
private getDestRepo(registryUrl: string): string {
|
||||
const config = this.managerRef.config;
|
||||
return config.registryRepoMap?.[registryUrl] || this.repo;
|
||||
}
|
||||
|
||||
/**
|
||||
* Pulls the Dockerfile from a registry
|
||||
*/
|
||||
@@ -413,40 +799,46 @@ export class Dockerfile {
|
||||
}
|
||||
|
||||
/**
|
||||
* Tests the Dockerfile by running a test script if it exists
|
||||
* Tests the Dockerfile by running a test script if it exists.
|
||||
* For multi-platform builds, uses the local registry tag so Docker can auto-pull.
|
||||
*/
|
||||
public async test(): Promise<void> {
|
||||
public async test(): Promise<number> {
|
||||
const startTime = Date.now();
|
||||
const testDir = this.managerRef.config.testDir || plugins.path.join(paths.cwd, 'test');
|
||||
const testFile = plugins.path.join(testDir, 'test_' + this.version + '.sh');
|
||||
// Use local registry tag for multi-platform images (not in daemon), otherwise buildTag
|
||||
const imageRef = this.localRegistryTag || this.buildTag;
|
||||
|
||||
const sessionId = this.session?.config.sessionId || 'default';
|
||||
const testContainerName = `tsdocker_test_${sessionId}`;
|
||||
const testImageName = `tsdocker_test_image_${sessionId}`;
|
||||
|
||||
const testFileExists = fs.existsSync(testFile);
|
||||
|
||||
if (testFileExists) {
|
||||
logger.log('info', `Running tests for ${this.cleanTag}`);
|
||||
|
||||
// Run tests in container
|
||||
await smartshellInstance.exec(
|
||||
`docker run --name tsdocker_test_container --entrypoint="bash" ${this.buildTag} -c "mkdir /tsdocker_test"`
|
||||
`docker run --name ${testContainerName} --entrypoint="bash" ${imageRef} -c "mkdir /tsdocker_test"`
|
||||
);
|
||||
await smartshellInstance.exec(`docker cp ${testFile} tsdocker_test_container:/tsdocker_test/test.sh`);
|
||||
await smartshellInstance.exec(`docker commit tsdocker_test_container tsdocker_test_image`);
|
||||
await smartshellInstance.exec(`docker cp ${testFile} ${testContainerName}:/tsdocker_test/test.sh`);
|
||||
await smartshellInstance.exec(`docker commit ${testContainerName} ${testImageName}`);
|
||||
|
||||
const testResult = await smartshellInstance.exec(
|
||||
`docker run --entrypoint="bash" tsdocker_test_image -x /tsdocker_test/test.sh`
|
||||
`docker run --entrypoint="bash" ${testImageName} -x /tsdocker_test/test.sh`
|
||||
);
|
||||
|
||||
// Cleanup
|
||||
await smartshellInstance.exec(`docker rm tsdocker_test_container`);
|
||||
await smartshellInstance.exec(`docker rmi --force tsdocker_test_image`);
|
||||
await smartshellInstance.exec(`docker rm ${testContainerName}`);
|
||||
await smartshellInstance.exec(`docker rmi --force ${testImageName}`);
|
||||
|
||||
if (testResult.exitCode !== 0) {
|
||||
throw new Error(`Tests failed for ${this.cleanTag}`);
|
||||
}
|
||||
|
||||
logger.log('ok', `Tests passed for ${this.cleanTag}`);
|
||||
} else {
|
||||
logger.log('warn', `Skipping tests for ${this.cleanTag} because no test file was found at ${testFile}`);
|
||||
logger.log('warn', `Skipping tests for ${this.cleanTag} — no test file at ${testFile}`);
|
||||
}
|
||||
|
||||
return Date.now() - startTime;
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
ts/classes.globalconfig.ts (new file, 76 lines)
@@ -0,0 +1,76 @@
|
||||
import * as fs from 'fs';
|
||||
import * as plugins from './tsdocker.plugins.js';
|
||||
import { logger } from './tsdocker.logging.js';
|
||||
import type { IGlobalConfig, IRemoteBuilder } from './interfaces/index.js';
|
||||
|
||||
const CONFIG_DIR = plugins.path.join(
|
||||
process.env.HOME || process.env.USERPROFILE || '~',
|
||||
'.git.zone',
|
||||
'tsdocker',
|
||||
);
|
||||
const CONFIG_PATH = plugins.path.join(CONFIG_DIR, 'config.json');
|
||||
|
||||
const DEFAULT_CONFIG: IGlobalConfig = {
|
||||
remoteBuilders: [],
|
||||
};
|
||||
|
||||
export class GlobalConfig {
|
||||
static getConfigPath(): string {
|
||||
return CONFIG_PATH;
|
||||
}
|
||||
|
||||
static load(): IGlobalConfig {
|
||||
try {
|
||||
const raw = fs.readFileSync(CONFIG_PATH, 'utf-8');
|
||||
const parsed = JSON.parse(raw);
|
||||
return {
|
||||
...DEFAULT_CONFIG,
|
||||
...parsed,
|
||||
};
|
||||
} catch {
|
||||
return { ...DEFAULT_CONFIG };
|
||||
}
|
||||
}
|
||||
|
||||
static save(config: IGlobalConfig): void {
|
||||
fs.mkdirSync(CONFIG_DIR, { recursive: true });
|
||||
fs.writeFileSync(CONFIG_PATH, JSON.stringify(config, null, 2) + '\n', 'utf-8');
|
||||
}
|
||||
|
||||
static addBuilder(builder: IRemoteBuilder): void {
|
||||
const config = GlobalConfig.load();
|
||||
const existing = config.remoteBuilders.findIndex((b) => b.name === builder.name);
|
||||
if (existing >= 0) {
|
||||
config.remoteBuilders[existing] = builder;
|
||||
logger.log('info', `Updated remote builder: ${builder.name}`);
|
||||
} else {
|
||||
config.remoteBuilders.push(builder);
|
||||
logger.log('info', `Added remote builder: ${builder.name}`);
|
||||
}
|
||||
GlobalConfig.save(config);
|
||||
}
|
||||
|
||||
static removeBuilder(name: string): void {
|
||||
const config = GlobalConfig.load();
|
||||
const before = config.remoteBuilders.length;
|
||||
config.remoteBuilders = config.remoteBuilders.filter((b) => b.name !== name);
|
||||
if (config.remoteBuilders.length < before) {
|
||||
logger.log('info', `Removed remote builder: ${name}`);
|
||||
} else {
|
||||
logger.log('warn', `Remote builder not found: ${name}`);
|
||||
}
|
||||
GlobalConfig.save(config);
|
||||
}
|
||||
|
||||
static getBuilders(): IRemoteBuilder[] {
|
||||
return GlobalConfig.load().remoteBuilders;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns remote builders that match any of the requested platforms
|
||||
*/
|
||||
static getBuildersForPlatforms(platforms: string[]): IRemoteBuilder[] {
|
||||
const builders = GlobalConfig.getBuilders();
|
||||
return builders.filter((b) => platforms.includes(b.platform));
|
||||
}
|
||||
}
|
||||
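As a rough illustration of how this global config is meant to be used (the builder name, SSH host, and platform values below are hypothetical):

```typescript
import { GlobalConfig } from './classes.globalconfig.js';

// Registers (or updates) a remote builder; persisted to ~/.git.zone/tsdocker/config.json.
GlobalConfig.addBuilder({
  name: 'arm-builder',              // hypothetical builder name
  host: 'builder@arm.example.com',  // hypothetical SSH target
  platform: 'linux/arm64',
});

// Later, the build flow picks only the builders whose platform matches the requested ones.
const matching = GlobalConfig.getBuildersForPlatforms(['linux/arm64', 'linux/amd64']);
```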
ts/classes.registrycopy.ts (new file, 567 lines)
@@ -0,0 +1,567 @@
|
||||
import * as fs from 'fs';
|
||||
import * as os from 'os';
|
||||
import * as path from 'path';
|
||||
import { logger } from './tsdocker.logging.js';
|
||||
|
||||
interface IRegistryCredentials {
|
||||
username: string;
|
||||
password: string;
|
||||
}
|
||||
|
||||
interface ITokenCache {
|
||||
[scope: string]: { token: string; expiry: number };
|
||||
}
|
||||
|
||||
/**
|
||||
* OCI Distribution API client for copying images between registries.
|
||||
* Supports manifest lists (multi-arch) and single-platform manifests.
|
||||
* Uses native fetch (Node 18+).
|
||||
*/
|
||||
export class RegistryCopy {
|
||||
private tokenCache: ITokenCache = {};
|
||||
|
||||
/**
|
||||
* Wraps fetch() with timeout (via AbortSignal) and retry with exponential backoff.
|
||||
* Retries on network errors and 5xx; does NOT retry on 4xx client errors.
|
||||
* On 401, clears the token cache entry so the next attempt re-authenticates.
|
||||
*/
|
||||
private async fetchWithRetry(
|
||||
url: string,
|
||||
options: RequestInit & { duplex?: string },
|
||||
timeoutMs: number = 300_000,
|
||||
maxRetries: number = 6,
|
||||
): Promise<Response> {
|
||||
const method = (options.method || 'GET').toUpperCase();
|
||||
let lastError: Error | null = null;
|
||||
for (let attempt = 1; attempt <= maxRetries; attempt++) {
|
||||
try {
|
||||
if (attempt > 1) {
|
||||
logger.log('info', `Retry ${attempt}/${maxRetries} for ${method} ${url}`);
|
||||
}
|
||||
const resp = await fetch(url, {
|
||||
...options,
|
||||
signal: AbortSignal.timeout(timeoutMs),
|
||||
});
|
||||
// Retry on 5xx server errors (but not 4xx)
|
||||
if (resp.status >= 500 && attempt < maxRetries) {
|
||||
const delay = 1000 * Math.pow(2, attempt - 1);
|
||||
logger.log('warn', `${method} ${url} returned ${resp.status}, retrying in ${delay}ms (attempt ${attempt}/${maxRetries})...`);
|
||||
await new Promise(r => setTimeout(r, delay));
|
||||
continue;
|
||||
}
|
||||
if (resp.status >= 500) {
|
||||
logger.log('error', `${method} ${url} returned ${resp.status} after ${maxRetries} attempts, giving up`);
|
||||
}
|
||||
return resp;
|
||||
} catch (err) {
|
||||
lastError = err as Error;
|
||||
if (attempt < maxRetries) {
|
||||
const delay = 1000 * Math.pow(2, attempt - 1);
|
||||
logger.log('warn', `${method} ${url} failed (attempt ${attempt}/${maxRetries}): ${lastError.message}, retrying in ${delay}ms...`);
|
||||
await new Promise(r => setTimeout(r, delay));
|
||||
} else {
|
||||
logger.log('error', `${method} ${url} failed after ${maxRetries} attempts: ${lastError.message}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
throw lastError!;
|
||||
}
|
||||
|
||||
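A quick sketch of the resulting backoff schedule for the default `maxRetries = 6` (purely illustrative arithmetic):

```typescript
// delay before the next attempt = 1000 * 2^(attempt - 1) ms, so after failed
// attempts 1..5 the waits are 1s, 2s, 4s, 8s, 16s; the 6th attempt is the last one.
for (let attempt = 1; attempt < 6; attempt++) {
  console.log(`after attempt ${attempt}: wait ${1000 * Math.pow(2, attempt - 1)} ms`);
}
```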
/**
|
||||
* Reads Docker credentials from ~/.docker/config.json for a given registry.
|
||||
* Supports base64-encoded "auth" field in the config.
|
||||
*/
|
||||
public static getDockerConfigCredentials(registryUrl: string): IRegistryCredentials | null {
|
||||
try {
|
||||
const configPath = path.join(os.homedir(), '.docker', 'config.json');
|
||||
if (!fs.existsSync(configPath)) return null;
|
||||
|
||||
const config = JSON.parse(fs.readFileSync(configPath, 'utf-8'));
|
||||
const auths = config.auths || {};
|
||||
|
||||
// Try exact match first, then common variations
|
||||
const keys = [
|
||||
registryUrl,
|
||||
`https://${registryUrl}`,
|
||||
`http://${registryUrl}`,
|
||||
];
|
||||
|
||||
// Docker Hub special cases
|
||||
if (registryUrl === 'docker.io' || registryUrl === 'registry-1.docker.io') {
|
||||
keys.push(
|
||||
'https://index.docker.io/v1/',
|
||||
'https://index.docker.io/v2/',
|
||||
'index.docker.io',
|
||||
'docker.io',
|
||||
'registry-1.docker.io',
|
||||
);
|
||||
}
|
||||
|
||||
for (const key of keys) {
|
||||
if (auths[key]?.auth) {
|
||||
const decoded = Buffer.from(auths[key].auth, 'base64').toString('utf-8');
|
||||
const colonIndex = decoded.indexOf(':');
|
||||
if (colonIndex > 0) {
|
||||
return {
|
||||
username: decoded.substring(0, colonIndex),
|
||||
password: decoded.substring(colonIndex + 1),
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return null;
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the API base URL for a registry.
|
||||
* Docker Hub uses registry-1.docker.io as API endpoint.
|
||||
*/
|
||||
private getRegistryApiBase(registry: string): string {
|
||||
if (registry === 'docker.io' || registry === 'index.docker.io') {
|
||||
return 'https://registry-1.docker.io';
|
||||
}
|
||||
// Local registries (localhost) use HTTP
|
||||
if (registry.startsWith('localhost') || registry.startsWith('127.0.0.1')) {
|
||||
return `http://${registry}`;
|
||||
}
|
||||
return `https://${registry}`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Obtains a Bearer token for registry operations.
|
||||
* Follows the standard Docker auth flow:
|
||||
* GET /v2/ → 401 with Www-Authenticate → request token
|
||||
*/
|
||||
private async getToken(
|
||||
registry: string,
|
||||
repo: string,
|
||||
actions: string,
|
||||
credentials?: IRegistryCredentials | null,
|
||||
): Promise<string | null> {
|
||||
const scope = `repository:${repo}:${actions}`;
|
||||
const cached = this.tokenCache[`${registry}/${scope}`];
|
||||
if (cached && cached.expiry > Date.now()) {
|
||||
return cached.token;
|
||||
}
|
||||
|
||||
const apiBase = this.getRegistryApiBase(registry);
|
||||
|
||||
// Local registries typically don't need auth
|
||||
if (registry.startsWith('localhost') || registry.startsWith('127.0.0.1')) {
|
||||
return null;
|
||||
}
|
||||
|
||||
try {
|
||||
const checkResp = await this.fetchWithRetry(`${apiBase}/v2/`, { method: 'GET' }, 30_000);
|
||||
if (checkResp.ok) return null; // No auth needed
|
||||
|
||||
const wwwAuth = checkResp.headers.get('www-authenticate') || '';
|
||||
const realmMatch = wwwAuth.match(/realm="([^"]+)"/);
|
||||
const serviceMatch = wwwAuth.match(/service="([^"]+)"/);
|
||||
|
||||
if (!realmMatch) return null;
|
||||
|
||||
const realm = realmMatch[1];
|
||||
const service = serviceMatch ? serviceMatch[1] : '';
|
||||
|
||||
const tokenUrl = new URL(realm);
|
||||
tokenUrl.searchParams.set('scope', scope);
|
||||
if (service) tokenUrl.searchParams.set('service', service);
|
||||
|
||||
const headers: Record<string, string> = {};
|
||||
const creds = credentials || RegistryCopy.getDockerConfigCredentials(registry);
|
||||
if (creds) {
|
||||
headers['Authorization'] = 'Basic ' + Buffer.from(`${creds.username}:${creds.password}`).toString('base64');
|
||||
}
|
||||
|
||||
const tokenResp = await this.fetchWithRetry(tokenUrl.toString(), { headers }, 30_000);
|
||||
if (!tokenResp.ok) {
|
||||
const body = await tokenResp.text();
|
||||
throw new Error(`Token request failed (${tokenResp.status}): ${body}`);
|
||||
}
|
||||
|
||||
const tokenData = await tokenResp.json() as any;
|
||||
const token = tokenData.token || tokenData.access_token;
|
||||
|
||||
if (token) {
|
||||
// Cache for 5 minutes (conservative)
|
||||
this.tokenCache[`${registry}/${scope}`] = {
|
||||
token,
|
||||
expiry: Date.now() + 5 * 60 * 1000,
|
||||
};
|
||||
}
|
||||
|
||||
return token;
|
||||
} catch (err) {
|
||||
logger.log('warn', `Auth for ${registry}: ${(err as Error).message}`);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
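For readers unfamiliar with the token flow this method implements, a hedged walk-through; the realm, service, and scope values are hypothetical:

```typescript
// 1. GET https://registry.example.com/v2/ returns 401 with a header such as:
//    WWW-Authenticate: Bearer realm="https://auth.example.com/token",service="registry.example.com"
// 2. realm and service are extracted and a token is requested for the repository scope:
const realm = 'https://auth.example.com/token';           // hypothetical
const tokenUrl = new URL(realm);
tokenUrl.searchParams.set('scope', 'repository:org/myapp:pull,push');
tokenUrl.searchParams.set('service', 'registry.example.com');
// 3. GET tokenUrl (optionally with Basic credentials) returns { token }, which is cached
//    for five minutes and sent as "Authorization: Bearer <token>" on registry requests.
```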
/**
|
||||
* Makes an authenticated request to a registry.
|
||||
*/
|
||||
private async registryFetch(
|
||||
registry: string,
|
||||
path: string,
|
||||
options: {
|
||||
method?: string;
|
||||
headers?: Record<string, string>;
|
||||
body?: Buffer | ReadableStream | null;
|
||||
repo?: string;
|
||||
actions?: string;
|
||||
credentials?: IRegistryCredentials | null;
|
||||
} = {},
|
||||
): Promise<Response> {
|
||||
const apiBase = this.getRegistryApiBase(registry);
|
||||
const method = options.method || 'GET';
|
||||
const headers: Record<string, string> = { ...(options.headers || {}) };
|
||||
|
||||
const repo = options.repo || '';
|
||||
const actions = options.actions || 'pull';
|
||||
const token = await this.getToken(registry, repo, actions, options.credentials);
|
||||
|
||||
if (token) {
|
||||
headers['Authorization'] = `Bearer ${token}`;
|
||||
}
|
||||
|
||||
const url = `${apiBase}${path}`;
|
||||
const fetchOptions: any = { method, headers };
|
||||
if (options.body) {
|
||||
fetchOptions.body = options.body;
|
||||
fetchOptions.duplex = 'half'; // Required for streaming body in Node
|
||||
}
|
||||
|
||||
const resp = await this.fetchWithRetry(url, fetchOptions, 300_000);
|
||||
|
||||
// Token expired — clear cache so next call re-authenticates
|
||||
if (resp.status === 401 && token) {
|
||||
const cacheKey = `${registry}/${`repository:${repo}:${actions}`}`;
|
||||
logger.log('warn', `Got 401 for ${registry}${path} — clearing cached token for ${cacheKey}`);
|
||||
delete this.tokenCache[cacheKey];
|
||||
}
|
||||
|
||||
return resp;
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets a manifest from a registry (supports both manifest lists and single manifests).
|
||||
*/
|
||||
private async getManifest(
|
||||
registry: string,
|
||||
repo: string,
|
||||
reference: string,
|
||||
credentials?: IRegistryCredentials | null,
|
||||
): Promise<{ contentType: string; body: any; digest: string; raw: Buffer }> {
|
||||
const accept = [
|
||||
'application/vnd.oci.image.index.v1+json',
|
||||
'application/vnd.docker.distribution.manifest.list.v2+json',
|
||||
'application/vnd.oci.image.manifest.v1+json',
|
||||
'application/vnd.docker.distribution.manifest.v2+json',
|
||||
].join(', ');
|
||||
|
||||
const resp = await this.registryFetch(registry, `/v2/${repo}/manifests/${reference}`, {
|
||||
headers: { 'Accept': accept },
|
||||
repo,
|
||||
actions: 'pull',
|
||||
credentials,
|
||||
});
|
||||
|
||||
if (!resp.ok) {
|
||||
const body = await resp.text();
|
||||
throw new Error(`Failed to get manifest ${registry}/${repo}:${reference} (${resp.status}): ${body}`);
|
||||
}
|
||||
|
||||
const raw = Buffer.from(await resp.arrayBuffer());
|
||||
const contentType = resp.headers.get('content-type') || '';
|
||||
const digest = resp.headers.get('docker-content-digest') || this.computeDigest(raw);
|
||||
const body = JSON.parse(raw.toString('utf-8'));
|
||||
|
||||
return { contentType, body, digest, raw };
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks if a blob exists in the destination registry.
|
||||
*/
|
||||
private async blobExists(
|
||||
registry: string,
|
||||
repo: string,
|
||||
digest: string,
|
||||
credentials?: IRegistryCredentials | null,
|
||||
): Promise<boolean> {
|
||||
const resp = await this.registryFetch(registry, `/v2/${repo}/blobs/${digest}`, {
|
||||
method: 'HEAD',
|
||||
repo,
|
||||
actions: 'pull,push',
|
||||
credentials,
|
||||
});
|
||||
return resp.ok;
|
||||
}
|
||||
|
||||
/**
|
||||
* Copies a single blob from source to destination registry.
|
||||
* Uses monolithic upload (POST initiate + PUT complete).
|
||||
*/
|
||||
private async copyBlob(
|
||||
srcRegistry: string,
|
||||
srcRepo: string,
|
||||
destRegistry: string,
|
||||
destRepo: string,
|
||||
digest: string,
|
||||
srcCredentials?: IRegistryCredentials | null,
|
||||
destCredentials?: IRegistryCredentials | null,
|
||||
): Promise<void> {
|
||||
// Check if blob already exists at destination
|
||||
const exists = await this.blobExists(destRegistry, destRepo, digest, destCredentials);
|
||||
if (exists) {
|
||||
logger.log('info', ` Blob ${digest.substring(0, 19)}... already exists, skipping`);
|
||||
return;
|
||||
}
|
||||
|
||||
// Download blob from source
|
||||
const getResp = await this.registryFetch(srcRegistry, `/v2/${srcRepo}/blobs/${digest}`, {
|
||||
repo: srcRepo,
|
||||
actions: 'pull',
|
||||
credentials: srcCredentials,
|
||||
});
|
||||
|
||||
if (!getResp.ok) {
|
||||
throw new Error(`Failed to get blob ${digest} from ${srcRegistry}/${srcRepo}: ${getResp.status}`);
|
||||
}
|
||||
|
||||
const blobData = Buffer.from(await getResp.arrayBuffer());
|
||||
const blobSize = blobData.length;
|
||||
|
||||
// Initiate upload at destination
|
||||
const postResp = await this.registryFetch(destRegistry, `/v2/${destRepo}/blobs/uploads/`, {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Length': '0' },
|
||||
repo: destRepo,
|
||||
actions: 'pull,push',
|
||||
credentials: destCredentials,
|
||||
});
|
||||
|
||||
if (!postResp.ok && postResp.status !== 202) {
|
||||
const body = await postResp.text();
|
||||
throw new Error(`Failed to initiate upload at ${destRegistry}/${destRepo}: ${postResp.status} ${body}`);
|
||||
}
|
||||
|
||||
// Get upload URL from Location header
|
||||
let uploadUrl = postResp.headers.get('location') || '';
|
||||
if (!uploadUrl) {
|
||||
throw new Error(`No upload location returned from ${destRegistry}/${destRepo}`);
|
||||
}
|
||||
|
||||
// Make upload URL absolute if relative
|
||||
if (uploadUrl.startsWith('/')) {
|
||||
const apiBase = this.getRegistryApiBase(destRegistry);
|
||||
uploadUrl = `${apiBase}${uploadUrl}`;
|
||||
}
|
||||
|
||||
// Complete upload with PUT (monolithic)
|
||||
const separator = uploadUrl.includes('?') ? '&' : '?';
|
||||
const putUrl = `${uploadUrl}${separator}digest=${encodeURIComponent(digest)}`;
|
||||
|
||||
// For PUT to the upload URL, we need auth
|
||||
const token = await this.getToken(destRegistry, destRepo, 'pull,push', destCredentials);
|
||||
const putHeaders: Record<string, string> = {
|
||||
'Content-Type': 'application/octet-stream',
|
||||
'Content-Length': String(blobSize),
|
||||
};
|
||||
if (token) {
|
||||
putHeaders['Authorization'] = `Bearer ${token}`;
|
||||
}
|
||||
|
||||
const putResp = await this.fetchWithRetry(putUrl, {
|
||||
method: 'PUT',
|
||||
headers: putHeaders,
|
||||
body: blobData,
|
||||
}, 300_000);
|
||||
|
||||
if (!putResp.ok) {
|
||||
const body = await putResp.text();
|
||||
throw new Error(`Failed to upload blob ${digest} to ${destRegistry}/${destRepo}: ${putResp.status} ${body}`);
|
||||
}
|
||||
|
||||
const sizeStr = blobSize > 1048576
|
||||
? `${(blobSize / 1048576).toFixed(1)} MB`
|
||||
: `${(blobSize / 1024).toFixed(1)} KB`;
|
||||
logger.log('info', ` Copied blob ${digest.substring(0, 19)}... (${sizeStr})`);
|
||||
}
|
||||
|
||||
/**
|
||||
* Pushes a manifest to a registry.
|
||||
*/
|
||||
private async putManifest(
|
||||
registry: string,
|
||||
repo: string,
|
||||
reference: string,
|
||||
manifest: Buffer,
|
||||
contentType: string,
|
||||
credentials?: IRegistryCredentials | null,
|
||||
): Promise<string> {
|
||||
const resp = await this.registryFetch(registry, `/v2/${repo}/manifests/${reference}`, {
|
||||
method: 'PUT',
|
||||
headers: {
|
||||
'Content-Type': contentType,
|
||||
'Content-Length': String(manifest.length),
|
||||
},
|
||||
body: manifest,
|
||||
repo,
|
||||
actions: 'pull,push',
|
||||
credentials,
|
||||
});
|
||||
|
||||
if (!resp.ok) {
|
||||
const body = await resp.text();
|
||||
throw new Error(`Failed to put manifest ${registry}/${repo}:${reference} (${resp.status}): ${body}`);
|
||||
}
|
||||
|
||||
const digest = resp.headers.get('docker-content-digest') || this.computeDigest(manifest);
|
||||
return digest;
|
||||
}
|
||||
|
||||
/**
|
||||
* Copies a single-platform manifest and all its blobs from source to destination.
|
||||
*/
|
||||
private async copySingleManifest(
|
||||
srcRegistry: string,
|
||||
srcRepo: string,
|
||||
destRegistry: string,
|
||||
destRepo: string,
|
||||
manifestDigest: string,
|
||||
srcCredentials?: IRegistryCredentials | null,
|
||||
destCredentials?: IRegistryCredentials | null,
|
||||
): Promise<void> {
|
||||
// Get the platform manifest
|
||||
const { body: manifest, contentType, raw } = await this.getManifest(
|
||||
srcRegistry, srcRepo, manifestDigest, srcCredentials,
|
||||
);
|
||||
|
||||
// Copy config blob
|
||||
if (manifest.config?.digest) {
|
||||
logger.log('info', ` Copying config blob...`);
|
||||
await this.copyBlob(
|
||||
srcRegistry, srcRepo, destRegistry, destRepo,
|
||||
manifest.config.digest, srcCredentials, destCredentials,
|
||||
);
|
||||
}
|
||||
|
||||
// Copy layer blobs
|
||||
const layers = manifest.layers || [];
|
||||
for (let i = 0; i < layers.length; i++) {
|
||||
const layer = layers[i];
|
||||
logger.log('info', ` Copying layer ${i + 1}/${layers.length}...`);
|
||||
await this.copyBlob(
|
||||
srcRegistry, srcRepo, destRegistry, destRepo,
|
||||
layer.digest, srcCredentials, destCredentials,
|
||||
);
|
||||
}
|
||||
|
||||
// Push the platform manifest by digest
|
||||
await this.putManifest(
|
||||
destRegistry, destRepo, manifestDigest, raw, contentType, destCredentials,
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Copies a complete image (single or multi-arch) from source to destination registry.
|
||||
*
|
||||
* @param srcRegistry - Source registry host (e.g., "localhost:5234")
|
||||
* @param srcRepo - Source repository (e.g., "myapp")
|
||||
* @param srcTag - Source tag (e.g., "v1.0.0")
|
||||
* @param destRegistry - Destination registry host (e.g., "registry.gitlab.com")
|
||||
* @param destRepo - Destination repository (e.g., "org/myapp")
|
||||
* @param destTag - Destination tag (e.g., "v1.0.0" or "v1.0.0_arm64")
|
||||
* @param credentials - Optional credentials for destination registry
|
||||
*/
|
||||
public async copyImage(
|
||||
srcRegistry: string,
|
||||
srcRepo: string,
|
||||
srcTag: string,
|
||||
destRegistry: string,
|
||||
destRepo: string,
|
||||
destTag: string,
|
||||
credentials?: IRegistryCredentials | null,
|
||||
): Promise<void> {
|
||||
logger.log('info', `Copying ${srcRegistry}/${srcRepo}:${srcTag} -> ${destRegistry}/${destRepo}:${destTag}`);
|
||||
|
||||
// Source is always the local registry (no credentials needed)
|
||||
const srcCredentials: IRegistryCredentials | null = null;
|
||||
const destCredentials = credentials || RegistryCopy.getDockerConfigCredentials(destRegistry);
|
||||
|
||||
// Get the top-level manifest
|
||||
const topManifest = await this.getManifest(srcRegistry, srcRepo, srcTag, srcCredentials);
|
||||
const { body, contentType, raw } = topManifest;
|
||||
|
||||
const isManifestList =
|
||||
contentType.includes('manifest.list') ||
|
||||
contentType.includes('image.index') ||
|
||||
body.manifests !== undefined;
|
||||
|
||||
if (isManifestList) {
|
||||
// Multi-arch: copy each platform manifest + blobs, then push the manifest list
|
||||
const platforms = (body.manifests || []) as any[];
|
||||
logger.log('info', `Multi-arch manifest with ${platforms.length} platform(s)`);
|
||||
|
||||
for (const platformEntry of platforms) {
|
||||
const platDesc = platformEntry.platform
|
||||
? `${platformEntry.platform.os}/${platformEntry.platform.architecture}`
|
||||
: platformEntry.digest;
|
||||
logger.log('info', `Copying platform: ${platDesc}`);
|
||||
|
||||
await this.copySingleManifest(
|
||||
srcRegistry, srcRepo, destRegistry, destRepo,
|
||||
platformEntry.digest, srcCredentials, destCredentials,
|
||||
);
|
||||
}
|
||||
|
||||
// Push the manifest list/index with the destination tag
|
||||
const digest = await this.putManifest(
|
||||
destRegistry, destRepo, destTag, raw, contentType, destCredentials,
|
||||
);
|
||||
logger.log('ok', `Pushed manifest list to ${destRegistry}/${destRepo}:${destTag} (${digest.substring(0, 19)}...)`);
|
||||
} else {
|
||||
// Single-platform manifest: copy blobs + push manifest
|
||||
logger.log('info', 'Single-platform manifest');
|
||||
|
||||
// Copy config blob
|
||||
if (body.config?.digest) {
|
||||
logger.log('info', ' Copying config blob...');
|
||||
await this.copyBlob(
|
||||
srcRegistry, srcRepo, destRegistry, destRepo,
|
||||
body.config.digest, srcCredentials, destCredentials,
|
||||
);
|
||||
}
|
||||
|
||||
// Copy layer blobs
|
||||
const layers = body.layers || [];
|
||||
for (let i = 0; i < layers.length; i++) {
|
||||
logger.log('info', ` Copying layer ${i + 1}/${layers.length}...`);
|
||||
await this.copyBlob(
|
||||
srcRegistry, srcRepo, destRegistry, destRepo,
|
||||
layers[i].digest, srcCredentials, destCredentials,
|
||||
);
|
||||
}
|
||||
|
||||
// Push the manifest with the destination tag
|
||||
const digest = await this.putManifest(
|
||||
destRegistry, destRepo, destTag, raw, contentType, destCredentials,
|
||||
);
|
||||
logger.log('ok', `Pushed manifest to ${destRegistry}/${destRepo}:${destTag} (${digest.substring(0, 19)}...)`);
|
||||
}
|
||||
}
|
||||
|
||||
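A usage sketch mirroring how `push()` above drives this method; the registry hosts, repository names, and credentials are placeholders:

```typescript
const registryCopy = new RegistryCopy();

// Copies myapp:v1.0.0 from the local staging registry to a remote registry.
// Multi-arch images are copied platform by platform before the index is pushed.
await registryCopy.copyImage(
  'localhost:5234',           // srcRegistry: the session's local registry
  'myapp', 'v1.0.0',          // srcRepo, srcTag (hypothetical)
  'registry.example.com',     // destRegistry (hypothetical)
  'org/myapp', 'v1.0.0',      // destRepo, destTag
  { username: 'ci-user', password: 'secret' }, // optional destination credentials
);
```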
/**
|
||||
* Computes sha256 digest of a buffer.
|
||||
*/
|
||||
private computeDigest(data: Buffer): string {
|
||||
const crypto = require('crypto');
|
||||
const hash = crypto.createHash('sha256').update(data).digest('hex');
|
||||
return `sha256:${hash}`;
|
||||
}
|
||||
}
|
||||
ts/classes.sshtunnel.ts (new file, 77 lines)
@@ -0,0 +1,77 @@
import * as plugins from './tsdocker.plugins.js';
import { logger } from './tsdocker.logging.js';
import type { IRemoteBuilder } from './interfaces/index.js';

const smartshellInstance = new plugins.smartshell.Smartshell({
  executor: 'bash',
});

/**
 * Manages SSH reverse tunnels for remote builder nodes.
 * Opens tunnels so that the local staging registry (localhost:<port>)
 * is accessible as localhost:<port> on each remote machine.
 */
export class SshTunnelManager {
  private tunnelPids: number[] = [];

  /**
   * Opens a reverse SSH tunnel to make localPort accessible on the remote machine.
   * ssh -f -N -o StrictHostKeyChecking=no -o ExitOnForwardFailure=yes
   *     -R <localPort>:localhost:<localPort> [-i keyPath] user@host
   */
  async openTunnel(builder: IRemoteBuilder, localPort: number): Promise<void> {
    const keyOpt = builder.sshKeyPath ? `-i ${builder.sshKeyPath} ` : '';
    const cmd = [
      'ssh -f -N',
      '-o StrictHostKeyChecking=no',
      '-o ExitOnForwardFailure=yes',
      `-R ${localPort}:localhost:${localPort}`,
      `${keyOpt}${builder.host}`,
    ].join(' ');

    logger.log('info', `Opening SSH tunnel to ${builder.host} for port ${localPort}...`);
    const result = await smartshellInstance.exec(cmd);

    if (result.exitCode !== 0) {
      throw new Error(
        `Failed to open SSH tunnel to ${builder.host}: ${result.stderr || 'unknown error'}`
      );
    }

    // Find the PID of the tunnel process we just started
    const pidResult = await smartshellInstance.exec(
      `pgrep -f "ssh.*-R ${localPort}:localhost:${localPort}.*${builder.host}" | tail -1`
    );
    if (pidResult.exitCode === 0 && pidResult.stdout.trim()) {
      const pid = parseInt(pidResult.stdout.trim(), 10);
      if (!isNaN(pid)) {
        this.tunnelPids.push(pid);
        logger.log('ok', `SSH tunnel to ${builder.host} established (PID ${pid})`);
      }
    }
  }

  /**
   * Opens tunnels for all provided remote builders
   */
  async openTunnels(builders: IRemoteBuilder[], localPort: number): Promise<void> {
    for (const builder of builders) {
      await this.openTunnel(builder, localPort);
    }
  }

  /**
   * Closes all tunnel processes
   */
  async closeAll(): Promise<void> {
    for (const pid of this.tunnelPids) {
      try {
        process.kill(pid, 'SIGTERM');
        logger.log('info', `Closed SSH tunnel (PID ${pid})`);
      } catch {
        // Process may have already exited
      }
    }
    this.tunnelPids = [];
  }
}
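How this class is meant to be driven, as a minimal usage sketch: the `runBuildWithTunnels` wrapper is hypothetical, but `SshTunnelManager` and `IRemoteBuilder` come from the diff above. Tunnels open before the build so remote buildx nodes can push to the local staging registry, and are always torn down in `finally`.

```typescript
import { SshTunnelManager } from './classes.sshtunnel.js';
import type { IRemoteBuilder } from './interfaces/index.js';

async function runBuildWithTunnels(
  builders: IRemoteBuilder[],
  registryPort: number,
  build: () => Promise<void>,
): Promise<void> {
  const tunnels = new SshTunnelManager();
  await tunnels.openTunnels(builders, registryPort);
  try {
    await build(); // remote nodes now reach localhost:<registryPort>
  } finally {
    await tunnels.closeAll(); // SIGTERM each tracked ssh PID
  }
}
```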
108  ts/classes.tsdockercache.ts  Normal file
@@ -0,0 +1,108 @@
import * as crypto from 'crypto';
import * as fs from 'fs';
import * as path from 'path';
import * as plugins from './tsdocker.plugins.js';
import * as paths from './tsdocker.paths.js';
import { logger } from './tsdocker.logging.js';
import type { ICacheData, ICacheEntry } from './interfaces/index.js';

const smartshellInstance = new plugins.smartshell.Smartshell({
  executor: 'bash',
});

/**
 * Manages content-hash-based build caching for Dockerfiles.
 * Cache is stored in .nogit/tsdocker_support.json.
 */
export class TsDockerCache {
  private cacheFilePath: string;
  private data: ICacheData;

  constructor() {
    this.cacheFilePath = path.join(paths.cwd, '.nogit', 'tsdocker_support.json');
    this.data = { version: 1, entries: {} };
  }

  /**
   * Loads cache data from disk. Falls back to empty cache on missing/corrupt file.
   */
  public load(): void {
    try {
      const raw = fs.readFileSync(this.cacheFilePath, 'utf-8');
      const parsed = JSON.parse(raw);
      if (parsed && parsed.version === 1 && parsed.entries) {
        this.data = parsed;
      } else {
        logger.log('warn', '[cache] Cache file has unexpected format, starting fresh');
        this.data = { version: 1, entries: {} };
      }
    } catch {
      // Missing or corrupt file — start fresh
      this.data = { version: 1, entries: {} };
    }
  }

  /**
   * Saves cache data to disk. Creates .nogit directory if needed.
   */
  public save(): void {
    const dir = path.dirname(this.cacheFilePath);
    fs.mkdirSync(dir, { recursive: true });
    fs.writeFileSync(this.cacheFilePath, JSON.stringify(this.data, null, 2), 'utf-8');
  }

  /**
   * Computes SHA-256 hash of Dockerfile content.
   */
  public computeContentHash(content: string): string {
    return crypto.createHash('sha256').update(content).digest('hex');
  }

  /**
   * Checks whether a build can be skipped for the given Dockerfile.
   * Logs detailed diagnostics and returns true if the build should be skipped.
   */
  public async shouldSkipBuild(cleanTag: string, content: string): Promise<boolean> {
    const contentHash = this.computeContentHash(content);
    const entry = this.data.entries[cleanTag];

    if (!entry) {
      logger.log('info', `[cache] ${cleanTag}: no cached entry, will build`);
      return false;
    }

    const hashMatch = entry.contentHash === contentHash;
    logger.log('info', `[cache] ${cleanTag}: hash ${hashMatch ? 'matches' : 'changed'}`);

    if (!hashMatch) {
      logger.log('info', `[cache] ${cleanTag}: content changed, will build`);
      return false;
    }

    // Hash matches — verify the image still exists locally
    const inspectResult = await smartshellInstance.exec(
      `docker image inspect ${entry.imageId} > /dev/null 2>&1`
    );
    const available = inspectResult.exitCode === 0;

    if (available) {
      logger.log('info', `[cache] ${cleanTag}: cache hit, skipping build`);
      return true;
    }

    logger.log('info', `[cache] ${cleanTag}: image no longer available, will build`);
    return false;
  }

  /**
   * Records a successful build in the cache.
   */
  public recordBuild(cleanTag: string, content: string, imageId: string, buildTag: string): void {
    this.data.entries[cleanTag] = {
      contentHash: this.computeContentHash(content),
      imageId,
      buildTag,
      timestamp: Date.now(),
    };
  }
}
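The intended lifecycle, as a minimal sketch: load, check, build if needed, record, save. The `dockerfile` parameter shape is assumed to match the `Dockerfile` class used elsewhere in this diff.

```typescript
import { TsDockerCache } from './classes.tsdockercache.js';

async function buildIfChanged(dockerfile: {
  cleanTag: string;
  content: string;
  buildTag: string;
  build: () => Promise<void>;
  getId: () => Promise<string>;
}): Promise<void> {
  const cache = new TsDockerCache();
  cache.load(); // read .nogit/tsdocker_support.json (or start fresh)

  if (await cache.shouldSkipBuild(dockerfile.cleanTag, dockerfile.content)) {
    return; // content unchanged and the image is still present locally
  }

  await dockerfile.build();
  cache.recordBuild(
    dockerfile.cleanTag,
    dockerfile.content,
    await dockerfile.getId(),
    dockerfile.buildTag,
  );
  cache.save(); // persist for the next run
}
```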
@@ -1,10 +1,16 @@
import * as plugins from './tsdocker.plugins.js';
import * as paths from './tsdocker.paths.js';
import { logger } from './tsdocker.logging.js';
import { logger, formatDuration } from './tsdocker.logging.js';
import { Dockerfile } from './classes.dockerfile.js';
import { DockerRegistry } from './classes.dockerregistry.js';
import { RegistryStorage } from './classes.registrystorage.js';
import type { ITsDockerConfig } from './interfaces/index.js';
import { TsDockerCache } from './classes.tsdockercache.js';
import { DockerContext } from './classes.dockercontext.js';
import { TsDockerSession } from './classes.tsdockersession.js';
import { RegistryCopy } from './classes.registrycopy.js';
import { GlobalConfig } from './classes.globalconfig.js';
import { SshTunnelManager } from './classes.sshtunnel.js';
import type { ITsDockerConfig, IBuildCommandOptions, IRemoteBuilder } from './interfaces/index.js';

const smartshellInstance = new plugins.smartshell.Smartshell({
  executor: 'bash',
@@ -17,17 +23,31 @@ export class TsDockerManager {
  public registryStorage: RegistryStorage;
  public config: ITsDockerConfig;
  public projectInfo: any;
  public dockerContext: DockerContext;
  public session!: TsDockerSession;
  public currentBuilderName?: string;
  private dockerfiles: Dockerfile[] = [];
  private activeRemoteBuilders: IRemoteBuilder[] = [];
  private sshTunnelManager?: SshTunnelManager;

  constructor(config: ITsDockerConfig) {
    this.config = config;
    this.registryStorage = new RegistryStorage();
    this.dockerContext = new DockerContext();
  }

  /**
   * Prepares the manager by loading project info and registries
   */
  public async prepare(): Promise<void> {
  public async prepare(contextArg?: string): Promise<void> {
    // Detect Docker context
    if (contextArg) {
      this.dockerContext.setContext(contextArg);
    }
    await this.dockerContext.detect();
    this.dockerContext.logContextInfo();
    this.dockerContext.logRootlessWarnings();

    // Load project info
    try {
      const projectinfoInstance = new plugins.projectinfo.ProjectInfo(paths.cwd);
@@ -62,9 +82,28 @@ export class TsDockerManager {
          }
        }
      }

      // Fallback: check ~/.docker/config.json if env vars didn't provide credentials
      if (!this.registryStorage.getRegistryByUrl(registryUrl)) {
        const dockerConfigCreds = RegistryCopy.getDockerConfigCredentials(registryUrl);
        if (dockerConfigCreds) {
          const registry = new DockerRegistry({
            registryUrl,
            username: dockerConfigCreds.username,
            password: dockerConfigCreds.password,
          });
          this.registryStorage.addRegistry(registry);
          logger.log('info', `Loaded credentials for ${registryUrl} from ~/.docker/config.json`);
        } else {
          logger.log('warn', `No credentials found for ${registryUrl} (checked env vars and ~/.docker/config.json)`);
        }
      }
    }

    // Create session identity (unique ports, names for CI concurrency)
    this.session = await TsDockerSession.create();

    logger.log('info', `Prepared TsDockerManager with ${this.registryStorage.getAllRegistries().length} registries`);
  }
@@ -86,13 +125,39 @@ export class TsDockerManager {
    this.dockerfiles = await Dockerfile.readDockerfiles(this);
    this.dockerfiles = await Dockerfile.sortDockerfiles(this.dockerfiles);
    this.dockerfiles = await Dockerfile.mapDockerfiles(this.dockerfiles);
    // Inject session into each Dockerfile
    for (const df of this.dockerfiles) {
      df.session = this.session;
    }
    return this.dockerfiles;
  }

  /**
   * Builds all discovered Dockerfiles in dependency order
   * Filters discovered Dockerfiles by name patterns (glob-style).
   * Mutates this.dockerfiles in place.
   */
  public async build(): Promise<Dockerfile[]> {
  public filterDockerfiles(patterns: string[]): void {
    const matched = this.dockerfiles.filter((df) => {
      const basename = plugins.path.basename(df.filePath);
      return patterns.some((pattern) => {
        if (pattern.includes('*') || pattern.includes('?')) {
          const regexStr = '^' + pattern.replace(/\*/g, '.*').replace(/\?/g, '.') + '$';
          return new RegExp(regexStr).test(basename);
        }
        return basename === pattern;
      });
    });
    if (matched.length === 0) {
      logger.log('warn', `No Dockerfiles matched patterns: ${patterns.join(', ')}`);
    }
    this.dockerfiles = matched;
  }
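The glob matching above, extracted for illustration: `*` becomes `.*` and `?` becomes `.`, anchored at both ends. Note that other regex metacharacters (for example `.`) are not escaped by this conversion, so a literal dot in a pattern also matches any character.

```typescript
function globMatch(pattern: string, basename: string): boolean {
  if (pattern.includes('*') || pattern.includes('?')) {
    const regexStr = '^' + pattern.replace(/\*/g, '.*').replace(/\?/g, '.') + '$';
    return new RegExp(regexStr).test(basename);
  }
  // No wildcard characters: exact match only.
  return basename === pattern;
}

console.log(globMatch('Dockerfile_*', 'Dockerfile_base')); // true
console.log(globMatch('Dockerfile_?', 'Dockerfile_a'));    // true
console.log(globMatch('Dockerfile_*', 'Dockerfile'));      // false
```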
  /**
   * Builds discovered Dockerfiles in dependency order.
   * When options.patterns is provided, only matching Dockerfiles (and their dependencies) are built.
   */
  public async build(options?: IBuildCommandOptions): Promise<Dockerfile[]> {
    if (this.dockerfiles.length === 0) {
      await this.discoverDockerfiles();
    }
@@ -102,38 +167,338 @@ export class TsDockerManager {
      return [];
    }

    // Determine which Dockerfiles to build
    let toBuild = this.dockerfiles;

    if (options?.patterns && options.patterns.length > 0) {
      // Filter to matching Dockerfiles
      const matched = this.dockerfiles.filter((df) => {
        const basename = plugins.path.basename(df.filePath);
        return options.patterns!.some((pattern) => {
          if (pattern.includes('*') || pattern.includes('?')) {
            // Convert glob pattern to regex
            const regexStr = '^' + pattern.replace(/\*/g, '.*').replace(/\?/g, '.') + '$';
            return new RegExp(regexStr).test(basename);
          }
          return basename === pattern;
        });
      });

      if (matched.length === 0) {
        logger.log('warn', `No Dockerfiles matched patterns: ${options.patterns.join(', ')}`);
        return [];
      }

      // Resolve dependency chain and preserve topological order
      toBuild = this.resolveWithDependencies(matched, this.dockerfiles);
      logger.log('info', `Matched ${matched.length} Dockerfile(s), building ${toBuild.length} (including dependencies)`);
    }

    // Check if buildx is needed
    if (this.config.platforms && this.config.platforms.length > 1) {
    const useBuildx = !!(options?.platform || (this.config.platforms && this.config.platforms.length > 1));
    if (useBuildx) {
      await this.ensureBuildx();
    }

    logger.log('info', `Building ${this.dockerfiles.length} Dockerfiles...`);
    await Dockerfile.buildDockerfiles(this.dockerfiles);
    logger.log('info', '');
    logger.log('info', '=== BUILD PHASE ===');

    if (useBuildx) {
      const platforms = options?.platform || this.config.platforms!.join(', ');
      logger.log('info', `Build mode: buildx multi-platform [${platforms}]`);
    } else {
      logger.log('info', 'Build mode: standard docker build');
    }

    const localDeps = toBuild.filter(df => df.localBaseImageDependent);
    if (localDeps.length > 0) {
      logger.log('info', `Local dependencies: ${localDeps.map(df => `${df.cleanTag} -> ${df.localBaseDockerfile?.cleanTag}`).join(', ')}`);
    }

    if (options?.noCache) {
      logger.log('info', 'Cache: disabled (--no-cache)');
    }

    if (options?.parallel) {
      const concurrency = options.parallelConcurrency ?? 4;
      const levels = Dockerfile.computeLevels(toBuild);
      logger.log('info', `Parallel build: ${levels.length} level(s), concurrency ${concurrency}`);
      for (let l = 0; l < levels.length; l++) {
        const level = levels[l];
        logger.log('info', `  Level ${l} (${level.length}): ${level.map(df => df.cleanTag).join(', ')}`);
      }
    }

    logger.log('info', `Building ${toBuild.length} Dockerfile(s)...`);
    if (options?.cached) {
      // === CACHED MODE: skip builds for unchanged Dockerfiles ===
      logger.log('info', '(cached mode active)');
      const cache = new TsDockerCache();
      cache.load();

      const total = toBuild.length;
      const overallStart = Date.now();
      await Dockerfile.startLocalRegistry(this.session, this.dockerContext.contextInfo?.isRootless);
      await this.openRemoteTunnels();

      try {
        if (options?.parallel) {
          // === PARALLEL CACHED MODE ===
          const concurrency = options.parallelConcurrency ?? 4;
          const levels = Dockerfile.computeLevels(toBuild);

          let built = 0;
          for (let l = 0; l < levels.length; l++) {
            const level = levels[l];
            logger.log('info', `--- Level ${l}: building ${level.length} image(s) in parallel ---`);

            const tasks = level.map((df) => {
              const myIndex = ++built;
              return async () => {
                const progress = `(${myIndex}/${total})`;
                const skip = await cache.shouldSkipBuild(df.cleanTag, df.content);

                if (skip) {
                  logger.log('ok', `${progress} Skipped ${df.cleanTag} (cached)`);
                } else {
                  logger.log('info', `${progress} Building ${df.cleanTag}...`);
                  const elapsed = await df.build({
                    platform: options?.platform,
                    timeout: options?.timeout,
                    noCache: options?.noCache,
                    pull: options?.pull,
                    verbose: options?.verbose,
                  });
                  logger.log('ok', `${progress} Built ${df.cleanTag} in ${formatDuration(elapsed)}`);
                  const imageId = await df.getId();
                  cache.recordBuild(df.cleanTag, df.content, imageId, df.buildTag);
                }
                return df;
              };
            });

            await Dockerfile.runWithConcurrency(tasks, concurrency);

            // After the entire level completes, push all to local registry + tag for deps
            for (const df of level) {
              const dependentBaseImages = new Set<string>();
              for (const other of toBuild) {
                if (other.localBaseDockerfile === df && other.baseImage !== df.buildTag) {
                  dependentBaseImages.add(other.baseImage);
                }
              }
              for (const fullTag of dependentBaseImages) {
                logger.log('info', `Tagging ${df.buildTag} as ${fullTag} for local dependency resolution`);
                await smartshellInstance.exec(`docker tag ${df.buildTag} ${fullTag}`);
              }
              // Push ALL images to local registry (skip if already pushed via buildx)
              if (!df.localRegistryTag) {
                await Dockerfile.pushToLocalRegistry(this.session, df);
              }
            }
          }
        } else {
          // === SEQUENTIAL CACHED MODE ===
          for (let i = 0; i < total; i++) {
            const dockerfileArg = toBuild[i];
            const progress = `(${i + 1}/${total})`;
            const skip = await cache.shouldSkipBuild(dockerfileArg.cleanTag, dockerfileArg.content);

            if (skip) {
              logger.log('ok', `${progress} Skipped ${dockerfileArg.cleanTag} (cached)`);
            } else {
              logger.log('info', `${progress} Building ${dockerfileArg.cleanTag}...`);
              const elapsed = await dockerfileArg.build({
                platform: options?.platform,
                timeout: options?.timeout,
                noCache: options?.noCache,
                pull: options?.pull,
                verbose: options?.verbose,
              });
              logger.log('ok', `${progress} Built ${dockerfileArg.cleanTag} in ${formatDuration(elapsed)}`);
              const imageId = await dockerfileArg.getId();
              cache.recordBuild(dockerfileArg.cleanTag, dockerfileArg.content, imageId, dockerfileArg.buildTag);
            }

            // Tag for dependents IMMEDIATELY (not after all builds)
            const dependentBaseImages = new Set<string>();
            for (const other of toBuild) {
              if (other.localBaseDockerfile === dockerfileArg && other.baseImage !== dockerfileArg.buildTag) {
                dependentBaseImages.add(other.baseImage);
              }
            }
            for (const fullTag of dependentBaseImages) {
              logger.log('info', `Tagging ${dockerfileArg.buildTag} as ${fullTag} for local dependency resolution`);
              await smartshellInstance.exec(`docker tag ${dockerfileArg.buildTag} ${fullTag}`);
            }

            // Push ALL images to local registry (skip if already pushed via buildx)
            if (!dockerfileArg.localRegistryTag) {
              await Dockerfile.pushToLocalRegistry(this.session, dockerfileArg);
            }
          }
        }
      } finally {
        await this.closeRemoteTunnels();
        await Dockerfile.stopLocalRegistry(this.session);
      }

      logger.log('info', `Total build time: ${formatDuration(Date.now() - overallStart)}`);
      cache.save();
    } else {
      // === STANDARD MODE: build all via static helper ===
      await Dockerfile.buildDockerfiles(toBuild, this.session, {
        platform: options?.platform,
        timeout: options?.timeout,
        noCache: options?.noCache,
        pull: options?.pull,
        verbose: options?.verbose,
        isRootless: this.dockerContext.contextInfo?.isRootless,
        parallel: options?.parallel,
        parallelConcurrency: options?.parallelConcurrency,
        onRegistryStarted: () => this.openRemoteTunnels(),
        onBeforeRegistryStop: () => this.closeRemoteTunnels(),
      });
    }

    logger.log('success', 'All Dockerfiles built successfully');

    return this.dockerfiles;
    return toBuild;
  }
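`Dockerfile.runWithConcurrency` is not shown in this diff; a worker-pool runner along these lines would satisfy the call site above (run all tasks of one dependency level, at most `concurrency` at a time). A sketch under that assumption:

```typescript
async function runWithConcurrency<T>(
  tasks: Array<() => Promise<T>>,
  concurrency: number,
): Promise<T[]> {
  const results: T[] = new Array(tasks.length);
  let next = 0;

  // Each worker pulls the next unstarted task until the queue is drained.
  const worker = async () => {
    while (next < tasks.length) {
      const index = next++;
      results[index] = await tasks[index]();
    }
  };

  await Promise.all(
    Array.from({ length: Math.min(concurrency, tasks.length) }, worker),
  );
  return results;
}
```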
  /**
   * Ensures Docker buildx is set up for multi-architecture builds
   * Resolves a set of target Dockerfiles to include all their local base image dependencies,
   * preserving the original topological build order.
   */
  private resolveWithDependencies(targets: Dockerfile[], allSorted: Dockerfile[]): Dockerfile[] {
    const needed = new Set<Dockerfile>();
    const addWithDeps = (df: Dockerfile) => {
      if (needed.has(df)) return;
      needed.add(df);
      if (df.localBaseImageDependent && df.localBaseDockerfile) {
        addWithDeps(df.localBaseDockerfile);
      }
    };
    for (const df of targets) addWithDeps(df);
    return allSorted.filter((df) => needed.has(df));
  }
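A simplified, self-contained illustration of the same resolution (the `Node` type and tags are hypothetical; the real method operates on `Dockerfile` instances):

```typescript
interface Node { name: string; dep?: Node; }

function resolveWithDeps(targets: Node[], allSorted: Node[]): Node[] {
  const needed = new Set<Node>();
  const add = (n: Node) => {
    if (needed.has(n)) return;
    needed.add(n);
    if (n.dep) add(n.dep);
  };
  targets.forEach(add);
  // Filtering the pre-sorted list keeps topological order intact.
  return allSorted.filter((n) => needed.has(n));
}

const base = { name: 'Dockerfile_base' };
const app = { name: 'Dockerfile_app', dep: base };
const docs = { name: 'Dockerfile_docs' };
console.log(resolveWithDeps([app], [base, app, docs]).map((n) => n.name));
// -> [ 'Dockerfile_base', 'Dockerfile_app' ]
```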
  /**
   * Ensures Docker buildx is set up for multi-architecture builds.
   * When remote builders are configured in the global config, creates a multi-node
   * builder with native nodes instead of relying on QEMU emulation.
   */
  private async ensureBuildx(): Promise<void> {
    logger.log('info', 'Setting up Docker buildx for multi-platform builds...');
    const builderName = this.dockerContext.getBuilderName() + (this.session?.config.builderSuffix || '');
    const platforms = this.config.platforms?.join(', ') || 'default';
    logger.log('info', `Setting up Docker buildx [${platforms}]...`);
    logger.log('info', `Builder: ${builderName}`);

    // Check if a buildx builder exists
    const inspectResult = await smartshellInstance.exec('docker buildx inspect tsdocker-builder 2>/dev/null');
    // Check for remote builders matching our target platforms
    const requestedPlatforms = this.config.platforms || ['linux/amd64'];
    const remoteBuilders = GlobalConfig.getBuildersForPlatforms(requestedPlatforms);

    if (inspectResult.exitCode !== 0) {
      // Create a new buildx builder
      logger.log('info', 'Creating new buildx builder...');
      await smartshellInstance.exec('docker buildx create --name tsdocker-builder --use');
      await smartshellInstance.exec('docker buildx inspect --bootstrap');
    if (remoteBuilders.length > 0) {
      await this.ensureBuildxWithRemoteNodes(builderName, requestedPlatforms, remoteBuilders);
    } else {
      // Use existing builder
      await smartshellInstance.exec('docker buildx use tsdocker-builder');
      await this.ensureBuildxLocal(builderName);
    }

    logger.log('ok', 'Docker buildx ready');
    this.currentBuilderName = builderName;
    logger.log('ok', `Docker buildx ready (builder: ${builderName}, platforms: ${platforms})`);
  }
  /**
   * Creates a multi-node buildx builder with local + remote SSH nodes.
   */
  private async ensureBuildxWithRemoteNodes(
    builderName: string,
    requestedPlatforms: string[],
    remoteBuilders: IRemoteBuilder[],
  ): Promise<void> {
    const remotePlatforms = new Set(remoteBuilders.map((b) => b.platform));
    const localPlatforms = requestedPlatforms.filter((p) => !remotePlatforms.has(p));

    logger.log('info', `Remote builders: ${remoteBuilders.map((b) => `${b.name} (${b.platform} @ ${b.host})`).join(', ')}`);
    if (localPlatforms.length > 0) {
      logger.log('info', `Local platforms: ${localPlatforms.join(', ')}`);
    }

    // Always recreate the builder to ensure correct node topology
    await smartshellInstance.execSilent(`docker buildx rm ${builderName} 2>/dev/null || true`);

    // Create the local node
    const localPlatformFlag = localPlatforms.length > 0 ? ` --platform ${localPlatforms.join(',')}` : '';
    await smartshellInstance.exec(
      `docker buildx create --name ${builderName} --driver docker-container --driver-opt network=host${localPlatformFlag}`
    );

    // Append remote nodes
    for (const builder of remoteBuilders) {
      logger.log('info', `Appending remote node: ${builder.name} (${builder.platform}) via ssh://${builder.host}`);
      const appendResult = await smartshellInstance.exec(
        `docker buildx create --append --name ${builderName} --driver docker-container --driver-opt network=host --platform ${builder.platform} --node ${builder.name} ssh://${builder.host}`
      );
      if (appendResult.exitCode !== 0) {
        throw new Error(`Failed to append remote builder ${builder.name}: ${appendResult.stderr}`);
      }
    }

    // Bootstrap all nodes
    await smartshellInstance.exec(`docker buildx inspect --builder ${builderName} --bootstrap`);

    // Store active remote builders for SSH tunnel setup during build
    this.activeRemoteBuilders = remoteBuilders;
  }
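For a concrete config (platforms `['linux/amd64', 'linux/arm64']` with one remote arm64 builder), the method above would issue roughly the command sequence sketched below. The builder name is hypothetical; the host reuses the example value from the `IRemoteBuilder` docs in this diff.

```typescript
const builderName = 'tsdocker-a1b2c3d4'; // context builder name + session suffix
const commands = [
  `docker buildx rm ${builderName} 2>/dev/null || true`,
  // Local node carries the platforms no remote builder covers.
  `docker buildx create --name ${builderName} --driver docker-container --driver-opt network=host --platform linux/amd64`,
  // One --append per remote builder, connected over SSH.
  `docker buildx create --append --name ${builderName} --driver docker-container --driver-opt network=host --platform linux/arm64 --node arm64-builder ssh://armbuilder@192.168.190.216`,
  `docker buildx inspect --builder ${builderName} --bootstrap`,
];
commands.forEach((c) => console.log(c));
```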
  /**
   * Creates a single-node local buildx builder (original behavior, uses QEMU for cross-platform).
   */
  private async ensureBuildxLocal(builderName: string): Promise<void> {
    const inspectResult = await smartshellInstance.exec(`docker buildx inspect ${builderName} 2>/dev/null`);

    if (inspectResult.exitCode !== 0) {
      logger.log('info', 'Creating new buildx builder with host network...');
      await smartshellInstance.exec(
        `docker buildx create --name ${builderName} --driver docker-container --driver-opt network=host`
      );
      await smartshellInstance.exec(`docker buildx inspect --builder ${builderName} --bootstrap`);
    } else {
      const inspectOutput = inspectResult.stdout || '';
      if (!inspectOutput.includes('network=host')) {
        logger.log('info', 'Recreating buildx builder with host network (migration)...');
        await smartshellInstance.exec(`docker buildx rm ${builderName} 2>/dev/null`);
        await smartshellInstance.exec(
          `docker buildx create --name ${builderName} --driver docker-container --driver-opt network=host`
        );
        await smartshellInstance.exec(`docker buildx inspect --builder ${builderName} --bootstrap`);
      }
    }
    this.activeRemoteBuilders = [];
  }
  /**
   * Opens SSH reverse tunnels for remote builders so they can reach the local registry.
   */
  private async openRemoteTunnels(): Promise<void> {
    if (this.activeRemoteBuilders.length === 0) return;

    this.sshTunnelManager = new SshTunnelManager();
    await this.sshTunnelManager.openTunnels(
      this.activeRemoteBuilders,
      this.session.config.registryPort,
    );
  }

  /**
   * Closes any active SSH tunnels.
   */
  private async closeRemoteTunnels(): Promise<void> {
    if (this.sshTunnelManager) {
      await this.sshTunnelManager.closeAll();
      this.sshTunnelManager = undefined;
    }
  }

  /**
@@ -172,11 +537,17 @@ export class TsDockerManager {
      return;
    }

    // Push each Dockerfile to each registry
    for (const dockerfile of this.dockerfiles) {
      for (const registry of registriesToPush) {
        await dockerfile.push(registry);
    // Start local registry (reads from persistent .nogit/docker-registry/)
    await Dockerfile.startLocalRegistry(this.session, this.dockerContext.contextInfo?.isRootless);
    try {
      // Push each Dockerfile to each registry via OCI copy
      for (const dockerfile of this.dockerfiles) {
        for (const registry of registriesToPush) {
          await dockerfile.push(registry);
        }
      }
    } finally {
      await Dockerfile.stopLocalRegistry(this.session);
    }

    logger.log('success', 'All images pushed successfully');
@@ -203,7 +574,8 @@ export class TsDockerManager {
  }

  /**
   * Runs tests for all Dockerfiles
   * Runs tests for all Dockerfiles.
   * Starts the local registry so multi-platform images can be auto-pulled.
   */
  public async test(): Promise<void> {
    if (this.dockerfiles.length === 0) {
@@ -215,7 +587,16 @@ export class TsDockerManager {
      return;
    }

    await Dockerfile.testDockerfiles(this.dockerfiles);
    logger.log('info', '');
    logger.log('info', '=== TEST PHASE ===');

    await Dockerfile.startLocalRegistry(this.session, this.dockerContext.contextInfo?.isRootless);
    try {
      await Dockerfile.testDockerfiles(this.dockerfiles);
    } finally {
      await Dockerfile.stopLocalRegistry(this.session);
    }

    logger.log('success', 'All tests completed');
  }
@@ -227,19 +608,21 @@ export class TsDockerManager {
      await this.discoverDockerfiles();
    }

    console.log('\nDiscovered Dockerfiles:');
    console.log('========================\n');
    logger.log('info', '');
    logger.log('info', 'Discovered Dockerfiles:');
    logger.log('info', '========================');
    logger.log('info', '');

    for (let i = 0; i < this.dockerfiles.length; i++) {
      const df = this.dockerfiles[i];
      console.log(`${i + 1}. ${df.filePath}`);
      console.log(`   Tag: ${df.cleanTag}`);
      console.log(`   Base Image: ${df.baseImage}`);
      console.log(`   Version: ${df.version}`);
      logger.log('info', `${i + 1}. ${df.filePath}`);
      logger.log('info', `   Tag: ${df.cleanTag}`);
      logger.log('info', `   Base Image: ${df.baseImage}`);
      logger.log('info', `   Version: ${df.version}`);
      if (df.localBaseImageDependent) {
        console.log(`   Depends on: ${df.localBaseDockerfile?.cleanTag}`);
        logger.log('info', `   Depends on: ${df.localBaseDockerfile?.cleanTag}`);
      }
      console.log('');
      logger.log('info', '');
    }

    return this.dockerfiles;
@@ -251,4 +634,16 @@ export class TsDockerManager {
  public getDockerfiles(): Dockerfile[] {
    return this.dockerfiles;
  }

  /**
   * Cleans up session-specific resources.
   * In CI, removes the session-specific buildx builder to avoid accumulation.
   */
  public async cleanup(): Promise<void> {
    if (this.session?.config.isCI && this.session.config.builderSuffix) {
      const builderName = this.dockerContext.getBuilderName() + this.session.config.builderSuffix;
      logger.log('info', `CI cleanup: removing buildx builder ${builderName}`);
      await smartshellInstance.execSilent(`docker buildx rm ${builderName} 2>/dev/null || true`);
    }
  }
}
111  ts/classes.tsdockersession.ts  Normal file
@@ -0,0 +1,111 @@
import * as crypto from 'crypto';
import * as net from 'net';
import { logger } from './tsdocker.logging.js';

export interface ISessionConfig {
  sessionId: string;
  projectHash: string;
  registryPort: number;
  registryHost: string;
  registryContainerName: string;
  isCI: boolean;
  ciSystem: string | null;
  builderSuffix: string;
}

/**
 * Per-invocation session identity for tsdocker.
 * Generates unique ports, container names, and builder names so that
 * concurrent CI jobs on the same Docker host don't collide.
 *
 * In local (non-CI) dev the builder suffix contains a project hash so
 * that concurrent runs in different project directories use separate builders.
 */
export class TsDockerSession {
  public config: ISessionConfig;

  private constructor(config: ISessionConfig) {
    this.config = config;
  }

  /**
   * Creates a new session. Allocates a dynamic port unless overridden
   * via `TSDOCKER_REGISTRY_PORT`.
   */
  public static async create(): Promise<TsDockerSession> {
    const sessionId =
      process.env.TSDOCKER_SESSION_ID || crypto.randomBytes(4).toString('hex');
    const projectHash = crypto.createHash('sha256').update(process.cwd()).digest('hex').substring(0, 8);

    const registryPort = await TsDockerSession.allocatePort();
    const registryHost = `localhost:${registryPort}`;
    const registryContainerName = `tsdocker-registry-${sessionId}`;

    const { isCI, ciSystem } = TsDockerSession.detectCI();
    const builderSuffix = isCI ? `-${projectHash}-${sessionId}` : `-${projectHash}`;

    const config: ISessionConfig = {
      sessionId,
      projectHash,
      registryPort,
      registryHost,
      registryContainerName,
      isCI,
      ciSystem,
      builderSuffix,
    };

    const session = new TsDockerSession(config);
    session.logInfo();
    return session;
  }

  /**
   * Allocates a free TCP port. Respects `TSDOCKER_REGISTRY_PORT` override.
   */
  public static async allocatePort(): Promise<number> {
    const envPort = process.env.TSDOCKER_REGISTRY_PORT;
    if (envPort) {
      const parsed = parseInt(envPort, 10);
      if (!isNaN(parsed) && parsed > 0) {
        return parsed;
      }
    }

    return new Promise<number>((resolve, reject) => {
      const srv = net.createServer();
      srv.listen(0, '127.0.0.1', () => {
        const addr = srv.address() as net.AddressInfo;
        const port = addr.port;
        srv.close((err) => {
          if (err) reject(err);
          else resolve(port);
        });
      });
      srv.on('error', reject);
    });
  }

  /**
   * Detects whether we're running inside a CI system.
   */
  private static detectCI(): { isCI: boolean; ciSystem: string | null } {
    if (process.env.GITEA_ACTIONS) return { isCI: true, ciSystem: 'gitea-actions' };
    if (process.env.GITHUB_ACTIONS) return { isCI: true, ciSystem: 'github-actions' };
    if (process.env.GITLAB_CI) return { isCI: true, ciSystem: 'gitlab-ci' };
    if (process.env.CI) return { isCI: true, ciSystem: 'generic' };
    return { isCI: false, ciSystem: null };
  }

  private logInfo(): void {
    const c = this.config;
    logger.log('info', '=== TSDOCKER SESSION ===');
    logger.log('info', `Session ID: ${c.sessionId}`);
    logger.log('info', `Registry: ${c.registryHost} (container: ${c.registryContainerName})`);
    logger.log('info', `Project hash: ${c.projectHash}`);
    logger.log('info', `Builder suffix: ${c.builderSuffix}`);
    if (c.isCI) {
      logger.log('info', `CI detected: ${c.ciSystem}`);
    }
  }
}
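How the identity pieces compose, as a standalone sketch (the path and resulting hex values are illustrative):

```typescript
import { createHash, randomBytes } from 'crypto';

const projectHash = createHash('sha256')
  .update('/home/dev/projects/myapp')   // process.cwd() in the real class
  .digest('hex')
  .substring(0, 8);                     // e.g. '3f9c1a7e'
const sessionId = randomBytes(4).toString('hex'); // e.g. 'a1b2c3d4'

// Local dev: the suffix is stable per directory, so repeat runs reuse a builder.
const localSuffix = `-${projectHash}`;
// CI: the random session id is appended too, so parallel jobs never collide
// (the builder is removed again in cleanup()).
const ciSuffix = `-${projectHash}-${sessionId}`;
console.log(localSuffix, ciSuffix);
```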
@@ -1,15 +1,7 @@
/**
 * Configuration interface for tsdocker
 * Extends legacy config with new Docker build capabilities
 */
export interface ITsDockerConfig {
  // Legacy (backward compatible)
  baseImage: string;
  command: string;
  dockerSock: boolean;
  keyValueObject: { [key: string]: any };

  // New Docker build config
  registries?: string[];
  registryRepoMap?: { [registry: string]: string };
  buildArgEnvMap?: { [dockerArg: string]: string };
@@ -68,3 +60,56 @@ export interface IPushResult {
  digest?: string;
  error?: string;
}
/**
 * Options for the build command
 */
export interface IBuildCommandOptions {
  patterns?: string[];          // Dockerfile name patterns (e.g., ['Dockerfile_base', 'Dockerfile_*'])
  platform?: string;            // Single platform override (e.g., 'linux/arm64')
  timeout?: number;             // Build timeout in seconds
  noCache?: boolean;            // Force rebuild without Docker layer cache (--no-cache)
  pull?: boolean;               // Pull fresh base images before building (default: true)
  cached?: boolean;             // Skip builds when Dockerfile content hasn't changed
  verbose?: boolean;            // Stream raw docker build output (default: silent)
  context?: string;             // Explicit Docker context name (--context flag)
  parallel?: boolean;           // Enable parallel builds within dependency levels
  parallelConcurrency?: number; // Max concurrent builds per level (default 4)
}

export interface ICacheEntry {
  contentHash: string; // SHA-256 hex of Dockerfile content
  imageId: string;     // Docker image ID (sha256:...)
  buildTag: string;
  timestamp: number;   // Unix ms
}

export interface ICacheData {
  version: 1;
  entries: { [cleanTag: string]: ICacheEntry };
}

export interface IDockerContextInfo {
  name: string;        // 'default', 'rootless', 'colima', etc.
  endpoint: string;    // 'unix:///var/run/docker.sock'
  isRootless: boolean;
  dockerHost?: string; // value of DOCKER_HOST env var, if set
  topology?: 'socket-mount' | 'dind' | 'local';
}

/**
 * A remote builder node for native cross-platform builds
 */
export interface IRemoteBuilder {
  name: string;        // e.g., "arm64-builder"
  host: string;        // e.g., "armbuilder@192.168.190.216"
  platform: string;    // e.g., "linux/arm64"
  sshKeyPath?: string; // e.g., "~/.ssh/id_ed25519"
}

/**
 * Global tsdocker configuration stored at ~/.git.zone/tsdocker/config.json
 */
export interface IGlobalConfig {
  remoteBuilders: IRemoteBuilder[];
}
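A sample value conforming to the interfaces above, using the example values from their comments; this is the shape `tsdocker config add-builder` persists to `~/.git.zone/tsdocker/config.json`.

```typescript
import type { IGlobalConfig } from './interfaces/index.js';

const exampleGlobalConfig: IGlobalConfig = {
  remoteBuilders: [
    {
      name: 'arm64-builder',
      host: 'armbuilder@192.168.190.216',
      platform: 'linux/arm64',
      sshKeyPath: '~/.ssh/id_ed25519',
    },
  ],
};

console.log(JSON.stringify(exampleGlobalConfig, null, 2));
```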
@@ -3,34 +3,141 @@ import * as paths from './tsdocker.paths.js';

// modules
import * as ConfigModule from './tsdocker.config.js';
import * as DockerModule from './tsdocker.docker.js';

import { logger, ora } from './tsdocker.logging.js';
import { TsDockerManager } from './classes.tsdockermanager.js';
import { DockerContext } from './classes.dockercontext.js';
import { GlobalConfig } from './classes.globalconfig.js';
import type { IBuildCommandOptions } from './interfaces/index.js';
import { commitinfo } from './00_commitinfo_data.js';

const tsdockerCli = new plugins.smartcli.Smartcli();
tsdockerCli.addVersion(commitinfo.version);
const printManPage = () => {
  const manPage = `
TSDOCKER(1)                      User Commands                      TSDOCKER(1)

NAME
    tsdocker - build, test, and push Docker images

VERSION
    ${commitinfo.version}

SYNOPSIS
    tsdocker <command> [options]

COMMANDS
    build [patterns...] [flags]   Build Dockerfiles in dependency order
    push [patterns...] [flags]    Build and push images to registries
    pull <registry-url>           Pull images from a registry
    test [flags]                  Build and run container test scripts
    login                         Authenticate with configured registries
    list                          List discovered Dockerfiles
    config <subcommand> [flags]   Manage global tsdocker configuration
    clean [-y] [--all]            Interactive Docker resource cleanup

BUILD / PUSH OPTIONS
    --platform=<p>    Target platform (e.g. linux/arm64)
    --timeout=<s>     Build timeout in seconds
    --no-cache        Rebuild without Docker layer cache
    --no-pull         Skip pulling latest base images (use cached)
    --cached          Skip builds when Dockerfile is unchanged
    --verbose         Stream raw docker build output
    --parallel[=<n>]  Parallel builds (optional concurrency limit)
    --context=<name>  Docker context to use

PUSH-ONLY OPTIONS
    --registry=<url>  Push to a specific registry
    --no-build        Push already-built images (skip build step)

CLEAN OPTIONS
    -y                Auto-confirm all prompts
    --all             Include all images and volumes (not just dangling)

CONFIG SUBCOMMANDS
    add-builder       Add a remote builder node
      --name=<n>        Builder name (e.g. arm64-builder)
      --host=<h>        SSH host (e.g. user@192.168.1.100)
      --platform=<p>    Platform (e.g. linux/arm64)
      --ssh-key=<path>  SSH key path (optional)
    remove-builder    Remove a remote builder by name
      --name=<n>        Builder name to remove
    list-builders     List all configured remote builders
    show              Show full global config

CONFIGURATION
    Configure via npmextra.json under the "@git.zone/tsdocker" key:

    registries       Array of registry URLs to push to
    registryRepoMap  Map of registry URL to repo path overrides
    buildArgEnvMap   Map of Docker build-arg names to env var names
    platforms        Array of target platforms (default: ["linux/amd64"])
    push             Boolean, auto-push after build
    testDir          Directory containing test_*.sh scripts

    Global config is stored at ~/.git.zone/tsdocker/config.json
    and managed via the "config" command.

EXAMPLES
    tsdocker build
    tsdocker build Dockerfile_app --platform=linux/arm64
    tsdocker push --registry=ghcr.io
    tsdocker test --verbose
    tsdocker clean -y --all
    tsdocker config add-builder --name=arm64 --host=user@host --platform=linux/arm64
    tsdocker config list-builders
`;
  console.log(manPage);
};
export let run = () => {
  // Default command: run tests in container (legacy behavior)
  tsdockerCli.standardCommand().subscribe(async argvArg => {
    const configArg = await ConfigModule.run().then(DockerModule.run);
    if (configArg.exitCode === 0) {
      logger.log('success', 'container ended all right!');
    } else {
      logger.log('error', `container ended with error! Exit Code is ${configArg.exitCode}`);
      process.exit(1);
    }
  // Default command: print man page
  tsdockerCli.standardCommand().subscribe(async () => {
    printManPage();
  });

  /**
   * Build all Dockerfiles in dependency order
   * Build Dockerfiles in dependency order
   * Usage: tsdocker build [Dockerfile_patterns...] [--platform=linux/arm64] [--timeout=600]
   */
  tsdockerCli.addCommand('build').subscribe(async argvArg => {
    try {
      const config = await ConfigModule.run();
      const manager = new TsDockerManager(config);
      await manager.prepare();
      await manager.build();
      await manager.prepare(argvArg.context as string | undefined);

      const buildOptions: IBuildCommandOptions = {};
      const patterns = argvArg._.slice(1) as string[];
      if (patterns.length > 0) {
        buildOptions.patterns = patterns;
      }
      if (argvArg.platform) {
        buildOptions.platform = argvArg.platform as string;
      }
      if (argvArg.timeout) {
        buildOptions.timeout = Number(argvArg.timeout);
      }
      if (argvArg.cache === false) {
        buildOptions.noCache = true;
      }
      // --pull is default true; --no-pull sets pull=false
      buildOptions.pull = argvArg.pull !== false;
      if (argvArg.cached) {
        buildOptions.cached = true;
      }
      if (argvArg.verbose) {
        buildOptions.verbose = true;
      }
      if (argvArg.parallel) {
        buildOptions.parallel = true;
        if (typeof argvArg.parallel === 'number') {
          buildOptions.parallelConcurrency = argvArg.parallel;
        }
      }

      await manager.build(buildOptions);
      await manager.cleanup();
      logger.log('success', 'Build completed successfully');
    } catch (err) {
      logger.log('error', `Build failed: ${(err as Error).message}`);
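What the flag parsing above produces for a concrete invocation, assuming smartcli's usual minimist-style argv handling (values illustrative):

```typescript
import type { IBuildCommandOptions } from './interfaces/index.js';

// tsdocker build Dockerfile_app 'Dockerfile_worker*' \
//   --platform=linux/arm64 --no-cache --parallel=2
const buildOptions: IBuildCommandOptions = {
  patterns: ['Dockerfile_app', 'Dockerfile_worker*'], // positional args after 'build'
  platform: 'linux/arm64',
  noCache: true,          // from --no-cache (argvArg.cache === false)
  pull: true,             // default; --no-pull would flip this to false
  parallel: true,
  parallelConcurrency: 2, // numeric value of --parallel
};
```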
@@ -40,24 +147,59 @@ export let run = () => {

  /**
   * Push built images to configured registries
   * Usage: tsdocker push [Dockerfile_patterns...] [--platform=linux/arm64] [--timeout=600] [--registry=url]
   */
  tsdockerCli.addCommand('push').subscribe(async argvArg => {
    try {
      const config = await ConfigModule.run();
      const manager = new TsDockerManager(config);
      await manager.prepare();
      await manager.prepare(argvArg.context as string | undefined);

      // Login first
      await manager.login();

      // Build images first (if not already built)
      await manager.build();
      // Parse build options from positional args and flags
      const buildOptions: IBuildCommandOptions = {};
      const patterns = argvArg._.slice(1) as string[];
      if (patterns.length > 0) {
        buildOptions.patterns = patterns;
      }
      if (argvArg.platform) {
        buildOptions.platform = argvArg.platform as string;
      }
      if (argvArg.timeout) {
        buildOptions.timeout = Number(argvArg.timeout);
      }
      if (argvArg.cache === false) {
        buildOptions.noCache = true;
      }
      buildOptions.pull = argvArg.pull !== false;
      if (argvArg.verbose) {
        buildOptions.verbose = true;
      }
      if (argvArg.parallel) {
        buildOptions.parallel = true;
        if (typeof argvArg.parallel === 'number') {
          buildOptions.parallelConcurrency = argvArg.parallel;
        }
      }

      // Get registry from arguments if specified
      const registryArg = argvArg._[1]; // e.g., tsdocker push registry.gitlab.com
      // Build images first, unless --no-build is set
      if (argvArg.build === false) {
        await manager.discoverDockerfiles();
        if (buildOptions.patterns?.length) {
          manager.filterDockerfiles(buildOptions.patterns);
        }
      } else {
        await manager.build(buildOptions);
      }

      // Get registry from --registry flag
      const registryArg = argvArg.registry as string | undefined;
      const registries = registryArg ? [registryArg] : undefined;

      await manager.push(registries);
      await manager.cleanup();
      logger.log('success', 'Push completed successfully');
    } catch (err) {
      logger.log('error', `Push failed: ${(err as Error).message}`);
@@ -78,7 +220,7 @@ export let run = () => {

      const config = await ConfigModule.run();
      const manager = new TsDockerManager(config);
      await manager.prepare();
      await manager.prepare(argvArg.context as string | undefined);

      // Login first
      await manager.login();
@@ -98,13 +240,31 @@ export let run = () => {
    try {
      const config = await ConfigModule.run();
      const manager = new TsDockerManager(config);
      await manager.prepare();
      await manager.prepare(argvArg.context as string | undefined);

      // Build images first
      await manager.build();
      const buildOptions: IBuildCommandOptions = {};
      if (argvArg.cache === false) {
        buildOptions.noCache = true;
      }
      buildOptions.pull = argvArg.pull !== false;
      if (argvArg.cached) {
        buildOptions.cached = true;
      }
      if (argvArg.verbose) {
        buildOptions.verbose = true;
      }
      if (argvArg.parallel) {
        buildOptions.parallel = true;
        if (typeof argvArg.parallel === 'number') {
          buildOptions.parallelConcurrency = argvArg.parallel;
        }
      }
      await manager.build(buildOptions);

      // Run tests
      await manager.test();
      await manager.cleanup();
      logger.log('success', 'Tests completed successfully');
    } catch (err) {
      logger.log('error', `Tests failed: ${(err as Error).message}`);
@@ -119,7 +279,7 @@ export let run = () => {
    try {
      const config = await ConfigModule.run();
      const manager = new TsDockerManager(config);
      await manager.prepare();
      await manager.prepare(argvArg.context as string | undefined);
      await manager.login();
      logger.log('success', 'Login completed successfully');
    } catch (err) {
@@ -135,7 +295,7 @@ export let run = () => {
    try {
      const config = await ConfigModule.run();
      const manager = new TsDockerManager(config);
      await manager.prepare();
      await manager.prepare(argvArg.context as string | undefined);
      await manager.list();
    } catch (err) {
      logger.log('error', `List failed: ${(err as Error).message}`);
@@ -144,59 +304,270 @@ export let run = () => {
  });

  /**
   * this command is executed inside docker and meant for use from outside docker
   * Manage global tsdocker configuration (remote builders, etc.)
   * Usage: tsdocker config <subcommand> [--name=...] [--host=...] [--platform=...] [--ssh-key=...]
   */
  tsdockerCli.addCommand('runinside').subscribe(async argvArg => {
    logger.log('ok', 'Allright. We are now in Docker!');
    ora.text('now trying to run your specified command');
    const configArg = await ConfigModule.run();
    const smartshellInstance = new plugins.smartshell.Smartshell({
      executor: 'bash'
    });
    ora.stop();
    await smartshellInstance.exec(configArg.command).then(response => {
      if (response.exitCode !== 0) {
        process.exit(1);
  tsdockerCli.addCommand('config').subscribe(async argvArg => {
    try {
      const subcommand = argvArg._[1] as string;

      switch (subcommand) {
        case 'add-builder': {
          const name = argvArg.name as string;
          const host = argvArg.host as string;
          const platform = argvArg.platform as string;
          const sshKeyPath = argvArg['ssh-key'] as string | undefined;

          if (!name || !host || !platform) {
            logger.log('error', 'Required: --name, --host, --platform');
            logger.log('info', 'Usage: tsdocker config add-builder --name=arm64-builder --host=user@host --platform=linux/arm64 [--ssh-key=~/.ssh/id_ed25519]');
            process.exit(1);
          }

          GlobalConfig.addBuilder({ name, host, platform, sshKeyPath });
          logger.log('success', `Remote builder "${name}" configured: ${platform} via ssh://${host}`);
          break;
        }

        case 'remove-builder': {
          const name = argvArg.name as string;
          if (!name) {
            logger.log('error', 'Required: --name');
            logger.log('info', 'Usage: tsdocker config remove-builder --name=arm64-builder');
            process.exit(1);
          }
          GlobalConfig.removeBuilder(name);
          logger.log('success', `Remote builder "${name}" removed`);
          break;
        }

        case 'list-builders': {
          const builders = GlobalConfig.getBuilders();
          if (builders.length === 0) {
            logger.log('info', 'No remote builders configured');
          } else {
            logger.log('info', `${builders.length} remote builder(s):`);
            for (const b of builders) {
              const keyInfo = b.sshKeyPath ? ` (key: ${b.sshKeyPath})` : '';
              logger.log('info', `  ${b.name}: ${b.platform} via ssh://${b.host}${keyInfo}`);
            }
          }
          break;
        }

        case 'show': {
          const config = GlobalConfig.load();
          logger.log('info', `Config file: ${GlobalConfig.getConfigPath()}`);
          console.log(JSON.stringify(config, null, 2));
          break;
        }

        default:
          logger.log('error', `Unknown config subcommand: ${subcommand || '(none)'}`);
          logger.log('info', 'Available: add-builder, remove-builder, list-builders, show');
          process.exit(1);
      }
    });
    } catch (err) {
      logger.log('error', `Config failed: ${(err as Error).message}`);
      process.exit(1);
    }
  });
  tsdockerCli.addCommand('clean').subscribe(async argvArg => {
    ora.text('cleaning up docker env...');
    if (argvArg.all) {
      const smartshellInstance = new plugins.smartshell.Smartshell({
        executor: 'bash'
      });
      ora.text('killing any running docker containers...');
      await smartshellInstance.exec(`docker kill $(docker ps -q)`);
    try {
      const autoYes = !!argvArg.y;
      const includeAll = !!argvArg.all;

      ora.text('removing stopped containers...');
      await smartshellInstance.exec(`docker rm $(docker ps -a -q)`);
      const smartshellInstance = new plugins.smartshell.Smartshell({ executor: 'bash' });
      const interact = new plugins.smartinteract.SmartInteract();

      ora.text('removing images...');
      await smartshellInstance.exec(`docker rmi -f $(docker images -q -f dangling=true)`);
      // --- Docker context detection ---
      ora.text('detecting docker context...');
      const dockerContext = new DockerContext();
      if (argvArg.context) {
        dockerContext.setContext(argvArg.context as string);
      }
      await dockerContext.detect();
      ora.stop();
      dockerContext.logContextInfo();

      ora.text('removing all other images...');
      await smartshellInstance.exec(`docker rmi $(docker images -a -q)`);
      // --- Helper: parse docker output into resource list ---
      interface IDockerResource {
        id: string;
        display: string;
      }

      ora.text('removing all volumes...');
      await smartshellInstance.exec(`docker volume rm $(docker volume ls -f dangling=true -q)`);
      const listResources = async (command: string): Promise<IDockerResource[]> => {
        const result = await smartshellInstance.execSilent(command);
        if (result.exitCode !== 0 || !result.stdout.trim()) {
          return [];
        }
        return result.stdout.trim().split('\n').filter(Boolean).map((line) => {
          const parts = line.split('\t');
          return {
            id: parts[0],
            display: parts.join(' | '),
          };
        });
      };

      // --- Helper: checkbox selection ---
      const selectResources = async (
        name: string,
        message: string,
        resources: IDockerResource[],
      ): Promise<string[]> => {
        if (autoYes) {
          return resources.map((r) => r.id);
        }
        const answer = await interact.askQuestion({
          name,
          type: 'checkbox',
          message,
          default: [],
          choices: resources.map((r) => ({ name: r.display, value: r.id })),
        });
        return answer.value as string[];
      };

      // --- Helper: confirm action ---
      const confirmAction = async (
        name: string,
        message: string,
      ): Promise<boolean> => {
        if (autoYes) {
          return true;
        }
        const answer = await interact.askQuestion({
          name,
          type: 'confirm',
          message,
          default: false,
        });
        return answer.value as boolean;
      };
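How `listResources` turns `docker ... --format` output into selectable rows, shown standalone (the sample stdout is illustrative): each line is tab-separated, the first column is the ID passed back to docker, and the joined form is what the user sees in the checkbox prompt.

```typescript
const sampleStdout = [
  'a1b2c3d4\tweb-1\tnginx:1.27\tUp 2 hours',
  'e5f6a7b8\tdb-1\tpostgres:16\tUp 2 hours',
].join('\n');

const resources = sampleStdout.trim().split('\n').filter(Boolean).map((line) => {
  const parts = line.split('\t');
  return { id: parts[0], display: parts.join(' | ') };
});

console.log(resources[0]);
// { id: 'a1b2c3d4', display: 'a1b2c3d4 | web-1 | nginx:1.27 | Up 2 hours' }
```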
// === RUNNING CONTAINERS ===
|
||||
const runningContainers = await listResources(
|
||||
`docker ps --format '{{.ID}}\t{{.Names}}\t{{.Image}}\t{{.Status}}'`
|
||||
);
|
||||
if (runningContainers.length > 0) {
|
||||
logger.log('info', `Found ${runningContainers.length} running container(s)`);
|
||||
const selectedIds = await selectResources(
|
||||
'runningContainers',
|
||||
'Select running containers to kill:',
|
||||
runningContainers,
|
||||
);
|
||||
if (selectedIds.length > 0) {
|
||||
logger.log('info', `Killing ${selectedIds.length} container(s)...`);
|
||||
await smartshellInstance.exec(`docker kill ${selectedIds.join(' ')}`);
|
||||
}
|
||||
} else {
|
||||
      logger.log('info', 'No running containers found');
    }

    // === STOPPED CONTAINERS ===
    const stoppedContainers = await listResources(
      `docker ps -a --filter status=exited --filter status=created --format '{{.ID}}\t{{.Names}}\t{{.Image}}\t{{.Status}}'`
    );
    if (stoppedContainers.length > 0) {
      logger.log('info', `Found ${stoppedContainers.length} stopped container(s)`);
      const selectedIds = await selectResources(
        'stoppedContainers',
        'Select stopped containers to remove:',
        stoppedContainers,
      );
      if (selectedIds.length > 0) {
        logger.log('info', `Removing ${selectedIds.length} container(s)...`);
        await smartshellInstance.exec(`docker rm ${selectedIds.join(' ')}`);
      }
    } else {
      logger.log('info', 'No stopped containers found');
    }

    // === DANGLING IMAGES ===
    const danglingImages = await listResources(
      `docker images -f dangling=true --format '{{.ID}}\t{{.Repository}}:{{.Tag}}\t{{.Size}}'`
    );
    if (danglingImages.length > 0) {
      const confirmed = await confirmAction(
        'removeDanglingImages',
        `Remove ${danglingImages.length} dangling image(s)?`,
      );
      if (confirmed) {
        logger.log('info', `Removing ${danglingImages.length} dangling image(s)...`);
        const ids = danglingImages.map((r) => r.id).join(' ');
        await smartshellInstance.exec(`docker rmi ${ids}`);
      }
    } else {
      logger.log('info', 'No dangling images found');
    }

    // === ALL IMAGES (only with --all) ===
    if (includeAll) {
      const allImages = await listResources(
        `docker images --format '{{.ID}}\t{{.Repository}}:{{.Tag}}\t{{.Size}}'`
      );
      if (allImages.length > 0) {
        logger.log('info', `Found ${allImages.length} image(s) total`);
        const selectedIds = await selectResources(
          'allImages',
          'Select images to remove:',
          allImages,
        );
        if (selectedIds.length > 0) {
          logger.log('info', `Removing ${selectedIds.length} image(s)...`);
          await smartshellInstance.exec(`docker rmi -f ${selectedIds.join(' ')}`);
        }
      } else {
        logger.log('info', 'No images found');
      }
    }

    // === DANGLING VOLUMES ===
    const danglingVolumes = await listResources(
      `docker volume ls -f dangling=true --format '{{.Name}}\t{{.Driver}}'`
    );
    if (danglingVolumes.length > 0) {
      const confirmed = await confirmAction(
        'removeDanglingVolumes',
        `Remove ${danglingVolumes.length} dangling volume(s)?`,
      );
      if (confirmed) {
        logger.log('info', `Removing ${danglingVolumes.length} dangling volume(s)...`);
        const names = danglingVolumes.map((r) => r.id).join(' ');
        await smartshellInstance.exec(`docker volume rm ${names}`);
      }
    } else {
      logger.log('info', 'No dangling volumes found');
    }

    // === ALL VOLUMES (only with --all) ===
    if (includeAll) {
      const allVolumes = await listResources(
        `docker volume ls --format '{{.Name}}\t{{.Driver}}'`
      );
      if (allVolumes.length > 0) {
        logger.log('info', `Found ${allVolumes.length} volume(s) total`);
        const selectedIds = await selectResources(
          'allVolumes',
          'Select volumes to remove:',
          allVolumes,
        );
        if (selectedIds.length > 0) {
          logger.log('info', `Removing ${selectedIds.length} volume(s)...`);
          await smartshellInstance.exec(`docker volume rm ${selectedIds.join(' ')}`);
        }
      } else {
        logger.log('info', 'No volumes found');
      }
    }

    logger.log('success', 'Docker cleanup completed!');
  } catch (err) {
    logger.log('error', `Clean failed: ${(err as Error).message}`);
    process.exit(1);
  }
  ora.finishSuccess('docker environment is now clean!');
});
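The clean flow above relies on listResources, selectResources, and confirmAction, which are defined earlier in the file and not visible in this hunk. A minimal sketch of what listResources likely looks like, assuming tab-separated `docker ... --format` output; the ICleanResource shape is inferred from the `r.id` call sites and is not confirmed by this diff. The two prompt helpers presumably wrap @push.rocks/smartinteract, which is imported in the plugins module further down.

// hypothetical shape, inferred from usage: `id` feeds docker rm/rmi/volume rm,
// and the full line would be what the selection prompts display
interface ICleanResource {
  id: string;
  label: string;
}

// runs a docker listing command silently and parses one resource per non-empty line
const listResources = async (cmd: string): Promise<ICleanResource[]> => {
  const result = await smartshellInstance.execSilent(cmd);
  return result.stdout
    .split('\n')
    .filter((line) => line.trim().length > 0)
    .map((line) => ({ id: line.split('\t')[0], label: line }));
};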
tsdockerCli.addCommand('vscode').subscribe(async argvArg => {
  const smartshellInstance = new plugins.smartshell.Smartshell({
    executor: 'bash'
  });
  logger.log('ok', `Starting vscode in cwd ${paths.cwd}`);
  await smartshellInstance.execAndWaitForLine(
    `docker run -p 127.0.0.1:8443:8443 -v "${paths.cwd}:/home/coder/project" registry.gitlab.com/hosttoday/ht-docker-vscode --allow-http --no-auth`,
    /Connected to shared process/
  );
  await plugins.smartopen.openUrl('testing-vscode.git.zone:8443');
});

tsdockerCli.startParse();
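Taken together, typical invocations of the cleanup command above would look like this (illustrative; the command name is registered earlier in this file and assumed here to be `clean`):

tsdocker clean        # interactively remove stopped containers plus dangling images and volumes
tsdocker clean --all  # additionally offer every image and every volume for removal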
@@ -1,34 +1,10 @@
import * as plugins from './tsdocker.plugins.js';
import * as paths from './tsdocker.paths.js';
import * as fs from 'fs';
import type { ITsDockerConfig } from './interfaces/index.js';

// Re-export ITsDockerConfig as IConfig for backward compatibility
export type IConfig = ITsDockerConfig & {
  exitCode?: number;
};

const getQenvKeyValueObject = async () => {
  let qenvKeyValueObjectArray: { [key: string]: string | number };
  if (fs.existsSync(plugins.path.join(paths.cwd, 'qenv.yml'))) {
    qenvKeyValueObjectArray = new plugins.qenv.Qenv(paths.cwd, '.nogit/').keyValueObject;
  } else {
    qenvKeyValueObjectArray = {};
  }
  return qenvKeyValueObjectArray;
};

const buildConfig = async (qenvKeyValueObjectArg: { [key: string]: string | number }) => {
const buildConfig = async (): Promise<ITsDockerConfig> => {
  const npmextra = new plugins.npmextra.Npmextra(paths.cwd);
  const config = npmextra.dataFor<IConfig>('@git.zone/tsdocker', {
    // Legacy options (backward compatible)
    baseImage: 'hosttoday/ht-docker-node:npmdocker',
    init: 'rm -rf node_modules/ && yarn install',
    command: 'npmci npm test',
    dockerSock: false,
    keyValueObject: qenvKeyValueObjectArg,

    // New Docker build options
  const config = npmextra.dataFor<ITsDockerConfig>('@git.zone/tsdocker', {
    registries: [],
    registryRepoMap: {},
    buildArgEnvMap: {},
@@ -39,7 +15,6 @@ const buildConfig = async (qenvKeyValueObjectArg: { [key: string]: string | numb
  return config;
};

export let run = async (): Promise<IConfig> => {
  const config = await getQenvKeyValueObject().then(buildConfig);
  return config;
export let run = async (): Promise<ITsDockerConfig> => {
  return buildConfig();
};
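For orientation, the new config shape above would typically be supplied through npmextra's project config; a minimal sketch, assuming the usual npmextra.json lookup in the project root (all values are hypothetical placeholders, only the key names come from the defaults in this diff):

{
  "@git.zone/tsdocker": {
    "registries": ["registry.example.com"],
    "registryRepoMap": { "registry.example.com": "myorg/myimage" },
    "buildArgEnvMap": { "NPM_TOKEN": "NPM_TOKEN" }
  }
}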
@@ -1,169 +0,0 @@
import * as plugins from './tsdocker.plugins.js';
import * as paths from './tsdocker.paths.js';
import * as snippets from './tsdocker.snippets.js';

import { logger, ora } from './tsdocker.logging.js';

const smartshellInstance = new plugins.smartshell.Smartshell({
  executor: 'bash'
});

// interfaces
import type { IConfig } from './tsdocker.config.js';

let config: IConfig;

/**
 * the docker data used to build the internal testing container
 */
const dockerData = {
  imageTag: 'npmdocker-temp-image:latest',
  containerName: 'npmdocker-temp-container',
  dockerProjectMountString: '',
  dockerSockString: '',
  dockerEnvString: ''
};

/**
 * check if docker is available
 */
const checkDocker = () => {
  const done = plugins.smartpromise.defer();
  ora.text('checking docker...');

  if (smartshellInstance.exec('which docker')) {
    logger.log('ok', 'Docker found!');
    done.resolve();
  } else {
    done.reject(new Error('docker not found on this machine'));
  }
  return done.promise;
};

/**
 * builds the Dockerfile according to the config in the project
 */
const buildDockerFile = async () => {
  const done = plugins.smartpromise.defer();
  ora.text('building Dockerfile...');
  const dockerfile: string = snippets.dockerfileSnippet({
    baseImage: config.baseImage,
    command: config.command
  });
  logger.log('info', `Base image is: ${config.baseImage}`);
  logger.log('info', `Command is: ${config.command}`);
  await plugins.smartfs.file(plugins.path.join(paths.cwd, 'npmdocker')).write(dockerfile);
  logger.log('ok', 'Dockerfile created!');
  ora.stop();
  done.resolve();
  return done.promise;
};

/**
 * builds the Dockerimage from the built Dockerfile
 */
const buildDockerImage = async () => {
  logger.log('info', 'pulling latest base image from registry...');
  await smartshellInstance.exec(`docker pull ${config.baseImage}`);
  ora.text('building Dockerimage...');
  const execResult = await smartshellInstance.execSilent(
    `docker build --load -f npmdocker -t ${dockerData.imageTag} ${paths.cwd}`
  );
  if (execResult.exitCode !== 0) {
    console.log(execResult.stdout);
    process.exit(1);
  }
  logger.log('ok', 'Dockerimage built!');
};

const buildDockerProjectMountString = async () => {
  if (process.env.CI !== 'true') {
    dockerData.dockerProjectMountString = `-v ${paths.cwd}:/workspace`;
  }
};

/**
 * builds an environment string that the docker cli understands
 */
const buildDockerEnvString = async () => {
  for (const key of Object.keys(config.keyValueObject)) {
    dockerData.dockerEnvString += `-e ${key}=${config.keyValueObject[key]} `;
  }
};

/**
 * creates the string to mount the docker.sock inside the test container
 */
const buildDockerSockString = async () => {
  if (config.dockerSock) {
    dockerData.dockerSockString = `-v /var/run/docker.sock:/var/run/docker.sock`;
  }
};

/**
 * creates a container by running the built Dockerimage
 */
const runDockerImage = async () => {
  const done = plugins.smartpromise.defer();
  ora.text('starting Container...');
  ora.stop();
  logger.log('info', 'now running Dockerimage');
  config.exitCode = (await smartshellInstance.exec(
    `docker run ${dockerData.dockerProjectMountString} ${dockerData.dockerSockString} ${dockerData.dockerEnvString} --name ${dockerData.containerName} ${dockerData.imageTag}`
  )).exitCode;
};
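For illustration, with a single qenv value FOO=bar, dockerSock enabled, and a local (non-CI) run from /home/dev/project (all hypothetical values), the command composed by runDockerImage above would come out roughly as:

docker run -v /home/dev/project:/workspace -v /var/run/docker.sock:/var/run/docker.sock -e FOO=bar --name npmdocker-temp-container npmdocker-temp-image:latest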
/**
 * cleans up: deletes the test container
 */
const deleteDockerContainer = async () => {
  await smartshellInstance.execSilent(`docker rm -f ${dockerData.containerName}`);
};

/**
 * cleans up: deletes the test image
 */
const deleteDockerImage = async () => {
  await smartshellInstance.execSilent(`docker rmi ${dockerData.imageTag}`).then(async response => {
    if (response.exitCode !== 0) {
      console.log(response.stdout);
    }
  });
};

const preClean = async () => {
  await deleteDockerImage()
    .then(deleteDockerContainer)
    .then(async () => {
      logger.log('ok', 'ensured clean Docker environment!');
    });
};

const postClean = async () => {
  await deleteDockerContainer()
    .then(deleteDockerImage)
    .then(async () => {
      logger.log('ok', 'cleaned up!');
    });
  await plugins.smartfs.file(paths.npmdockerFile).delete();
};

export let run = async (configArg: IConfig): Promise<IConfig> => {
  config = configArg;
  await checkDocker()
    .then(preClean)
    .then(buildDockerFile)
    .then(buildDockerImage)
    .then(buildDockerProjectMountString)
    .then(buildDockerEnvString)
    .then(buildDockerSockString)
    .then(runDockerImage)
    .then(postClean)
    .catch(err => {
      console.log(err);
    });
  return config;
};
@@ -15,3 +15,12 @@ export const logger = new plugins.smartlog.Smartlog({
logger.addLogDestination(new plugins.smartlogDestinationLocal.DestinationLocal());

export const ora = new plugins.smartlogSouceOra.SmartlogSourceOra();

export function formatDuration(ms: number): string {
  if (ms < 1000) return `${ms}ms`;
  const totalSeconds = ms / 1000;
  if (totalSeconds < 60) return `${totalSeconds.toFixed(1)}s`;
  const minutes = Math.floor(totalSeconds / 60);
  const seconds = Math.round(totalSeconds % 60);
  return `${minutes}m ${seconds}s`;
}
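A few sample values, derived directly from the function above:

formatDuration(500);     // '500ms'
formatDuration(1500);    // '1.5s'
formatDuration(65000);   // '1m 5s'
formatDuration(119500);  // '1m 60s' (seconds round independently, so 59.5s rounds up without carrying into minutes)
formatDuration(3723000); // '62m 3s' (minutes are not rolled over into hours)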
@@ -11,4 +11,3 @@ export let cwd = process.cwd();
export let packageBase = plugins.path.join(__dirname, '../');
export let assets = plugins.path.join(packageBase, 'assets/');
fs.mkdirSync(assets, { recursive: true });
export let npmdockerFile = plugins.path.join(cwd, 'npmdocker');
@@ -3,16 +3,13 @@ import * as lik from '@push.rocks/lik';
import * as npmextra from '@push.rocks/npmextra';
import * as path from 'path';
import * as projectinfo from '@push.rocks/projectinfo';
import * as smartpromise from '@push.rocks/smartpromise';
import * as qenv from '@push.rocks/qenv';
import * as smartcli from '@push.rocks/smartcli';
import { SmartFs, SmartFsProviderNode } from '@push.rocks/smartfs';
import * as smartlog from '@push.rocks/smartlog';
import * as smartlogDestinationLocal from '@push.rocks/smartlog-destination-local';
import * as smartlogSouceOra from '@push.rocks/smartlog-source-ora';
import * as smartopen from '@push.rocks/smartopen';
import * as smartinteract from '@push.rocks/smartinteract';
import * as smartshell from '@push.rocks/smartshell';
import * as smartstring from '@push.rocks/smartstring';

// Create smartfs instance
export const smartfs = new SmartFs(new SmartFsProviderNode());
@@ -22,13 +19,10 @@ export {
  npmextra,
  path,
  projectinfo,
  smartpromise,
  qenv,
  smartcli,
  smartinteract,
  smartlog,
  smartlogDestinationLocal,
  smartlogSouceOra,
  smartopen,
  smartshell,
  smartstring
};
@@ -1,34 +0,0 @@
import * as plugins from './tsdocker.plugins.js';

export interface IDockerfileSnippet {
  baseImage: string;
  command: string;
}

let getMountSolutionString = (optionsArg: IDockerfileSnippet) => {
  if (process.env.CI) {
    return 'COPY ./ /workspace';
  } else {
    return '# not copying workspace since not in CI';
  }
};

let getGlobalPreparationString = (optionsArg: IDockerfileSnippet) => {
  // Always install tsdocker to ensure the latest version is available
  return 'RUN npm install -g @git.zone/tsdocker';
};

export let dockerfileSnippet = (optionsArg: IDockerfileSnippet): string => {
  return plugins.smartstring.indent.normalize(
    `
    FROM ${optionsArg.baseImage}
    # For info about what tsdocker does read the docs at https://gitzone.github.io/tsdocker
    ${getGlobalPreparationString(optionsArg)}
    ${getMountSolutionString(optionsArg)}
    WORKDIR /workspace
    ENV CI=true
    ENTRYPOINT ["tsdocker"]
    CMD ["runinside"]
    `
  );
};
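For reference, in a CI environment (process.env.CI set) with the default base image from the config diff above, dockerfileSnippet renders, after indent normalization, to roughly:

FROM hosttoday/ht-docker-node:npmdocker
# For info about what tsdocker does read the docs at https://gitzone.github.io/tsdocker
RUN npm install -g @git.zone/tsdocker
COPY ./ /workspace
WORKDIR /workspace
ENV CI=true
ENTRYPOINT ["tsdocker"]
CMD ["runinside"]

Note that `command` is part of IDockerfileSnippet but is never interpolated into the template; the configured command is presumably picked up by the `runinside` flow inside the container.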