Compare commits
44 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 53b7bd7048 | |||
| 101c4286c1 | |||
| 63078139ec | |||
| 0cb5515b93 | |||
| aa0425f9bc | |||
| 2d4d7c671a | |||
| 3085eb590f | |||
| 04b75b42f3 | |||
| b04b8c9033 | |||
| 2130a8a879 | |||
| 17de78aed3 | |||
| eddb8cd156 | |||
| cfc7798d49 | |||
| 37dfde005e | |||
| d1785aab86 | |||
| 31fb4aea3c | |||
| 907048fa87 | |||
| 02b267ee10 | |||
| 16cd0bbd87 | |||
| cc83743f9a | |||
| 7131c16f80 | |||
| 02688861f4 | |||
| 3a8b301b3e | |||
| c09bef33c3 | |||
| 32eb0d1d77 | |||
| 7cac628975 | |||
| c279dbd55e | |||
| 7b7064864e | |||
| 36f06cef09 | |||
| b0f87deb4b | |||
| 9805324746 | |||
| 808066d8c3 | |||
| 6922d19454 | |||
| e1492f8ec4 | |||
| e9a12f1c17 | |||
| 6995010a2c | |||
| e0cbc9cfec | |||
| c538e6b10b | |||
| c31df766fc | |||
| 0c626c20e7 | |||
| c07f10b97b | |||
| 08d32f0370 | |||
| ac386f01e0 | |||
| 08ead4258f |
66
.gitea/workflows/default_nottags.yaml
Normal file
66
.gitea/workflows/default_nottags.yaml
Normal file
@@ -0,0 +1,66 @@
|
|||||||
|
name: Default (not tags)
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
tags-ignore:
|
||||||
|
- '**'
|
||||||
|
|
||||||
|
env:
|
||||||
|
IMAGE: code.foss.global/host.today/ht-docker-node:npmci
|
||||||
|
NPMCI_COMPUTED_REPOURL: https://${{gitea.repository_owner}}:${{secrets.GITEA_TOKEN}}@/${{gitea.repository}}.git
|
||||||
|
NPMCI_TOKEN_NPM: ${{secrets.NPMCI_TOKEN_NPM}}
|
||||||
|
NPMCI_TOKEN_NPM2: ${{secrets.NPMCI_TOKEN_NPM2}}
|
||||||
|
NPMCI_GIT_GITHUBTOKEN: ${{secrets.NPMCI_GIT_GITHUBTOKEN}}
|
||||||
|
NPMCI_URL_CLOUDLY: ${{secrets.NPMCI_URL_CLOUDLY}}
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
security:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
continue-on-error: true
|
||||||
|
container:
|
||||||
|
image: ${{ env.IMAGE }}
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v3
|
||||||
|
|
||||||
|
- name: Install pnpm and npmci
|
||||||
|
run: |
|
||||||
|
pnpm install -g pnpm
|
||||||
|
pnpm install -g @ship.zone/npmci
|
||||||
|
|
||||||
|
- name: Run npm prepare
|
||||||
|
run: npmci npm prepare
|
||||||
|
|
||||||
|
- name: Audit production dependencies
|
||||||
|
run: |
|
||||||
|
npmci command npm config set registry https://registry.npmjs.org
|
||||||
|
npmci command pnpm audit --audit-level=high --prod
|
||||||
|
continue-on-error: true
|
||||||
|
|
||||||
|
- name: Audit development dependencies
|
||||||
|
run: |
|
||||||
|
npmci command npm config set registry https://registry.npmjs.org
|
||||||
|
npmci command pnpm audit --audit-level=high --dev
|
||||||
|
continue-on-error: true
|
||||||
|
|
||||||
|
test:
|
||||||
|
if: ${{ always() }}
|
||||||
|
needs: security
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
container:
|
||||||
|
image: ${{ env.IMAGE }}
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v3
|
||||||
|
|
||||||
|
- name: Test stable
|
||||||
|
run: |
|
||||||
|
npmci node install stable
|
||||||
|
npmci npm install
|
||||||
|
npmci npm test
|
||||||
|
|
||||||
|
- name: Test build
|
||||||
|
run: |
|
||||||
|
npmci node install stable
|
||||||
|
npmci npm install
|
||||||
|
npmci npm build
|
||||||
124
.gitea/workflows/default_tags.yaml
Normal file
124
.gitea/workflows/default_tags.yaml
Normal file
@@ -0,0 +1,124 @@
|
|||||||
|
name: Default (tags)
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
tags:
|
||||||
|
- '*'
|
||||||
|
|
||||||
|
env:
|
||||||
|
IMAGE: code.foss.global/host.today/ht-docker-node:npmci
|
||||||
|
NPMCI_COMPUTED_REPOURL: https://${{gitea.repository_owner}}:${{secrets.GITEA_TOKEN}}@/${{gitea.repository}}.git
|
||||||
|
NPMCI_TOKEN_NPM: ${{secrets.NPMCI_TOKEN_NPM}}
|
||||||
|
NPMCI_TOKEN_NPM2: ${{secrets.NPMCI_TOKEN_NPM2}}
|
||||||
|
NPMCI_GIT_GITHUBTOKEN: ${{secrets.NPMCI_GIT_GITHUBTOKEN}}
|
||||||
|
NPMCI_URL_CLOUDLY: ${{secrets.NPMCI_URL_CLOUDLY}}
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
security:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
continue-on-error: true
|
||||||
|
container:
|
||||||
|
image: ${{ env.IMAGE }}
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v3
|
||||||
|
|
||||||
|
- name: Prepare
|
||||||
|
run: |
|
||||||
|
pnpm install -g pnpm
|
||||||
|
pnpm install -g @ship.zone/npmci
|
||||||
|
npmci npm prepare
|
||||||
|
|
||||||
|
- name: Audit production dependencies
|
||||||
|
run: |
|
||||||
|
npmci command npm config set registry https://registry.npmjs.org
|
||||||
|
npmci command pnpm audit --audit-level=high --prod
|
||||||
|
continue-on-error: true
|
||||||
|
|
||||||
|
- name: Audit development dependencies
|
||||||
|
run: |
|
||||||
|
npmci command npm config set registry https://registry.npmjs.org
|
||||||
|
npmci command pnpm audit --audit-level=high --dev
|
||||||
|
continue-on-error: true
|
||||||
|
|
||||||
|
test:
|
||||||
|
if: ${{ always() }}
|
||||||
|
needs: security
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
container:
|
||||||
|
image: ${{ env.IMAGE }}
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v3
|
||||||
|
|
||||||
|
- name: Prepare
|
||||||
|
run: |
|
||||||
|
pnpm install -g pnpm
|
||||||
|
pnpm install -g @ship.zone/npmci
|
||||||
|
npmci npm prepare
|
||||||
|
|
||||||
|
- name: Test stable
|
||||||
|
run: |
|
||||||
|
npmci node install stable
|
||||||
|
npmci npm install
|
||||||
|
npmci npm test
|
||||||
|
|
||||||
|
- name: Test build
|
||||||
|
run: |
|
||||||
|
npmci node install stable
|
||||||
|
npmci npm install
|
||||||
|
npmci npm build
|
||||||
|
|
||||||
|
release:
|
||||||
|
needs: test
|
||||||
|
if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/')
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
container:
|
||||||
|
image: ${{ env.IMAGE }}
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v3
|
||||||
|
|
||||||
|
- name: Prepare
|
||||||
|
run: |
|
||||||
|
pnpm install -g pnpm
|
||||||
|
pnpm install -g @ship.zone/npmci
|
||||||
|
npmci npm prepare
|
||||||
|
|
||||||
|
- name: Release
|
||||||
|
run: |
|
||||||
|
npmci node install stable
|
||||||
|
npmci npm publish
|
||||||
|
|
||||||
|
metadata:
|
||||||
|
needs: test
|
||||||
|
if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/')
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
container:
|
||||||
|
image: ${{ env.IMAGE }}
|
||||||
|
continue-on-error: true
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v3
|
||||||
|
|
||||||
|
- name: Prepare
|
||||||
|
run: |
|
||||||
|
pnpm install -g pnpm
|
||||||
|
pnpm install -g @ship.zone/npmci
|
||||||
|
npmci npm prepare
|
||||||
|
|
||||||
|
- name: Code quality
|
||||||
|
run: |
|
||||||
|
npmci command npm install -g typescript
|
||||||
|
npmci npm install
|
||||||
|
|
||||||
|
- name: Trigger
|
||||||
|
run: npmci trigger
|
||||||
|
|
||||||
|
- name: Build docs and upload artifacts
|
||||||
|
run: |
|
||||||
|
npmci node install stable
|
||||||
|
npmci npm install
|
||||||
|
pnpm install -g @git.zone/tsdoc
|
||||||
|
npmci command tsdoc
|
||||||
|
continue-on-error: true
|
||||||
11
.gitignore
vendored
11
.gitignore
vendored
@@ -3,7 +3,6 @@
|
|||||||
# artifacts
|
# artifacts
|
||||||
coverage/
|
coverage/
|
||||||
public/
|
public/
|
||||||
pages/
|
|
||||||
|
|
||||||
# installs
|
# installs
|
||||||
node_modules/
|
node_modules/
|
||||||
@@ -15,9 +14,11 @@ node_modules/
|
|||||||
|
|
||||||
# builds
|
# builds
|
||||||
dist/
|
dist/
|
||||||
dist_web/
|
dist_*/
|
||||||
dist_serve/
|
|
||||||
dist_ts_web/
|
|
||||||
|
|
||||||
# custom
|
# AI
|
||||||
|
.claude/
|
||||||
|
.serena/
|
||||||
|
|
||||||
|
#------# custom
|
||||||
test
|
test
|
||||||
|
|||||||
@@ -1,69 +0,0 @@
|
|||||||
image: hosttoday/ht-docker-dbase:npmci
|
|
||||||
services:
|
|
||||||
- docker:dind
|
|
||||||
|
|
||||||
stages:
|
|
||||||
- mirror
|
|
||||||
- test
|
|
||||||
- release
|
|
||||||
- trigger
|
|
||||||
- pages
|
|
||||||
|
|
||||||
mirror:
|
|
||||||
image: hosttoday/ht-docker-node:npmci
|
|
||||||
stage: mirror
|
|
||||||
script:
|
|
||||||
- npmci git mirror
|
|
||||||
tags:
|
|
||||||
- docker
|
|
||||||
|
|
||||||
|
|
||||||
test:
|
|
||||||
stage: test
|
|
||||||
script:
|
|
||||||
- npmci node install stable
|
|
||||||
- npmci npm install
|
|
||||||
- npmci npm test
|
|
||||||
tags:
|
|
||||||
- docker
|
|
||||||
- lossless
|
|
||||||
- priv
|
|
||||||
|
|
||||||
release:
|
|
||||||
stage: release
|
|
||||||
environment: npmjs-com_registry
|
|
||||||
script:
|
|
||||||
- npmci npm prepare
|
|
||||||
- npmci npm publish
|
|
||||||
only:
|
|
||||||
- tags
|
|
||||||
tags:
|
|
||||||
- docker
|
|
||||||
- lossless
|
|
||||||
- priv
|
|
||||||
|
|
||||||
trigger:
|
|
||||||
stage: trigger
|
|
||||||
script:
|
|
||||||
- npmci trigger
|
|
||||||
only:
|
|
||||||
- tags
|
|
||||||
tags:
|
|
||||||
- docker
|
|
||||||
- lossless
|
|
||||||
- priv
|
|
||||||
|
|
||||||
pages:
|
|
||||||
image: hosttoday/ht-docker-node:npmci
|
|
||||||
stage: pages
|
|
||||||
script:
|
|
||||||
- npmci command npm install -g @gitzone/tsdoc
|
|
||||||
- npmci command tsdoc
|
|
||||||
only:
|
|
||||||
- tags
|
|
||||||
tags:
|
|
||||||
- docker
|
|
||||||
artifacts:
|
|
||||||
expire_in: 1 week
|
|
||||||
paths:
|
|
||||||
- public
|
|
||||||
11
.vscode/launch.json
vendored
Normal file
11
.vscode/launch.json
vendored
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
{
|
||||||
|
"version": "0.2.0",
|
||||||
|
"configurations": [
|
||||||
|
{
|
||||||
|
"command": "npm test",
|
||||||
|
"name": "Run npm test",
|
||||||
|
"request": "launch",
|
||||||
|
"type": "node-terminal"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
27
.vscode/settings.json
vendored
27
.vscode/settings.json
vendored
@@ -1,3 +1,26 @@
|
|||||||
{
|
{
|
||||||
"typescript.tsdk": "node_modules/typescript/lib"
|
"json.schemas": [
|
||||||
}
|
{
|
||||||
|
"fileMatch": ["/npmextra.json"],
|
||||||
|
"schema": {
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"npmci": {
|
||||||
|
"type": "object",
|
||||||
|
"description": "settings for npmci"
|
||||||
|
},
|
||||||
|
"gitzone": {
|
||||||
|
"type": "object",
|
||||||
|
"description": "settings for gitzone",
|
||||||
|
"properties": {
|
||||||
|
"projectType": {
|
||||||
|
"type": "string",
|
||||||
|
"enum": ["website", "element", "service", "npm", "wcc"]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|||||||
62
README.md
62
README.md
@@ -1,62 +0,0 @@
|
|||||||
# @gitzone/npmdocker
|
|
||||||
develop npm modules cross platform with docker
|
|
||||||
|
|
||||||
## Availabililty and Links
|
|
||||||
* [npmjs.org (npm package)](https://www.npmjs.com/package/@gitzone/npmdocker)
|
|
||||||
* [gitlab.com (source)](https://gitlab.com/gitzone/npmdocker)
|
|
||||||
* [github.com (source mirror)](https://github.com/gitzone/npmdocker)
|
|
||||||
* [docs (typedoc)](https://gitzone.gitlab.io/npmdocker/)
|
|
||||||
|
|
||||||
## Status for master
|
|
||||||
[](https://gitlab.com/gitzone/npmdocker/commits/master)
|
|
||||||
[](https://gitlab.com/gitzone/npmdocker/commits/master)
|
|
||||||
[](https://www.npmjs.com/package/@gitzone/npmdocker)
|
|
||||||
[](https://snyk.io/test/npm/@gitzone/npmdocker)
|
|
||||||
[](https://nodejs.org/dist/latest-v10.x/docs/api/)
|
|
||||||
[](https://nodejs.org/dist/latest-v10.x/docs/api/)
|
|
||||||
[](https://prettier.io/)
|
|
||||||
|
|
||||||
## Usage
|
|
||||||
|
|
||||||
Use TypeScript for best in class instellisense.
|
|
||||||
|
|
||||||
### Why does this package exist?
|
|
||||||
|
|
||||||
Sometimes you want a clean and fresh linux environment everytime you test your package.
|
|
||||||
Usually this is the default i CI, but locally behaviour tends to defer.
|
|
||||||
|
|
||||||
### Where does it work
|
|
||||||
|
|
||||||
The npmdocker package works in everywhere where the docker cli is available. e.g.:
|
|
||||||
|
|
||||||
- docker toolbox
|
|
||||||
- native docker application
|
|
||||||
- docker in docker
|
|
||||||
- mounted docker.sock
|
|
||||||
|
|
||||||
### How do I use it?
|
|
||||||
|
|
||||||
create a npmextra.json in the project's root directory
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"npmdocker": {
|
|
||||||
"baseImage": "hosttoday/ht-docker-node:npmts",
|
|
||||||
"command": "npmci test stable",
|
|
||||||
"dockerSock": false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
| option | description |
|
|
||||||
| ----------- | ------------------------------------------------------------------------------------- |
|
|
||||||
| baseImage | the base image that is the context for your project |
|
|
||||||
| command | the cli command to run within the the project's directory inside the docker container |
|
|
||||||
| dockersSock | wether or not the testcontainer will have access to the docker.sock of the host |
|
|
||||||
|
|
||||||
For further information read the linked docs at the top of this readme.
|
|
||||||
|
|
||||||
> MIT licensed | **©** [Lossless GmbH](https://lossless.gmbh)
|
|
||||||
| By using this npm module you agree to our [privacy policy](https://lossless.gmbH/privacy.html)
|
|
||||||
|
|
||||||
[](https://maintainedby.lossless.com)
|
|
||||||
@@ -1,5 +1,5 @@
|
|||||||
FROM hosttoday/ht-docker-node:npmci
|
FROM hosttoday/ht-docker-node:npmci
|
||||||
RUN yarn global add @gitzone/tsdocker
|
RUN yarn global add @git.zone/tsdocker
|
||||||
COPY ./ /workspace
|
COPY ./ /workspace
|
||||||
WORKDIR /workspace
|
WORKDIR /workspace
|
||||||
ENV CI=true
|
ENV CI=true
|
||||||
|
|||||||
528
changelog.md
Normal file
528
changelog.md
Normal file
@@ -0,0 +1,528 @@
|
|||||||
|
# Changelog
|
||||||
|
|
||||||
|
## 2026-02-07 - 1.16.0 - feat(core)
|
||||||
|
Introduce per-invocation TsDockerSession and session-aware local registry and build orchestration; stream and parse buildx output for improved logging and visibility; detect Docker topology and add CI-safe cleanup; update README with multi-arch, parallel-build, caching, and local registry usage and new CLI flags.
|
||||||
|
|
||||||
|
- Add TsDockerSession to allocate unique ports, container names and builder suffixes for concurrent runs (especially in CI).
|
||||||
|
- Make local registry session-aware: start/stop/use registry container and persistent storage per session; retry on port conflicts.
|
||||||
|
- Inject session into Dockerfile instances and TsDockerManager; use session.config.registryHost for tagging/pushing and test container naming.
|
||||||
|
- Stream and parse buildx/docker build output via createBuildOutputHandler for clearer step/platform/CACHED/DONE logging and --progress=plain usage.
|
||||||
|
- Detect Docker topology (socket-mount, dind, local) in DockerContext and expose it in context info.
|
||||||
|
- Add manager.cleanup to remove CI-scoped buildx builders and ensure CLI calls cleanup after build/push/test.
|
||||||
|
- Update interfaces to include topology and adjust many Dockerfile/manager methods to be session-aware.
|
||||||
|
- Large README improvements: multi-arch flow, persistent local registry, parallel builds, caching, new CLI and clean flags, and examples for CI integration.
|
||||||
|
|
||||||
|
## 2026-02-07 - 1.15.1 - fix(registry)
|
||||||
|
use persistent local registry and OCI Distribution API image copy for pushes
|
||||||
|
|
||||||
|
- Adds RegistryCopy class implementing the OCI Distribution API to copy images (including multi-arch manifest lists) from the local registry to remote registries.
|
||||||
|
- All builds now go through a persistent local registry at localhost:5234 with volume storage at .nogit/docker-registry/; Dockerfile.startLocalRegistry mounts this directory.
|
||||||
|
- Dockerfile.push now delegates to RegistryCopy.copyImage; Dockerfile.needsLocalRegistry() always returns true and config.push is now a no-op (kept for backward compat).
|
||||||
|
- Multi-platform buildx builds are pushed to the local registry (this.localRegistryTag) during buildx --push; code avoids redundant pushes when images are already pushed by buildx.
|
||||||
|
- Build, cached build, test, push and pull flows now start/stop the local registry automatically to support multi-platform/image resolution.
|
||||||
|
- Introduces Dockerfile.getDestRepo and support for config.registryRepoMap to control destination repository mapping.
|
||||||
|
- Breaking change: registry usage and push behavior changed (config.push ignored and local registry mandatory) — bump major version.
|
||||||
|
|
||||||
|
## 2026-02-07 - 1.15.0 - feat(clean)
|
||||||
|
Make the `clean` command interactive: add smartinteract prompts, docker context detection, and selective resource removal with support for --all and -y auto-confirm
|
||||||
|
|
||||||
|
- Adds dependency @push.rocks/smartinteract and exposes it from the plugins module
|
||||||
|
- Refactors tsdocker.cli.ts clean command to list Docker resources and prompt checkbox selection for running/stopped containers, images, and volumes
|
||||||
|
- Adds DockerContext detection and logging to determine active Docker context
|
||||||
|
- Introduces auto-confirm (-y) and --all handling to either auto-accept or allow full-image/volume removal
|
||||||
|
- Replaces blunt shell commands with safer, interactive selection and adds improved error handling and logging
|
||||||
|
|
||||||
|
## 2026-02-07 - 1.14.0 - feat(build)
|
||||||
|
add level-based parallel builds with --parallel and configurable concurrency
|
||||||
|
|
||||||
|
- Introduces --parallel and --parallel=<n> CLI flags to enable level-based parallel Docker builds (default concurrency 4).
|
||||||
|
- Adds Dockerfile.computeLevels() to group topologically-sorted Dockerfiles into dependency levels.
|
||||||
|
- Adds Dockerfile.runWithConcurrency() implementing a bounded-concurrency worker-pool (fast-fail via Promise.all).
|
||||||
|
- Integrates parallel build mode into Dockerfile.buildDockerfiles() and TsDockerManager.build() for both cached and non-cached flows, including tagging and pushing for dependency resolution after each level.
|
||||||
|
- Adds options.parallel and options.parallelConcurrency to the build interface and wires them through the CLI and manager.
|
||||||
|
- Updates documentation (readme.hints.md) with usage examples and implementation notes.
|
||||||
|
|
||||||
|
## 2026-02-07 - 1.13.0 - feat(docker)
|
||||||
|
add Docker context detection, rootless support, and context-aware buildx registry handling
|
||||||
|
|
||||||
|
- Introduce DockerContext class to detect current Docker context and rootless mode and to log warnings and context info
|
||||||
|
- Add IDockerContextInfo interface and a new context option on build/config to pass explicit Docker context
|
||||||
|
- Propagate --context CLI flag into TsDockerManager.prepare so CLI commands can set an explicit Docker context
|
||||||
|
- Make buildx builder name context-aware (tsdocker-builder-<sanitized-context>) and log builder name/platforms
|
||||||
|
- Pass isRootless into local registry startup and build pipeline; emit rootless-specific warnings and registry reachability hint
|
||||||
|
|
||||||
|
## 2026-02-06 - 1.12.0 - feat(docker)
|
||||||
|
add detailed logging for buildx, build commands, local registry, and local dependency info
|
||||||
|
|
||||||
|
- Log startup of local registry including a note about buildx dependency bridging
|
||||||
|
- Log constructed build commands and indicate whether buildx or standard docker build is used (including platforms and --push/--load distinctions)
|
||||||
|
- Emit build mode summary at start of build phase and report local base-image dependency mappings
|
||||||
|
- Report when --no-cache is enabled and surface buildx setup readiness with configured platforms
|
||||||
|
- Non-functional change: purely adds informational logging to improve observability during builds
|
||||||
|
|
||||||
|
## 2026-02-06 - 1.11.0 - feat(docker)
|
||||||
|
start temporary local registry for buildx dependency resolution and ensure buildx builder uses host network
|
||||||
|
|
||||||
|
- Introduce a temporary local registry (localhost:5234) with start/stop helpers and push support to expose local images for buildx
|
||||||
|
- Add Dockerfile.needsLocalRegistry to decide when a local registry is required (local base dependencies + multi-platform or platform option)
|
||||||
|
- Push built images to the local registry and set localRegistryTag on Dockerfile instances for BuildKit build-context usage
|
||||||
|
- Tag built images in the host daemon for dependent Dockerfiles to resolve local FROM references
|
||||||
|
- Integrate registry lifecycle into Dockerfile.buildDockerfiles and TsDockerManager build flows (start before builds, stop after)
|
||||||
|
- Ensure buildx builder is created with --driver-opt network=host and recreate existing builder if it lacks host network to allow registry access from build containers
|
||||||
|
|
||||||
|
## 2026-02-06 - 1.10.0 - feat(classes.dockerfile)
|
||||||
|
support using a local base image as a build context in buildx commands
|
||||||
|
|
||||||
|
- Adds --build-context flag mapping base image to docker-image://<localTag> when localBaseImageDependent && localBaseDockerfile are set
|
||||||
|
- Appends the build context flag to both single-platform and multi-platform docker buildx commands
|
||||||
|
- Logs an info message indicating the local build context mapping
|
||||||
|
|
||||||
|
## 2026-02-06 - 1.9.0 - feat(build)
|
||||||
|
add verbose build output, progress logging, and timing for builds/tests
|
||||||
|
|
||||||
|
- Add 'verbose' option to build/test flows (interfaces, CLI, and method signatures) to allow streaming raw docker build output or run silently
|
||||||
|
- Log per-item progress for build and test phases (e.g. (1/N) Building/Testing <tag>) and report individual durations
|
||||||
|
- Return elapsed time from Dockerfile.build() and Dockerfile.test() and aggregate total build/test times in manager
|
||||||
|
- Introduce formatDuration(ms) helper in logging module to format timings
|
||||||
|
- Switch from console.log to structured logger calls across cache, manager, dockerfile and push paths
|
||||||
|
- Use silent exec variants when verbose is false and stream exec when verbose is true
|
||||||
|
|
||||||
|
## 2026-02-06 - 1.8.0 - feat(build)
|
||||||
|
add optional content-hash based build cache to skip rebuilding unchanged Dockerfiles
|
||||||
|
|
||||||
|
- Introduce TsDockerCache to compute SHA-256 of Dockerfile content and persist cache to .nogit/tsdocker_support.json
|
||||||
|
- Add ICacheEntry and ICacheData interfaces and a cached flag to IBuildCommandOptions
|
||||||
|
- Integrate cached mode in TsDockerManager: skip builds on cache hits, verify image presence, record builds on misses, and still perform dependency tagging
|
||||||
|
- Expose --cached option in CLI to enable the cached build flow
|
||||||
|
- Cache records store contentHash, imageId, buildTag and timestamp
|
||||||
|
|
||||||
|
## 2026-02-06 - 1.7.0 - feat(cli)
|
||||||
|
add CLI version display using commitinfo
|
||||||
|
|
||||||
|
- Imported commitinfo from './00_commitinfo_data.js' and called tsdockerCli.addVersion(commitinfo.version) to surface package/commit version in the Smartcli instance
|
||||||
|
- Change made in ts/tsdocker.cli.ts — small user-facing CLI enhancement; no breaking changes
|
||||||
|
|
||||||
|
## 2026-02-06 - 1.6.0 - feat(docker)
|
||||||
|
add support for no-cache builds and tag built images for local dependency resolution
|
||||||
|
|
||||||
|
- Introduce IBuildCommandOptions.noCache to control --no-cache behavior
|
||||||
|
- Propagate noCache from CLI (via cache flag) through TsDockerManager to Dockerfile.build
|
||||||
|
- Append --no-cache to docker build/buildx commands when noCache is true
|
||||||
|
- After building an image, tag it with full base image references used by dependent Dockerfiles so their FROM lines resolve to the locally-built image
|
||||||
|
- Log tagging actions and execute docker tag via smartshellInstance
|
||||||
|
|
||||||
|
## 2026-02-06 - 1.5.0 - feat(build)
|
||||||
|
add support for selective builds, platform override and build timeout
|
||||||
|
|
||||||
|
- Introduce IBuildCommandOptions with patterns, platform and timeout to control build behavior
|
||||||
|
- Allow manager.build() to accept options and build only matching Dockerfiles (including dependencies) preserving topological order
|
||||||
|
- Add CLI parsing for build/push to accept positional Dockerfile patterns and --platform/--timeout flags
|
||||||
|
- Support single-platform override via docker buildx and multi-platform buildx detection
|
||||||
|
- Implement streaming exec with timeout to kill long-running builds and surface timeout errors
|
||||||
|
|
||||||
|
## 2026-02-04 - 1.4.3 - fix(dockerfile)
|
||||||
|
fix matching of base images to local Dockerfiles by stripping registry prefixes when comparing image references
|
||||||
|
|
||||||
|
- Added Dockerfile.extractRepoVersion(imageRef) to normalize image references by removing registry prefixes (detects registries containing '.' or ':' or 'localhost').
|
||||||
|
- Use extractRepoVersion when checking tagToDockerfile and when mapping local base dockerfiles to ensure comparisons use repo:tag keys rather than full registry-prefixed references.
|
||||||
|
- Prevents mismatches when baseImage includes a registry (e.g. "host.today/repo:version") so it correctly matches a local cleanTag like "repo:version".
|
||||||
|
|
||||||
|
## 2026-01-21 - 1.4.2 - fix(classes.dockerfile)
|
||||||
|
use a single top-level fs import instead of requiring fs inside methods
|
||||||
|
|
||||||
|
- Added top-level import: import * as fs from 'fs' in ts/classes.dockerfile.ts
|
||||||
|
- Removed inline require('fs') calls and replaced with the imported fs in constructor and test() to keep imports consistent
|
||||||
|
- No behavioral change expected; this is a cleanup/refactor to standardize module usage
|
||||||
|
|
||||||
|
## 2026-01-20 - 1.4.1 - fix(docs)
|
||||||
|
update README: expand usage, installation, quick start, features, troubleshooting and migration notes
|
||||||
|
|
||||||
|
- Expanded README content: new Quick Start, Installation examples, and detailed Features section (containerized testing, smart Docker builds, multi-registry push, multi-architecture support, zero-config start)
|
||||||
|
- Added troubleshooting and performance tips including registry login guidance and circular dependency advice
|
||||||
|
- Updated migration notes from legacy npmdocker to @git.zone/tsdocker (command and config key changes, ESM guidance)
|
||||||
|
- Documentation-only change — no source code modified
|
||||||
|
|
||||||
|
## 2026-01-20 - 1.4.0 - feat(tsdocker)
|
||||||
|
add multi-registry and multi-arch Docker build/push/pull manager, registry storage, Dockerfile handling, and new CLI commands
|
||||||
|
|
||||||
|
- Introduce TsDockerManager orchestrator to discover, sort, build, test, push and pull Dockerfiles
|
||||||
|
- Add Dockerfile class with dependency-aware build order, buildx support, push/pull and test flows (new large module)
|
||||||
|
- Add DockerRegistry and RegistryStorage classes to manage registry credentials, login/logout and environment loading
|
||||||
|
- Add CLI commands: build, push, pull, test, login, list (and integrate TsDockerManager into CLI)
|
||||||
|
- Extend configuration (ITsDockerConfig) with registries, registryRepoMap, buildArgEnvMap, platforms, push and testDir; re-export as IConfig for backwards compatibility
|
||||||
|
- Add @push.rocks/lik to dependencies and import it in tsdocker.plugins
|
||||||
|
- Remove legacy speedtest command and related package.json script
|
||||||
|
- Update README and readme.hints with new features, configuration examples and command list
|
||||||
|
|
||||||
|
## 2026-01-19 - 1.3.0 - feat(packaging)
|
||||||
|
Rename package scope to @git.zone and migrate to ESM; rename CLI/config keys, update entrypoints and imports, bump Node requirement to 18, and adjust scripts/dependencies
|
||||||
|
|
||||||
|
- Package renamed to @git.zone/tsdocker (scope change) — consumers must update package reference.
|
||||||
|
- Configuration key changed from 'npmdocker' to '@git.zone/tsdocker' in npmextra.json; update project config accordingly.
|
||||||
|
- CLI command renamed from 'npmdocker' to 'tsdocker' and entrypoint/entrypoint binary references updated.
|
||||||
|
- Project migrated to ESM: imports now use .js extensions, package main/typings point to dist_ts, and ts source uses ESM patterns — Node >=18 required.
|
||||||
|
- Build/test scripts changed to use tsx and updated test task names; CI/workflow and npmextra release registries updated.
|
||||||
|
- Dependencies/devDependencies bumped; smartfs, smartcli and tsbuild versions updated.
|
||||||
|
- Docker build command now uses '--load' and default base images/installation behavior adjusted (global install of tsdocker in image).
|
||||||
|
|
||||||
|
## 2025-12-13 - 1.2.43 - fix(packaging)
|
||||||
|
|
||||||
|
Rename package scope to @git.zone and migrate deps/CI; pin pnpm and enable ESM packaging
|
||||||
|
|
||||||
|
- Rename npm package scope from @gitzone/tsdocker to @git.zone/tsdocker (package.json, commitinfo, README, npmextra)
|
||||||
|
- Migrate devDependencies from @gitzone/* to @git.zone/* and ensure runtime packages use @push.rocks/* where applicable
|
||||||
|
- Replace smartfile usage with smartfs and update code to use async smartfs.file(...).write()/delete() patterns
|
||||||
|
- Add packageManager pin for pnpm, set type: "module", add files array and pnpm.overrides in package.json
|
||||||
|
- Add tsconfig.json with NodeNext/ES2022 settings and other ESM-related adjustments
|
||||||
|
- Add Gitea CI workflows (.gitea/workflows/default_tags.yaml and default_nottags.yaml) for test, audit and release flows
|
||||||
|
- Update assets Dockerfile to reference @git.zone/tsdocker and other packaging/CI related scripts
|
||||||
|
- Update .gitignore to consolidate dist patterns and add AI/tooling excludes (.claude/, .serena/)
|
||||||
|
- Update README, readme.hints.md and changelog to document the scope rename, dependency migrations and SmartFS migration
|
||||||
|
|
||||||
|
## 2025-11-22 - 1.2.42 - fix(package.json)
|
||||||
|
|
||||||
|
Add packageManager field to package.json to pin pnpm version
|
||||||
|
|
||||||
|
- Add packageManager: "pnpm@10.18.1+sha512.77a884a165cbba2d8d1c19e3b4880eee6d2fcabd0d879121e282196b80042351d5eb3ca0935fa599da1dc51265cc68816ad2bddd2a2de5ea9fdf92adbec7cd34" to package.json to lock pnpm CLI version and integrity
|
||||||
|
|
||||||
|
## 2025-11-22 - 1.2.41 - fix(core)
|
||||||
|
|
||||||
|
Migrate to @git.zone / @push.rocks packages, replace smartfile with smartfs and adapt filesystem usage; update dev deps and remove CI/lint config
|
||||||
|
|
||||||
|
- Updated devDependencies from @gitzone/* to @git.zone/* (tsbuild, tsrun, tstest) and bumped versions
|
||||||
|
- Re-scoped runtime dependencies from @pushrocks/* to @push.rocks/* and updated package versions
|
||||||
|
- Replaced deprecated smartfile usage with new async smartfs API; added SmartFs instance in ts/tsdocker.plugins.ts
|
||||||
|
- Switched sync filesystem calls to Node fs where appropriate (fs.existsSync, fs.mkdirSync) and updated code to await smartfs.file(...).write()/delete()
|
||||||
|
- Made buildDockerFile async and awaited file write/delete operations to ensure correct async flow
|
||||||
|
- Updated CLI bootstrap to require @git.zone/tsrun in cli.ts.js
|
||||||
|
- Removed tslint.json and cleaned up CI configuration (.gitlab-ci.yml content removed)
|
||||||
|
- Added readme.hints.md describing the migration and dependency changes
|
||||||
|
|
||||||
|
## 2021-09-30 - 1.2.40 - release (no code changes)
|
||||||
|
|
||||||
|
Routine release tag with no recorded source changes.
|
||||||
|
|
||||||
|
- Tagged release only (no changelogged changes).
|
||||||
|
|
||||||
|
## 2021-09-30 - 1.2.39 - fix(core)
|
||||||
|
|
||||||
|
Core maintenance updates.
|
||||||
|
|
||||||
|
- Internal core updates and maintenance.
|
||||||
|
|
||||||
|
## 2019-05-28 - 1.2.38 - fix(core)
|
||||||
|
|
||||||
|
Core maintenance updates.
|
||||||
|
|
||||||
|
- Internal core updates and maintenance.
|
||||||
|
|
||||||
|
## 2019-05-27 - 1.2.37 - fix(core)
|
||||||
|
|
||||||
|
Core maintenance updates.
|
||||||
|
|
||||||
|
- Internal core updates and maintenance.
|
||||||
|
|
||||||
|
## 2019-05-27 - 1.2.36 - fix(core)
|
||||||
|
|
||||||
|
Core maintenance updates.
|
||||||
|
|
||||||
|
- Internal core updates and maintenance.
|
||||||
|
|
||||||
|
## 2019-05-21 - 1.2.35 - fix(core)
|
||||||
|
|
||||||
|
Core maintenance updates.
|
||||||
|
|
||||||
|
- Internal core updates and maintenance.
|
||||||
|
|
||||||
|
## 2019-05-21 - 1.2.34 - fix(core)
|
||||||
|
|
||||||
|
Core maintenance updates.
|
||||||
|
|
||||||
|
- Internal core updates and maintenance.
|
||||||
|
|
||||||
|
## 2019-05-12 - 1.2.33 - fix(core)
|
||||||
|
|
||||||
|
Core maintenance updates.
|
||||||
|
|
||||||
|
- Internal core updates and maintenance.
|
||||||
|
|
||||||
|
## 2019-05-12 - 1.2.32 - fix(core)
|
||||||
|
|
||||||
|
Core maintenance updates.
|
||||||
|
|
||||||
|
- Internal core updates and maintenance.
|
||||||
|
|
||||||
|
## 2019-05-12 - 1.2.31 - fix(bin name)
|
||||||
|
|
||||||
|
Rename of the published CLI binary.
|
||||||
|
|
||||||
|
- Changed published binary name from "npmdocker" to "tsdocker".
|
||||||
|
|
||||||
|
## 2019-05-10 - 1.2.30 - fix(core)
|
||||||
|
|
||||||
|
Core maintenance updates.
|
||||||
|
|
||||||
|
- Internal core updates and maintenance.
|
||||||
|
|
||||||
|
## 2019-05-10 - 1.2.29 - fix(core)
|
||||||
|
|
||||||
|
Core maintenance updates.
|
||||||
|
|
||||||
|
- Internal core updates and maintenance.
|
||||||
|
|
||||||
|
## 2019-05-10 - 1.2.28 - fix(core)
|
||||||
|
|
||||||
|
Core maintenance updates.
|
||||||
|
|
||||||
|
- Internal core updates and maintenance.
|
||||||
|
|
||||||
|
## 2019-05-09 - 1.2.27 - fix(core)
|
||||||
|
|
||||||
|
Core maintenance updates.
|
||||||
|
|
||||||
|
- Internal core updates and maintenance.
|
||||||
|
|
||||||
|
## 2018-10-29 - 1.2.26 - fix(ci)
|
||||||
|
|
||||||
|
CI build process change.
|
||||||
|
|
||||||
|
- Removed "npmts" from the build process.
|
||||||
|
|
||||||
|
## 2018-10-29 - 1.2.25 - fix(core)
|
||||||
|
|
||||||
|
Core maintenance updates.
|
||||||
|
|
||||||
|
- Internal core updates and maintenance.
|
||||||
|
|
||||||
|
## 2018-10-28 - 1.2.24 - fix(clean)
|
||||||
|
|
||||||
|
Improved image cleanup.
|
||||||
|
|
||||||
|
- Images are now cleaned in a more thorough way.
|
||||||
|
|
||||||
|
## 2018-09-16 - 1.2.23 - fix(core)
|
||||||
|
|
||||||
|
Core maintenance updates.
|
||||||
|
|
||||||
|
- Internal core updates and maintenance.
|
||||||
|
|
||||||
|
## 2018-09-16 - 1.2.22 - fix(dependencies)
|
||||||
|
|
||||||
|
Dependency updates.
|
||||||
|
|
||||||
|
- Updated dependencies (maintenance).
|
||||||
|
|
||||||
|
## 2018-07-21 - 1.2.21 - fix(update to latest standards)
|
||||||
|
|
||||||
|
Standards/update alignment.
|
||||||
|
|
||||||
|
- Updated codebase to latest standards (general maintenance).
|
||||||
|
|
||||||
|
## 2018-05-18 - 1.2.20 - release (no code changes)
|
||||||
|
|
||||||
|
Tagged release with no recorded source changes.
|
||||||
|
|
||||||
|
- Tagged release only (no changelogged changes).
|
||||||
|
|
||||||
|
## 2018-05-18 - 1.2.19 - fix(ci)
|
||||||
|
|
||||||
|
CI improvements.
|
||||||
|
|
||||||
|
- Added a build command to package.json to support CI builds.
|
||||||
|
|
||||||
|
## 2018-05-18 - 1.2.18 - fix(package)
|
||||||
|
|
||||||
|
Packaging change for scoped publish.
|
||||||
|
|
||||||
|
- Include npmdocker under the @git.zone npm scope.
|
||||||
|
|
||||||
|
## 2018-01-24 - 1.2.18 - update
|
||||||
|
|
||||||
|
Documentation update.
|
||||||
|
|
||||||
|
- Updated package description.
|
||||||
|
|
||||||
|
## 2017-10-13 - 1.2.17 - fix(cleanup)
|
||||||
|
|
||||||
|
Cleanup behavior fix.
|
||||||
|
|
||||||
|
- Now cleans up correctly after operations.
|
||||||
|
|
||||||
|
## 2017-10-13 - 1.2.16 - update
|
||||||
|
|
||||||
|
Miscellaneous updates.
|
||||||
|
|
||||||
|
- General maintenance and updates.
|
||||||
|
|
||||||
|
## 2017-10-13 - 1.2.15 - fix(test)
|
||||||
|
|
||||||
|
Testing improvements.
|
||||||
|
|
||||||
|
- Fixed Docker testing.
|
||||||
|
|
||||||
|
## 2017-10-07 - 1.2.14 - ci
|
||||||
|
|
||||||
|
CI improvements.
|
||||||
|
|
||||||
|
- Updated CI configuration.
|
||||||
|
|
||||||
|
## 2017-10-07 - 1.2.13 - update(analytics)
|
||||||
|
|
||||||
|
Analytics integration.
|
||||||
|
|
||||||
|
- Updated Analytics integration.
|
||||||
|
|
||||||
|
## 2017-10-07 - 1.2.12 - update(dependencies)
|
||||||
|
|
||||||
|
Dependency updates.
|
||||||
|
|
||||||
|
- Updated dependencies.
|
||||||
|
|
||||||
|
## 2017-07-16 - 1.2.11 - update
|
||||||
|
|
||||||
|
Dependency and greeting update.
|
||||||
|
|
||||||
|
- Updated dependencies and changed greeting text.
|
||||||
|
|
||||||
|
## 2017-04-21 - 1.2.10 - feature
|
||||||
|
|
||||||
|
Added analytics.
|
||||||
|
|
||||||
|
- Now includes SmartAnalytics.
|
||||||
|
|
||||||
|
## 2017-04-02 - 1.2.8 - docs & ci
|
||||||
|
|
||||||
|
Docs and CI updates.
|
||||||
|
|
||||||
|
- Updated README and CI configuration.
|
||||||
|
|
||||||
|
## 2017-04-02 - 1.2.7 - fix(command)
|
||||||
|
|
||||||
|
Command execution fix.
|
||||||
|
|
||||||
|
- Fixed command execution behavior.
|
||||||
|
|
||||||
|
## 2017-03-28 - 1.2.6 - ci
|
||||||
|
|
||||||
|
CI configuration update.
|
||||||
|
|
||||||
|
- Updated .gitlab-ci.yml for correct images/steps.
|
||||||
|
|
||||||
|
## 2017-03-28 - 1.2.5 - ci
|
||||||
|
|
||||||
|
CI improvements.
|
||||||
|
|
||||||
|
- Further CI updates.
|
||||||
|
|
||||||
|
## 2017-03-28 - 1.2.4 - perf
|
||||||
|
|
||||||
|
Performance improvements.
|
||||||
|
|
||||||
|
- Now runs asynchronously and is significantly faster.
|
||||||
|
|
||||||
|
## 2017-02-12 - 1.2.3 - feature
|
||||||
|
|
||||||
|
New cleanup and diagnostics features.
|
||||||
|
|
||||||
|
- Added speedtest utility.
|
||||||
|
- Added removal of volumes.
|
||||||
|
|
||||||
|
## 2017-02-11 - 1.2.2 - feature
|
||||||
|
|
||||||
|
Cleanup enhancement.
|
||||||
|
|
||||||
|
- Added "clean --all" option to remove more artifacts.
|
||||||
|
|
||||||
|
## 2017-02-11 - 1.2.1 - maintenance
|
||||||
|
|
||||||
|
Docs and dependency updates.
|
||||||
|
|
||||||
|
- Updated README and dependencies.
|
||||||
|
|
||||||
|
## 2016-08-04 - 1.2.0 - maintenance
|
||||||
|
|
||||||
|
Dependency cleanup.
|
||||||
|
|
||||||
|
- Removed unnecessary dependencies.
|
||||||
|
|
||||||
|
## 2016-07-29 - 1.1.6 - feature
|
||||||
|
|
||||||
|
Environment support.
|
||||||
|
|
||||||
|
- Added support for qenv.
|
||||||
|
|
||||||
|
## 2016-07-29 - 1.1.5 - fix
|
||||||
|
|
||||||
|
Container cleanup improvements.
|
||||||
|
|
||||||
|
- Now also removes old running containers.
|
||||||
|
|
||||||
|
## 2016-07-29 - 1.1.4 - fix
|
||||||
|
|
||||||
|
Namespace conflict avoidance.
|
||||||
|
|
||||||
|
- Removes previous containers to avoid name-space conflicts after errors.
|
||||||
|
|
||||||
|
## 2016-07-29 - 1.1.3 - ci
|
||||||
|
|
||||||
|
CI image configuration.
|
||||||
|
|
||||||
|
- Added correct images for GitLab CI.
|
||||||
|
|
||||||
|
## 2016-07-29 - 1.1.2 - ci
|
||||||
|
|
||||||
|
CI fixes.
|
||||||
|
|
||||||
|
- Fixed GitLab CI configuration.
|
||||||
|
|
||||||
|
## 2016-07-28 - 1.1.1 - ci
|
||||||
|
|
||||||
|
CI fixes and configuration.
|
||||||
|
|
||||||
|
- Fixed gitlab.yml and CI issues.
|
||||||
|
|
||||||
|
## 2016-07-28 - 1.1.0 - feature
|
||||||
|
|
||||||
|
Docker-in-Docker support.
|
||||||
|
|
||||||
|
- Improved support for Docker-in-Docker scenarios.
|
||||||
|
|
||||||
|
## 2016-07-28 - 1.0.5 - feature & ci
|
||||||
|
|
||||||
|
Docker socket option and CI update.
|
||||||
|
|
||||||
|
- Added dockerSock option.
|
||||||
|
- Updated .gitlab-ci.yml.
|
||||||
|
|
||||||
|
## 2016-07-19 - 1.0.4 - release (no code changes)
|
||||||
|
|
||||||
|
Tagged release with no recorded source changes.
|
||||||
|
|
||||||
|
- Tagged release only (no changelogged changes).
|
||||||
|
|
||||||
|
## 2016-07-19 - 1.0.3 - feature
|
||||||
|
|
||||||
|
Environment tagging.
|
||||||
|
|
||||||
|
- Added environment tag support.
|
||||||
|
|
||||||
|
## 2016-07-19 - 1.0.2 - milestone
|
||||||
|
|
||||||
|
CLI and stability improvements.
|
||||||
|
|
||||||
|
- Wired up CLI usage.
|
||||||
|
- Marked as fully working.
|
||||||
|
|
||||||
|
## 2016-07-19 - 1.0.1 - initial improvements
|
||||||
|
|
||||||
|
Early project refinements and Docker integration.
|
||||||
|
|
||||||
|
- Added/updated Docker integration and configuration.
|
||||||
|
- Improved config handling and path management.
|
||||||
|
- Updated Docker handling and removed test artifacts.
|
||||||
|
|
||||||
|
## 2016-07-13 - 1.0.0 - initial
|
||||||
|
|
||||||
|
Initial release.
|
||||||
|
|
||||||
|
- Added README and initial project scaffolding.
|
||||||
2
cli.js
Normal file → Executable file
2
cli.js
Normal file → Executable file
@@ -1,3 +1,3 @@
|
|||||||
#!/usr/bin/env node
|
#!/usr/bin/env node
|
||||||
process.env.CLI_CALL = 'true';
|
process.env.CLI_CALL = 'true';
|
||||||
require('./dist/index');
|
import('./dist_ts/index.js');
|
||||||
|
|||||||
@@ -1,4 +0,0 @@
|
|||||||
#!/usr/bin/env node
|
|
||||||
process.env.CLI_CALL = 'true';
|
|
||||||
require('@gitzone/tsrun');
|
|
||||||
require('./ts/index');
|
|
||||||
@@ -3,18 +3,25 @@
|
|||||||
"mode": "default",
|
"mode": "default",
|
||||||
"cli": true
|
"cli": true
|
||||||
},
|
},
|
||||||
"npmci": {
|
"@git.zone/cli": {
|
||||||
"npmGlobalTools": [],
|
"projectType": "npm",
|
||||||
"npmAccessLevel": "public"
|
|
||||||
},
|
|
||||||
"gitzone": {
|
|
||||||
"module": {
|
"module": {
|
||||||
"githost": "gitlab.com",
|
"githost": "gitlab.com",
|
||||||
"gitscope": "gitzone",
|
"gitscope": "gitzone",
|
||||||
"gitrepo": "npmdocker",
|
"gitrepo": "tsdocker",
|
||||||
"shortDescription": "develop npm modules cross platform with docker",
|
"description": "develop npm modules cross platform with docker",
|
||||||
"npmPackagename": "@gitzone/npmdocker",
|
"npmPackagename": "@git.zone/tsdocker",
|
||||||
"license": "MIT"
|
"license": "MIT"
|
||||||
|
},
|
||||||
|
"release": {
|
||||||
|
"accessLevel": "public",
|
||||||
|
"registries": [
|
||||||
|
"https://verdaccio.lossless.digital",
|
||||||
|
"https://registry.npmjs.org"
|
||||||
|
]
|
||||||
}
|
}
|
||||||
|
},
|
||||||
|
"@ship.zone/szci": {
|
||||||
|
"npmGlobalTools": []
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
2071
package-lock.json
generated
2071
package-lock.json
generated
File diff suppressed because it is too large
Load Diff
84
package.json
84
package.json
@@ -1,27 +1,28 @@
|
|||||||
{
|
{
|
||||||
"name": "@gitzone/tsdocker",
|
"name": "@git.zone/tsdocker",
|
||||||
"version": "1.2.39",
|
"version": "1.16.0",
|
||||||
"private": false,
|
"private": false,
|
||||||
"description": "develop npm modules cross platform with docker",
|
"description": "develop npm modules cross platform with docker",
|
||||||
"main": "dist/index.js",
|
"main": "dist_ts/index.js",
|
||||||
"typings": "dist/index.d.ts",
|
"typings": "dist_ts/index.d.ts",
|
||||||
"bin": {
|
"bin": {
|
||||||
"tsdocker": "cli.js"
|
"tsdocker": "cli.js"
|
||||||
},
|
},
|
||||||
"scripts": {
|
"scripts": {
|
||||||
"test": "(npm run clean && npm run setupCheck && npm run testStandard && npm run testSpeed)",
|
"test": "(npm run build)",
|
||||||
"build": "(tsbuild)",
|
"build": "(tsbuild)",
|
||||||
"testStandard": "(cd test/ && node ../cli.ts.js)",
|
"testIntegration": "(npm run clean && npm run setupCheck && npm run testStandard)",
|
||||||
"testSpeed": "(cd test/ && node ../cli.ts.js speedtest)",
|
"testStandard": "(cd test/ && tsx ../ts/index.ts)",
|
||||||
"testClean": "(cd test/ && node ../cli.ts.js clean --all)",
|
"testClean": "(cd test/ && tsx ../ts/index.ts clean --all)",
|
||||||
"testVscode": "(cd test/ && node ../cli.ts.js vscode)",
|
"testVscode": "(cd test/ && tsx ../ts/index.ts vscode)",
|
||||||
"clean": "(rm -rf test/)",
|
"clean": "(rm -rf test/)",
|
||||||
"compile": "(npmts --notest)",
|
"compile": "(npmts --notest)",
|
||||||
"setupCheck": "(git clone https://gitlab.com/sandboxzone/sandbox-npmts.git test/)"
|
"setupCheck": "(git clone https://gitlab.com/sandboxzone/sandbox-npmts.git test/)",
|
||||||
|
"buildDocs": "tsdoc"
|
||||||
},
|
},
|
||||||
"repository": {
|
"repository": {
|
||||||
"type": "git",
|
"type": "git",
|
||||||
"url": "git+ssh://git@gitlab.com/gitzone/npmdocker.git"
|
"url": "https://gitlab.com/gitzone/tsdocker.git"
|
||||||
},
|
},
|
||||||
"keywords": [
|
"keywords": [
|
||||||
"docker"
|
"docker"
|
||||||
@@ -29,32 +30,47 @@
|
|||||||
"author": "Lossless GmbH",
|
"author": "Lossless GmbH",
|
||||||
"license": "MIT",
|
"license": "MIT",
|
||||||
"bugs": {
|
"bugs": {
|
||||||
"url": "https://gitlab.com/gitzone/npmdocker/issues"
|
"url": "https://gitlab.com/gitzone/tsdocker/issues"
|
||||||
},
|
},
|
||||||
"homepage": "https://gitlab.com/gitzone/npmdocker#README",
|
"homepage": "https://gitlab.com/gitzone/tsdocker#readme",
|
||||||
"devDependencies": {
|
"devDependencies": {
|
||||||
"@gitzone/tsbuild": "^2.1.11",
|
"@git.zone/tsbuild": "^4.1.2",
|
||||||
"@gitzone/tsrun": "^1.2.6",
|
"@git.zone/tsrun": "^2.0.1",
|
||||||
"@gitzone/tstest": "^1.0.23",
|
"@git.zone/tstest": "^3.1.6",
|
||||||
"@pushrocks/tapbundle": "^3.0.9",
|
"@types/node": "^25.0.9"
|
||||||
"@types/node": "^12.0.2",
|
|
||||||
"tslint": "^5.16.0",
|
|
||||||
"tslint-config-prettier": "^1.18.0"
|
|
||||||
},
|
},
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
"@pushrocks/npmextra": "^3.0.5",
|
"@push.rocks/lik": "^6.2.2",
|
||||||
"@pushrocks/projectinfo": "^4.0.2",
|
"@push.rocks/npmextra": "^5.3.3",
|
||||||
"@pushrocks/qenv": "^4.0.0",
|
"@push.rocks/projectinfo": "^5.0.2",
|
||||||
"@pushrocks/smartanalytics": "^2.0.15",
|
"@push.rocks/qenv": "^6.1.3",
|
||||||
"@pushrocks/smartcli": "^3.0.7",
|
"@push.rocks/smartanalytics": "^2.0.15",
|
||||||
"@pushrocks/smartfile": "^7.0.2",
|
"@push.rocks/smartcli": "^4.0.20",
|
||||||
"@pushrocks/smartlog": "^2.0.19",
|
"@push.rocks/smartfs": "^1.3.1",
|
||||||
"@pushrocks/smartlog-destination-local": "^7.0.5",
|
"@push.rocks/smartinteract": "^2.0.16",
|
||||||
"@pushrocks/smartlog-source-ora": "^1.0.7",
|
"@push.rocks/smartlog": "^3.1.10",
|
||||||
"@pushrocks/smartopen": "^1.0.8",
|
"@push.rocks/smartlog-destination-local": "^9.0.2",
|
||||||
"@pushrocks/smartpromise": "^3.0.2",
|
"@push.rocks/smartlog-source-ora": "^1.0.9",
|
||||||
"@pushrocks/smartshell": "^2.0.22",
|
"@push.rocks/smartopen": "^2.0.0",
|
||||||
"@pushrocks/smartstring": "^3.0.10",
|
"@push.rocks/smartpromise": "^4.2.3",
|
||||||
"@types/shelljs": "^0.8.5"
|
"@push.rocks/smartshell": "^3.3.0",
|
||||||
|
"@push.rocks/smartstring": "^4.1.0"
|
||||||
|
},
|
||||||
|
"packageManager": "pnpm@10.18.1+sha512.77a884a165cbba2d8d1c19e3b4880eee6d2fcabd0d879121e282196b80042351d5eb3ca0935fa599da1dc51265cc68816ad2bddd2a2de5ea9fdf92adbec7cd34",
|
||||||
|
"type": "module",
|
||||||
|
"files": [
|
||||||
|
"ts/**/*",
|
||||||
|
"ts_web/**/*",
|
||||||
|
"dist/**/*",
|
||||||
|
"dist_*/**/*",
|
||||||
|
"dist_ts/**/*",
|
||||||
|
"dist_ts_web/**/*",
|
||||||
|
"assets/**/*",
|
||||||
|
"cli.js",
|
||||||
|
"npmextra.json",
|
||||||
|
"readme.md"
|
||||||
|
],
|
||||||
|
"pnpm": {
|
||||||
|
"overrides": {}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
9020
pnpm-lock.yaml
generated
Normal file
9020
pnpm-lock.yaml
generated
Normal file
File diff suppressed because it is too large
Load Diff
133
readme.hints.md
Normal file
133
readme.hints.md
Normal file
@@ -0,0 +1,133 @@
|
|||||||
|
# tsdocker Project Hints
|
||||||
|
|
||||||
|
## Module Purpose
|
||||||
|
|
||||||
|
tsdocker is a comprehensive Docker development and building tool. It provides:
|
||||||
|
- Testing npm modules in clean Docker environments (legacy feature)
|
||||||
|
- Building Dockerfiles with dependency ordering
|
||||||
|
- Multi-registry push/pull support
|
||||||
|
- Multi-architecture builds (amd64/arm64)
|
||||||
|
|
||||||
|
## New CLI Commands (2026-01-19)
|
||||||
|
|
||||||
|
| Command | Description |
|
||||||
|
|---------|-------------|
|
||||||
|
| `tsdocker` | Run tests in container (legacy default behavior) |
|
||||||
|
| `tsdocker build` | Build all Dockerfiles with dependency ordering |
|
||||||
|
| `tsdocker push [registry]` | Push images to configured registries |
|
||||||
|
| `tsdocker pull <registry>` | Pull images from registry |
|
||||||
|
| `tsdocker test` | Run container tests (test scripts) |
|
||||||
|
| `tsdocker login` | Login to configured registries |
|
||||||
|
| `tsdocker list` | List discovered Dockerfiles and dependencies |
|
||||||
|
| `tsdocker clean --all` | Clean up Docker environment |
|
||||||
|
| `tsdocker vscode` | Start VS Code in Docker |
|
||||||
|
|
||||||
|
## Configuration
|
||||||
|
|
||||||
|
Configure in `package.json` under `@git.zone/tsdocker`:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"@git.zone/tsdocker": {
|
||||||
|
"registries": ["registry.gitlab.com", "docker.io"],
|
||||||
|
"registryRepoMap": {
|
||||||
|
"registry.gitlab.com": "host.today/ht-docker-node"
|
||||||
|
},
|
||||||
|
"buildArgEnvMap": {
|
||||||
|
"NODE_VERSION": "NODE_VERSION"
|
||||||
|
},
|
||||||
|
"platforms": ["linux/amd64", "linux/arm64"],
|
||||||
|
"push": false,
|
||||||
|
"testDir": "./test"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Configuration Options
|
||||||
|
|
||||||
|
- `baseImage`: Base Docker image for testing (legacy)
|
||||||
|
- `command`: Command to run in container (legacy)
|
||||||
|
- `dockerSock`: Mount Docker socket (legacy)
|
||||||
|
- `registries`: Array of registry URLs to push to
|
||||||
|
- `registryRepoMap`: Map registry URLs to different repo paths
|
||||||
|
- `buildArgEnvMap`: Map Docker build ARGs to environment variables
|
||||||
|
- `platforms`: Target architectures for buildx
|
||||||
|
- `push`: Auto-push after build
|
||||||
|
- `testDir`: Directory containing test scripts
|
||||||
|
|
||||||
|
## Registry Authentication
|
||||||
|
|
||||||
|
Set environment variables for registry login:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Pipe-delimited format (numbered 1-10)
|
||||||
|
export DOCKER_REGISTRY_1="registry.gitlab.com|username|password"
|
||||||
|
export DOCKER_REGISTRY_2="docker.io|username|password"
|
||||||
|
|
||||||
|
# Or individual registry format
|
||||||
|
export DOCKER_REGISTRY_URL="registry.gitlab.com"
|
||||||
|
export DOCKER_REGISTRY_USER="username"
|
||||||
|
export DOCKER_REGISTRY_PASSWORD="password"
|
||||||
|
```
|
||||||
|
|
||||||
|
## File Structure
|
||||||
|
|
||||||
|
```
|
||||||
|
ts/
|
||||||
|
├── index.ts (entry point)
|
||||||
|
├── tsdocker.cli.ts (CLI commands)
|
||||||
|
├── tsdocker.config.ts (configuration)
|
||||||
|
├── tsdocker.plugins.ts (plugin imports)
|
||||||
|
├── tsdocker.docker.ts (legacy test runner)
|
||||||
|
├── tsdocker.snippets.ts (Dockerfile generation)
|
||||||
|
├── classes.dockerfile.ts (Dockerfile management)
|
||||||
|
├── classes.dockerregistry.ts (registry authentication)
|
||||||
|
├── classes.registrystorage.ts (registry storage)
|
||||||
|
├── classes.tsdockermanager.ts (orchestrator)
|
||||||
|
└── interfaces/
|
||||||
|
└── index.ts (type definitions)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Dependencies
|
||||||
|
|
||||||
|
- `@push.rocks/lik`: Object mapping utilities
|
||||||
|
- `@push.rocks/smartfs`: Filesystem operations
|
||||||
|
- `@push.rocks/smartshell`: Shell command execution
|
||||||
|
- `@push.rocks/smartcli`: CLI framework
|
||||||
|
- `@push.rocks/projectinfo`: Project metadata
|
||||||
|
|
||||||
|
## Parallel Builds
|
||||||
|
|
||||||
|
`--parallel` flag enables level-based parallel Docker builds:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
tsdocker build --parallel # parallel, default concurrency (4)
|
||||||
|
tsdocker build --parallel=8 # parallel, concurrency 8
|
||||||
|
tsdocker build --parallel --cached # works with both modes
|
||||||
|
```
|
||||||
|
|
||||||
|
Implementation: `Dockerfile.computeLevels()` groups topologically sorted Dockerfiles into dependency levels. `Dockerfile.runWithConcurrency()` provides a worker-pool pattern for bounded concurrency. Both are public static methods on the `Dockerfile` class. The parallel logic exists in both `Dockerfile.buildDockerfiles()` (standard mode) and `TsDockerManager.build()` (cached mode).
|
||||||
|
|
||||||
|
## OCI Distribution API Push (v1.16+)
|
||||||
|
|
||||||
|
All builds now go through a persistent local registry (`localhost:5234`) with volume storage at `.nogit/docker-registry/`. Pushes use the `RegistryCopy` class (`ts/classes.registrycopy.ts`) which implements the OCI Distribution API to copy images (including multi-arch manifest lists) from the local registry to remote registries. This replaces the old `docker tag + docker push` approach that only worked for single-platform images.
|
||||||
|
|
||||||
|
Key classes:
|
||||||
|
- `RegistryCopy` — HTTP-based OCI image copy (auth, blob transfer, manifest handling)
|
||||||
|
- `Dockerfile.push()` — Now delegates to `RegistryCopy.copyImage()`
|
||||||
|
- `Dockerfile.needsLocalRegistry()` — Always returns true
|
||||||
|
- `Dockerfile.startLocalRegistry()` — Uses persistent volume mount
|
||||||
|
|
||||||
|
The `config.push` field is now a no-op (kept for backward compat).
|
||||||
|
|
||||||
|
## Build Status
|
||||||
|
|
||||||
|
- Build: ✅ Passes
|
||||||
|
- Legacy test functionality preserved
|
||||||
|
- New Docker build functionality added
|
||||||
|
|
||||||
|
## Previous Upgrades (2025-11-22)
|
||||||
|
|
||||||
|
- Updated all @gitzone/* dependencies to @git.zone/* scope
|
||||||
|
- Updated all @pushrocks/* dependencies to @push.rocks/* scope
|
||||||
|
- Migrated from smartfile v8 to smartfs v1.1.0
|
||||||
554
readme.md
Normal file
554
readme.md
Normal file
@@ -0,0 +1,554 @@
|
|||||||
|
# @git.zone/tsdocker
|
||||||
|
|
||||||
|
> 🐳 The ultimate Docker development toolkit for TypeScript projects — build, test, and ship multi-arch containerized applications with zero friction.
|
||||||
|
|
||||||
|
## Issue Reporting and Security
|
||||||
|
|
||||||
|
For reporting bugs, issues, or security vulnerabilities, please visit [community.foss.global/](https://community.foss.global/). This is the central community hub for all issue reporting. Developers who sign and comply with our contribution agreement and go through identification can also get a [code.foss.global/](https://code.foss.global/) account to submit Pull Requests directly.
|
||||||
|
|
||||||
|
## What is tsdocker?
|
||||||
|
|
||||||
|
**tsdocker** is a comprehensive Docker development and build tool that handles everything from testing npm packages in clean environments to building and pushing multi-architecture Docker images across multiple registries — all from a single CLI.
|
||||||
|
|
||||||
|
### 🎯 Key Capabilities
|
||||||
|
|
||||||
|
- 🧪 **Containerized Testing** — Run your tests in pristine Docker environments
|
||||||
|
- 🏗️ **Smart Docker Builds** — Automatically discover, sort, and build Dockerfiles by dependency
|
||||||
|
- 🌍 **True Multi-Architecture** — Build for `amd64` and `arm64` simultaneously with Docker Buildx
|
||||||
|
- 🚀 **Multi-Registry Push** — Ship to Docker Hub, GitLab, GitHub Container Registry, and more via OCI Distribution API
|
||||||
|
- ⚡ **Parallel Builds** — Level-based parallel builds with configurable concurrency
|
||||||
|
- 🗄️ **Persistent Local Registry** — All images flow through a local OCI registry with persistent storage
|
||||||
|
- 📦 **Build Caching** — Skip unchanged Dockerfiles with content-hash caching
|
||||||
|
- 🔧 **Zero Config Start** — Works out of the box, scales with your needs
|
||||||
|
|
||||||
|
## Installation
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Global installation (recommended for CLI usage)
|
||||||
|
npm install -g @git.zone/tsdocker
|
||||||
|
|
||||||
|
# Or project-local installation
|
||||||
|
pnpm install --save-dev @git.zone/tsdocker
|
||||||
|
```
|
||||||
|
|
||||||
|
## Quick Start
|
||||||
|
|
||||||
|
### 🧪 Run Tests in Docker
|
||||||
|
|
||||||
|
The simplest use case — run your tests in a clean container:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
tsdocker
|
||||||
|
```
|
||||||
|
|
||||||
|
This pulls your configured base image, mounts your project, and executes your test command in isolation.
|
||||||
|
|
||||||
|
### 🏗️ Build Docker Images
|
||||||
|
|
||||||
|
Got `Dockerfile` files? Build them all with automatic dependency ordering:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
tsdocker build
|
||||||
|
```
|
||||||
|
|
||||||
|
tsdocker will:
|
||||||
|
1. 🔍 Discover all `Dockerfile*` files in your project
|
||||||
|
2. 📊 Analyze `FROM` dependencies between them
|
||||||
|
3. 🔄 Sort them topologically
|
||||||
|
4. 🏗️ Build each image in the correct order
|
||||||
|
5. 📦 Push every image to a persistent local registry (`.nogit/docker-registry/`)
|
||||||
|
|
||||||
|
### 📤 Push to Registries
|
||||||
|
|
||||||
|
Ship your images to one or all configured registries:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Push to all configured registries
|
||||||
|
tsdocker push
|
||||||
|
|
||||||
|
# Push to a specific registry
|
||||||
|
tsdocker push --registry=registry.gitlab.com
|
||||||
|
```
|
||||||
|
|
||||||
|
Under the hood, `tsdocker push` uses the **OCI Distribution API** to copy images directly from the local registry to remote registries. This means multi-arch manifest lists are preserved end-to-end — no more single-platform-only pushes.
|
||||||
|
|
||||||
|
## CLI Commands
|
||||||
|
|
||||||
|
| Command | Description |
|
||||||
|
|---------|-------------|
|
||||||
|
| `tsdocker` | Run tests in a fresh Docker container (legacy mode) |
|
||||||
|
| `tsdocker build` | Build all Dockerfiles with dependency ordering |
|
||||||
|
| `tsdocker push` | Build + push images to configured registries |
|
||||||
|
| `tsdocker pull <registry>` | Pull images from a specific registry |
|
||||||
|
| `tsdocker test` | Build + run container test scripts (`test_*.sh`) |
|
||||||
|
| `tsdocker login` | Authenticate with configured registries |
|
||||||
|
| `tsdocker list` | Display discovered Dockerfiles and their dependencies |
|
||||||
|
| `tsdocker clean` | Interactively clean Docker environment |
|
||||||
|
| `tsdocker vscode` | Launch containerized VS Code in browser |
|
||||||
|
|
||||||
|
### Build Flags
|
||||||
|
|
||||||
|
| Flag | Description |
|
||||||
|
|------|-------------|
|
||||||
|
| `--platform=linux/arm64` | Override build platform for a single architecture |
|
||||||
|
| `--timeout=600` | Build timeout in seconds |
|
||||||
|
| `--no-cache` | Force rebuild without Docker layer cache |
|
||||||
|
| `--cached` | Skip unchanged Dockerfiles (content-hash based) |
|
||||||
|
| `--verbose` | Stream raw `docker build` output |
|
||||||
|
| `--parallel` | Enable level-based parallel builds (default concurrency: 4) |
|
||||||
|
| `--parallel=8` | Parallel builds with custom concurrency |
|
||||||
|
| `--context=mycontext` | Use a specific Docker context |
|
||||||
|
|
||||||
|
### Clean Flags
|
||||||
|
|
||||||
|
| Flag | Description |
|
||||||
|
|------|-------------|
|
||||||
|
| `--all` | Include all images and volumes (not just dangling) |
|
||||||
|
| `-y` | Auto-confirm all prompts |
|
||||||
|
|
||||||
|
## Configuration
|
||||||
|
|
||||||
|
Configure tsdocker in your `package.json` or `npmextra.json` under the `@git.zone/tsdocker` key:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"@git.zone/tsdocker": {
|
||||||
|
"registries": ["registry.gitlab.com", "docker.io"],
|
||||||
|
"registryRepoMap": {
|
||||||
|
"registry.gitlab.com": "myorg/myproject"
|
||||||
|
},
|
||||||
|
"buildArgEnvMap": {
|
||||||
|
"NODE_VERSION": "NODE_VERSION"
|
||||||
|
},
|
||||||
|
"platforms": ["linux/amd64", "linux/arm64"],
|
||||||
|
"testDir": "./test"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Configuration Options
|
||||||
|
|
||||||
|
#### Build & Push Options
|
||||||
|
|
||||||
|
| Option | Type | Default | Description |
|
||||||
|
|--------|------|---------|-------------|
|
||||||
|
| `registries` | `string[]` | `[]` | Registry URLs to push to |
|
||||||
|
| `registryRepoMap` | `object` | `{}` | Map registries to different repository paths |
|
||||||
|
| `buildArgEnvMap` | `object` | `{}` | Map Docker build ARGs to environment variables |
|
||||||
|
| `platforms` | `string[]` | `["linux/amd64"]` | Target architectures for multi-arch builds |
|
||||||
|
| `testDir` | `string` | `./test` | Directory containing test scripts |
|
||||||
|
|
||||||
|
#### Legacy Testing Options
|
||||||
|
|
||||||
|
These options configure the `tsdocker` default command (containerized test runner):
|
||||||
|
|
||||||
|
| Option | Type | Default | Description |
|
||||||
|
|--------|------|---------|-------------|
|
||||||
|
| `baseImage` | `string` | `hosttoday/ht-docker-node:npmdocker` | Docker image for test environment |
|
||||||
|
| `command` | `string` | `npmci npm test` | Command to run inside the container |
|
||||||
|
| `dockerSock` | `boolean` | `false` | Mount Docker socket for DinD scenarios |
|
||||||
|
|
||||||
|
## Architecture: How tsdocker Works
|
||||||
|
|
||||||
|
tsdocker uses a **local OCI registry** as the canonical store for all built images. This design solves fundamental problems with Docker's local daemon, which cannot hold multi-architecture manifest lists.
|
||||||
|
|
||||||
|
### 📐 Build Flow
|
||||||
|
|
||||||
|
```
|
||||||
|
┌─────────────────────────────────────────────────┐
|
||||||
|
│ tsdocker build │
|
||||||
|
│ │
|
||||||
|
│ 1. Start local registry (localhost:5234) │
|
||||||
|
│     └── Persistent volume: .nogit/docker-registry/ │
|
||||||
|
│ │
|
||||||
|
│ 2. For each Dockerfile (topological order): │
|
||||||
|
│ ├── Multi-platform: buildx --push → registry │
|
||||||
|
│ └── Single-platform: docker build → registry │
|
||||||
|
│ │
|
||||||
|
│ 3. Stop local registry (data persists on disk) │
|
||||||
|
└─────────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
### 📤 Push Flow
|
||||||
|
|
||||||
|
```
|
||||||
|
┌──────────────────────────────────────────────────┐
|
||||||
|
│ tsdocker push │
|
||||||
|
│ │
|
||||||
|
│ 1. Start local registry (loads persisted data) │
|
||||||
|
│ │
|
||||||
|
│ 2. For each image × each remote registry: │
|
||||||
|
│ └── OCI Distribution API copy: │
|
||||||
|
│ ├── Fetch manifest (single or multi-arch) │
|
||||||
|
│ ├── Copy blobs (skip if already exist) │
|
||||||
|
│ └── Push manifest with destination tag │
|
||||||
|
│ │
|
||||||
|
│ 3. Stop local registry │
|
||||||
|
└──────────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
### 🔑 Why a Local Registry?
|
||||||
|
|
||||||
|
| Problem | Solution |
|
||||||
|
|---------|----------|
|
||||||
|
| `docker buildx --load` fails for multi-arch images | `buildx --push` to local registry works for any number of platforms |
|
||||||
|
| `docker push` only pushes single-platform manifests | OCI API copy preserves full manifest lists (multi-arch) |
|
||||||
|
| Images lost between build and push phases | Persistent storage at `.nogit/docker-registry/` survives restarts |
|
||||||
|
| Redundant blob uploads on incremental pushes | HEAD checks skip blobs that already exist on the remote |
|
||||||
|
|
||||||
|
## Registry Authentication
|
||||||
|
|
||||||
|
### Environment Variables
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Pipe-delimited format (supports DOCKER_REGISTRY_1 through DOCKER_REGISTRY_10)
|
||||||
|
export DOCKER_REGISTRY_1="registry.gitlab.com|username|password"
|
||||||
|
export DOCKER_REGISTRY_2="docker.io|username|password"
|
||||||
|
|
||||||
|
# Individual registry format
|
||||||
|
export DOCKER_REGISTRY_URL="registry.gitlab.com"
|
||||||
|
export DOCKER_REGISTRY_USER="username"
|
||||||
|
export DOCKER_REGISTRY_PASSWORD="password"
|
||||||
|
```
|
||||||
|
|
||||||
|
### Docker Config Fallback
|
||||||
|
|
||||||
|
When pushing, tsdocker will also read credentials from `~/.docker/config.json` if no explicit credentials are provided via environment variables. This means `docker login` credentials work automatically.
|
||||||
|
|
||||||
|
### Login Command
|
||||||
|
|
||||||
|
```bash
|
||||||
|
tsdocker login
|
||||||
|
```
|
||||||
|
|
||||||
|
Authenticates with all configured registries using the provided environment variables.
|
||||||
|
|
||||||
|
## Advanced Usage
|
||||||
|
|
||||||
|
### 🔀 Multi-Architecture Builds
|
||||||
|
|
||||||
|
Build for multiple platforms using Docker Buildx:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"@git.zone/tsdocker": {
|
||||||
|
"platforms": ["linux/amd64", "linux/arm64"]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
tsdocker automatically:
|
||||||
|
- Sets up a Buildx builder with `--driver-opt network=host` (so buildx can reach the local registry)
|
||||||
|
- Pushes multi-platform images to the local registry via `buildx --push`
|
||||||
|
- Copies the full manifest list (including all platform variants) to remote registries on `tsdocker push`
|
||||||
|
|
||||||
|
### ⚡ Parallel Builds
|
||||||
|
|
||||||
|
Speed up builds by building independent images concurrently:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Default concurrency (4 workers)
|
||||||
|
tsdocker build --parallel
|
||||||
|
|
||||||
|
# Custom concurrency
|
||||||
|
tsdocker build --parallel=8
|
||||||
|
|
||||||
|
# Works with caching too
|
||||||
|
tsdocker build --parallel --cached
|
||||||
|
```
|
||||||
|
|
||||||
|
tsdocker groups Dockerfiles into **dependency levels** using topological analysis. Images within the same level have no dependencies on each other and build in parallel. Each level completes before the next begins.
|
||||||
|
|
||||||
|
### 📦 Dockerfile Naming Conventions
|
||||||
|
|
||||||
|
tsdocker discovers files matching `Dockerfile*`:
|
||||||
|
|
||||||
|
| File Name | Version Tag |
|
||||||
|
|-----------|-------------|
|
||||||
|
| `Dockerfile` | `latest` |
|
||||||
|
| `Dockerfile_v1.0.0` | `v1.0.0` |
|
||||||
|
| `Dockerfile_alpine` | `alpine` |
|
||||||
|
| `Dockerfile_##version##` | Uses `package.json` version |
|
||||||
|
|
||||||
|
### 🔗 Dependency-Aware Builds
|
||||||
|
|
||||||
|
If you have multiple Dockerfiles that depend on each other:
|
||||||
|
|
||||||
|
```dockerfile
|
||||||
|
# Dockerfile_base
|
||||||
|
FROM node:20-alpine
|
||||||
|
RUN npm install -g typescript
|
||||||
|
|
||||||
|
# Dockerfile_app
|
||||||
|
FROM myproject:base
|
||||||
|
COPY . .
|
||||||
|
RUN npm run build
|
||||||
|
```
|
||||||
|
|
||||||
|
tsdocker automatically detects that `Dockerfile_app` depends on `Dockerfile_base`, builds them in the correct order, and makes the base image available to dependent builds via the local registry (using `--build-context` for buildx).
|
||||||
|
|
||||||
|
### 🧪 Container Test Scripts
|
||||||
|
|
||||||
|
Create test scripts in your test directory:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# test/test_latest.sh
|
||||||
|
#!/bin/bash
|
||||||
|
node --version
|
||||||
|
npm --version
|
||||||
|
echo "Container tests passed!"
|
||||||
|
```
|
||||||
|
|
||||||
|
Run with:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
tsdocker test
|
||||||
|
```
|
||||||
|
|
||||||
|
This builds all images, starts the local registry (so multi-arch images can be pulled), and runs each matching test script inside a container.
|
||||||
|
|
||||||
|
### 🔧 Build Args from Environment
|
||||||
|
|
||||||
|
Pass environment variables as Docker build arguments:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"@git.zone/tsdocker": {
|
||||||
|
"buildArgEnvMap": {
|
||||||
|
"NPM_TOKEN": "NPM_TOKEN",
|
||||||
|
"NODE_VERSION": "NODE_VERSION"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
```dockerfile
|
||||||
|
ARG NPM_TOKEN
|
||||||
|
ARG NODE_VERSION=20
|
||||||
|
FROM node:${NODE_VERSION}
|
||||||
|
RUN echo "//registry.npmjs.org/:_authToken=${NPM_TOKEN}" > ~/.npmrc
|
||||||
|
```
|
||||||
|
|
||||||
|
### 🗺️ Registry Repo Mapping
|
||||||
|
|
||||||
|
Use different repository names for different registries:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"@git.zone/tsdocker": {
|
||||||
|
"registries": ["registry.gitlab.com", "docker.io"],
|
||||||
|
"registryRepoMap": {
|
||||||
|
"registry.gitlab.com": "mygroup/myproject",
|
||||||
|
"docker.io": "myuser/myproject"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
When pushing, tsdocker maps the local repo name to the registry-specific path. For example, a locally built `myproject:latest` becomes `registry.gitlab.com/mygroup/myproject:latest` and `docker.io/myuser/myproject:latest`.
|
||||||
|
|
||||||
|
### 🐳 Docker-in-Docker Testing
|
||||||
|
|
||||||
|
Test Docker-related tools by mounting the Docker socket:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"@git.zone/tsdocker": {
|
||||||
|
"baseImage": "docker:latest",
|
||||||
|
"command": "docker version && docker ps",
|
||||||
|
"dockerSock": true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 📋 Listing Dockerfiles
|
||||||
|
|
||||||
|
Inspect your project's Dockerfiles and their relationships:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
tsdocker list
|
||||||
|
```
|
||||||
|
|
||||||
|
Output:
|
||||||
|
```
|
||||||
|
Discovered Dockerfiles:
|
||||||
|
========================
|
||||||
|
|
||||||
|
1. /path/to/Dockerfile_base
|
||||||
|
Tag: myproject:base
|
||||||
|
Base Image: node:20-alpine
|
||||||
|
Version: base
|
||||||
|
|
||||||
|
2. /path/to/Dockerfile_app
|
||||||
|
Tag: myproject:app
|
||||||
|
Base Image: myproject:base
|
||||||
|
Version: app
|
||||||
|
Depends on: myproject:base
|
||||||
|
```
|
||||||
|
|
||||||
|
## Examples
|
||||||
|
|
||||||
|
### Minimal Build & Push
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"@git.zone/tsdocker": {
|
||||||
|
"registries": ["docker.io"],
|
||||||
|
"platforms": ["linux/amd64"]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
```bash
|
||||||
|
tsdocker push
|
||||||
|
```
|
||||||
|
|
||||||
|
### Full Production Setup
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"@git.zone/tsdocker": {
|
||||||
|
"registries": ["registry.gitlab.com", "ghcr.io", "docker.io"],
|
||||||
|
"registryRepoMap": {
|
||||||
|
"registry.gitlab.com": "myorg/myapp",
|
||||||
|
"ghcr.io": "myorg/myapp",
|
||||||
|
"docker.io": "myuser/myapp"
|
||||||
|
},
|
||||||
|
"buildArgEnvMap": {
|
||||||
|
"NPM_TOKEN": "NPM_TOKEN"
|
||||||
|
},
|
||||||
|
"platforms": ["linux/amd64", "linux/arm64"],
|
||||||
|
"testDir": "./docker-tests"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### CI/CD Integration
|
||||||
|
|
||||||
|
**GitLab CI:**
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
build-and-push:
|
||||||
|
stage: build
|
||||||
|
script:
|
||||||
|
- npm install -g @git.zone/tsdocker
|
||||||
|
- tsdocker push
|
||||||
|
variables:
|
||||||
|
DOCKER_REGISTRY_1: "registry.gitlab.com|$CI_REGISTRY_USER|$CI_REGISTRY_PASSWORD"
|
||||||
|
```
|
||||||
|
|
||||||
|
**GitHub Actions:**
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
- name: Build and Push
|
||||||
|
run: |
|
||||||
|
npm install -g @git.zone/tsdocker
|
||||||
|
tsdocker login
|
||||||
|
tsdocker push
|
||||||
|
env:
|
||||||
|
DOCKER_REGISTRY_1: "ghcr.io|${{ github.actor }}|${{ secrets.GITHUB_TOKEN }}"
|
||||||
|
```
|
||||||
|
|
||||||
|
## TypeScript API
|
||||||
|
|
||||||
|
tsdocker can also be used programmatically:
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
import { TsDockerManager } from '@git.zone/tsdocker/dist_ts/classes.tsdockermanager.js';
|
||||||
|
import type { ITsDockerConfig } from '@git.zone/tsdocker/dist_ts/interfaces/index.js';
|
||||||
|
|
||||||
|
const config: ITsDockerConfig = {
|
||||||
|
baseImage: 'node:20',
|
||||||
|
command: 'npm test',
|
||||||
|
dockerSock: false,
|
||||||
|
keyValueObject: {},
|
||||||
|
registries: ['docker.io'],
|
||||||
|
platforms: ['linux/amd64', 'linux/arm64'],
|
||||||
|
};
|
||||||
|
|
||||||
|
const manager = new TsDockerManager(config);
|
||||||
|
await manager.prepare();
|
||||||
|
await manager.build({ parallel: true });
|
||||||
|
await manager.push();
|
||||||
|
```
|
||||||
|
|
||||||
|
## Requirements
|
||||||
|
|
||||||
|
- **Docker** — Docker Engine 20+ or Docker Desktop
|
||||||
|
- **Node.js** — Version 18 or higher (for native `fetch` and ESM support)
|
||||||
|
- **Docker Buildx** — Required for multi-architecture builds (included in Docker Desktop)
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
### "docker not found"
|
||||||
|
|
||||||
|
Ensure Docker is installed and in your PATH:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker --version
|
||||||
|
```
|
||||||
|
|
||||||
|
### Multi-arch build fails
|
||||||
|
|
||||||
|
Make sure Docker Buildx is available. tsdocker will set up the builder automatically, but you can verify:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker buildx version
|
||||||
|
```
|
||||||
|
|
||||||
|
### Registry authentication fails
|
||||||
|
|
||||||
|
Check your environment variables are set correctly:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
echo $DOCKER_REGISTRY_1
|
||||||
|
tsdocker login
|
||||||
|
```
|
||||||
|
|
||||||
|
tsdocker also falls back to `~/.docker/config.json` — ensure you've run `docker login` for your target registries.
|
||||||
|
|
||||||
|
### Circular dependency detected
|
||||||
|
|
||||||
|
Review your Dockerfiles' `FROM` statements — you have images depending on each other in a loop.
|
||||||
|
|
||||||
|
### Build context too large
|
||||||
|
|
||||||
|
Use a `.dockerignore` file to exclude `node_modules`, `.git`, `.nogit`, and other large directories:
|
||||||
|
|
||||||
|
```
|
||||||
|
node_modules
|
||||||
|
.git
|
||||||
|
.nogit
|
||||||
|
dist_ts
|
||||||
|
```
|
||||||
|
|
||||||
|
## Migration from Legacy
|
||||||
|
|
||||||
|
Previously published as `npmdocker`, now `@git.zone/tsdocker`:
|
||||||
|
|
||||||
|
| Old | New |
|
||||||
|
|-----|-----|
|
||||||
|
| `npmdocker` command | `tsdocker` command |
|
||||||
|
| `"npmdocker"` config key | `"@git.zone/tsdocker"` config key |
|
||||||
|
| CommonJS | ESM with `.js` imports |
|
||||||
|
|
||||||
|
## License and Legal Information
|
||||||
|
|
||||||
|
This repository contains open-source code licensed under the MIT License. A copy of the license can be found in the [LICENSE](./LICENSE) file.
|
||||||
|
|
||||||
|
**Please note:** The MIT License does not grant permission to use the trade names, trademarks, service marks, or product names of the project, except as required for reasonable and customary use in describing the origin of the work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
### Trademarks
|
||||||
|
|
||||||
|
This project is owned and maintained by Task Venture Capital GmbH. The names and logos associated with Task Venture Capital GmbH and any related products or services are trademarks of Task Venture Capital GmbH or third parties, and are not included within the scope of the MIT license granted herein.
|
||||||
|
|
||||||
|
Use of these trademarks must comply with Task Venture Capital GmbH's Trademark Guidelines or the guidelines of the respective third-party owners, and any usage must be approved in writing. Third-party trademarks used herein are the property of their respective owners and used only in a descriptive manner, e.g. for an implementation of an API or similar.
|
||||||
|
|
||||||
|
### Company Information
|
||||||
|
|
||||||
|
Task Venture Capital GmbH
|
||||||
|
Registered at District Court Bremen HRB 35230 HB, Germany
|
||||||
|
|
||||||
|
For any legal inquiries or further information, please contact us via email at hello@task.vc.
|
||||||
|
|
||||||
|
By using this repository, you acknowledge that you have read this section, agree to comply with its terms, and understand that the licensing of the code does not imply endorsement by Task Venture Capital GmbH of any derivative works.
|
||||||
8
ts/00_commitinfo_data.ts
Normal file
8
ts/00_commitinfo_data.ts
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
/**
 * autocreated commitinfo by @push.rocks/commitinfo
 * (edits made here by hand will be overwritten on regeneration)
 */
export const commitinfo = {
  name: '@git.zone/tsdocker',
  version: '1.16.0',
  description: 'develop npm modules cross platform with docker'
}
|
||||||
@@ -1,20 +0,0 @@
|
|||||||
/**
|
|
||||||
* smartanalytics:
|
|
||||||
* We count executions of this tool to keep track which of our tools are really used.
|
|
||||||
* This insight is used to plan spending our limited resources for improving them.
|
|
||||||
* Any submitted analytics data is fully anonymized (no Ips or any other personal information is tracked).
|
|
||||||
* Feel free to dig into the smartanalytics package, if you are interested in how it works.
|
|
||||||
* Our privacy policy can be found here: https://lossless.gmbh/privacy.html
|
|
||||||
* The privacy policy is also linked in the readme, so we hope this behaviour does not come as a surprise to you.
|
|
||||||
* Have a nice day and regards
|
|
||||||
* Your Open Source team at Lossless GmbH :)
|
|
||||||
*/
|
|
||||||
import * as smartanalytics from '@pushrocks/smartanalytics';
|
|
||||||
const npmdockerAnalytics = new smartanalytics.Analytics({
|
|
||||||
apiEndPoint: 'https://pubapi.lossless.one',
|
|
||||||
appName: 'tsdocker',
|
|
||||||
projectId: 'gitzone'
|
|
||||||
});
|
|
||||||
npmdockerAnalytics.recordEvent('npmtoolexecution', {
|
|
||||||
somedata: 'somedata'
|
|
||||||
});
|
|
||||||
79
ts/classes.dockercontext.ts
Normal file
79
ts/classes.dockercontext.ts
Normal file
@@ -0,0 +1,79 @@
|
|||||||
|
import * as plugins from './tsdocker.plugins.js';
|
||||||
|
import * as fs from 'fs';
|
||||||
|
import { logger } from './tsdocker.logging.js';
|
||||||
|
import type { IDockerContextInfo } from './interfaces/index.js';
|
||||||
|
|
||||||
|
// Shared shell runner for this module; 'bash' executor so the docker CLI
// commands composed below run with ordinary shell semantics.
const smartshellInstance = new plugins.smartshell.Smartshell({ executor: 'bash' });
|
||||||
|
|
||||||
|
export class DockerContext {
  // Populated by detect(); stays null until detection has run.
  public contextInfo: IDockerContextInfo | null = null;

  /**
   * Forces subsequent Docker CLI invocations to use a specific context by
   * exporting DOCKER_CONTEXT into this process's environment.
   * @param contextName - name of the Docker context to select
   */
  public setContext(contextName: string): void {
    process.env.DOCKER_CONTEXT = contextName;
    logger.log('info', `Docker context explicitly set to: ${contextName}`);
  }

  /**
   * Detects the current Docker context via `docker context inspect` and
   * rootless mode via `docker info`, then infers a rough topology.
   * Each probe falls back to defaults when its command fails or its output
   * cannot be parsed.
   * @returns the populated context info (also cached on this.contextInfo)
   */
  public async detect(): Promise<IDockerContextInfo> {
    let name = 'default';
    let endpoint = 'unknown';

    const contextResult = await smartshellInstance.execSilent(
      `docker context inspect --format '{{json .}}'`
    );
    if (contextResult.exitCode === 0 && contextResult.stdout) {
      try {
        const parsed = JSON.parse(contextResult.stdout.trim());
        // inspect output may be a one-element array or a bare object —
        // handle both shapes.
        const data = Array.isArray(parsed) ? parsed[0] : parsed;
        name = data.Name || 'default';
        endpoint = data.Endpoints?.docker?.Host || 'unknown';
      } catch { /* fallback to defaults */ }
    }

    let isRootless = false;
    const infoResult = await smartshellInstance.execSilent(
      `docker info --format '{{json .SecurityOptions}}'`
    );
    if (infoResult.exitCode === 0 && infoResult.stdout) {
      // Rootless daemons list "name=rootless" among their security options.
      isRootless = infoResult.stdout.includes('name=rootless');
    }

    // Detect topology: a tcp:// DOCKER_HOST is taken as docker-in-docker;
    // the /.dockerenv marker means we are inside a container (assumed
    // socket-mount); otherwise a plain local daemon.
    // NOTE(review): these are heuristics — e.g. ssh:// DOCKER_HOST values
    // fall through to 'local'/'socket-mount'.
    let topology: 'socket-mount' | 'dind' | 'local' = 'local';
    if (process.env.DOCKER_HOST && process.env.DOCKER_HOST.startsWith('tcp://')) {
      topology = 'dind';
    } else if (fs.existsSync('/.dockerenv')) {
      topology = 'socket-mount';
    }

    this.contextInfo = { name, endpoint, isRootless, dockerHost: process.env.DOCKER_HOST, topology };
    return this.contextInfo;
  }

  /** Logs context info prominently. No-op until detect() has run. */
  public logContextInfo(): void {
    if (!this.contextInfo) return;
    const { name, endpoint, isRootless, dockerHost, topology } = this.contextInfo;
    logger.log('info', '=== DOCKER CONTEXT ===');
    logger.log('info', `Context: ${name}`);
    logger.log('info', `Endpoint: ${endpoint}`);
    if (dockerHost) logger.log('info', `DOCKER_HOST: ${dockerHost}`);
    logger.log('info', `Rootless: ${isRootless ? 'yes' : 'no'}`);
    logger.log('info', `Topology: ${topology || 'local'}`);
  }

  /** Emits rootless-specific warnings. No-op unless detect() found a rootless daemon. */
  public logRootlessWarnings(): void {
    if (!this.contextInfo?.isRootless) return;
    logger.log('warn', '[rootless] network=host in buildx is namespaced by rootlesskit');
    logger.log('warn', '[rootless] Local registry may have localhost vs 127.0.0.1 resolution quirks');
  }

  /**
   * Returns a context-aware buildx builder name: tsdocker-builder-<context>.
   * The context name is sanitized (non-alphanumeric chars replaced with '-')
   * so the result is a valid Docker object name.
   */
  public getBuilderName(): string {
    const contextName = this.contextInfo?.name || 'default';
    const sanitized = contextName.replace(/[^a-zA-Z0-9_-]/g, '-');
    return `tsdocker-builder-${sanitized}`;
  }
}
|
||||||
845
ts/classes.dockerfile.ts
Normal file
845
ts/classes.dockerfile.ts
Normal file
@@ -0,0 +1,845 @@
|
|||||||
|
import * as plugins from './tsdocker.plugins.js';
|
||||||
|
import * as paths from './tsdocker.paths.js';
|
||||||
|
import { logger, formatDuration } from './tsdocker.logging.js';
|
||||||
|
import { DockerRegistry } from './classes.dockerregistry.js';
|
||||||
|
import { RegistryCopy } from './classes.registrycopy.js';
|
||||||
|
import { TsDockerSession } from './classes.tsdockersession.js';
|
||||||
|
import type { IDockerfileOptions, ITsDockerConfig, IBuildCommandOptions } from './interfaces/index.js';
|
||||||
|
import type { TsDockerManager } from './classes.tsdockermanager.js';
|
||||||
|
import * as fs from 'fs';
|
||||||
|
|
||||||
|
// Shared shell runner for this module's docker CLI invocations; 'bash'
// executor so composed commands use ordinary shell semantics.
const smartshellInstance = new plugins.smartshell.Smartshell({
  executor: 'bash',
});
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Extracts a platform string (e.g. "linux/amd64") from a buildx bracket prefix.
|
||||||
|
* The prefix may be like "linux/amd64 ", "linux/amd64 stage-1 ", "stage-1 ", or "".
|
||||||
|
*/
|
||||||
|
function extractPlatform(prefix: string): string | null {
|
||||||
|
const match = prefix.match(/linux\/\w+/);
|
||||||
|
return match ? match[0] : null;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Class Dockerfile represents a Dockerfile on disk
|
||||||
|
*/
|
||||||
|
export class Dockerfile {
|
||||||
|
// STATIC METHODS
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates instances of class Dockerfile for all Dockerfiles in cwd
|
||||||
|
*/
|
||||||
|
public static async readDockerfiles(managerRef: TsDockerManager): Promise<Dockerfile[]> {
|
||||||
|
const entries = await plugins.smartfs.directory(paths.cwd).filter('Dockerfile*').list();
|
||||||
|
const fileTree = entries
|
||||||
|
.filter(entry => entry.isFile)
|
||||||
|
.map(entry => plugins.path.join(paths.cwd, entry.name));
|
||||||
|
|
||||||
|
const readDockerfilesArray: Dockerfile[] = [];
|
||||||
|
logger.log('info', `found ${fileTree.length} Dockerfile(s):`);
|
||||||
|
for (const filePath of fileTree) {
|
||||||
|
logger.log('info', ` ${plugins.path.basename(filePath)}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
for (const dockerfilePath of fileTree) {
|
||||||
|
const myDockerfile = new Dockerfile(managerRef, {
|
||||||
|
filePath: dockerfilePath,
|
||||||
|
read: true,
|
||||||
|
});
|
||||||
|
readDockerfilesArray.push(myDockerfile);
|
||||||
|
}
|
||||||
|
|
||||||
|
return readDockerfilesArray;
|
||||||
|
}
|
||||||
|
|
||||||
|
  /**
   * Sorts Dockerfiles into a build order based on dependencies (topological
   * sort via depth-first search). As a side effect, marks each Dockerfile
   * that builds FROM another local Dockerfile (localBaseImageDependent /
   * localBaseDockerfile).
   * @param dockerfiles - the Dockerfiles to order
   * @returns the same instances, dependencies first
   * @throws Error when a circular FROM dependency is detected
   */
  public static async sortDockerfiles(dockerfiles: Dockerfile[]): Promise<Dockerfile[]> {
    logger.log('info', 'Sorting Dockerfiles based on dependencies...');

    // Map from cleanTag to Dockerfile instance for quick lookup
    const tagToDockerfile = new Map<string, Dockerfile>();
    dockerfiles.forEach((dockerfile) => {
      tagToDockerfile.set(dockerfile.cleanTag, dockerfile);
    });

    // Build the dependency graph (each node lists at most one dependency:
    // the local Dockerfile it builds FROM, when there is one)
    const graph = new Map<Dockerfile, Dockerfile[]>();
    dockerfiles.forEach((dockerfile) => {
      const dependencies: Dockerfile[] = [];
      const baseImage = dockerfile.baseImage;

      // Extract repo:version from baseImage for comparison with cleanTag
      // baseImage may include a registry prefix (e.g., "host.today/repo:version")
      // but cleanTag is just "repo:version", so we strip the registry prefix
      const baseImageKey = Dockerfile.extractRepoVersion(baseImage);

      // Check if the baseImage is among the local Dockerfiles
      if (tagToDockerfile.has(baseImageKey)) {
        const baseDockerfile = tagToDockerfile.get(baseImageKey)!;
        dependencies.push(baseDockerfile);
        dockerfile.localBaseImageDependent = true;
        dockerfile.localBaseDockerfile = baseDockerfile;
      }

      graph.set(dockerfile, dependencies);
    });

    // Perform topological sort (DFS with a temporary mark for cycle detection)
    const sortedDockerfiles: Dockerfile[] = [];
    const visited = new Set<Dockerfile>();
    const tempMarked = new Set<Dockerfile>();

    const visit = (dockerfile: Dockerfile) => {
      // A temporarily-marked node reached again means we looped back onto
      // the current DFS path: a circular dependency.
      if (tempMarked.has(dockerfile)) {
        throw new Error(`Circular dependency detected involving ${dockerfile.cleanTag}`);
      }
      if (!visited.has(dockerfile)) {
        tempMarked.add(dockerfile);
        const dependencies = graph.get(dockerfile) || [];
        dependencies.forEach((dep) => visit(dep));
        tempMarked.delete(dockerfile);
        visited.add(dockerfile);
        // Post-order push: dependencies land in the list before dependents.
        sortedDockerfiles.push(dockerfile);
      }
    };

    try {
      dockerfiles.forEach((dockerfile) => {
        if (!visited.has(dockerfile)) {
          visit(dockerfile);
        }
      });
    } catch (error) {
      logger.log('error', (error as Error).message);
      throw error;
    }

    // Log the sorted order
    sortedDockerfiles.forEach((dockerfile, index) => {
      logger.log(
        'info',
        `Build order ${index + 1}: ${dockerfile.cleanTag} with base image ${dockerfile.baseImage}`
      );
    });

    return sortedDockerfiles;
  }
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Maps local Dockerfiles dependencies to the corresponding Dockerfile class instances
|
||||||
|
*/
|
||||||
|
public static async mapDockerfiles(sortedDockerfileArray: Dockerfile[]): Promise<Dockerfile[]> {
|
||||||
|
sortedDockerfileArray.forEach((dockerfileArg) => {
|
||||||
|
if (dockerfileArg.localBaseImageDependent) {
|
||||||
|
// Extract repo:version from baseImage for comparison with cleanTag
|
||||||
|
const baseImageKey = Dockerfile.extractRepoVersion(dockerfileArg.baseImage);
|
||||||
|
sortedDockerfileArray.forEach((dockfile2: Dockerfile) => {
|
||||||
|
if (dockfile2.cleanTag === baseImageKey) {
|
||||||
|
dockerfileArg.localBaseDockerfile = dockfile2;
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
});
|
||||||
|
return sortedDockerfileArray;
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Local registry is always needed — it's the canonical store for all built images. */
|
||||||
|
public static needsLocalRegistry(
|
||||||
|
_dockerfiles?: Dockerfile[],
|
||||||
|
_options?: { platform?: string },
|
||||||
|
): boolean {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Starts a persistent registry:2 container with session-unique port and name.
 *
 * Side effects: creates the on-disk registry storage directory, removes any
 * stale container with the same name, and — on a one-shot port-collision
 * retry — mutates `session.config.registryPort` / `registryHost`.
 *
 * @param session - Session whose config supplies port, host, container name, CI flag and id.
 * @param isRootless - When true, logs an extra hint about rootless-Docker networking.
 * @throws Error when the registry container cannot be started (even after the retry).
 */
public static async startLocalRegistry(session: TsDockerSession, isRootless?: boolean): Promise<void> {
  // NOTE(review): `registryHost` is destructured but never read here — the final
  // log below deliberately reads `session.config.registryHost`, which may have
  // been rewritten by the port retry. Consider dropping the unused binding.
  const { registryPort, registryHost, registryContainerName, isCI, sessionId } = session.config;

  // Ensure persistent storage directory exists — isolate per session in CI
  const registryDataDir = isCI
    ? plugins.path.join(paths.cwd, '.nogit', 'docker-registry', sessionId)
    : plugins.path.join(paths.cwd, '.nogit', 'docker-registry');
  fs.mkdirSync(registryDataDir, { recursive: true });

  // Remove any leftover container from a previous run; '|| true' keeps this idempotent.
  await smartshellInstance.execSilent(
    `docker rm -f ${registryContainerName} 2>/dev/null || true`
  );

  const runCmd = `docker run -d --name ${registryContainerName} -p ${registryPort}:5000 -v "${registryDataDir}:/var/lib/registry" registry:2`;
  let result = await smartshellInstance.execSilent(runCmd);

  // Port retry: if port was stolen between allocation and docker run, reallocate once
  if (result.exitCode !== 0 && (result.stderr || result.stdout || '').includes('port is already allocated')) {
    const newPort = await TsDockerSession.allocatePort();
    logger.log('warn', `Port ${registryPort} taken, retrying with ${newPort}`);
    // Persist the new port on the session so later pushes/builds use it.
    session.config.registryPort = newPort;
    session.config.registryHost = `localhost:${newPort}`;
    const retryCmd = `docker run -d --name ${registryContainerName} -p ${newPort}:5000 -v "${registryDataDir}:/var/lib/registry" registry:2`;
    result = await smartshellInstance.execSilent(retryCmd);
  }

  if (result.exitCode !== 0) {
    throw new Error(`Failed to start local registry: ${result.stderr || result.stdout}`);
  }
  // registry:2 starts near-instantly; brief wait for readiness
  await new Promise(resolve => setTimeout(resolve, 1000));
  logger.log('info', `Started local registry at ${session.config.registryHost} (container: ${registryContainerName})`);
  if (isRootless) {
    logger.log('warn', `[rootless] Registry on port ${session.config.registryPort} — if buildx cannot reach localhost, try 127.0.0.1`);
  }
}
|
||||||
|
|
||||||
|
/** Stops and removes the session-specific local registry container. */
|
||||||
|
public static async stopLocalRegistry(session: TsDockerSession): Promise<void> {
|
||||||
|
await smartshellInstance.execSilent(
|
||||||
|
`docker rm -f ${session.config.registryContainerName} 2>/dev/null || true`
|
||||||
|
);
|
||||||
|
logger.log('info', `Stopped local registry (${session.config.registryContainerName})`);
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Pushes a built image to the local registry for buildx consumption. */
|
||||||
|
public static async pushToLocalRegistry(session: TsDockerSession, dockerfile: Dockerfile): Promise<void> {
|
||||||
|
const registryTag = `${session.config.registryHost}/${dockerfile.buildTag}`;
|
||||||
|
await smartshellInstance.execSilent(`docker tag ${dockerfile.buildTag} ${registryTag}`);
|
||||||
|
const result = await smartshellInstance.execSilent(`docker push ${registryTag}`);
|
||||||
|
if (result.exitCode !== 0) {
|
||||||
|
throw new Error(`Failed to push to local registry: ${result.stderr || result.stdout}`);
|
||||||
|
}
|
||||||
|
dockerfile.localRegistryTag = registryTag;
|
||||||
|
logger.log('info', `Pushed ${dockerfile.buildTag} to local registry as ${registryTag}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Groups topologically sorted Dockerfiles into dependency levels.
|
||||||
|
* Level 0 = no local dependencies; level N = depends on something in level N-1.
|
||||||
|
* Images within the same level are independent and can build in parallel.
|
||||||
|
*/
|
||||||
|
public static computeLevels(sortedDockerfiles: Dockerfile[]): Dockerfile[][] {
|
||||||
|
const levelMap = new Map<Dockerfile, number>();
|
||||||
|
for (const df of sortedDockerfiles) {
|
||||||
|
if (!df.localBaseImageDependent || !df.localBaseDockerfile) {
|
||||||
|
levelMap.set(df, 0);
|
||||||
|
} else {
|
||||||
|
const depLevel = levelMap.get(df.localBaseDockerfile) ?? 0;
|
||||||
|
levelMap.set(df, depLevel + 1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
const maxLevel = Math.max(...Array.from(levelMap.values()), 0);
|
||||||
|
const levels: Dockerfile[][] = [];
|
||||||
|
for (let l = 0; l <= maxLevel; l++) {
|
||||||
|
levels.push(sortedDockerfiles.filter(df => levelMap.get(df) === l));
|
||||||
|
}
|
||||||
|
return levels;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Runs async tasks with bounded concurrency (worker-pool pattern).
|
||||||
|
* Fast-fail: if any task throws, Promise.all rejects immediately.
|
||||||
|
*/
|
||||||
|
public static async runWithConcurrency<T>(
|
||||||
|
tasks: (() => Promise<T>)[],
|
||||||
|
concurrency: number,
|
||||||
|
): Promise<T[]> {
|
||||||
|
const results: T[] = new Array(tasks.length);
|
||||||
|
let nextIndex = 0;
|
||||||
|
async function worker(): Promise<void> {
|
||||||
|
while (true) {
|
||||||
|
const idx = nextIndex++;
|
||||||
|
if (idx >= tasks.length) break;
|
||||||
|
results[idx] = await tasks[idx]();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
const workers = Array.from(
|
||||||
|
{ length: Math.min(concurrency, tasks.length) },
|
||||||
|
() => worker(),
|
||||||
|
);
|
||||||
|
await Promise.all(workers);
|
||||||
|
return results;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Builds the corresponding real docker image for each Dockerfile class instance.
 *
 * Starts the session's local registry first and always stops it afterwards
 * (finally), even when a build fails. Supports two modes:
 * - parallel: images are grouped into dependency levels (computeLevels) and
 *   built concurrently within each level via runWithConcurrency;
 * - sequential: one image at a time, in topological order.
 * In both modes, after a build the image is re-tagged in the host daemon under
 * every base-image reference that a dependent Dockerfile uses, and pushed to
 * the local registry unless the build already pushed it (localRegistryTag set).
 *
 * @param sortedArrayArg - Dockerfiles in topological (dependency-first) order.
 * @param session - Session carrying local-registry configuration.
 * @param options - Build options, forwarded to each Dockerfile.build().
 * @returns The same array that was passed in.
 */
public static async buildDockerfiles(
  sortedArrayArg: Dockerfile[],
  session: TsDockerSession,
  options?: { platform?: string; timeout?: number; noCache?: boolean; verbose?: boolean; isRootless?: boolean; parallel?: boolean; parallelConcurrency?: number },
): Promise<Dockerfile[]> {
  const total = sortedArrayArg.length;
  const overallStart = Date.now();

  await Dockerfile.startLocalRegistry(session, options?.isRootless);

  try {
    if (options?.parallel) {
      // === PARALLEL MODE: build independent images concurrently within each level ===
      const concurrency = options.parallelConcurrency ?? 4;
      const levels = Dockerfile.computeLevels(sortedArrayArg);

      logger.log('info', `Parallel build: ${levels.length} level(s), concurrency ${concurrency}`);
      for (let l = 0; l < levels.length; l++) {
        const level = levels[l];
        logger.log('info', `  Level ${l} (${level.length}): ${level.map(df => df.cleanTag).join(', ')}`);
      }

      // `built` is a shared counter used only for progress numbering; it is
      // incremented synchronously while tasks are created, not while they run.
      let built = 0;
      for (let l = 0; l < levels.length; l++) {
        const level = levels[l];
        logger.log('info', `--- Level ${l}: building ${level.length} image(s) in parallel ---`);

        const tasks = level.map((df) => {
          const myIndex = ++built;
          return async () => {
            const progress = `(${myIndex}/${total})`;
            logger.log('info', `${progress} Building ${df.cleanTag}...`);
            const elapsed = await df.build(options);
            logger.log('ok', `${progress} Built ${df.cleanTag} in ${formatDuration(elapsed)}`);
            return df;
          };
        });

        await Dockerfile.runWithConcurrency(tasks, concurrency);

        // After the entire level completes, push all to local registry + tag for deps
        for (const df of level) {
          // Tag in host daemon for dependency resolution
          const dependentBaseImages = new Set<string>();
          for (const other of sortedArrayArg) {
            if (other.localBaseDockerfile === df && other.baseImage !== df.buildTag) {
              dependentBaseImages.add(other.baseImage);
            }
          }
          for (const fullTag of dependentBaseImages) {
            logger.log('info', `Tagging ${df.buildTag} as ${fullTag} for local dependency resolution`);
            await smartshellInstance.exec(`docker tag ${df.buildTag} ${fullTag}`);
          }
          // Push ALL images to local registry (skip if already pushed via buildx)
          if (!df.localRegistryTag) {
            await Dockerfile.pushToLocalRegistry(session, df);
          }
        }
      }
    } else {
      // === SEQUENTIAL MODE: build one at a time ===
      for (let i = 0; i < total; i++) {
        const dockerfileArg = sortedArrayArg[i];
        const progress = `(${i + 1}/${total})`;
        logger.log('info', `${progress} Building ${dockerfileArg.cleanTag}...`);

        const elapsed = await dockerfileArg.build(options);
        logger.log('ok', `${progress} Built ${dockerfileArg.cleanTag} in ${formatDuration(elapsed)}`);

        // Tag in host daemon for standard docker build compatibility
        const dependentBaseImages = new Set<string>();
        for (const other of sortedArrayArg) {
          if (other.localBaseDockerfile === dockerfileArg && other.baseImage !== dockerfileArg.buildTag) {
            dependentBaseImages.add(other.baseImage);
          }
        }
        for (const fullTag of dependentBaseImages) {
          logger.log('info', `Tagging ${dockerfileArg.buildTag} as ${fullTag} for local dependency resolution`);
          await smartshellInstance.exec(`docker tag ${dockerfileArg.buildTag} ${fullTag}`);
        }

        // Push ALL images to local registry (skip if already pushed via buildx)
        if (!dockerfileArg.localRegistryTag) {
          await Dockerfile.pushToLocalRegistry(session, dockerfileArg);
        }
      }
    }
  } finally {
    // Always tear the registry down, even when a build threw.
    await Dockerfile.stopLocalRegistry(session);
  }

  logger.log('info', `Total build time: ${formatDuration(Date.now() - overallStart)}`);
  return sortedArrayArg;
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Tests all Dockerfiles by calling Dockerfile.test()
|
||||||
|
*/
|
||||||
|
public static async testDockerfiles(sortedArrayArg: Dockerfile[]): Promise<Dockerfile[]> {
|
||||||
|
const total = sortedArrayArg.length;
|
||||||
|
const overallStart = Date.now();
|
||||||
|
|
||||||
|
for (let i = 0; i < total; i++) {
|
||||||
|
const dockerfileArg = sortedArrayArg[i];
|
||||||
|
const progress = `(${i + 1}/${total})`;
|
||||||
|
logger.log('info', `${progress} Testing ${dockerfileArg.cleanTag}...`);
|
||||||
|
|
||||||
|
const elapsed = await dockerfileArg.test();
|
||||||
|
logger.log('ok', `${progress} Tested ${dockerfileArg.cleanTag} in ${formatDuration(elapsed)}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.log('info', `Total test time: ${formatDuration(Date.now() - overallStart)}`);
|
||||||
|
return sortedArrayArg;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns a version for a docker file
|
||||||
|
* Dockerfile_latest -> latest
|
||||||
|
* Dockerfile_v1.0.0 -> v1.0.0
|
||||||
|
* Dockerfile -> latest
|
||||||
|
*/
|
||||||
|
public static dockerFileVersion(
|
||||||
|
dockerfileInstanceArg: Dockerfile,
|
||||||
|
dockerfileNameArg: string
|
||||||
|
): string {
|
||||||
|
let versionString: string;
|
||||||
|
const versionRegex = /Dockerfile_(.+)$/;
|
||||||
|
const regexResultArray = versionRegex.exec(dockerfileNameArg);
|
||||||
|
if (regexResultArray && regexResultArray.length === 2) {
|
||||||
|
versionString = regexResultArray[1];
|
||||||
|
} else {
|
||||||
|
versionString = 'latest';
|
||||||
|
}
|
||||||
|
|
||||||
|
// Replace ##version## placeholder with actual package version if available
|
||||||
|
if (dockerfileInstanceArg.managerRef?.projectInfo?.npm?.version) {
|
||||||
|
versionString = versionString.replace(
|
||||||
|
'##version##',
|
||||||
|
dockerfileInstanceArg.managerRef.projectInfo.npm.version
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
return versionString;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Extracts the base image from a Dockerfile content
|
||||||
|
* Handles ARG substitution for variable base images
|
||||||
|
*/
|
||||||
|
public static dockerBaseImage(dockerfileContentArg: string): string {
|
||||||
|
const lines = dockerfileContentArg.split(/\r?\n/);
|
||||||
|
const args: { [key: string]: string } = {};
|
||||||
|
|
||||||
|
for (const line of lines) {
|
||||||
|
const trimmedLine = line.trim();
|
||||||
|
|
||||||
|
// Skip empty lines and comments
|
||||||
|
if (trimmedLine === '' || trimmedLine.startsWith('#')) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Match ARG instructions
|
||||||
|
const argMatch = trimmedLine.match(/^ARG\s+([^\s=]+)(?:=(.*))?$/i);
|
||||||
|
if (argMatch) {
|
||||||
|
const argName = argMatch[1];
|
||||||
|
const argValue = argMatch[2] !== undefined ? argMatch[2] : process.env[argName] || '';
|
||||||
|
args[argName] = argValue;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Match FROM instructions
|
||||||
|
const fromMatch = trimmedLine.match(/^FROM\s+(.+?)(?:\s+AS\s+[^\s]+)?$/i);
|
||||||
|
if (fromMatch) {
|
||||||
|
let baseImage = fromMatch[1].trim();
|
||||||
|
|
||||||
|
// Substitute variables in the base image name
|
||||||
|
baseImage = Dockerfile.substituteVariables(baseImage, args);
|
||||||
|
|
||||||
|
return baseImage;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
throw new Error('No FROM instruction found in Dockerfile');
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Substitutes variables in a string, supporting default values like ${VAR:-default}
|
||||||
|
*/
|
||||||
|
private static substituteVariables(str: string, vars: { [key: string]: string }): string {
|
||||||
|
return str.replace(/\${([^}:]+)(:-([^}]+))?}/g, (_, varName, __, defaultValue) => {
|
||||||
|
if (vars[varName] !== undefined) {
|
||||||
|
return vars[varName];
|
||||||
|
} else if (defaultValue !== undefined) {
|
||||||
|
return defaultValue;
|
||||||
|
} else {
|
||||||
|
return '';
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Extracts the repo:version part from a full image reference, stripping any registry prefix.
|
||||||
|
* Examples:
|
||||||
|
* "registry.example.com/repo:version" -> "repo:version"
|
||||||
|
* "repo:version" -> "repo:version"
|
||||||
|
* "host.today/ht-docker-node:npmci" -> "ht-docker-node:npmci"
|
||||||
|
*/
|
||||||
|
private static extractRepoVersion(imageRef: string): string {
|
||||||
|
const parts = imageRef.split('/');
|
||||||
|
if (parts.length === 1) {
|
||||||
|
// No registry prefix: "repo:version"
|
||||||
|
return imageRef;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if first part looks like a registry (contains '.' or ':' or is 'localhost')
|
||||||
|
const firstPart = parts[0];
|
||||||
|
const looksLikeRegistry =
|
||||||
|
firstPart.includes('.') || firstPart.includes(':') || firstPart === 'localhost';
|
||||||
|
|
||||||
|
if (looksLikeRegistry) {
|
||||||
|
// Strip registry: "registry.example.com/repo:version" -> "repo:version"
|
||||||
|
return parts.slice(1).join('/');
|
||||||
|
}
|
||||||
|
|
||||||
|
// No registry prefix, could be "org/repo:version"
|
||||||
|
return imageRef;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns the docker tag string for a given registry and repo
|
||||||
|
*/
|
||||||
|
public static getDockerTagString(
|
||||||
|
managerRef: TsDockerManager,
|
||||||
|
registryArg: string,
|
||||||
|
repoArg: string,
|
||||||
|
versionArg: string,
|
||||||
|
suffixArg?: string
|
||||||
|
): string {
|
||||||
|
// Determine whether the repo should be mapped according to the registry
|
||||||
|
const config = managerRef.config;
|
||||||
|
const mappedRepo = config.registryRepoMap?.[registryArg];
|
||||||
|
const repo = mappedRepo || repoArg;
|
||||||
|
|
||||||
|
// Determine whether the version contains a suffix
|
||||||
|
let version = versionArg;
|
||||||
|
if (suffixArg) {
|
||||||
|
version = versionArg + '_' + suffixArg;
|
||||||
|
}
|
||||||
|
|
||||||
|
const tagString = `${registryArg}/${repo}:${version}`;
|
||||||
|
return tagString;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Gets build args from environment variable mapping
|
||||||
|
*/
|
||||||
|
public static async getDockerBuildArgs(managerRef: TsDockerManager): Promise<string> {
|
||||||
|
logger.log('info', 'checking for env vars to be supplied to the docker build');
|
||||||
|
let buildArgsString: string = '';
|
||||||
|
const config = managerRef.config;
|
||||||
|
|
||||||
|
if (config.buildArgEnvMap) {
|
||||||
|
for (const dockerArgKey of Object.keys(config.buildArgEnvMap)) {
|
||||||
|
const dockerArgOuterEnvVar = config.buildArgEnvMap[dockerArgKey];
|
||||||
|
logger.log(
|
||||||
|
'note',
|
||||||
|
`docker ARG "${dockerArgKey}" maps to outer env var "${dockerArgOuterEnvVar}"`
|
||||||
|
);
|
||||||
|
const targetValue = process.env[dockerArgOuterEnvVar];
|
||||||
|
if (targetValue) {
|
||||||
|
buildArgsString = `${buildArgsString} --build-arg ${dockerArgKey}="${targetValue}"`;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return buildArgsString;
|
||||||
|
}
|
||||||
|
|
||||||
|
// INSTANCE PROPERTIES
// Back-reference to the owning manager (config, project info).
public managerRef: TsDockerManager;
// Optional build session (local registry config); absent outside a session.
public session?: TsDockerSession;
// Absolute or cwd-relative path to the Dockerfile on disk.
public filePath!: string;
// Repository name derived from the npm package name or the directory name.
public repo: string;
// Version parsed from the Dockerfile filename (e.g. Dockerfile_v1.0.0 -> v1.0.0).
public version: string;
// "repo:version" — registry-free tag used as the canonical identifier.
public cleanTag: string;
// Tag used for 'docker build -t'; initialized to cleanTag.
public buildTag: string;
// Fully qualified destination tag; set by push().
public pushTag!: string;
// Container name derived from the version.
public containerName: string;
// Raw Dockerfile text; set from file or from options.fileContents.
public content!: string;
// Base image extracted from the first FROM instruction.
public baseImage: string;
// True when baseImage is built by another local Dockerfile.
public localBaseImageDependent: boolean;
// The local Dockerfile that builds baseImage (set during sorting/mapping).
public localBaseDockerfile!: Dockerfile;
// Registry-qualified tag once pushed to the session's local registry.
public localRegistryTag?: string;
|
||||||
|
|
||||||
|
/**
 * Creates a Dockerfile wrapper from a file path (optionally reading its
 * content from disk) or from inline file contents.
 *
 * Derives repo, version, tags and container name, then extracts the base
 * image from the content.
 */
constructor(managerRefArg: TsDockerManager, options: IDockerfileOptions) {
  this.managerRef = managerRefArg;
  this.filePath = options.filePath!;

  // Build repo name from project info or directory name
  const projectInfo = this.managerRef.projectInfo;
  if (projectInfo?.npm?.name) {
    // Use package name, removing scope if present
    const packageName = projectInfo.npm.name.replace(/^@[^/]+\//, '');
    this.repo = packageName;
  } else {
    // Fallback to directory name
    this.repo = plugins.path.basename(paths.cwd);
  }

  // Version comes from the Dockerfile's filename (Dockerfile_<version>).
  this.version = Dockerfile.dockerFileVersion(this, plugins.path.parse(this.filePath).base);
  this.cleanTag = this.repo + ':' + this.version;
  this.buildTag = this.cleanTag;
  this.containerName = 'dockerfile-' + this.version;

  // Load content from disk when asked, otherwise accept inline contents.
  // NOTE(review): if neither (filePath && read) nor fileContents is given,
  // this.content stays undefined and dockerBaseImage below will throw on
  // .split — confirm callers always provide one of the two.
  if (options.filePath && options.read) {
    this.content = fs.readFileSync(plugins.path.resolve(options.filePath), 'utf-8');
  } else if (options.fileContents) {
    this.content = options.fileContents;
  }

  this.baseImage = Dockerfile.dockerBaseImage(this.content);
  this.localBaseImageDependent = false;
}
|
||||||
|
|
||||||
|
/**
 * Creates a line-by-line handler for Docker build output that logs
 * recognized layer/step lines in an emphasized format.
 *
 * The returned handleChunk buffers partial lines across chunks, splits on
 * newlines, and matches each complete line against buildx and classic
 * `docker build` output patterns (first match wins, then return).
 *
 * @param verbose - When true, every raw line is also echoed to stdout,
 *   prefixed with this Dockerfile's cleanTag.
 */
private createBuildOutputHandler(verbose: boolean): {
  handleChunk: (chunk: Buffer | string) => void;
} {
  // Holds the trailing partial line between chunks.
  let buffer = '';
  const tag = this.cleanTag;

  const handleLine = (line: string) => {
    // In verbose mode, write raw output prefixed with tag for identification
    if (verbose) {
      process.stdout.write(`[${tag}] ${line}\n`);
    }

    // Buildx step: #N [platform step/total] INSTRUCTION
    const bxStep = line.match(/^#\d+ \[([^\]]+?)(\d+\/\d+)\] (.+)/);
    if (bxStep) {
      const prefix = bxStep[1].trim();
      const step = bxStep[2];
      const instruction = bxStep[3];
      const platform = extractPlatform(prefix);
      const platStr = platform ? `${platform} ▸ ` : '';
      logger.log('note', `[${tag}] ${platStr}[${step}] ${instruction}`);
      return;
    }

    // Buildx CACHED: #N CACHED
    // (the captured step number is currently unused; only "CACHED" is logged)
    const bxCached = line.match(/^#(\d+) CACHED/);
    if (bxCached) {
      logger.log('note', `[${tag}] CACHED`);
      return;
    }

    // Buildx DONE: #N DONE 12.3s — near-zero timings are suppressed as noise
    const bxDone = line.match(/^#\d+ DONE (.+)/);
    if (bxDone) {
      const timing = bxDone[1];
      if (!timing.startsWith('0.0')) {
        logger.log('note', `[${tag}] DONE ${timing}`);
      }
      return;
    }

    // Buildx export phase: #N exporting ...
    const bxExport = line.match(/^#\d+ exporting (.+)/);
    if (bxExport) {
      logger.log('note', `[${tag}] exporting ${bxExport[1]}`);
      return;
    }

    // Standard docker build: Step N/M : INSTRUCTION
    const stdStep = line.match(/^Step (\d+\/\d+) : (.+)/);
    if (stdStep) {
      logger.log('note', `[${tag}] Step ${stdStep[1]}: ${stdStep[2]}`);
      return;
    }
  };

  return {
    handleChunk: (chunk: Buffer | string) => {
      buffer += chunk.toString();
      const lines = buffer.split('\n');
      // The last element may be an incomplete line — keep it for the next chunk.
      buffer = lines.pop() || '';
      for (const line of lines) {
        // Strip a trailing CR (CRLF output) and surrounding whitespace.
        const trimmed = line.replace(/\r$/, '').trim();
        if (trimmed) handleLine(trimmed);
      }
    },
  };
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Builds the Dockerfile
|
||||||
|
*/
|
||||||
|
public async build(options?: { platform?: string; timeout?: number; noCache?: boolean; verbose?: boolean }): Promise<number> {
|
||||||
|
const startTime = Date.now();
|
||||||
|
const buildArgsString = await Dockerfile.getDockerBuildArgs(this.managerRef);
|
||||||
|
const config = this.managerRef.config;
|
||||||
|
const platformOverride = options?.platform;
|
||||||
|
const timeout = options?.timeout;
|
||||||
|
const noCacheFlag = options?.noCache ? ' --no-cache' : '';
|
||||||
|
const verbose = options?.verbose ?? false;
|
||||||
|
|
||||||
|
let buildContextFlag = '';
|
||||||
|
if (this.localBaseImageDependent && this.localBaseDockerfile) {
|
||||||
|
const fromImage = this.baseImage;
|
||||||
|
if (this.localBaseDockerfile.localRegistryTag) {
|
||||||
|
// BuildKit pulls from the local registry (reachable via host network)
|
||||||
|
const registryTag = this.localBaseDockerfile.localRegistryTag;
|
||||||
|
buildContextFlag = ` --build-context "${fromImage}=docker-image://${registryTag}"`;
|
||||||
|
logger.log('info', `Using local registry build context: ${fromImage} -> docker-image://${registryTag}`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let buildCommand: string;
|
||||||
|
|
||||||
|
if (platformOverride) {
|
||||||
|
// Single platform override via buildx
|
||||||
|
buildCommand = `docker buildx build --progress=plain --platform ${platformOverride}${noCacheFlag}${buildContextFlag} --load -t ${this.buildTag} -f ${this.filePath} ${buildArgsString} .`;
|
||||||
|
logger.log('info', `Build: buildx --platform ${platformOverride} --load`);
|
||||||
|
} else if (config.platforms && config.platforms.length > 1) {
|
||||||
|
// Multi-platform build using buildx — always push to local registry
|
||||||
|
const platformString = config.platforms.join(',');
|
||||||
|
const registryHost = this.session?.config.registryHost || 'localhost:5234';
|
||||||
|
const localTag = `${registryHost}/${this.buildTag}`;
|
||||||
|
buildCommand = `docker buildx build --progress=plain --platform ${platformString}${noCacheFlag}${buildContextFlag} -t ${localTag} -f ${this.filePath} ${buildArgsString} --push .`;
|
||||||
|
this.localRegistryTag = localTag;
|
||||||
|
logger.log('info', `Build: buildx --platform ${platformString} --push to local registry`);
|
||||||
|
} else {
|
||||||
|
// Standard build
|
||||||
|
const versionLabel = this.managerRef.projectInfo?.npm?.version || 'unknown';
|
||||||
|
buildCommand = `docker build --progress=plain --label="version=${versionLabel}"${noCacheFlag} -t ${this.buildTag} -f ${this.filePath} ${buildArgsString} .`;
|
||||||
|
logger.log('info', 'Build: docker build (standard)');
|
||||||
|
}
|
||||||
|
|
||||||
|
// Execute build with real-time layer logging
|
||||||
|
const handler = this.createBuildOutputHandler(verbose);
|
||||||
|
const streaming = await smartshellInstance.execStreamingSilent(buildCommand);
|
||||||
|
|
||||||
|
// Intercept output for layer logging
|
||||||
|
streaming.childProcess.stdout?.on('data', handler.handleChunk);
|
||||||
|
streaming.childProcess.stderr?.on('data', handler.handleChunk);
|
||||||
|
|
||||||
|
if (timeout) {
|
||||||
|
const timeoutPromise = new Promise<never>((_, reject) => {
|
||||||
|
setTimeout(() => {
|
||||||
|
streaming.childProcess.kill();
|
||||||
|
reject(new Error(`Build timed out after ${timeout}s for ${this.cleanTag}`));
|
||||||
|
}, timeout * 1000);
|
||||||
|
});
|
||||||
|
const result = await Promise.race([streaming.finalPromise, timeoutPromise]);
|
||||||
|
if (result.exitCode !== 0) {
|
||||||
|
logger.log('error', `Build failed for ${this.cleanTag}`);
|
||||||
|
throw new Error(`Build failed for ${this.cleanTag}`);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
const result = await streaming.finalPromise;
|
||||||
|
if (result.exitCode !== 0) {
|
||||||
|
logger.log('error', `Build failed for ${this.cleanTag}`);
|
||||||
|
if (!verbose && result.stdout) {
|
||||||
|
logger.log('error', `Build output:\n${result.stdout}`);
|
||||||
|
}
|
||||||
|
throw new Error(`Build failed for ${this.cleanTag}`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return Date.now() - startTime;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Pushes the Dockerfile to a registry using OCI Distribution API copy
|
||||||
|
* from the local registry to the remote registry.
|
||||||
|
*/
|
||||||
|
public async push(dockerRegistryArg: DockerRegistry, versionSuffix?: string): Promise<void> {
|
||||||
|
const destRepo = this.getDestRepo(dockerRegistryArg.registryUrl);
|
||||||
|
const destTag = versionSuffix ? `${this.version}_${versionSuffix}` : this.version;
|
||||||
|
const registryCopy = new RegistryCopy();
|
||||||
|
const registryHost = this.session?.config.registryHost || 'localhost:5234';
|
||||||
|
|
||||||
|
this.pushTag = `${dockerRegistryArg.registryUrl}/${destRepo}:${destTag}`;
|
||||||
|
logger.log('info', `Pushing ${this.pushTag} via OCI copy from local registry...`);
|
||||||
|
|
||||||
|
await registryCopy.copyImage(
|
||||||
|
registryHost,
|
||||||
|
this.repo,
|
||||||
|
this.version,
|
||||||
|
dockerRegistryArg.registryUrl,
|
||||||
|
destRepo,
|
||||||
|
destTag,
|
||||||
|
{ username: dockerRegistryArg.username, password: dockerRegistryArg.password },
|
||||||
|
);
|
||||||
|
|
||||||
|
logger.log('ok', `Pushed ${this.pushTag}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns the destination repository for a given registry URL,
|
||||||
|
* using registryRepoMap if configured, otherwise the default repo.
|
||||||
|
*/
|
||||||
|
private getDestRepo(registryUrl: string): string {
|
||||||
|
const config = this.managerRef.config;
|
||||||
|
return config.registryRepoMap?.[registryUrl] || this.repo;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Pulls the Dockerfile from a registry
|
||||||
|
*/
|
||||||
|
public async pull(registryArg: DockerRegistry, versionSuffixArg?: string): Promise<void> {
|
||||||
|
const pullTag = Dockerfile.getDockerTagString(
|
||||||
|
this.managerRef,
|
||||||
|
registryArg.registryUrl,
|
||||||
|
this.repo,
|
||||||
|
this.version,
|
||||||
|
versionSuffixArg
|
||||||
|
);
|
||||||
|
|
||||||
|
await smartshellInstance.exec(`docker pull ${pullTag}`);
|
||||||
|
await smartshellInstance.exec(`docker tag ${pullTag} ${this.buildTag}`);
|
||||||
|
|
||||||
|
logger.log('ok', `Pulled and tagged ${pullTag} as ${this.buildTag}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Tests the Dockerfile by running a test script if it exists.
 * For multi-platform builds, uses the local registry tag so Docker can auto-pull.
 *
 * The test flow: start a container from the image to create /tsdocker_test,
 * copy the test script in, commit the container to a throwaway image, run the
 * script in a fresh container of that image, then clean both up.
 *
 * @returns Elapsed test time in milliseconds (also when tests are skipped).
 * @throws Error when the test script exits non-zero.
 */
public async test(): Promise<number> {
  const startTime = Date.now();
  // Test scripts live in <testDir>/test_<version>.sh
  const testDir = this.managerRef.config.testDir || plugins.path.join(paths.cwd, 'test');
  const testFile = plugins.path.join(testDir, 'test_' + this.version + '.sh');
  // Use local registry tag for multi-platform images (not in daemon), otherwise buildTag
  const imageRef = this.localRegistryTag || this.buildTag;

  // Session-unique names avoid collisions between concurrent sessions.
  const sessionId = this.session?.config.sessionId || 'default';
  const testContainerName = `tsdocker_test_${sessionId}`;
  const testImageName = `tsdocker_test_image_${sessionId}`;

  const testFileExists = fs.existsSync(testFile);

  if (testFileExists) {
    // Run tests in container
    // NOTE(review): if one of the intermediate exec calls fails/throws, the
    // test container/image are not cleaned up — consider a try/finally.
    await smartshellInstance.exec(
      `docker run --name ${testContainerName} --entrypoint="bash" ${imageRef} -c "mkdir /tsdocker_test"`
    );
    await smartshellInstance.exec(`docker cp ${testFile} ${testContainerName}:/tsdocker_test/test.sh`);
    await smartshellInstance.exec(`docker commit ${testContainerName} ${testImageName}`);

    // Execute the script with bash -x for traced output.
    const testResult = await smartshellInstance.exec(
      `docker run --entrypoint="bash" ${testImageName} -x /tsdocker_test/test.sh`
    );

    // Cleanup
    await smartshellInstance.exec(`docker rm ${testContainerName}`);
    await smartshellInstance.exec(`docker rmi --force ${testImageName}`);

    if (testResult.exitCode !== 0) {
      throw new Error(`Tests failed for ${this.cleanTag}`);
    }
  } else {
    logger.log('warn', `Skipping tests for ${this.cleanTag} — no test file at ${testFile}`);
  }

  return Date.now() - startTime;
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Gets the ID of a built Docker image
|
||||||
|
*/
|
||||||
|
public async getId(): Promise<string> {
|
||||||
|
const result = await smartshellInstance.exec(
|
||||||
|
'docker inspect --type=image --format="{{.Id}}" ' + this.buildTag
|
||||||
|
);
|
||||||
|
return result.stdout.trim();
|
||||||
|
}
|
||||||
|
}
|
||||||
91
ts/classes.dockerregistry.ts
Normal file
91
ts/classes.dockerregistry.ts
Normal file
@@ -0,0 +1,91 @@
|
|||||||
|
import * as plugins from './tsdocker.plugins.js';
|
||||||
|
import { logger } from './tsdocker.logging.js';
|
||||||
|
import type { IDockerRegistryOptions } from './interfaces/index.js';
|
||||||
|
|
||||||
|
// Shared shell runner used by this module to issue `docker login`/`logout` CLI commands.
const smartshellInstance = new plugins.smartshell.Smartshell({
  executor: 'bash',
});
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Represents a Docker registry with authentication capabilities
|
||||||
|
*/
|
||||||
|
export class DockerRegistry {
|
||||||
|
public registryUrl: string;
|
||||||
|
public username: string;
|
||||||
|
public password: string;
|
||||||
|
|
||||||
|
constructor(optionsArg: IDockerRegistryOptions) {
|
||||||
|
this.registryUrl = optionsArg.registryUrl;
|
||||||
|
this.username = optionsArg.username;
|
||||||
|
this.password = optionsArg.password;
|
||||||
|
logger.log('info', `created DockerRegistry for ${this.registryUrl}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a DockerRegistry instance from a pipe-delimited environment string
|
||||||
|
* Format: "registryUrl|username|password"
|
||||||
|
*/
|
||||||
|
public static fromEnvString(envString: string): DockerRegistry {
|
||||||
|
const dockerRegexResultArray = envString.split('|');
|
||||||
|
if (dockerRegexResultArray.length !== 3) {
|
||||||
|
logger.log('error', 'malformed docker env var...');
|
||||||
|
throw new Error('malformed docker env var, expected format: registryUrl|username|password');
|
||||||
|
}
|
||||||
|
const registryUrl = dockerRegexResultArray[0].replace('https://', '').replace('http://', '');
|
||||||
|
const username = dockerRegexResultArray[1];
|
||||||
|
const password = dockerRegexResultArray[2];
|
||||||
|
return new DockerRegistry({
|
||||||
|
registryUrl: registryUrl,
|
||||||
|
username: username,
|
||||||
|
password: password,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a DockerRegistry from environment variables
|
||||||
|
* Looks for DOCKER_REGISTRY, DOCKER_REGISTRY_USER, DOCKER_REGISTRY_PASSWORD
|
||||||
|
* Or for a specific registry: DOCKER_REGISTRY_<NAME>, etc.
|
||||||
|
*/
|
||||||
|
public static fromEnv(registryName?: string): DockerRegistry | null {
|
||||||
|
const prefix = registryName ? `DOCKER_REGISTRY_${registryName.toUpperCase()}_` : 'DOCKER_REGISTRY_';
|
||||||
|
|
||||||
|
const registryUrl = process.env[`${prefix}URL`] || process.env['DOCKER_REGISTRY'];
|
||||||
|
const username = process.env[`${prefix}USER`] || process.env['DOCKER_REGISTRY_USER'];
|
||||||
|
const password = process.env[`${prefix}PASSWORD`] || process.env['DOCKER_REGISTRY_PASSWORD'];
|
||||||
|
|
||||||
|
if (!registryUrl || !username || !password) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
return new DockerRegistry({
|
||||||
|
registryUrl: registryUrl.replace('https://', '').replace('http://', ''),
|
||||||
|
username,
|
||||||
|
password,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Logs in to the Docker registry
|
||||||
|
*/
|
||||||
|
public async login(): Promise<void> {
|
||||||
|
if (this.registryUrl === 'docker.io') {
|
||||||
|
await smartshellInstance.exec(`docker login -u ${this.username} -p ${this.password}`);
|
||||||
|
logger.log('info', 'Logged in to standard docker hub');
|
||||||
|
} else {
|
||||||
|
await smartshellInstance.exec(`docker login -u ${this.username} -p ${this.password} ${this.registryUrl}`);
|
||||||
|
}
|
||||||
|
logger.log('ok', `docker authenticated for ${this.registryUrl}!`);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Logs out from the Docker registry
|
||||||
|
*/
|
||||||
|
public async logout(): Promise<void> {
|
||||||
|
if (this.registryUrl === 'docker.io') {
|
||||||
|
await smartshellInstance.exec('docker logout');
|
||||||
|
} else {
|
||||||
|
await smartshellInstance.exec(`docker logout ${this.registryUrl}`);
|
||||||
|
}
|
||||||
|
logger.log('info', `logged out from ${this.registryUrl}`);
|
||||||
|
}
|
||||||
|
}
|
||||||
511
ts/classes.registrycopy.ts
Normal file
511
ts/classes.registrycopy.ts
Normal file
@@ -0,0 +1,511 @@
|
|||||||
|
import * as crypto from 'crypto';
import * as fs from 'fs';
import * as os from 'os';
import * as path from 'path';
import { logger } from './tsdocker.logging.js';
|
||||||
|
|
||||||
|
// Basic-auth credentials for a registry.
interface IRegistryCredentials {
  username: string;
  password: string;
}

// Bearer tokens keyed by "<registry>/<scope>", with an absolute expiry (ms since epoch).
interface ITokenCache {
  [scope: string]: { token: string; expiry: number };
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* OCI Distribution API client for copying images between registries.
|
||||||
|
* Supports manifest lists (multi-arch) and single-platform manifests.
|
||||||
|
* Uses native fetch (Node 18+).
|
||||||
|
*/
|
||||||
|
export class RegistryCopy {
|
||||||
|
private tokenCache: ITokenCache = {};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Reads Docker credentials from ~/.docker/config.json for a given registry.
|
||||||
|
* Supports base64-encoded "auth" field in the config.
|
||||||
|
*/
|
||||||
|
public static getDockerConfigCredentials(registryUrl: string): IRegistryCredentials | null {
|
||||||
|
try {
|
||||||
|
const configPath = path.join(os.homedir(), '.docker', 'config.json');
|
||||||
|
if (!fs.existsSync(configPath)) return null;
|
||||||
|
|
||||||
|
const config = JSON.parse(fs.readFileSync(configPath, 'utf-8'));
|
||||||
|
const auths = config.auths || {};
|
||||||
|
|
||||||
|
// Try exact match first, then common variations
|
||||||
|
const keys = [
|
||||||
|
registryUrl,
|
||||||
|
`https://${registryUrl}`,
|
||||||
|
`http://${registryUrl}`,
|
||||||
|
];
|
||||||
|
|
||||||
|
// Docker Hub special cases
|
||||||
|
if (registryUrl === 'docker.io' || registryUrl === 'registry-1.docker.io') {
|
||||||
|
keys.push(
|
||||||
|
'https://index.docker.io/v1/',
|
||||||
|
'https://index.docker.io/v2/',
|
||||||
|
'index.docker.io',
|
||||||
|
'docker.io',
|
||||||
|
'registry-1.docker.io',
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
for (const key of keys) {
|
||||||
|
if (auths[key]?.auth) {
|
||||||
|
const decoded = Buffer.from(auths[key].auth, 'base64').toString('utf-8');
|
||||||
|
const colonIndex = decoded.indexOf(':');
|
||||||
|
if (colonIndex > 0) {
|
||||||
|
return {
|
||||||
|
username: decoded.substring(0, colonIndex),
|
||||||
|
password: decoded.substring(colonIndex + 1),
|
||||||
|
};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return null;
|
||||||
|
} catch {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns the API base URL for a registry.
|
||||||
|
* Docker Hub uses registry-1.docker.io as API endpoint.
|
||||||
|
*/
|
||||||
|
private getRegistryApiBase(registry: string): string {
|
||||||
|
if (registry === 'docker.io' || registry === 'index.docker.io') {
|
||||||
|
return 'https://registry-1.docker.io';
|
||||||
|
}
|
||||||
|
// Local registries (localhost) use HTTP
|
||||||
|
if (registry.startsWith('localhost') || registry.startsWith('127.0.0.1')) {
|
||||||
|
return `http://${registry}`;
|
||||||
|
}
|
||||||
|
return `https://${registry}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Obtains a Bearer token for registry operations.
|
||||||
|
* Follows the standard Docker auth flow:
|
||||||
|
* GET /v2/ → 401 with Www-Authenticate → request token
|
||||||
|
*/
|
||||||
|
private async getToken(
|
||||||
|
registry: string,
|
||||||
|
repo: string,
|
||||||
|
actions: string,
|
||||||
|
credentials?: IRegistryCredentials | null,
|
||||||
|
): Promise<string | null> {
|
||||||
|
const scope = `repository:${repo}:${actions}`;
|
||||||
|
const cached = this.tokenCache[`${registry}/${scope}`];
|
||||||
|
if (cached && cached.expiry > Date.now()) {
|
||||||
|
return cached.token;
|
||||||
|
}
|
||||||
|
|
||||||
|
const apiBase = this.getRegistryApiBase(registry);
|
||||||
|
|
||||||
|
// Local registries typically don't need auth
|
||||||
|
if (registry.startsWith('localhost') || registry.startsWith('127.0.0.1')) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
const checkResp = await fetch(`${apiBase}/v2/`, { method: 'GET' });
|
||||||
|
if (checkResp.ok) return null; // No auth needed
|
||||||
|
|
||||||
|
const wwwAuth = checkResp.headers.get('www-authenticate') || '';
|
||||||
|
const realmMatch = wwwAuth.match(/realm="([^"]+)"/);
|
||||||
|
const serviceMatch = wwwAuth.match(/service="([^"]+)"/);
|
||||||
|
|
||||||
|
if (!realmMatch) return null;
|
||||||
|
|
||||||
|
const realm = realmMatch[1];
|
||||||
|
const service = serviceMatch ? serviceMatch[1] : '';
|
||||||
|
|
||||||
|
const tokenUrl = new URL(realm);
|
||||||
|
tokenUrl.searchParams.set('scope', scope);
|
||||||
|
if (service) tokenUrl.searchParams.set('service', service);
|
||||||
|
|
||||||
|
const headers: Record<string, string> = {};
|
||||||
|
const creds = credentials || RegistryCopy.getDockerConfigCredentials(registry);
|
||||||
|
if (creds) {
|
||||||
|
headers['Authorization'] = 'Basic ' + Buffer.from(`${creds.username}:${creds.password}`).toString('base64');
|
||||||
|
}
|
||||||
|
|
||||||
|
const tokenResp = await fetch(tokenUrl.toString(), { headers });
|
||||||
|
if (!tokenResp.ok) {
|
||||||
|
const body = await tokenResp.text();
|
||||||
|
throw new Error(`Token request failed (${tokenResp.status}): ${body}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
const tokenData = await tokenResp.json() as any;
|
||||||
|
const token = tokenData.token || tokenData.access_token;
|
||||||
|
|
||||||
|
if (token) {
|
||||||
|
// Cache for 5 minutes (conservative)
|
||||||
|
this.tokenCache[`${registry}/${scope}`] = {
|
||||||
|
token,
|
||||||
|
expiry: Date.now() + 5 * 60 * 1000,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
return token;
|
||||||
|
} catch (err) {
|
||||||
|
logger.log('warn', `Auth for ${registry}: ${(err as Error).message}`);
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Makes an authenticated request to a registry.
|
||||||
|
*/
|
||||||
|
private async registryFetch(
|
||||||
|
registry: string,
|
||||||
|
path: string,
|
||||||
|
options: {
|
||||||
|
method?: string;
|
||||||
|
headers?: Record<string, string>;
|
||||||
|
body?: Buffer | ReadableStream | null;
|
||||||
|
repo?: string;
|
||||||
|
actions?: string;
|
||||||
|
credentials?: IRegistryCredentials | null;
|
||||||
|
} = {},
|
||||||
|
): Promise<Response> {
|
||||||
|
const apiBase = this.getRegistryApiBase(registry);
|
||||||
|
const method = options.method || 'GET';
|
||||||
|
const headers: Record<string, string> = { ...(options.headers || {}) };
|
||||||
|
|
||||||
|
const repo = options.repo || '';
|
||||||
|
const actions = options.actions || 'pull';
|
||||||
|
const token = await this.getToken(registry, repo, actions, options.credentials);
|
||||||
|
|
||||||
|
if (token) {
|
||||||
|
headers['Authorization'] = `Bearer ${token}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
const url = `${apiBase}${path}`;
|
||||||
|
const fetchOptions: any = { method, headers };
|
||||||
|
if (options.body) {
|
||||||
|
fetchOptions.body = options.body;
|
||||||
|
fetchOptions.duplex = 'half'; // Required for streaming body in Node
|
||||||
|
}
|
||||||
|
|
||||||
|
return fetch(url, fetchOptions);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Gets a manifest from a registry (supports both manifest lists and single manifests).
|
||||||
|
*/
|
||||||
|
private async getManifest(
|
||||||
|
registry: string,
|
||||||
|
repo: string,
|
||||||
|
reference: string,
|
||||||
|
credentials?: IRegistryCredentials | null,
|
||||||
|
): Promise<{ contentType: string; body: any; digest: string; raw: Buffer }> {
|
||||||
|
const accept = [
|
||||||
|
'application/vnd.oci.image.index.v1+json',
|
||||||
|
'application/vnd.docker.distribution.manifest.list.v2+json',
|
||||||
|
'application/vnd.oci.image.manifest.v1+json',
|
||||||
|
'application/vnd.docker.distribution.manifest.v2+json',
|
||||||
|
].join(', ');
|
||||||
|
|
||||||
|
const resp = await this.registryFetch(registry, `/v2/${repo}/manifests/${reference}`, {
|
||||||
|
headers: { 'Accept': accept },
|
||||||
|
repo,
|
||||||
|
actions: 'pull',
|
||||||
|
credentials,
|
||||||
|
});
|
||||||
|
|
||||||
|
if (!resp.ok) {
|
||||||
|
const body = await resp.text();
|
||||||
|
throw new Error(`Failed to get manifest ${registry}/${repo}:${reference} (${resp.status}): ${body}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
const raw = Buffer.from(await resp.arrayBuffer());
|
||||||
|
const contentType = resp.headers.get('content-type') || '';
|
||||||
|
const digest = resp.headers.get('docker-content-digest') || this.computeDigest(raw);
|
||||||
|
const body = JSON.parse(raw.toString('utf-8'));
|
||||||
|
|
||||||
|
return { contentType, body, digest, raw };
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Checks if a blob exists in the destination registry.
|
||||||
|
*/
|
||||||
|
private async blobExists(
|
||||||
|
registry: string,
|
||||||
|
repo: string,
|
||||||
|
digest: string,
|
||||||
|
credentials?: IRegistryCredentials | null,
|
||||||
|
): Promise<boolean> {
|
||||||
|
const resp = await this.registryFetch(registry, `/v2/${repo}/blobs/${digest}`, {
|
||||||
|
method: 'HEAD',
|
||||||
|
repo,
|
||||||
|
actions: 'pull,push',
|
||||||
|
credentials,
|
||||||
|
});
|
||||||
|
return resp.ok;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Copies a single blob from source to destination registry.
|
||||||
|
* Uses monolithic upload (POST initiate + PUT complete).
|
||||||
|
*/
|
||||||
|
private async copyBlob(
|
||||||
|
srcRegistry: string,
|
||||||
|
srcRepo: string,
|
||||||
|
destRegistry: string,
|
||||||
|
destRepo: string,
|
||||||
|
digest: string,
|
||||||
|
srcCredentials?: IRegistryCredentials | null,
|
||||||
|
destCredentials?: IRegistryCredentials | null,
|
||||||
|
): Promise<void> {
|
||||||
|
// Check if blob already exists at destination
|
||||||
|
const exists = await this.blobExists(destRegistry, destRepo, digest, destCredentials);
|
||||||
|
if (exists) {
|
||||||
|
logger.log('info', ` Blob ${digest.substring(0, 19)}... already exists, skipping`);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Download blob from source
|
||||||
|
const getResp = await this.registryFetch(srcRegistry, `/v2/${srcRepo}/blobs/${digest}`, {
|
||||||
|
repo: srcRepo,
|
||||||
|
actions: 'pull',
|
||||||
|
credentials: srcCredentials,
|
||||||
|
});
|
||||||
|
|
||||||
|
if (!getResp.ok) {
|
||||||
|
throw new Error(`Failed to get blob ${digest} from ${srcRegistry}/${srcRepo}: ${getResp.status}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
const blobData = Buffer.from(await getResp.arrayBuffer());
|
||||||
|
const blobSize = blobData.length;
|
||||||
|
|
||||||
|
// Initiate upload at destination
|
||||||
|
const postResp = await this.registryFetch(destRegistry, `/v2/${destRepo}/blobs/uploads/`, {
|
||||||
|
method: 'POST',
|
||||||
|
headers: { 'Content-Length': '0' },
|
||||||
|
repo: destRepo,
|
||||||
|
actions: 'pull,push',
|
||||||
|
credentials: destCredentials,
|
||||||
|
});
|
||||||
|
|
||||||
|
if (!postResp.ok && postResp.status !== 202) {
|
||||||
|
const body = await postResp.text();
|
||||||
|
throw new Error(`Failed to initiate upload at ${destRegistry}/${destRepo}: ${postResp.status} ${body}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get upload URL from Location header
|
||||||
|
let uploadUrl = postResp.headers.get('location') || '';
|
||||||
|
if (!uploadUrl) {
|
||||||
|
throw new Error(`No upload location returned from ${destRegistry}/${destRepo}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make upload URL absolute if relative
|
||||||
|
if (uploadUrl.startsWith('/')) {
|
||||||
|
const apiBase = this.getRegistryApiBase(destRegistry);
|
||||||
|
uploadUrl = `${apiBase}${uploadUrl}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Complete upload with PUT (monolithic)
|
||||||
|
const separator = uploadUrl.includes('?') ? '&' : '?';
|
||||||
|
const putUrl = `${uploadUrl}${separator}digest=${encodeURIComponent(digest)}`;
|
||||||
|
|
||||||
|
// For PUT to the upload URL, we need auth
|
||||||
|
const token = await this.getToken(destRegistry, destRepo, 'pull,push', destCredentials);
|
||||||
|
const putHeaders: Record<string, string> = {
|
||||||
|
'Content-Type': 'application/octet-stream',
|
||||||
|
'Content-Length': String(blobSize),
|
||||||
|
};
|
||||||
|
if (token) {
|
||||||
|
putHeaders['Authorization'] = `Bearer ${token}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
const putResp = await fetch(putUrl, {
|
||||||
|
method: 'PUT',
|
||||||
|
headers: putHeaders,
|
||||||
|
body: blobData,
|
||||||
|
});
|
||||||
|
|
||||||
|
if (!putResp.ok) {
|
||||||
|
const body = await putResp.text();
|
||||||
|
throw new Error(`Failed to upload blob ${digest} to ${destRegistry}/${destRepo}: ${putResp.status} ${body}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
const sizeStr = blobSize > 1048576
|
||||||
|
? `${(blobSize / 1048576).toFixed(1)} MB`
|
||||||
|
: `${(blobSize / 1024).toFixed(1)} KB`;
|
||||||
|
logger.log('info', ` Copied blob ${digest.substring(0, 19)}... (${sizeStr})`);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Pushes a manifest to a registry.
|
||||||
|
*/
|
||||||
|
private async putManifest(
|
||||||
|
registry: string,
|
||||||
|
repo: string,
|
||||||
|
reference: string,
|
||||||
|
manifest: Buffer,
|
||||||
|
contentType: string,
|
||||||
|
credentials?: IRegistryCredentials | null,
|
||||||
|
): Promise<string> {
|
||||||
|
const resp = await this.registryFetch(registry, `/v2/${repo}/manifests/${reference}`, {
|
||||||
|
method: 'PUT',
|
||||||
|
headers: {
|
||||||
|
'Content-Type': contentType,
|
||||||
|
'Content-Length': String(manifest.length),
|
||||||
|
},
|
||||||
|
body: manifest,
|
||||||
|
repo,
|
||||||
|
actions: 'pull,push',
|
||||||
|
credentials,
|
||||||
|
});
|
||||||
|
|
||||||
|
if (!resp.ok) {
|
||||||
|
const body = await resp.text();
|
||||||
|
throw new Error(`Failed to put manifest ${registry}/${repo}:${reference} (${resp.status}): ${body}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
const digest = resp.headers.get('docker-content-digest') || this.computeDigest(manifest);
|
||||||
|
return digest;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Copies a single-platform manifest and all its blobs from source to destination.
|
||||||
|
*/
|
||||||
|
private async copySingleManifest(
|
||||||
|
srcRegistry: string,
|
||||||
|
srcRepo: string,
|
||||||
|
destRegistry: string,
|
||||||
|
destRepo: string,
|
||||||
|
manifestDigest: string,
|
||||||
|
srcCredentials?: IRegistryCredentials | null,
|
||||||
|
destCredentials?: IRegistryCredentials | null,
|
||||||
|
): Promise<void> {
|
||||||
|
// Get the platform manifest
|
||||||
|
const { body: manifest, contentType, raw } = await this.getManifest(
|
||||||
|
srcRegistry, srcRepo, manifestDigest, srcCredentials,
|
||||||
|
);
|
||||||
|
|
||||||
|
// Copy config blob
|
||||||
|
if (manifest.config?.digest) {
|
||||||
|
logger.log('info', ` Copying config blob...`);
|
||||||
|
await this.copyBlob(
|
||||||
|
srcRegistry, srcRepo, destRegistry, destRepo,
|
||||||
|
manifest.config.digest, srcCredentials, destCredentials,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Copy layer blobs
|
||||||
|
const layers = manifest.layers || [];
|
||||||
|
for (let i = 0; i < layers.length; i++) {
|
||||||
|
const layer = layers[i];
|
||||||
|
logger.log('info', ` Copying layer ${i + 1}/${layers.length}...`);
|
||||||
|
await this.copyBlob(
|
||||||
|
srcRegistry, srcRepo, destRegistry, destRepo,
|
||||||
|
layer.digest, srcCredentials, destCredentials,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Push the platform manifest by digest
|
||||||
|
await this.putManifest(
|
||||||
|
destRegistry, destRepo, manifestDigest, raw, contentType, destCredentials,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Copies a complete image (single or multi-arch) from source to destination registry.
|
||||||
|
*
|
||||||
|
* @param srcRegistry - Source registry host (e.g., "localhost:5234")
|
||||||
|
* @param srcRepo - Source repository (e.g., "myapp")
|
||||||
|
* @param srcTag - Source tag (e.g., "v1.0.0")
|
||||||
|
* @param destRegistry - Destination registry host (e.g., "registry.gitlab.com")
|
||||||
|
* @param destRepo - Destination repository (e.g., "org/myapp")
|
||||||
|
* @param destTag - Destination tag (e.g., "v1.0.0" or "v1.0.0_arm64")
|
||||||
|
* @param credentials - Optional credentials for destination registry
|
||||||
|
*/
|
||||||
|
public async copyImage(
|
||||||
|
srcRegistry: string,
|
||||||
|
srcRepo: string,
|
||||||
|
srcTag: string,
|
||||||
|
destRegistry: string,
|
||||||
|
destRepo: string,
|
||||||
|
destTag: string,
|
||||||
|
credentials?: IRegistryCredentials | null,
|
||||||
|
): Promise<void> {
|
||||||
|
logger.log('info', `Copying ${srcRegistry}/${srcRepo}:${srcTag} -> ${destRegistry}/${destRepo}:${destTag}`);
|
||||||
|
|
||||||
|
// Source is always the local registry (no credentials needed)
|
||||||
|
const srcCredentials: IRegistryCredentials | null = null;
|
||||||
|
const destCredentials = credentials || RegistryCopy.getDockerConfigCredentials(destRegistry);
|
||||||
|
|
||||||
|
// Get the top-level manifest
|
||||||
|
const topManifest = await this.getManifest(srcRegistry, srcRepo, srcTag, srcCredentials);
|
||||||
|
const { body, contentType, raw } = topManifest;
|
||||||
|
|
||||||
|
const isManifestList =
|
||||||
|
contentType.includes('manifest.list') ||
|
||||||
|
contentType.includes('image.index') ||
|
||||||
|
body.manifests !== undefined;
|
||||||
|
|
||||||
|
if (isManifestList) {
|
||||||
|
// Multi-arch: copy each platform manifest + blobs, then push the manifest list
|
||||||
|
const platforms = (body.manifests || []) as any[];
|
||||||
|
logger.log('info', `Multi-arch manifest with ${platforms.length} platform(s)`);
|
||||||
|
|
||||||
|
for (const platformEntry of platforms) {
|
||||||
|
const platDesc = platformEntry.platform
|
||||||
|
? `${platformEntry.platform.os}/${platformEntry.platform.architecture}`
|
||||||
|
: platformEntry.digest;
|
||||||
|
logger.log('info', `Copying platform: ${platDesc}`);
|
||||||
|
|
||||||
|
await this.copySingleManifest(
|
||||||
|
srcRegistry, srcRepo, destRegistry, destRepo,
|
||||||
|
platformEntry.digest, srcCredentials, destCredentials,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Push the manifest list/index with the destination tag
|
||||||
|
const digest = await this.putManifest(
|
||||||
|
destRegistry, destRepo, destTag, raw, contentType, destCredentials,
|
||||||
|
);
|
||||||
|
logger.log('ok', `Pushed manifest list to ${destRegistry}/${destRepo}:${destTag} (${digest.substring(0, 19)}...)`);
|
||||||
|
} else {
|
||||||
|
// Single-platform manifest: copy blobs + push manifest
|
||||||
|
logger.log('info', 'Single-platform manifest');
|
||||||
|
|
||||||
|
// Copy config blob
|
||||||
|
if (body.config?.digest) {
|
||||||
|
logger.log('info', ' Copying config blob...');
|
||||||
|
await this.copyBlob(
|
||||||
|
srcRegistry, srcRepo, destRegistry, destRepo,
|
||||||
|
body.config.digest, srcCredentials, destCredentials,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Copy layer blobs
|
||||||
|
const layers = body.layers || [];
|
||||||
|
for (let i = 0; i < layers.length; i++) {
|
||||||
|
logger.log('info', ` Copying layer ${i + 1}/${layers.length}...`);
|
||||||
|
await this.copyBlob(
|
||||||
|
srcRegistry, srcRepo, destRegistry, destRepo,
|
||||||
|
layers[i].digest, srcCredentials, destCredentials,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Push the manifest with the destination tag
|
||||||
|
const digest = await this.putManifest(
|
||||||
|
destRegistry, destRepo, destTag, raw, contentType, destCredentials,
|
||||||
|
);
|
||||||
|
logger.log('ok', `Pushed manifest to ${destRegistry}/${destRepo}:${destTag} (${digest.substring(0, 19)}...)`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Computes sha256 digest of a buffer.
|
||||||
|
*/
|
||||||
|
private computeDigest(data: Buffer): string {
|
||||||
|
const crypto = require('crypto');
|
||||||
|
const hash = crypto.createHash('sha256').update(data).digest('hex');
|
||||||
|
return `sha256:${hash}`;
|
||||||
|
}
|
||||||
|
}
|
||||||
83
ts/classes.registrystorage.ts
Normal file
83
ts/classes.registrystorage.ts
Normal file
@@ -0,0 +1,83 @@
|
|||||||
|
import * as plugins from './tsdocker.plugins.js';
|
||||||
|
import { logger } from './tsdocker.logging.js';
|
||||||
|
import { DockerRegistry } from './classes.dockerregistry.js';
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Storage class for managing multiple Docker registries
|
||||||
|
*/
|
||||||
|
export class RegistryStorage {
|
||||||
|
public objectMap = new plugins.lik.ObjectMap<DockerRegistry>();
|
||||||
|
|
||||||
|
constructor() {
|
||||||
|
// Nothing here
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Adds a registry to the storage
|
||||||
|
*/
|
||||||
|
public addRegistry(registryArg: DockerRegistry): void {
|
||||||
|
this.objectMap.add(registryArg);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Gets a registry by its URL
|
||||||
|
*/
|
||||||
|
public getRegistryByUrl(registryUrlArg: string): DockerRegistry | undefined {
|
||||||
|
return this.objectMap.findSync((registryArg) => {
|
||||||
|
return registryArg.registryUrl === registryUrlArg;
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Gets all registries
|
||||||
|
*/
|
||||||
|
public getAllRegistries(): DockerRegistry[] {
|
||||||
|
return this.objectMap.getArray();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Logs in to all registries
|
||||||
|
*/
|
||||||
|
public async loginAll(): Promise<void> {
|
||||||
|
await this.objectMap.forEach(async (registryArg) => {
|
||||||
|
await registryArg.login();
|
||||||
|
});
|
||||||
|
logger.log('success', 'logged in successfully into all available DockerRegistries!');
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Logs out from all registries
|
||||||
|
*/
|
||||||
|
public async logoutAll(): Promise<void> {
|
||||||
|
await this.objectMap.forEach(async (registryArg) => {
|
||||||
|
await registryArg.logout();
|
||||||
|
});
|
||||||
|
logger.log('info', 'logged out from all DockerRegistries');
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Loads registries from environment variables
|
||||||
|
* Looks for DOCKER_REGISTRY_1, DOCKER_REGISTRY_2, etc. (pipe-delimited format)
|
||||||
|
* Or individual registries like DOCKER_REGISTRY_GITLAB_URL, etc.
|
||||||
|
*/
|
||||||
|
public loadFromEnv(): void {
|
||||||
|
// Check for numbered registry env vars (pipe-delimited format)
|
||||||
|
for (let i = 1; i <= 10; i++) {
|
||||||
|
const envVar = process.env[`DOCKER_REGISTRY_${i}`];
|
||||||
|
if (envVar) {
|
||||||
|
try {
|
||||||
|
const registry = DockerRegistry.fromEnvString(envVar);
|
||||||
|
this.addRegistry(registry);
|
||||||
|
} catch (err) {
|
||||||
|
logger.log('warn', `Failed to parse DOCKER_REGISTRY_${i}: ${(err as Error).message}`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for default registry
|
||||||
|
const defaultRegistry = DockerRegistry.fromEnv();
|
||||||
|
if (defaultRegistry) {
|
||||||
|
this.addRegistry(defaultRegistry);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
108
ts/classes.tsdockercache.ts
Normal file
108
ts/classes.tsdockercache.ts
Normal file
@@ -0,0 +1,108 @@
|
|||||||
|
import * as crypto from 'crypto';
|
||||||
|
import * as fs from 'fs';
|
||||||
|
import * as path from 'path';
|
||||||
|
import * as plugins from './tsdocker.plugins.js';
|
||||||
|
import * as paths from './tsdocker.paths.js';
|
||||||
|
import { logger } from './tsdocker.logging.js';
|
||||||
|
import type { ICacheData, ICacheEntry } from './interfaces/index.js';
|
||||||
|
|
||||||
|
// Shared shell runner used by the cache to probe the local docker daemon
// (docker image inspect).
const smartshellInstance = new plugins.smartshell.Smartshell({
  executor: 'bash',
});
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Manages content-hash-based build caching for Dockerfiles.
|
||||||
|
* Cache is stored in .nogit/tsdocker_support.json.
|
||||||
|
*/
|
||||||
|
export class TsDockerCache {
|
||||||
|
private cacheFilePath: string;
|
||||||
|
private data: ICacheData;
|
||||||
|
|
||||||
|
constructor() {
|
||||||
|
this.cacheFilePath = path.join(paths.cwd, '.nogit', 'tsdocker_support.json');
|
||||||
|
this.data = { version: 1, entries: {} };
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Loads cache data from disk. Falls back to empty cache on missing/corrupt file.
|
||||||
|
*/
|
||||||
|
public load(): void {
|
||||||
|
try {
|
||||||
|
const raw = fs.readFileSync(this.cacheFilePath, 'utf-8');
|
||||||
|
const parsed = JSON.parse(raw);
|
||||||
|
if (parsed && parsed.version === 1 && parsed.entries) {
|
||||||
|
this.data = parsed;
|
||||||
|
} else {
|
||||||
|
logger.log('warn', '[cache] Cache file has unexpected format, starting fresh');
|
||||||
|
this.data = { version: 1, entries: {} };
|
||||||
|
}
|
||||||
|
} catch {
|
||||||
|
// Missing or corrupt file — start fresh
|
||||||
|
this.data = { version: 1, entries: {} };
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Saves cache data to disk. Creates .nogit directory if needed.
|
||||||
|
*/
|
||||||
|
public save(): void {
|
||||||
|
const dir = path.dirname(this.cacheFilePath);
|
||||||
|
fs.mkdirSync(dir, { recursive: true });
|
||||||
|
fs.writeFileSync(this.cacheFilePath, JSON.stringify(this.data, null, 2), 'utf-8');
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Computes SHA-256 hash of Dockerfile content.
|
||||||
|
*/
|
||||||
|
public computeContentHash(content: string): string {
|
||||||
|
return crypto.createHash('sha256').update(content).digest('hex');
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Checks whether a build can be skipped for the given Dockerfile.
|
||||||
|
* Logs detailed diagnostics and returns true if the build should be skipped.
|
||||||
|
*/
|
||||||
|
public async shouldSkipBuild(cleanTag: string, content: string): Promise<boolean> {
|
||||||
|
const contentHash = this.computeContentHash(content);
|
||||||
|
const entry = this.data.entries[cleanTag];
|
||||||
|
|
||||||
|
if (!entry) {
|
||||||
|
logger.log('info', `[cache] ${cleanTag}: no cached entry, will build`);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
const hashMatch = entry.contentHash === contentHash;
|
||||||
|
logger.log('info', `[cache] ${cleanTag}: hash ${hashMatch ? 'matches' : 'changed'}`);
|
||||||
|
|
||||||
|
if (!hashMatch) {
|
||||||
|
logger.log('info', `[cache] ${cleanTag}: content changed, will build`);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Hash matches — verify the image still exists locally
|
||||||
|
const inspectResult = await smartshellInstance.exec(
|
||||||
|
`docker image inspect ${entry.imageId} > /dev/null 2>&1`
|
||||||
|
);
|
||||||
|
const available = inspectResult.exitCode === 0;
|
||||||
|
|
||||||
|
if (available) {
|
||||||
|
logger.log('info', `[cache] ${cleanTag}: cache hit, skipping build`);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.log('info', `[cache] ${cleanTag}: image no longer available, will build`);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Records a successful build in the cache.
|
||||||
|
*/
|
||||||
|
public recordBuild(cleanTag: string, content: string, imageId: string, buildTag: string): void {
|
||||||
|
this.data.entries[cleanTag] = {
|
||||||
|
contentHash: this.computeContentHash(content),
|
||||||
|
imageId,
|
||||||
|
buildTag,
|
||||||
|
timestamp: Date.now(),
|
||||||
|
};
|
||||||
|
}
|
||||||
|
}
|
||||||
514
ts/classes.tsdockermanager.ts
Normal file
514
ts/classes.tsdockermanager.ts
Normal file
@@ -0,0 +1,514 @@
|
|||||||
|
import * as plugins from './tsdocker.plugins.js';
|
||||||
|
import * as paths from './tsdocker.paths.js';
|
||||||
|
import { logger, formatDuration } from './tsdocker.logging.js';
|
||||||
|
import { Dockerfile } from './classes.dockerfile.js';
|
||||||
|
import { DockerRegistry } from './classes.dockerregistry.js';
|
||||||
|
import { RegistryStorage } from './classes.registrystorage.js';
|
||||||
|
import { TsDockerCache } from './classes.tsdockercache.js';
|
||||||
|
import { DockerContext } from './classes.dockercontext.js';
|
||||||
|
import { TsDockerSession } from './classes.tsdockersession.js';
|
||||||
|
import type { ITsDockerConfig, IBuildCommandOptions } from './interfaces/index.js';
|
||||||
|
|
||||||
|
// Shared bash-backed shell executor for all docker CLI invocations in this module.
const smartshellInstance = new plugins.smartshell.Smartshell({
  executor: 'bash',
});
|
||||||
|
|
||||||
|
/**
 * Main orchestrator class for Docker operations:
 * discovery, build, push, pull, test and session cleanup.
 */
export class TsDockerManager {
  // Registry credential store; populated from env/config in prepare().
  public registryStorage: RegistryStorage;
  public config: ITsDockerConfig;
  // Loaded npm name/version object, or null when unavailable.
  public projectInfo: any;
  public dockerContext: DockerContext;
  // Session identity; assigned in prepare() before any build/push work runs.
  public session!: TsDockerSession;
  private dockerfiles: Dockerfile[] = [];

  constructor(config: ITsDockerConfig) {
    this.config = config;
    this.registryStorage = new RegistryStorage();
    this.dockerContext = new DockerContext();
  }
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Prepares the manager by loading project info and registries
|
||||||
|
*/
|
||||||
|
public async prepare(contextArg?: string): Promise<void> {
|
||||||
|
// Detect Docker context
|
||||||
|
if (contextArg) {
|
||||||
|
this.dockerContext.setContext(contextArg);
|
||||||
|
}
|
||||||
|
await this.dockerContext.detect();
|
||||||
|
this.dockerContext.logContextInfo();
|
||||||
|
this.dockerContext.logRootlessWarnings();
|
||||||
|
|
||||||
|
// Load project info
|
||||||
|
try {
|
||||||
|
const projectinfoInstance = new plugins.projectinfo.ProjectInfo(paths.cwd);
|
||||||
|
this.projectInfo = {
|
||||||
|
npm: {
|
||||||
|
name: projectinfoInstance.npm.name,
|
||||||
|
version: projectinfoInstance.npm.version,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
} catch (err) {
|
||||||
|
logger.log('warn', 'Could not load project info');
|
||||||
|
this.projectInfo = null;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Load registries from environment
|
||||||
|
this.registryStorage.loadFromEnv();
|
||||||
|
|
||||||
|
// Add registries from config if specified
|
||||||
|
if (this.config.registries) {
|
||||||
|
for (const registryUrl of this.config.registries) {
|
||||||
|
// Check if already loaded from env
|
||||||
|
if (!this.registryStorage.getRegistryByUrl(registryUrl)) {
|
||||||
|
// Try to load credentials for this registry from env
|
||||||
|
const envVarName = registryUrl.replace(/\./g, '_').toUpperCase();
|
||||||
|
const envString = process.env[`DOCKER_REGISTRY_${envVarName}`];
|
||||||
|
if (envString) {
|
||||||
|
try {
|
||||||
|
const registry = DockerRegistry.fromEnvString(envString);
|
||||||
|
this.registryStorage.addRegistry(registry);
|
||||||
|
} catch (err) {
|
||||||
|
logger.log('warn', `Could not load credentials for registry ${registryUrl}`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create session identity (unique ports, names for CI concurrency)
|
||||||
|
this.session = await TsDockerSession.create();
|
||||||
|
|
||||||
|
logger.log('info', `Prepared TsDockerManager with ${this.registryStorage.getAllRegistries().length} registries`);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Logs in to all configured registries
|
||||||
|
*/
|
||||||
|
public async login(): Promise<void> {
|
||||||
|
if (this.registryStorage.getAllRegistries().length === 0) {
|
||||||
|
logger.log('warn', 'No registries configured');
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
await this.registryStorage.loginAll();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Discovers and sorts Dockerfiles in the current directory
|
||||||
|
*/
|
||||||
|
public async discoverDockerfiles(): Promise<Dockerfile[]> {
|
||||||
|
this.dockerfiles = await Dockerfile.readDockerfiles(this);
|
||||||
|
this.dockerfiles = await Dockerfile.sortDockerfiles(this.dockerfiles);
|
||||||
|
this.dockerfiles = await Dockerfile.mapDockerfiles(this.dockerfiles);
|
||||||
|
// Inject session into each Dockerfile
|
||||||
|
for (const df of this.dockerfiles) {
|
||||||
|
df.session = this.session;
|
||||||
|
}
|
||||||
|
return this.dockerfiles;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Builds discovered Dockerfiles in dependency order.
 * When options.patterns is provided, only matching Dockerfiles (and their
 * local base-image dependencies) are built.
 *
 * Three modes:
 *  - standard: delegates everything to Dockerfile.buildDockerfiles
 *  - cached sequential: per-file hash cache decides skip/build
 *  - cached parallel: same cache, level-by-level with bounded concurrency
 */
public async build(options?: IBuildCommandOptions): Promise<Dockerfile[]> {
  if (this.dockerfiles.length === 0) {
    await this.discoverDockerfiles();
  }

  if (this.dockerfiles.length === 0) {
    logger.log('warn', 'No Dockerfiles found');
    return [];
  }

  // Determine which Dockerfiles to build
  let toBuild = this.dockerfiles;

  if (options?.patterns && options.patterns.length > 0) {
    // Filter to matching Dockerfiles (exact basename match, or glob via * / ?)
    const matched = this.dockerfiles.filter((df) => {
      const basename = plugins.path.basename(df.filePath);
      return options.patterns!.some((pattern) => {
        if (pattern.includes('*') || pattern.includes('?')) {
          // Convert glob pattern to regex
          const regexStr = '^' + pattern.replace(/\*/g, '.*').replace(/\?/g, '.') + '$';
          return new RegExp(regexStr).test(basename);
        }
        return basename === pattern;
      });
    });

    if (matched.length === 0) {
      logger.log('warn', `No Dockerfiles matched patterns: ${options.patterns.join(', ')}`);
      return [];
    }

    // Resolve dependency chain and preserve topological order
    toBuild = this.resolveWithDependencies(matched, this.dockerfiles);
    logger.log('info', `Matched ${matched.length} Dockerfile(s), building ${toBuild.length} (including dependencies)`);
  }

  // Check if buildx is needed (explicit platform override, or >1 configured platform)
  const useBuildx = !!(options?.platform || (this.config.platforms && this.config.platforms.length > 1));
  if (useBuildx) {
    await this.ensureBuildx();
  }

  logger.log('info', '');
  logger.log('info', '=== BUILD PHASE ===');

  if (useBuildx) {
    const platforms = options?.platform || this.config.platforms!.join(', ');
    logger.log('info', `Build mode: buildx multi-platform [${platforms}]`);
  } else {
    logger.log('info', 'Build mode: standard docker build');
  }

  const localDeps = toBuild.filter(df => df.localBaseImageDependent);
  if (localDeps.length > 0) {
    logger.log('info', `Local dependencies: ${localDeps.map(df => `${df.cleanTag} -> ${df.localBaseDockerfile?.cleanTag}`).join(', ')}`);
  }

  if (options?.noCache) {
    logger.log('info', 'Cache: disabled (--no-cache)');
  }

  if (options?.parallel) {
    const concurrency = options.parallelConcurrency ?? 4;
    const levels = Dockerfile.computeLevels(toBuild);
    logger.log('info', `Parallel build: ${levels.length} level(s), concurrency ${concurrency}`);
    for (let l = 0; l < levels.length; l++) {
      const level = levels[l];
      logger.log('info', ` Level ${l} (${level.length}): ${level.map(df => df.cleanTag).join(', ')}`);
    }
  }

  logger.log('info', `Building ${toBuild.length} Dockerfile(s)...`);

  if (options?.cached) {
    // === CACHED MODE: skip builds for unchanged Dockerfiles ===
    logger.log('info', '(cached mode active)');
    const cache = new TsDockerCache();
    cache.load();

    const total = toBuild.length;
    const overallStart = Date.now();
    // Local registry is needed so images can be shared between levels/builds.
    await Dockerfile.startLocalRegistry(this.session, this.dockerContext.contextInfo?.isRootless);

    try {
      if (options?.parallel) {
        // === PARALLEL CACHED MODE ===
        const concurrency = options.parallelConcurrency ?? 4;
        const levels = Dockerfile.computeLevels(toBuild);

        let built = 0;
        for (let l = 0; l < levels.length; l++) {
          const level = levels[l];
          logger.log('info', `--- Level ${l}: building ${level.length} image(s) in parallel ---`);

          const tasks = level.map((df) => {
            // Progress index is assigned at task-creation time (stable numbering).
            const myIndex = ++built;
            return async () => {
              const progress = `(${myIndex}/${total})`;
              const skip = await cache.shouldSkipBuild(df.cleanTag, df.content);

              if (skip) {
                logger.log('ok', `${progress} Skipped ${df.cleanTag} (cached)`);
              } else {
                logger.log('info', `${progress} Building ${df.cleanTag}...`);
                const elapsed = await df.build({
                  platform: options?.platform,
                  timeout: options?.timeout,
                  noCache: options?.noCache,
                  verbose: options?.verbose,
                });
                logger.log('ok', `${progress} Built ${df.cleanTag} in ${formatDuration(elapsed)}`);
                const imageId = await df.getId();
                cache.recordBuild(df.cleanTag, df.content, imageId, df.buildTag);
              }
              return df;
            };
          });

          await Dockerfile.runWithConcurrency(tasks, concurrency);

          // After the entire level completes, push all to local registry + tag for deps
          for (const df of level) {
            const dependentBaseImages = new Set<string>();
            for (const other of toBuild) {
              if (other.localBaseDockerfile === df && other.baseImage !== df.buildTag) {
                dependentBaseImages.add(other.baseImage);
              }
            }
            for (const fullTag of dependentBaseImages) {
              logger.log('info', `Tagging ${df.buildTag} as ${fullTag} for local dependency resolution`);
              await smartshellInstance.exec(`docker tag ${df.buildTag} ${fullTag}`);
            }
            // Push ALL images to local registry (skip if already pushed via buildx)
            if (!df.localRegistryTag) {
              await Dockerfile.pushToLocalRegistry(this.session, df);
            }
          }
        }
      } else {
        // === SEQUENTIAL CACHED MODE ===
        for (let i = 0; i < total; i++) {
          const dockerfileArg = toBuild[i];
          const progress = `(${i + 1}/${total})`;
          const skip = await cache.shouldSkipBuild(dockerfileArg.cleanTag, dockerfileArg.content);

          if (skip) {
            logger.log('ok', `${progress} Skipped ${dockerfileArg.cleanTag} (cached)`);
          } else {
            logger.log('info', `${progress} Building ${dockerfileArg.cleanTag}...`);
            const elapsed = await dockerfileArg.build({
              platform: options?.platform,
              timeout: options?.timeout,
              noCache: options?.noCache,
              verbose: options?.verbose,
            });
            logger.log('ok', `${progress} Built ${dockerfileArg.cleanTag} in ${formatDuration(elapsed)}`);
            const imageId = await dockerfileArg.getId();
            cache.recordBuild(dockerfileArg.cleanTag, dockerfileArg.content, imageId, dockerfileArg.buildTag);
          }

          // Tag for dependents IMMEDIATELY (not after all builds)
          const dependentBaseImages = new Set<string>();
          for (const other of toBuild) {
            if (other.localBaseDockerfile === dockerfileArg && other.baseImage !== dockerfileArg.buildTag) {
              dependentBaseImages.add(other.baseImage);
            }
          }
          for (const fullTag of dependentBaseImages) {
            logger.log('info', `Tagging ${dockerfileArg.buildTag} as ${fullTag} for local dependency resolution`);
            await smartshellInstance.exec(`docker tag ${dockerfileArg.buildTag} ${fullTag}`);
          }

          // Push ALL images to local registry (skip if already pushed via buildx)
          if (!dockerfileArg.localRegistryTag) {
            await Dockerfile.pushToLocalRegistry(this.session, dockerfileArg);
          }
        }
      }
    } finally {
      await Dockerfile.stopLocalRegistry(this.session);
    }

    logger.log('info', `Total build time: ${formatDuration(Date.now() - overallStart)}`);
    cache.save();
  } else {
    // === STANDARD MODE: build all via static helper ===
    await Dockerfile.buildDockerfiles(toBuild, this.session, {
      platform: options?.platform,
      timeout: options?.timeout,
      noCache: options?.noCache,
      verbose: options?.verbose,
      isRootless: this.dockerContext.contextInfo?.isRootless,
      parallel: options?.parallel,
      parallelConcurrency: options?.parallelConcurrency,
    });
  }

  logger.log('success', 'All Dockerfiles built successfully');

  return toBuild;
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Resolves a set of target Dockerfiles to include all their local base image dependencies,
|
||||||
|
* preserving the original topological build order.
|
||||||
|
*/
|
||||||
|
private resolveWithDependencies(targets: Dockerfile[], allSorted: Dockerfile[]): Dockerfile[] {
|
||||||
|
const needed = new Set<Dockerfile>();
|
||||||
|
const addWithDeps = (df: Dockerfile) => {
|
||||||
|
if (needed.has(df)) return;
|
||||||
|
needed.add(df);
|
||||||
|
if (df.localBaseImageDependent && df.localBaseDockerfile) {
|
||||||
|
addWithDeps(df.localBaseDockerfile);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
for (const df of targets) addWithDeps(df);
|
||||||
|
return allSorted.filter((df) => needed.has(df));
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Ensures Docker buildx is set up for multi-architecture builds
|
||||||
|
*/
|
||||||
|
private async ensureBuildx(): Promise<void> {
|
||||||
|
const builderName = this.dockerContext.getBuilderName() + (this.session?.config.builderSuffix || '');
|
||||||
|
const platforms = this.config.platforms?.join(', ') || 'default';
|
||||||
|
logger.log('info', `Setting up Docker buildx [${platforms}]...`);
|
||||||
|
logger.log('info', `Builder: ${builderName}`);
|
||||||
|
const inspectResult = await smartshellInstance.exec(`docker buildx inspect ${builderName} 2>/dev/null`);
|
||||||
|
|
||||||
|
if (inspectResult.exitCode !== 0) {
|
||||||
|
logger.log('info', 'Creating new buildx builder with host network...');
|
||||||
|
await smartshellInstance.exec(
|
||||||
|
`docker buildx create --name ${builderName} --driver docker-container --driver-opt network=host --use`
|
||||||
|
);
|
||||||
|
await smartshellInstance.exec('docker buildx inspect --bootstrap');
|
||||||
|
} else {
|
||||||
|
const inspectOutput = inspectResult.stdout || '';
|
||||||
|
if (!inspectOutput.includes('network=host')) {
|
||||||
|
logger.log('info', 'Recreating buildx builder with host network (migration)...');
|
||||||
|
await smartshellInstance.exec(`docker buildx rm ${builderName} 2>/dev/null`);
|
||||||
|
await smartshellInstance.exec(
|
||||||
|
`docker buildx create --name ${builderName} --driver docker-container --driver-opt network=host --use`
|
||||||
|
);
|
||||||
|
await smartshellInstance.exec('docker buildx inspect --bootstrap');
|
||||||
|
} else {
|
||||||
|
await smartshellInstance.exec(`docker buildx use ${builderName}`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
logger.log('ok', `Docker buildx ready (builder: ${builderName}, platforms: ${platforms})`);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Pushes all built images to specified registries
|
||||||
|
*/
|
||||||
|
public async push(registryUrls?: string[]): Promise<void> {
|
||||||
|
if (this.dockerfiles.length === 0) {
|
||||||
|
await this.discoverDockerfiles();
|
||||||
|
}
|
||||||
|
|
||||||
|
if (this.dockerfiles.length === 0) {
|
||||||
|
logger.log('warn', 'No Dockerfiles found to push');
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Determine which registries to push to
|
||||||
|
let registriesToPush: DockerRegistry[] = [];
|
||||||
|
|
||||||
|
if (registryUrls && registryUrls.length > 0) {
|
||||||
|
// Push to specified registries
|
||||||
|
for (const url of registryUrls) {
|
||||||
|
const registry = this.registryStorage.getRegistryByUrl(url);
|
||||||
|
if (registry) {
|
||||||
|
registriesToPush.push(registry);
|
||||||
|
} else {
|
||||||
|
logger.log('warn', `Registry ${url} not found in storage`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Push to all configured registries
|
||||||
|
registriesToPush = this.registryStorage.getAllRegistries();
|
||||||
|
}
|
||||||
|
|
||||||
|
if (registriesToPush.length === 0) {
|
||||||
|
logger.log('warn', 'No registries available to push to');
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start local registry (reads from persistent .nogit/docker-registry/)
|
||||||
|
await Dockerfile.startLocalRegistry(this.session, this.dockerContext.contextInfo?.isRootless);
|
||||||
|
try {
|
||||||
|
// Push each Dockerfile to each registry via OCI copy
|
||||||
|
for (const dockerfile of this.dockerfiles) {
|
||||||
|
for (const registry of registriesToPush) {
|
||||||
|
await dockerfile.push(registry);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} finally {
|
||||||
|
await Dockerfile.stopLocalRegistry(this.session);
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.log('success', 'All images pushed successfully');
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Pulls images from a specified registry
|
||||||
|
*/
|
||||||
|
public async pull(registryUrl: string): Promise<void> {
|
||||||
|
if (this.dockerfiles.length === 0) {
|
||||||
|
await this.discoverDockerfiles();
|
||||||
|
}
|
||||||
|
|
||||||
|
const registry = this.registryStorage.getRegistryByUrl(registryUrl);
|
||||||
|
if (!registry) {
|
||||||
|
throw new Error(`Registry ${registryUrl} not found`);
|
||||||
|
}
|
||||||
|
|
||||||
|
for (const dockerfile of this.dockerfiles) {
|
||||||
|
await dockerfile.pull(registry);
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.log('success', 'All images pulled successfully');
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Runs tests for all Dockerfiles.
|
||||||
|
* Starts the local registry so multi-platform images can be auto-pulled.
|
||||||
|
*/
|
||||||
|
public async test(): Promise<void> {
|
||||||
|
if (this.dockerfiles.length === 0) {
|
||||||
|
await this.discoverDockerfiles();
|
||||||
|
}
|
||||||
|
|
||||||
|
if (this.dockerfiles.length === 0) {
|
||||||
|
logger.log('warn', 'No Dockerfiles found to test');
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.log('info', '');
|
||||||
|
logger.log('info', '=== TEST PHASE ===');
|
||||||
|
|
||||||
|
await Dockerfile.startLocalRegistry(this.session, this.dockerContext.contextInfo?.isRootless);
|
||||||
|
try {
|
||||||
|
await Dockerfile.testDockerfiles(this.dockerfiles);
|
||||||
|
} finally {
|
||||||
|
await Dockerfile.stopLocalRegistry(this.session);
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.log('success', 'All tests completed');
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Lists all discovered Dockerfiles and their info
|
||||||
|
*/
|
||||||
|
public async list(): Promise<Dockerfile[]> {
|
||||||
|
if (this.dockerfiles.length === 0) {
|
||||||
|
await this.discoverDockerfiles();
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.log('info', '');
|
||||||
|
logger.log('info', 'Discovered Dockerfiles:');
|
||||||
|
logger.log('info', '========================');
|
||||||
|
logger.log('info', '');
|
||||||
|
|
||||||
|
for (let i = 0; i < this.dockerfiles.length; i++) {
|
||||||
|
const df = this.dockerfiles[i];
|
||||||
|
logger.log('info', `${i + 1}. ${df.filePath}`);
|
||||||
|
logger.log('info', ` Tag: ${df.cleanTag}`);
|
||||||
|
logger.log('info', ` Base Image: ${df.baseImage}`);
|
||||||
|
logger.log('info', ` Version: ${df.version}`);
|
||||||
|
if (df.localBaseImageDependent) {
|
||||||
|
logger.log('info', ` Depends on: ${df.localBaseDockerfile?.cleanTag}`);
|
||||||
|
}
|
||||||
|
logger.log('info', '');
|
||||||
|
}
|
||||||
|
|
||||||
|
return this.dockerfiles;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Gets the cached Dockerfiles (after discovery)
|
||||||
|
*/
|
||||||
|
public getDockerfiles(): Dockerfile[] {
|
||||||
|
return this.dockerfiles;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Cleans up session-specific resources.
|
||||||
|
* In CI, removes the session-specific buildx builder to avoid accumulation.
|
||||||
|
*/
|
||||||
|
public async cleanup(): Promise<void> {
|
||||||
|
if (this.session?.config.isCI && this.session.config.builderSuffix) {
|
||||||
|
const builderName = this.dockerContext.getBuilderName() + this.session.config.builderSuffix;
|
||||||
|
logger.log('info', `CI cleanup: removing buildx builder ${builderName}`);
|
||||||
|
await smartshellInstance.execSilent(`docker buildx rm ${builderName} 2>/dev/null || true`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
107
ts/classes.tsdockersession.ts
Normal file
107
ts/classes.tsdockersession.ts
Normal file
@@ -0,0 +1,107 @@
|
|||||||
|
import * as crypto from 'crypto';
|
||||||
|
import * as net from 'net';
|
||||||
|
import { logger } from './tsdocker.logging.js';
|
||||||
|
|
||||||
|
export interface ISessionConfig {
  sessionId: string; // short hex id, or the TSDOCKER_SESSION_ID override
  registryPort: number; // TCP port of the session-local registry
  registryHost: string; // e.g. `localhost:<registryPort>`
  registryContainerName: string; // docker container name: tsdocker-registry-<sessionId>
  isCI: boolean; // true when a CI system was detected from env vars
  ciSystem: string | null; // which CI system, or null outside CI
  builderSuffix: string; // `-<sessionId>` in CI, '' locally (persistent builder)
}
|
||||||
|
|
||||||
|
/**
 * Per-invocation session identity for tsdocker.
 * Generates unique ports, container names, and builder names so that
 * concurrent CI jobs on the same Docker host don't collide.
 *
 * In local (non-CI) dev the builder suffix is empty, preserving the
 * persistent builder behavior.
 */
export class TsDockerSession {
  public config: ISessionConfig;

  // Private: instances are created via the async factory `create()`.
  private constructor(config: ISessionConfig) {
    this.config = config;
  }
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new session. Allocates a dynamic port unless overridden
|
||||||
|
* via `TSDOCKER_REGISTRY_PORT`.
|
||||||
|
*/
|
||||||
|
public static async create(): Promise<TsDockerSession> {
|
||||||
|
const sessionId =
|
||||||
|
process.env.TSDOCKER_SESSION_ID || crypto.randomBytes(4).toString('hex');
|
||||||
|
|
||||||
|
const registryPort = await TsDockerSession.allocatePort();
|
||||||
|
const registryHost = `localhost:${registryPort}`;
|
||||||
|
const registryContainerName = `tsdocker-registry-${sessionId}`;
|
||||||
|
|
||||||
|
const { isCI, ciSystem } = TsDockerSession.detectCI();
|
||||||
|
const builderSuffix = isCI ? `-${sessionId}` : '';
|
||||||
|
|
||||||
|
const config: ISessionConfig = {
|
||||||
|
sessionId,
|
||||||
|
registryPort,
|
||||||
|
registryHost,
|
||||||
|
registryContainerName,
|
||||||
|
isCI,
|
||||||
|
ciSystem,
|
||||||
|
builderSuffix,
|
||||||
|
};
|
||||||
|
|
||||||
|
const session = new TsDockerSession(config);
|
||||||
|
session.logInfo();
|
||||||
|
return session;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Allocates a free TCP port. Respects `TSDOCKER_REGISTRY_PORT` override.
|
||||||
|
*/
|
||||||
|
public static async allocatePort(): Promise<number> {
|
||||||
|
const envPort = process.env.TSDOCKER_REGISTRY_PORT;
|
||||||
|
if (envPort) {
|
||||||
|
const parsed = parseInt(envPort, 10);
|
||||||
|
if (!isNaN(parsed) && parsed > 0) {
|
||||||
|
return parsed;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return new Promise<number>((resolve, reject) => {
|
||||||
|
const srv = net.createServer();
|
||||||
|
srv.listen(0, '127.0.0.1', () => {
|
||||||
|
const addr = srv.address() as net.AddressInfo;
|
||||||
|
const port = addr.port;
|
||||||
|
srv.close((err) => {
|
||||||
|
if (err) reject(err);
|
||||||
|
else resolve(port);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
srv.on('error', reject);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Detects whether we're running inside a CI system.
|
||||||
|
*/
|
||||||
|
private static detectCI(): { isCI: boolean; ciSystem: string | null } {
|
||||||
|
if (process.env.GITEA_ACTIONS) return { isCI: true, ciSystem: 'gitea-actions' };
|
||||||
|
if (process.env.GITHUB_ACTIONS) return { isCI: true, ciSystem: 'github-actions' };
|
||||||
|
if (process.env.GITLAB_CI) return { isCI: true, ciSystem: 'gitlab-ci' };
|
||||||
|
if (process.env.CI) return { isCI: true, ciSystem: 'generic' };
|
||||||
|
return { isCI: false, ciSystem: null };
|
||||||
|
}
|
||||||
|
|
||||||
|
private logInfo(): void {
|
||||||
|
const c = this.config;
|
||||||
|
logger.log('info', '=== TSDOCKER SESSION ===');
|
||||||
|
logger.log('info', `Session ID: ${c.sessionId}`);
|
||||||
|
logger.log('info', `Registry: ${c.registryHost} (container: ${c.registryContainerName})`);
|
||||||
|
if (c.isCI) {
|
||||||
|
logger.log('info', `CI detected: ${c.ciSystem}`);
|
||||||
|
logger.log('info', `Builder suffix: ${c.builderSuffix}`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,5 +1,4 @@
|
|||||||
import './analytics';
|
import * as plugins from './tsdocker.plugins.js';
|
||||||
import * as plugins from './tsdocker.plugins';
|
import * as cli from './tsdocker.cli.js';
|
||||||
import * as cli from './tsdocker.cli';
|
|
||||||
|
|
||||||
cli.run();
|
cli.run();
|
||||||
|
|||||||
105
ts/interfaces/index.ts
Normal file
105
ts/interfaces/index.ts
Normal file
@@ -0,0 +1,105 @@
|
|||||||
|
/**
 * Configuration interface for tsdocker
 * Extends legacy config with new Docker build capabilities
 */
export interface ITsDockerConfig {
  // Legacy (backward compatible)
  baseImage: string; // image used for the legacy run-in-container flow
  command: string; // command executed inside that container
  dockerSock: boolean; // mount the host docker socket into the container
  keyValueObject: { [key: string]: any }; // arbitrary key/value passthrough

  // New Docker build config
  registries?: string[]; // registry URLs; credentials resolved in prepare()
  registryRepoMap?: { [registry: string]: string }; // presumably registry -> repo path override; confirm with consumers
  buildArgEnvMap?: { [dockerArg: string]: string }; // presumably docker ARG name -> env var name; confirm with consumers
  platforms?: string[]; // ['linux/amd64', 'linux/arm64']; >1 entry triggers buildx
  push?: boolean; // push after build
  testDir?: string; // NOTE(review): looks like the image-test scripts dir — confirm
}
|
||||||
|
|
||||||
|
/**
 * Options for constructing a DockerRegistry
 */
export interface IDockerRegistryOptions {
  registryUrl: string;
  username: string;
  password: string;
}

/**
 * Information about a discovered Dockerfile
 */
export interface IDockerfileInfo {
  filePath: string;
  fileName: string;
  version: string;
  baseImage: string;
  buildTag: string;
  localBaseImageDependent: boolean; // true when baseImage is produced by another local Dockerfile
}

/**
 * Options for creating a Dockerfile instance
 */
export interface IDockerfileOptions {
  filePath?: string; // path to an existing Dockerfile on disk
  fileContents?: string; // inline Dockerfile contents
  read?: boolean; // presumably: read contents from filePath — confirm with Dockerfile ctor
}
|
||||||
|
|
||||||
|
/**
 * Result from a Docker build operation
 */
export interface IBuildResult {
  success: boolean;
  tag: string;
  duration?: number; // elapsed build time; unit not specified here — confirm at producers
  error?: string; // presumably set when success is false
}

/**
 * Result from a Docker push operation
 */
export interface IPushResult {
  success: boolean;
  registry: string;
  tag: string;
  digest?: string; // image digest, when the registry reports one
  error?: string; // presumably set when success is false
}
|
||||||
|
|
||||||
|
/**
 * Options for the build command
 */
export interface IBuildCommandOptions {
  patterns?: string[]; // Dockerfile name patterns (e.g., ['Dockerfile_base', 'Dockerfile_*'])
  platform?: string; // Single platform override (e.g., 'linux/arm64')
  timeout?: number; // Build timeout in seconds
  noCache?: boolean; // Force rebuild without Docker layer cache (--no-cache)
  cached?: boolean; // Skip builds when Dockerfile content hasn't changed
  verbose?: boolean; // Stream raw docker build output (default: silent)
  context?: string; // Explicit Docker context name (--context flag)
  parallel?: boolean; // Enable parallel builds within dependency levels
  parallelConcurrency?: number; // Max concurrent builds per level (default 4)
}
|
||||||
|
|
||||||
|
/**
 * One cached build record, keyed by clean tag inside ICacheData.entries.
 */
export interface ICacheEntry {
  contentHash: string; // SHA-256 hex of Dockerfile content
  imageId: string; // Docker image ID (sha256:...)
  buildTag: string;
  timestamp: number; // Unix ms
}

/**
 * On-disk schema of the build cache (.nogit/tsdocker_support.json).
 */
export interface ICacheData {
  version: 1;
  entries: { [cleanTag: string]: ICacheEntry };
}

/**
 * Description of the active Docker context/endpoint.
 */
export interface IDockerContextInfo {
  name: string; // 'default', 'rootless', 'colima', etc.
  endpoint: string; // 'unix:///var/run/docker.sock'
  isRootless: boolean;
  dockerHost?: string; // value of DOCKER_HOST env var, if set
  topology?: 'socket-mount' | 'dind' | 'local';
}
|
||||||
@@ -1,17 +1,23 @@
|
|||||||
import * as plugins from './tsdocker.plugins';
|
import * as plugins from './tsdocker.plugins.js';
|
||||||
import * as paths from './tsdocker.paths';
|
import * as paths from './tsdocker.paths.js';
|
||||||
|
|
||||||
// modules
|
// modules
|
||||||
import * as ConfigModule from './tsdocker.config';
|
import * as ConfigModule from './tsdocker.config.js';
|
||||||
import * as DockerModule from './tsdocker.docker';
|
import * as DockerModule from './tsdocker.docker.js';
|
||||||
|
|
||||||
import { logger, ora } from './tsdocker.logging';
|
import { logger, ora } from './tsdocker.logging.js';
|
||||||
|
import { TsDockerManager } from './classes.tsdockermanager.js';
|
||||||
|
import { DockerContext } from './classes.dockercontext.js';
|
||||||
|
import type { IBuildCommandOptions } from './interfaces/index.js';
|
||||||
|
import { commitinfo } from './00_commitinfo_data.js';
|
||||||
|
|
||||||
const tsdockerCli = new plugins.smartcli.Smartcli();
|
const tsdockerCli = new plugins.smartcli.Smartcli();
|
||||||
|
tsdockerCli.addVersion(commitinfo.version);
|
||||||
|
|
||||||
export let run = () => {
|
export let run = () => {
|
||||||
tsdockerCli.standardTask().subscribe(async argvArg => {
|
// Default command: run tests in container (legacy behavior)
|
||||||
let configArg = await ConfigModule.run().then(DockerModule.run);
|
tsdockerCli.standardCommand().subscribe(async argvArg => {
|
||||||
|
const configArg = await ConfigModule.run().then(DockerModule.run);
|
||||||
if (configArg.exitCode === 0) {
|
if (configArg.exitCode === 0) {
|
||||||
logger.log('success', 'container ended all right!');
|
logger.log('success', 'container ended all right!');
|
||||||
} else {
|
} else {
|
||||||
@@ -20,6 +26,201 @@ export let run = () => {
|
|||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Build Dockerfiles in dependency order
|
||||||
|
* Usage: tsdocker build [Dockerfile_patterns...] [--platform=linux/arm64] [--timeout=600]
|
||||||
|
*/
|
||||||
|
tsdockerCli.addCommand('build').subscribe(async argvArg => {
|
||||||
|
try {
|
||||||
|
const config = await ConfigModule.run();
|
||||||
|
const manager = new TsDockerManager(config);
|
||||||
|
await manager.prepare(argvArg.context as string | undefined);
|
||||||
|
|
||||||
|
const buildOptions: IBuildCommandOptions = {};
|
||||||
|
const patterns = argvArg._.slice(1) as string[];
|
||||||
|
if (patterns.length > 0) {
|
||||||
|
buildOptions.patterns = patterns;
|
||||||
|
}
|
||||||
|
if (argvArg.platform) {
|
||||||
|
buildOptions.platform = argvArg.platform as string;
|
||||||
|
}
|
||||||
|
if (argvArg.timeout) {
|
||||||
|
buildOptions.timeout = Number(argvArg.timeout);
|
||||||
|
}
|
||||||
|
if (argvArg.cache === false) {
|
||||||
|
buildOptions.noCache = true;
|
||||||
|
}
|
||||||
|
if (argvArg.cached) {
|
||||||
|
buildOptions.cached = true;
|
||||||
|
}
|
||||||
|
if (argvArg.verbose) {
|
||||||
|
buildOptions.verbose = true;
|
||||||
|
}
|
||||||
|
if (argvArg.parallel) {
|
||||||
|
buildOptions.parallel = true;
|
||||||
|
if (typeof argvArg.parallel === 'number') {
|
||||||
|
buildOptions.parallelConcurrency = argvArg.parallel;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
await manager.build(buildOptions);
|
||||||
|
await manager.cleanup();
|
||||||
|
logger.log('success', 'Build completed successfully');
|
||||||
|
} catch (err) {
|
||||||
|
logger.log('error', `Build failed: ${(err as Error).message}`);
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Push built images to configured registries
|
||||||
|
* Usage: tsdocker push [Dockerfile_patterns...] [--platform=linux/arm64] [--timeout=600] [--registry=url]
|
||||||
|
*/
|
||||||
|
tsdockerCli.addCommand('push').subscribe(async argvArg => {
|
||||||
|
try {
|
||||||
|
const config = await ConfigModule.run();
|
||||||
|
const manager = new TsDockerManager(config);
|
||||||
|
await manager.prepare(argvArg.context as string | undefined);
|
||||||
|
|
||||||
|
// Login first
|
||||||
|
await manager.login();
|
||||||
|
|
||||||
|
// Parse build options from positional args and flags
|
||||||
|
const buildOptions: IBuildCommandOptions = {};
|
||||||
|
const patterns = argvArg._.slice(1) as string[];
|
||||||
|
if (patterns.length > 0) {
|
||||||
|
buildOptions.patterns = patterns;
|
||||||
|
}
|
||||||
|
if (argvArg.platform) {
|
||||||
|
buildOptions.platform = argvArg.platform as string;
|
||||||
|
}
|
||||||
|
if (argvArg.timeout) {
|
||||||
|
buildOptions.timeout = Number(argvArg.timeout);
|
||||||
|
}
|
||||||
|
if (argvArg.cache === false) {
|
||||||
|
buildOptions.noCache = true;
|
||||||
|
}
|
||||||
|
if (argvArg.verbose) {
|
||||||
|
buildOptions.verbose = true;
|
||||||
|
}
|
||||||
|
if (argvArg.parallel) {
|
||||||
|
buildOptions.parallel = true;
|
||||||
|
if (typeof argvArg.parallel === 'number') {
|
||||||
|
buildOptions.parallelConcurrency = argvArg.parallel;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build images first (if not already built)
|
||||||
|
await manager.build(buildOptions);
|
||||||
|
|
||||||
|
// Get registry from --registry flag
|
||||||
|
const registryArg = argvArg.registry as string | undefined;
|
||||||
|
const registries = registryArg ? [registryArg] : undefined;
|
||||||
|
|
||||||
|
await manager.push(registries);
|
||||||
|
await manager.cleanup();
|
||||||
|
logger.log('success', 'Push completed successfully');
|
||||||
|
} catch (err) {
|
||||||
|
logger.log('error', `Push failed: ${(err as Error).message}`);
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Pull images from a specified registry
|
||||||
|
*/
|
||||||
|
tsdockerCli.addCommand('pull').subscribe(async argvArg => {
|
||||||
|
try {
|
||||||
|
const registryArg = argvArg._[1]; // e.g., tsdocker pull registry.gitlab.com
|
||||||
|
if (!registryArg) {
|
||||||
|
logger.log('error', 'Registry URL required. Usage: tsdocker pull <registry-url>');
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
const config = await ConfigModule.run();
|
||||||
|
const manager = new TsDockerManager(config);
|
||||||
|
await manager.prepare(argvArg.context as string | undefined);
|
||||||
|
|
||||||
|
// Login first
|
||||||
|
await manager.login();
|
||||||
|
|
||||||
|
await manager.pull(registryArg);
|
||||||
|
logger.log('success', 'Pull completed successfully');
|
||||||
|
} catch (err) {
|
||||||
|
logger.log('error', `Pull failed: ${(err as Error).message}`);
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Run container tests for all Dockerfiles
|
||||||
|
*/
|
||||||
|
tsdockerCli.addCommand('test').subscribe(async argvArg => {
|
||||||
|
try {
|
||||||
|
const config = await ConfigModule.run();
|
||||||
|
const manager = new TsDockerManager(config);
|
||||||
|
await manager.prepare(argvArg.context as string | undefined);
|
||||||
|
|
||||||
|
// Build images first
|
||||||
|
const buildOptions: IBuildCommandOptions = {};
|
||||||
|
if (argvArg.cache === false) {
|
||||||
|
buildOptions.noCache = true;
|
||||||
|
}
|
||||||
|
if (argvArg.cached) {
|
||||||
|
buildOptions.cached = true;
|
||||||
|
}
|
||||||
|
if (argvArg.verbose) {
|
||||||
|
buildOptions.verbose = true;
|
||||||
|
}
|
||||||
|
if (argvArg.parallel) {
|
||||||
|
buildOptions.parallel = true;
|
||||||
|
if (typeof argvArg.parallel === 'number') {
|
||||||
|
buildOptions.parallelConcurrency = argvArg.parallel;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
await manager.build(buildOptions);
|
||||||
|
|
||||||
|
// Run tests
|
||||||
|
await manager.test();
|
||||||
|
await manager.cleanup();
|
||||||
|
logger.log('success', 'Tests completed successfully');
|
||||||
|
} catch (err) {
|
||||||
|
logger.log('error', `Tests failed: ${(err as Error).message}`);
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Login to configured registries
|
||||||
|
*/
|
||||||
|
tsdockerCli.addCommand('login').subscribe(async argvArg => {
|
||||||
|
try {
|
||||||
|
const config = await ConfigModule.run();
|
||||||
|
const manager = new TsDockerManager(config);
|
||||||
|
await manager.prepare(argvArg.context as string | undefined);
|
||||||
|
await manager.login();
|
||||||
|
logger.log('success', 'Login completed successfully');
|
||||||
|
} catch (err) {
|
||||||
|
logger.log('error', `Login failed: ${(err as Error).message}`);
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
/**
|
||||||
|
* List discovered Dockerfiles and their dependencies
|
||||||
|
*/
|
||||||
|
tsdockerCli.addCommand('list').subscribe(async argvArg => {
|
||||||
|
try {
|
||||||
|
const config = await ConfigModule.run();
|
||||||
|
const manager = new TsDockerManager(config);
|
||||||
|
await manager.prepare(argvArg.context as string | undefined);
|
||||||
|
await manager.list();
|
||||||
|
} catch (err) {
|
||||||
|
logger.log('error', `List failed: ${(err as Error).message}`);
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* this command is executed inside docker and meant for use from outside docker
|
* this command is executed inside docker and meant for use from outside docker
|
||||||
*/
|
*/
|
||||||
@@ -39,37 +240,200 @@ export let run = () => {
|
|||||||
});
|
});
|
||||||
|
|
||||||
tsdockerCli.addCommand('clean').subscribe(async argvArg => {
|
tsdockerCli.addCommand('clean').subscribe(async argvArg => {
|
||||||
ora.text('cleaning up docker env...');
|
try {
|
||||||
if (argvArg.all) {
|
const autoYes = !!argvArg.y;
|
||||||
const smartshellInstance = new plugins.smartshell.Smartshell({
|
const includeAll = !!argvArg.all;
|
||||||
executor: 'bash'
|
|
||||||
});
|
|
||||||
ora.text('killing any running docker containers...');
|
|
||||||
await smartshellInstance.exec(`docker kill $(docker ps -q)`);
|
|
||||||
|
|
||||||
ora.text('removing stopped containers...');
|
const smartshellInstance = new plugins.smartshell.Smartshell({ executor: 'bash' });
|
||||||
await smartshellInstance.exec(`docker rm $(docker ps -a -q)`);
|
const interact = new plugins.smartinteract.SmartInteract();
|
||||||
|
|
||||||
ora.text('removing images...');
|
// --- Docker context detection ---
|
||||||
await smartshellInstance.exec(`docker rmi -f $(docker images -q -f dangling=true)`);
|
ora.text('detecting docker context...');
|
||||||
|
const dockerContext = new DockerContext();
|
||||||
|
if (argvArg.context) {
|
||||||
|
dockerContext.setContext(argvArg.context as string);
|
||||||
|
}
|
||||||
|
await dockerContext.detect();
|
||||||
|
ora.stop();
|
||||||
|
dockerContext.logContextInfo();
|
||||||
|
|
||||||
ora.text('removing all other images...');
|
// --- Helper: parse docker output into resource list ---
|
||||||
await smartshellInstance.exec(`docker rmi $(docker images -a -q)`);
|
interface IDockerResource {
|
||||||
|
id: string;
|
||||||
|
display: string;
|
||||||
|
}
|
||||||
|
|
||||||
ora.text('removing all volumes...');
|
const listResources = async (command: string): Promise<IDockerResource[]> => {
|
||||||
await smartshellInstance.exec(`docker volume rm $(docker volume ls -f dangling=true -q)`);
|
const result = await smartshellInstance.execSilent(command);
|
||||||
|
if (result.exitCode !== 0 || !result.stdout.trim()) {
|
||||||
|
return [];
|
||||||
|
}
|
||||||
|
return result.stdout.trim().split('\n').filter(Boolean).map((line) => {
|
||||||
|
const parts = line.split('\t');
|
||||||
|
return {
|
||||||
|
id: parts[0],
|
||||||
|
display: parts.join(' | '),
|
||||||
|
};
|
||||||
|
});
|
||||||
|
};
|
||||||
|
|
||||||
|
// --- Helper: checkbox selection ---
|
||||||
|
const selectResources = async (
|
||||||
|
name: string,
|
||||||
|
message: string,
|
||||||
|
resources: IDockerResource[],
|
||||||
|
): Promise<string[]> => {
|
||||||
|
if (autoYes) {
|
||||||
|
return resources.map((r) => r.id);
|
||||||
|
}
|
||||||
|
const answer = await interact.askQuestion({
|
||||||
|
name,
|
||||||
|
type: 'checkbox',
|
||||||
|
message,
|
||||||
|
default: [],
|
||||||
|
choices: resources.map((r) => ({ name: r.display, value: r.id })),
|
||||||
|
});
|
||||||
|
return answer.value as string[];
|
||||||
|
};
|
||||||
|
|
||||||
|
// --- Helper: confirm action ---
|
||||||
|
const confirmAction = async (
|
||||||
|
name: string,
|
||||||
|
message: string,
|
||||||
|
): Promise<boolean> => {
|
||||||
|
if (autoYes) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
const answer = await interact.askQuestion({
|
||||||
|
name,
|
||||||
|
type: 'confirm',
|
||||||
|
message,
|
||||||
|
default: false,
|
||||||
|
});
|
||||||
|
return answer.value as boolean;
|
||||||
|
};
|
||||||
|
|
||||||
|
// === RUNNING CONTAINERS ===
|
||||||
|
const runningContainers = await listResources(
|
||||||
|
`docker ps --format '{{.ID}}\t{{.Names}}\t{{.Image}}\t{{.Status}}'`
|
||||||
|
);
|
||||||
|
if (runningContainers.length > 0) {
|
||||||
|
logger.log('info', `Found ${runningContainers.length} running container(s)`);
|
||||||
|
const selectedIds = await selectResources(
|
||||||
|
'runningContainers',
|
||||||
|
'Select running containers to kill:',
|
||||||
|
runningContainers,
|
||||||
|
);
|
||||||
|
if (selectedIds.length > 0) {
|
||||||
|
logger.log('info', `Killing ${selectedIds.length} container(s)...`);
|
||||||
|
await smartshellInstance.exec(`docker kill ${selectedIds.join(' ')}`);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
logger.log('info', 'No running containers found');
|
||||||
|
}
|
||||||
|
|
||||||
|
// === STOPPED CONTAINERS ===
|
||||||
|
const stoppedContainers = await listResources(
|
||||||
|
`docker ps -a --filter status=exited --filter status=created --format '{{.ID}}\t{{.Names}}\t{{.Image}}\t{{.Status}}'`
|
||||||
|
);
|
||||||
|
if (stoppedContainers.length > 0) {
|
||||||
|
logger.log('info', `Found ${stoppedContainers.length} stopped container(s)`);
|
||||||
|
const selectedIds = await selectResources(
|
||||||
|
'stoppedContainers',
|
||||||
|
'Select stopped containers to remove:',
|
||||||
|
stoppedContainers,
|
||||||
|
);
|
||||||
|
if (selectedIds.length > 0) {
|
||||||
|
logger.log('info', `Removing ${selectedIds.length} container(s)...`);
|
||||||
|
await smartshellInstance.exec(`docker rm ${selectedIds.join(' ')}`);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
logger.log('info', 'No stopped containers found');
|
||||||
|
}
|
||||||
|
|
||||||
|
// === DANGLING IMAGES ===
|
||||||
|
const danglingImages = await listResources(
|
||||||
|
`docker images -f dangling=true --format '{{.ID}}\t{{.Repository}}:{{.Tag}}\t{{.Size}}'`
|
||||||
|
);
|
||||||
|
if (danglingImages.length > 0) {
|
||||||
|
const confirmed = await confirmAction(
|
||||||
|
'removeDanglingImages',
|
||||||
|
`Remove ${danglingImages.length} dangling image(s)?`,
|
||||||
|
);
|
||||||
|
if (confirmed) {
|
||||||
|
logger.log('info', `Removing ${danglingImages.length} dangling image(s)...`);
|
||||||
|
const ids = danglingImages.map((r) => r.id).join(' ');
|
||||||
|
await smartshellInstance.exec(`docker rmi ${ids}`);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
logger.log('info', 'No dangling images found');
|
||||||
|
}
|
||||||
|
|
||||||
|
// === ALL IMAGES (only with --all) ===
|
||||||
|
if (includeAll) {
|
||||||
|
const allImages = await listResources(
|
||||||
|
`docker images --format '{{.ID}}\t{{.Repository}}:{{.Tag}}\t{{.Size}}'`
|
||||||
|
);
|
||||||
|
if (allImages.length > 0) {
|
||||||
|
logger.log('info', `Found ${allImages.length} image(s) total`);
|
||||||
|
const selectedIds = await selectResources(
|
||||||
|
'allImages',
|
||||||
|
'Select images to remove:',
|
||||||
|
allImages,
|
||||||
|
);
|
||||||
|
if (selectedIds.length > 0) {
|
||||||
|
logger.log('info', `Removing ${selectedIds.length} image(s)...`);
|
||||||
|
await smartshellInstance.exec(`docker rmi -f ${selectedIds.join(' ')}`);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
logger.log('info', 'No images found');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// === DANGLING VOLUMES ===
|
||||||
|
const danglingVolumes = await listResources(
|
||||||
|
`docker volume ls -f dangling=true --format '{{.Name}}\t{{.Driver}}'`
|
||||||
|
);
|
||||||
|
if (danglingVolumes.length > 0) {
|
||||||
|
const confirmed = await confirmAction(
|
||||||
|
'removeDanglingVolumes',
|
||||||
|
`Remove ${danglingVolumes.length} dangling volume(s)?`,
|
||||||
|
);
|
||||||
|
if (confirmed) {
|
||||||
|
logger.log('info', `Removing ${danglingVolumes.length} dangling volume(s)...`);
|
||||||
|
const names = danglingVolumes.map((r) => r.id).join(' ');
|
||||||
|
await smartshellInstance.exec(`docker volume rm ${names}`);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
logger.log('info', 'No dangling volumes found');
|
||||||
|
}
|
||||||
|
|
||||||
|
// === ALL VOLUMES (only with --all) ===
|
||||||
|
if (includeAll) {
|
||||||
|
const allVolumes = await listResources(
|
||||||
|
`docker volume ls --format '{{.Name}}\t{{.Driver}}'`
|
||||||
|
);
|
||||||
|
if (allVolumes.length > 0) {
|
||||||
|
logger.log('info', `Found ${allVolumes.length} volume(s) total`);
|
||||||
|
const selectedIds = await selectResources(
|
||||||
|
'allVolumes',
|
||||||
|
'Select volumes to remove:',
|
||||||
|
allVolumes,
|
||||||
|
);
|
||||||
|
if (selectedIds.length > 0) {
|
||||||
|
logger.log('info', `Removing ${selectedIds.length} volume(s)...`);
|
||||||
|
await smartshellInstance.exec(`docker volume rm ${selectedIds.join(' ')}`);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
logger.log('info', 'No volumes found');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.log('success', 'Docker cleanup completed!');
|
||||||
|
} catch (err) {
|
||||||
|
logger.log('error', `Clean failed: ${(err as Error).message}`);
|
||||||
|
process.exit(1);
|
||||||
}
|
}
|
||||||
ora.finishSuccess('docker environment now is clean!');
|
|
||||||
});
|
|
||||||
|
|
||||||
tsdockerCli.addCommand('speedtest').subscribe(async argvArg => {
|
|
||||||
const smartshellInstance = new plugins.smartshell.Smartshell({
|
|
||||||
executor: 'bash'
|
|
||||||
});
|
|
||||||
logger.log('ok', 'Starting speedtest');
|
|
||||||
await smartshellInstance.exec(
|
|
||||||
`docker pull tianon/speedtest && docker run --rm tianon/speedtest`
|
|
||||||
);
|
|
||||||
});
|
});
|
||||||
|
|
||||||
tsdockerCli.addCommand('vscode').subscribe(async argvArg => {
|
tsdockerCli.addCommand('vscode').subscribe(async argvArg => {
|
||||||
|
|||||||
@@ -1,17 +1,16 @@
|
|||||||
import * as plugins from './tsdocker.plugins';
|
import * as plugins from './tsdocker.plugins.js';
|
||||||
import * as paths from './tsdocker.paths';
|
import * as paths from './tsdocker.paths.js';
|
||||||
|
import * as fs from 'fs';
|
||||||
|
import type { ITsDockerConfig } from './interfaces/index.js';
|
||||||
|
|
||||||
export interface IConfig {
|
// Re-export ITsDockerConfig as IConfig for backward compatibility
|
||||||
baseImage: string;
|
export type IConfig = ITsDockerConfig & {
|
||||||
command: string;
|
|
||||||
dockerSock: boolean;
|
|
||||||
exitCode?: number;
|
exitCode?: number;
|
||||||
keyValueObject: any[];
|
};
|
||||||
}
|
|
||||||
|
|
||||||
const getQenvKeyValueObject = async () => {
|
const getQenvKeyValueObject = async () => {
|
||||||
let qenvKeyValueObjectArray: { [key: string]: string | number };
|
let qenvKeyValueObjectArray: { [key: string]: string | number };
|
||||||
if (plugins.smartfile.fs.fileExistsSync(plugins.path.join(paths.cwd, 'qenv.yml'))) {
|
if (fs.existsSync(plugins.path.join(paths.cwd, 'qenv.yml'))) {
|
||||||
qenvKeyValueObjectArray = new plugins.qenv.Qenv(paths.cwd, '.nogit/').keyValueObject;
|
qenvKeyValueObjectArray = new plugins.qenv.Qenv(paths.cwd, '.nogit/').keyValueObject;
|
||||||
} else {
|
} else {
|
||||||
qenvKeyValueObjectArray = {};
|
qenvKeyValueObjectArray = {};
|
||||||
@@ -21,12 +20,21 @@ const getQenvKeyValueObject = async () => {
|
|||||||
|
|
||||||
const buildConfig = async (qenvKeyValueObjectArg: { [key: string]: string | number }) => {
|
const buildConfig = async (qenvKeyValueObjectArg: { [key: string]: string | number }) => {
|
||||||
const npmextra = new plugins.npmextra.Npmextra(paths.cwd);
|
const npmextra = new plugins.npmextra.Npmextra(paths.cwd);
|
||||||
const config = npmextra.dataFor<IConfig>('npmdocker', {
|
const config = npmextra.dataFor<IConfig>('@git.zone/tsdocker', {
|
||||||
|
// Legacy options (backward compatible)
|
||||||
baseImage: 'hosttoday/ht-docker-node:npmdocker',
|
baseImage: 'hosttoday/ht-docker-node:npmdocker',
|
||||||
init: 'rm -rf node_nodules/ && yarn install',
|
init: 'rm -rf node_nodules/ && yarn install',
|
||||||
command: 'npmci npm test',
|
command: 'npmci npm test',
|
||||||
dockerSock: false,
|
dockerSock: false,
|
||||||
keyValueObject: qenvKeyValueObjectArg
|
keyValueObject: qenvKeyValueObjectArg,
|
||||||
|
|
||||||
|
// New Docker build options
|
||||||
|
registries: [],
|
||||||
|
registryRepoMap: {},
|
||||||
|
buildArgEnvMap: {},
|
||||||
|
platforms: ['linux/amd64'],
|
||||||
|
push: false,
|
||||||
|
testDir: undefined,
|
||||||
});
|
});
|
||||||
return config;
|
return config;
|
||||||
};
|
};
|
||||||
|
|||||||
@@ -1,15 +1,15 @@
|
|||||||
import * as plugins from './tsdocker.plugins';
|
import * as plugins from './tsdocker.plugins.js';
|
||||||
import * as paths from './tsdocker.paths';
|
import * as paths from './tsdocker.paths.js';
|
||||||
import * as snippets from './tsdocker.snippets';
|
import * as snippets from './tsdocker.snippets.js';
|
||||||
|
|
||||||
import { logger, ora } from './tsdocker.logging';
|
import { logger, ora } from './tsdocker.logging.js';
|
||||||
|
|
||||||
const smartshellInstance = new plugins.smartshell.Smartshell({
|
const smartshellInstance = new plugins.smartshell.Smartshell({
|
||||||
executor: 'bash'
|
executor: 'bash'
|
||||||
});
|
});
|
||||||
|
|
||||||
// interfaces
|
// interfaces
|
||||||
import { IConfig } from './tsdocker.config';
|
import type { IConfig } from './tsdocker.config.js';
|
||||||
|
|
||||||
let config: IConfig;
|
let config: IConfig;
|
||||||
|
|
||||||
@@ -43,7 +43,7 @@ const checkDocker = () => {
|
|||||||
/**
|
/**
|
||||||
* builds the Dockerfile according to the config in the project
|
* builds the Dockerfile according to the config in the project
|
||||||
*/
|
*/
|
||||||
const buildDockerFile = () => {
|
const buildDockerFile = async () => {
|
||||||
const done = plugins.smartpromise.defer();
|
const done = plugins.smartpromise.defer();
|
||||||
ora.text('building Dockerfile...');
|
ora.text('building Dockerfile...');
|
||||||
const dockerfile: string = snippets.dockerfileSnippet({
|
const dockerfile: string = snippets.dockerfileSnippet({
|
||||||
@@ -52,7 +52,7 @@ const buildDockerFile = () => {
|
|||||||
});
|
});
|
||||||
logger.log('info', `Base image is: ${config.baseImage}`);
|
logger.log('info', `Base image is: ${config.baseImage}`);
|
||||||
logger.log('info', `Command is: ${config.command}`);
|
logger.log('info', `Command is: ${config.command}`);
|
||||||
plugins.smartfile.memory.toFsSync(dockerfile, plugins.path.join(paths.cwd, 'npmdocker'));
|
await plugins.smartfs.file(plugins.path.join(paths.cwd, 'npmdocker')).write(dockerfile);
|
||||||
logger.log('ok', 'Dockerfile created!');
|
logger.log('ok', 'Dockerfile created!');
|
||||||
ora.stop();
|
ora.stop();
|
||||||
done.resolve();
|
done.resolve();
|
||||||
@@ -67,7 +67,7 @@ const buildDockerImage = async () => {
|
|||||||
await smartshellInstance.exec(`docker pull ${config.baseImage}`);
|
await smartshellInstance.exec(`docker pull ${config.baseImage}`);
|
||||||
ora.text('building Dockerimage...');
|
ora.text('building Dockerimage...');
|
||||||
const execResult = await smartshellInstance.execSilent(
|
const execResult = await smartshellInstance.execSilent(
|
||||||
`docker build -f npmdocker -t ${dockerData.imageTag} ${paths.cwd}`
|
`docker build --load -f npmdocker -t ${dockerData.imageTag} ${paths.cwd}`
|
||||||
);
|
);
|
||||||
if (execResult.exitCode !== 0) {
|
if (execResult.exitCode !== 0) {
|
||||||
console.log(execResult.stdout);
|
console.log(execResult.stdout);
|
||||||
@@ -148,7 +148,7 @@ const postClean = async () => {
|
|||||||
.then(async () => {
|
.then(async () => {
|
||||||
logger.log('ok', 'cleaned up!');
|
logger.log('ok', 'cleaned up!');
|
||||||
});
|
});
|
||||||
plugins.smartfile.fs.removeSync(paths.npmdockerFile);
|
await plugins.smartfs.file(paths.npmdockerFile).delete();
|
||||||
};
|
};
|
||||||
|
|
||||||
export let run = async (configArg: IConfig): Promise<IConfig> => {
|
export let run = async (configArg: IConfig): Promise<IConfig> => {
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
import * as plugins from './tsdocker.plugins';
|
import * as plugins from './tsdocker.plugins.js';
|
||||||
|
|
||||||
export const logger = new plugins.smartlog.Smartlog({
|
export const logger = new plugins.smartlog.Smartlog({
|
||||||
logContext: {
|
logContext: {
|
||||||
@@ -15,3 +15,12 @@ export const logger = new plugins.smartlog.Smartlog({
|
|||||||
logger.addLogDestination(new plugins.smartlogDestinationLocal.DestinationLocal());
|
logger.addLogDestination(new plugins.smartlogDestinationLocal.DestinationLocal());
|
||||||
|
|
||||||
export const ora = new plugins.smartlogSouceOra.SmartlogSourceOra();
|
export const ora = new plugins.smartlogSouceOra.SmartlogSourceOra();
|
||||||
|
|
||||||
|
export function formatDuration(ms: number): string {
|
||||||
|
if (ms < 1000) return `${ms}ms`;
|
||||||
|
const totalSeconds = ms / 1000;
|
||||||
|
if (totalSeconds < 60) return `${totalSeconds.toFixed(1)}s`;
|
||||||
|
const minutes = Math.floor(totalSeconds / 60);
|
||||||
|
const seconds = Math.round(totalSeconds % 60);
|
||||||
|
return `${minutes}m ${seconds}s`;
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,8 +1,14 @@
|
|||||||
import * as plugins from './tsdocker.plugins';
|
import * as plugins from './tsdocker.plugins.js';
|
||||||
|
import * as fs from 'fs';
|
||||||
|
import { fileURLToPath } from 'url';
|
||||||
|
import { dirname } from 'path';
|
||||||
|
|
||||||
|
const __filename = fileURLToPath(import.meta.url);
|
||||||
|
const __dirname = dirname(__filename);
|
||||||
|
|
||||||
// directories
|
// directories
|
||||||
export let cwd = process.cwd();
|
export let cwd = process.cwd();
|
||||||
export let packageBase = plugins.path.join(__dirname, '../');
|
export let packageBase = plugins.path.join(__dirname, '../');
|
||||||
export let assets = plugins.path.join(packageBase, 'assets/');
|
export let assets = plugins.path.join(packageBase, 'assets/');
|
||||||
plugins.smartfile.fs.ensureDirSync(assets);
|
fs.mkdirSync(assets, { recursive: true });
|
||||||
export let npmdockerFile = plugins.path.join(cwd, 'npmdocker');
|
export let npmdockerFile = plugins.path.join(cwd, 'npmdocker');
|
||||||
|
|||||||
@@ -1,26 +1,32 @@
|
|||||||
// pushrocks scope
|
// push.rocks scope
|
||||||
import * as npmextra from '@pushrocks/npmextra';
|
import * as lik from '@push.rocks/lik';
|
||||||
|
import * as npmextra from '@push.rocks/npmextra';
|
||||||
import * as path from 'path';
|
import * as path from 'path';
|
||||||
import * as projectinfo from '@pushrocks/projectinfo';
|
import * as projectinfo from '@push.rocks/projectinfo';
|
||||||
import * as smartpromise from '@pushrocks/smartpromise';
|
import * as smartpromise from '@push.rocks/smartpromise';
|
||||||
import * as qenv from '@pushrocks/qenv';
|
import * as qenv from '@push.rocks/qenv';
|
||||||
import * as smartcli from '@pushrocks/smartcli';
|
import * as smartcli from '@push.rocks/smartcli';
|
||||||
import * as smartfile from '@pushrocks/smartfile';
|
import { SmartFs, SmartFsProviderNode } from '@push.rocks/smartfs';
|
||||||
import * as smartlog from '@pushrocks/smartlog';
|
import * as smartlog from '@push.rocks/smartlog';
|
||||||
import * as smartlogDestinationLocal from '@pushrocks/smartlog-destination-local';
|
import * as smartlogDestinationLocal from '@push.rocks/smartlog-destination-local';
|
||||||
import * as smartlogSouceOra from '@pushrocks/smartlog-source-ora';
|
import * as smartlogSouceOra from '@push.rocks/smartlog-source-ora';
|
||||||
import * as smartopen from '@pushrocks/smartopen';
|
import * as smartopen from '@push.rocks/smartopen';
|
||||||
import * as smartshell from '@pushrocks/smartshell';
|
import * as smartinteract from '@push.rocks/smartinteract';
|
||||||
import * as smartstring from '@pushrocks/smartstring';
|
import * as smartshell from '@push.rocks/smartshell';
|
||||||
|
import * as smartstring from '@push.rocks/smartstring';
|
||||||
|
|
||||||
|
// Create smartfs instance
|
||||||
|
export const smartfs = new SmartFs(new SmartFsProviderNode());
|
||||||
|
|
||||||
export {
|
export {
|
||||||
|
lik,
|
||||||
npmextra,
|
npmextra,
|
||||||
path,
|
path,
|
||||||
projectinfo,
|
projectinfo,
|
||||||
smartpromise,
|
smartpromise,
|
||||||
qenv,
|
qenv,
|
||||||
smartcli,
|
smartcli,
|
||||||
smartfile,
|
smartinteract,
|
||||||
smartlog,
|
smartlog,
|
||||||
smartlogDestinationLocal,
|
smartlogDestinationLocal,
|
||||||
smartlogSouceOra,
|
smartlogSouceOra,
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
import * as plugins from './tsdocker.plugins';
|
import * as plugins from './tsdocker.plugins.js';
|
||||||
|
|
||||||
export interface IDockerfileSnippet {
|
export interface IDockerfileSnippet {
|
||||||
baseImage: string;
|
baseImage: string;
|
||||||
@@ -14,23 +14,20 @@ let getMountSolutionString = (optionsArg: IDockerfileSnippet) => {
|
|||||||
};
|
};
|
||||||
|
|
||||||
let getGlobalPreparationString = (optionsArg: IDockerfileSnippet) => {
|
let getGlobalPreparationString = (optionsArg: IDockerfileSnippet) => {
|
||||||
if (optionsArg.baseImage !== 'hosttoday/ht-docker-node:npmdocker') {
|
// Always install tsdocker to ensure the latest version is available
|
||||||
return 'RUN npm install -g npmdocker';
|
return 'RUN npm install -g @git.zone/tsdocker';
|
||||||
} else {
|
|
||||||
return '# not installing npmdocker since it is included in the base image';
|
|
||||||
}
|
|
||||||
};
|
};
|
||||||
|
|
||||||
export let dockerfileSnippet = (optionsArg: IDockerfileSnippet): string => {
|
export let dockerfileSnippet = (optionsArg: IDockerfileSnippet): string => {
|
||||||
return plugins.smartstring.indent.normalize(
|
return plugins.smartstring.indent.normalize(
|
||||||
`
|
`
|
||||||
FROM ${optionsArg.baseImage}
|
FROM ${optionsArg.baseImage}
|
||||||
# For info about what npmdocker does read the docs at https://gitzone.github.io/npmdocker
|
# For info about what tsdocker does read the docs at https://gitzone.github.io/tsdocker
|
||||||
${getGlobalPreparationString(optionsArg)}
|
${getGlobalPreparationString(optionsArg)}
|
||||||
${getMountSolutionString(optionsArg)}
|
${getMountSolutionString(optionsArg)}
|
||||||
WORKDIR /workspace
|
WORKDIR /workspace
|
||||||
ENV CI=true
|
ENV CI=true
|
||||||
ENTRYPOINT ["npmdocker"]
|
ENTRYPOINT ["tsdocker"]
|
||||||
CMD ["runinside"]
|
CMD ["runinside"]
|
||||||
`
|
`
|
||||||
);
|
);
|
||||||
|
|||||||
12
tsconfig.json
Normal file
12
tsconfig.json
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
{
|
||||||
|
"compilerOptions": {
|
||||||
|
"target": "ES2022",
|
||||||
|
"module": "NodeNext",
|
||||||
|
"moduleResolution": "NodeNext",
|
||||||
|
"esModuleInterop": true,
|
||||||
|
"verbatimModuleSyntax": true,
|
||||||
|
"baseUrl": ".",
|
||||||
|
"paths": {}
|
||||||
|
},
|
||||||
|
"exclude": ["dist_*/**/*.d.ts"]
|
||||||
|
}
|
||||||
17
tslint.json
17
tslint.json
@@ -1,17 +0,0 @@
|
|||||||
{
|
|
||||||
"extends": ["tslint:latest", "tslint-config-prettier"],
|
|
||||||
"rules": {
|
|
||||||
"semicolon": [true, "always"],
|
|
||||||
"no-console": false,
|
|
||||||
"ordered-imports": false,
|
|
||||||
"object-literal-sort-keys": false,
|
|
||||||
"member-ordering": {
|
|
||||||
"options":{
|
|
||||||
"order": [
|
|
||||||
"static-method"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"defaultSeverity": "warning"
|
|
||||||
}
|
|
||||||
Reference in New Issue
Block a user