Compare commits
28 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 3085eb590f | |||
| 04b75b42f3 | |||
| b04b8c9033 | |||
| 2130a8a879 | |||
| 17de78aed3 | |||
| eddb8cd156 | |||
| cfc7798d49 | |||
| 37dfde005e | |||
| d1785aab86 | |||
| 31fb4aea3c | |||
| 907048fa87 | |||
| 02b267ee10 | |||
| 16cd0bbd87 | |||
| cc83743f9a | |||
| 7131c16f80 | |||
| 02688861f4 | |||
| 3a8b301b3e | |||
| c09bef33c3 | |||
| 32eb0d1d77 | |||
| 7cac628975 | |||
| c279dbd55e | |||
| 7b7064864e | |||
| 36f06cef09 | |||
| b0f87deb4b | |||
| 9805324746 | |||
| 808066d8c3 | |||
| 6922d19454 | |||
| e1492f8ec4 |
122
changelog.md
122
changelog.md
@@ -1,5 +1,127 @@
|
||||
# Changelog
|
||||
|
||||
## 2026-02-07 - 1.14.0 - feat(build)
|
||||
add level-based parallel builds with --parallel and configurable concurrency
|
||||
|
||||
- Introduces --parallel and --parallel=<n> CLI flags to enable level-based parallel Docker builds (default concurrency 4).
|
||||
- Adds Dockerfile.computeLevels() to group topologically-sorted Dockerfiles into dependency levels.
|
||||
- Adds Dockerfile.runWithConcurrency() implementing a bounded-concurrency worker-pool (fast-fail via Promise.all).
|
||||
- Integrates parallel build mode into Dockerfile.buildDockerfiles() and TsDockerManager.build() for both cached and non-cached flows, including tagging and pushing for dependency resolution after each level.
|
||||
- Adds options.parallel and options.parallelConcurrency to the build interface and wires them through the CLI and manager.
|
||||
- Updates documentation (readme.hints.md) with usage examples and implementation notes.
|
||||
|
||||
## 2026-02-07 - 1.13.0 - feat(docker)
|
||||
add Docker context detection, rootless support, and context-aware buildx registry handling
|
||||
|
||||
- Introduce DockerContext class to detect current Docker context and rootless mode and to log warnings and context info
|
||||
- Add IDockerContextInfo interface and a new context option on build/config to pass explicit Docker context
|
||||
- Propagate --context CLI flag into TsDockerManager.prepare so CLI commands can set an explicit Docker context
|
||||
- Make buildx builder name context-aware (tsdocker-builder-<sanitized-context>) and log builder name/platforms
|
||||
- Pass isRootless into local registry startup and build pipeline; emit rootless-specific warnings and registry reachability hint
|
||||
|
||||
## 2026-02-06 - 1.12.0 - feat(docker)
|
||||
add detailed logging for buildx, build commands, local registry, and local dependency info
|
||||
|
||||
- Log startup of local registry including a note about buildx dependency bridging
|
||||
- Log constructed build commands and indicate whether buildx or standard docker build is used (including platforms and --push/--load distinctions)
|
||||
- Emit build mode summary at start of build phase and report local base-image dependency mappings
|
||||
- Report when --no-cache is enabled and surface buildx setup readiness with configured platforms
|
||||
- Non-functional change: purely adds informational logging to improve observability during builds
|
||||
|
||||
## 2026-02-06 - 1.11.0 - feat(docker)
|
||||
start temporary local registry for buildx dependency resolution and ensure buildx builder uses host network
|
||||
|
||||
- Introduce a temporary local registry (localhost:5234) with start/stop helpers and push support to expose local images for buildx
|
||||
- Add Dockerfile.needsLocalRegistry to decide when a local registry is required (local base dependencies + multi-platform or platform option)
|
||||
- Push built images to the local registry and set localRegistryTag on Dockerfile instances for BuildKit build-context usage
|
||||
- Tag built images in the host daemon for dependent Dockerfiles to resolve local FROM references
|
||||
- Integrate registry lifecycle into Dockerfile.buildDockerfiles and TsDockerManager build flows (start before builds, stop after)
|
||||
- Ensure buildx builder is created with --driver-opt network=host and recreate existing builder if it lacks host network to allow registry access from build containers
|
||||
|
||||
## 2026-02-06 - 1.10.0 - feat(classes.dockerfile)
|
||||
support using a local base image as a build context in buildx commands
|
||||
|
||||
- Adds --build-context flag mapping base image to docker-image://<localTag> when localBaseImageDependent && localBaseDockerfile are set
|
||||
- Appends the build context flag to both single-platform and multi-platform docker buildx commands
|
||||
- Logs an info message indicating the local build context mapping
|
||||
|
||||
## 2026-02-06 - 1.9.0 - feat(build)
|
||||
add verbose build output, progress logging, and timing for builds/tests
|
||||
|
||||
- Add 'verbose' option to build/test flows (interfaces, CLI, and method signatures) to allow streaming raw docker build output or run silently
|
||||
- Log per-item progress for build and test phases (e.g. (1/N) Building/Testing <tag>) and report individual durations
|
||||
- Return elapsed time from Dockerfile.build() and Dockerfile.test() and aggregate total build/test times in manager
|
||||
- Introduce formatDuration(ms) helper in logging module to format timings
|
||||
- Switch from console.log to structured logger calls across cache, manager, dockerfile and push paths
|
||||
- Use silent exec variants when verbose is false and stream exec when verbose is true
|
||||
|
||||
## 2026-02-06 - 1.8.0 - feat(build)
|
||||
add optional content-hash based build cache to skip rebuilding unchanged Dockerfiles
|
||||
|
||||
- Introduce TsDockerCache to compute SHA-256 of Dockerfile content and persist cache to .nogit/tsdocker_support.json
|
||||
- Add ICacheEntry and ICacheData interfaces and a cached flag to IBuildCommandOptions
|
||||
- Integrate cached mode in TsDockerManager: skip builds on cache hits, verify image presence, record builds on misses, and still perform dependency tagging
|
||||
- Expose --cached option in CLI to enable the cached build flow
|
||||
- Cache records store contentHash, imageId, buildTag and timestamp
|
||||
|
||||
## 2026-02-06 - 1.7.0 - feat(cli)
|
||||
add CLI version display using commitinfo
|
||||
|
||||
- Imported commitinfo from './00_commitinfo_data.js' and called tsdockerCli.addVersion(commitinfo.version) to surface package/commit version in the Smartcli instance
|
||||
- Change made in ts/tsdocker.cli.ts — small user-facing CLI enhancement; no breaking changes
|
||||
|
||||
## 2026-02-06 - 1.6.0 - feat(docker)
|
||||
add support for no-cache builds and tag built images for local dependency resolution
|
||||
|
||||
- Introduce IBuildCommandOptions.noCache to control --no-cache behavior
|
||||
- Propagate noCache from CLI (via cache flag) through TsDockerManager to Dockerfile.build
|
||||
- Append --no-cache to docker build/buildx commands when noCache is true
|
||||
- After building an image, tag it with full base image references used by dependent Dockerfiles so their FROM lines resolve to the locally-built image
|
||||
- Log tagging actions and execute docker tag via smartshellInstance
|
||||
|
||||
## 2026-02-06 - 1.5.0 - feat(build)
|
||||
add support for selective builds, platform override and build timeout
|
||||
|
||||
- Introduce IBuildCommandOptions with patterns, platform and timeout to control build behavior
|
||||
- Allow manager.build() to accept options and build only matching Dockerfiles (including dependencies) preserving topological order
|
||||
- Add CLI parsing for build/push to accept positional Dockerfile patterns and --platform/--timeout flags
|
||||
- Support single-platform override via docker buildx and multi-platform buildx detection
|
||||
- Implement streaming exec with timeout to kill long-running builds and surface timeout errors
|
||||
|
||||
## 2026-02-04 - 1.4.3 - fix(dockerfile)
|
||||
fix matching of base images to local Dockerfiles by stripping registry prefixes when comparing image references
|
||||
|
||||
- Added Dockerfile.extractRepoVersion(imageRef) to normalize image references by removing registry prefixes (detects registries containing '.' or ':' or 'localhost').
|
||||
- Use extractRepoVersion when checking tagToDockerfile and when mapping local base dockerfiles to ensure comparisons use repo:tag keys rather than full registry-prefixed references.
|
||||
- Prevents mismatches when baseImage includes a registry (e.g. "host.today/repo:version") so it correctly matches a local cleanTag like "repo:version".
|
||||
|
||||
## 2026-01-21 - 1.4.2 - fix(classes.dockerfile)
|
||||
use a single top-level fs import instead of requiring fs inside methods
|
||||
|
||||
- Added top-level import: import * as fs from 'fs' in ts/classes.dockerfile.ts
|
||||
- Removed inline require('fs') calls and replaced with the imported fs in constructor and test() to keep imports consistent
|
||||
- No behavioral change expected; this is a cleanup/refactor to standardize module usage
|
||||
|
||||
## 2026-01-20 - 1.4.1 - fix(docs)
|
||||
update README: expand usage, installation, quick start, features, troubleshooting and migration notes
|
||||
|
||||
- Expanded README content: new Quick Start, Installation examples, and detailed Features section (containerized testing, smart Docker builds, multi-registry push, multi-architecture support, zero-config start)
|
||||
- Added troubleshooting and performance tips including registry login guidance and circular dependency advice
|
||||
- Updated migration notes from legacy npmdocker to @git.zone/tsdocker (command and config key changes, ESM guidance)
|
||||
- Documentation-only change — no source code modified
|
||||
|
||||
## 2026-01-20 - 1.4.0 - feat(tsdocker)
|
||||
add multi-registry and multi-arch Docker build/push/pull manager, registry storage, Dockerfile handling, and new CLI commands
|
||||
|
||||
- Introduce TsDockerManager orchestrator to discover, sort, build, test, push and pull Dockerfiles
|
||||
- Add Dockerfile class with dependency-aware build order, buildx support, push/pull and test flows (new large module)
|
||||
- Add DockerRegistry and RegistryStorage classes to manage registry credentials, login/logout and environment loading
|
||||
- Add CLI commands: build, push, pull, test, login, list (and integrate TsDockerManager into CLI)
|
||||
- Extend configuration (ITsDockerConfig) with registries, registryRepoMap, buildArgEnvMap, platforms, push and testDir; re-export as IConfig for backwards compatibility
|
||||
- Add @push.rocks/lik to dependencies and import it in tsdocker.plugins
|
||||
- Remove legacy speedtest command and related package.json script
|
||||
- Update README and readme.hints with new features, configuration examples and command list
|
||||
|
||||
## 2026-01-19 - 1.3.0 - feat(packaging)
|
||||
Rename package scope to @git.zone and migrate to ESM; rename CLI/config keys, update entrypoints and imports, bump Node requirement to 18, and adjust scripts/dependencies
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@git.zone/tsdocker",
|
||||
"version": "1.3.0",
|
||||
"version": "1.14.0",
|
||||
"private": false,
|
||||
"description": "develop npm modules cross platform with docker",
|
||||
"main": "dist_ts/index.js",
|
||||
@@ -13,7 +13,6 @@
|
||||
"build": "(tsbuild)",
|
||||
"testIntegration": "(npm run clean && npm run setupCheck && npm run testStandard)",
|
||||
"testStandard": "(cd test/ && tsx ../ts/index.ts)",
|
||||
"testSpeed": "(cd test/ && tsx ../ts/index.ts speedtest)",
|
||||
"testClean": "(cd test/ && tsx ../ts/index.ts clean --all)",
|
||||
"testVscode": "(cd test/ && tsx ../ts/index.ts vscode)",
|
||||
"clean": "(rm -rf test/)",
|
||||
@@ -41,6 +40,7 @@
|
||||
"@types/node": "^25.0.9"
|
||||
},
|
||||
"dependencies": {
|
||||
"@push.rocks/lik": "^6.2.2",
|
||||
"@push.rocks/npmextra": "^5.3.3",
|
||||
"@push.rocks/projectinfo": "^5.0.2",
|
||||
"@push.rocks/qenv": "^6.1.3",
|
||||
|
||||
3
pnpm-lock.yaml
generated
3
pnpm-lock.yaml
generated
@@ -8,6 +8,9 @@ importers:
|
||||
|
||||
.:
|
||||
dependencies:
|
||||
'@push.rocks/lik':
|
||||
specifier: ^6.2.2
|
||||
version: 6.2.2
|
||||
'@push.rocks/npmextra':
|
||||
specifier: ^5.3.3
|
||||
version: 5.3.3
|
||||
|
||||
133
readme.hints.md
133
readme.hints.md
@@ -2,39 +2,120 @@
|
||||
|
||||
## Module Purpose
|
||||
|
||||
tsdocker is a tool for developing npm modules cross-platform using Docker. It allows testing in clean, reproducible Linux environments locally.
|
||||
tsdocker is a comprehensive Docker development and building tool. It provides:
|
||||
- Testing npm modules in clean Docker environments (legacy feature)
|
||||
- Building Dockerfiles with dependency ordering
|
||||
- Multi-registry push/pull support
|
||||
- Multi-architecture builds (amd64/arm64)
|
||||
|
||||
## Recent Upgrades (2025-11-22)
|
||||
## New CLI Commands (2026-01-19)
|
||||
|
||||
- Updated all @gitzone/* dependencies to @git.zone/* scope (latest versions)
|
||||
- Updated all @pushrocks/* dependencies to @push.rocks/* scope (latest versions)
|
||||
- Migrated from smartfile v8 to smartfs v1.1.0
|
||||
- All filesystem operations now use smartfs fluent API
|
||||
- Operations are now async (smartfs is async-only)
|
||||
- Updated dev dependencies:
|
||||
- @git.zone/tsbuild: ^3.1.0
|
||||
- @git.zone/tsrun: ^2.0.0
|
||||
- @git.zone/tstest: ^3.1.3
|
||||
- Removed @pushrocks/tapbundle (now use @git.zone/tstest/tapbundle)
|
||||
- Updated @types/node to ^22.10.2
|
||||
- Removed tslint and tslint-config-prettier (no longer needed)
|
||||
| Command | Description |
|
||||
|---------|-------------|
|
||||
| `tsdocker` | Run tests in container (legacy default behavior) |
|
||||
| `tsdocker build` | Build all Dockerfiles with dependency ordering |
|
||||
| `tsdocker push [registry]` | Push images to configured registries |
|
||||
| `tsdocker pull <registry>` | Pull images from registry |
|
||||
| `tsdocker test` | Run container tests (test scripts) |
|
||||
| `tsdocker login` | Login to configured registries |
|
||||
| `tsdocker list` | List discovered Dockerfiles and dependencies |
|
||||
| `tsdocker clean --all` | Clean up Docker environment |
|
||||
| `tsdocker vscode` | Start VS Code in Docker |
|
||||
|
||||
## SmartFS Migration Details
|
||||
## Configuration
|
||||
|
||||
The following operations were converted:
|
||||
Configure in `package.json` under `@git.zone/tsdocker`:
|
||||
|
||||
- `smartfile.fs.fileExistsSync()` → Node.js `fs.existsSync()` (for sync needs)
|
||||
- `smartfile.fs.ensureDirSync()` → Node.js `fs.mkdirSync(..., { recursive: true })`
|
||||
- `smartfile.memory.toFsSync()` → `smartfs.file(path).write(content)` (async)
|
||||
- `smartfile.fs.removeSync()` → `smartfs.file(path).delete()` (async)
|
||||
```json
|
||||
{
|
||||
"@git.zone/tsdocker": {
|
||||
"registries": ["registry.gitlab.com", "docker.io"],
|
||||
"registryRepoMap": {
|
||||
"registry.gitlab.com": "host.today/ht-docker-node"
|
||||
},
|
||||
"buildArgEnvMap": {
|
||||
"NODE_VERSION": "NODE_VERSION"
|
||||
},
|
||||
"platforms": ["linux/amd64", "linux/arm64"],
|
||||
"push": false,
|
||||
"testDir": "./test"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Test Status
|
||||
### Configuration Options
|
||||
|
||||
- Build: ✅ Passes
|
||||
- The integration test requires cloning an external test repository (sandbox-npmts)
|
||||
- The external test repo uses top-level await which requires ESM module handling
|
||||
- This is not a tsdocker issue but rather the test repository's structure
|
||||
- `baseImage`: Base Docker image for testing (legacy)
|
||||
- `command`: Command to run in container (legacy)
|
||||
- `dockerSock`: Mount Docker socket (legacy)
|
||||
- `registries`: Array of registry URLs to push to
|
||||
- `registryRepoMap`: Map registry URLs to different repo paths
|
||||
- `buildArgEnvMap`: Map Docker build ARGs to environment variables
|
||||
- `platforms`: Target architectures for buildx
|
||||
- `push`: Auto-push after build
|
||||
- `testDir`: Directory containing test scripts
|
||||
|
||||
## Registry Authentication
|
||||
|
||||
Set environment variables for registry login:
|
||||
|
||||
```bash
|
||||
# Pipe-delimited format (numbered 1-10)
|
||||
export DOCKER_REGISTRY_1="registry.gitlab.com|username|password"
|
||||
export DOCKER_REGISTRY_2="docker.io|username|password"
|
||||
|
||||
# Or individual registry format
|
||||
export DOCKER_REGISTRY_URL="registry.gitlab.com"
|
||||
export DOCKER_REGISTRY_USER="username"
|
||||
export DOCKER_REGISTRY_PASSWORD="password"
|
||||
```
|
||||
|
||||
## File Structure
|
||||
|
||||
```
|
||||
ts/
|
||||
├── index.ts (entry point)
|
||||
├── tsdocker.cli.ts (CLI commands)
|
||||
├── tsdocker.config.ts (configuration)
|
||||
├── tsdocker.plugins.ts (plugin imports)
|
||||
├── tsdocker.docker.ts (legacy test runner)
|
||||
├── tsdocker.snippets.ts (Dockerfile generation)
|
||||
├── classes.dockerfile.ts (Dockerfile management)
|
||||
├── classes.dockerregistry.ts (registry authentication)
|
||||
├── classes.registrystorage.ts (registry storage)
|
||||
├── classes.tsdockermanager.ts (orchestrator)
|
||||
└── interfaces/
|
||||
└── index.ts (type definitions)
|
||||
```
|
||||
|
||||
## Dependencies
|
||||
|
||||
All dependencies are now at their latest versions compatible with Node.js without introducing new Node.js-specific dependencies.
|
||||
- `@push.rocks/lik`: Object mapping utilities
|
||||
- `@push.rocks/smartfs`: Filesystem operations
|
||||
- `@push.rocks/smartshell`: Shell command execution
|
||||
- `@push.rocks/smartcli`: CLI framework
|
||||
- `@push.rocks/projectinfo`: Project metadata
|
||||
|
||||
## Parallel Builds
|
||||
|
||||
`--parallel` flag enables level-based parallel Docker builds:
|
||||
|
||||
```bash
|
||||
tsdocker build --parallel # parallel, default concurrency (4)
|
||||
tsdocker build --parallel=8 # parallel, concurrency 8
|
||||
tsdocker build --parallel --cached # works with both modes
|
||||
```
|
||||
|
||||
Implementation: `Dockerfile.computeLevels()` groups topologically sorted Dockerfiles into dependency levels. `Dockerfile.runWithConcurrency()` provides a worker-pool pattern for bounded concurrency. Both are public static methods on the `Dockerfile` class. The parallel logic exists in both `Dockerfile.buildDockerfiles()` (standard mode) and `TsDockerManager.build()` (cached mode).
|
||||
|
||||
## Build Status
|
||||
|
||||
- Build: ✅ Passes
|
||||
- Legacy test functionality preserved
|
||||
- New Docker build functionality added
|
||||
|
||||
## Previous Upgrades (2025-11-22)
|
||||
|
||||
- Updated all @gitzone/* dependencies to @git.zone/* scope
|
||||
- Updated all @pushrocks/* dependencies to @push.rocks/* scope
|
||||
- Migrated from smartfile v8 to smartfs v1.1.0
|
||||
|
||||
563
readme.md
563
readme.md
@@ -1,6 +1,6 @@
|
||||
# @git.zone/tsdocker
|
||||
|
||||
> 🐳 Cross-platform npm module development with Docker — test your packages in clean, reproducible Linux environments every time.
|
||||
> 🐳 The ultimate Docker development toolkit for TypeScript projects — build, test, and ship containerized applications with ease.
|
||||
|
||||
## Issue Reporting and Security
|
||||
|
||||
@@ -8,313 +8,454 @@ For reporting bugs, issues, or security vulnerabilities, please visit [community
|
||||
|
||||
## What is tsdocker?
|
||||
|
||||
**tsdocker** provides containerized testing environments for npm packages, ensuring your code works consistently across different systems. It's perfect for:
|
||||
**tsdocker** is a comprehensive Docker development and building tool that handles everything from testing npm packages in clean environments to building and pushing multi-architecture Docker images across multiple registries.
|
||||
|
||||
- 🧪 **Testing in clean environments** — Every test run starts fresh, just like CI
|
||||
- 🔄 **Reproducing CI behavior locally** — No more "works on my machine" surprises
|
||||
- 🐧 **Cross-platform development** — Develop on macOS/Windows, test on Linux
|
||||
- 🚀 **Quick validation** — Spin up isolated containers for testing without polluting your system
|
||||
### 🎯 Key Capabilities
|
||||
|
||||
## Features
|
||||
|
||||
✨ **Works Everywhere Docker Does**
|
||||
|
||||
- Docker Toolbox
|
||||
- Native Docker Desktop
|
||||
- Docker-in-Docker (DinD)
|
||||
- Mounted docker.sock scenarios
|
||||
|
||||
🔧 **Flexible Configuration**
|
||||
|
||||
- Custom base images
|
||||
- Configurable test commands
|
||||
- Environment variable injection via qenv
|
||||
- Optional docker.sock mounting for nested container tests
|
||||
|
||||
📦 **TypeScript-First**
|
||||
|
||||
- Full TypeScript support with excellent IntelliSense
|
||||
- Type-safe configuration
|
||||
- Modern ESM with async/await patterns throughout
|
||||
- 🧪 **Containerized Testing** — Run your tests in pristine Docker environments
|
||||
- 🏗️ **Smart Docker Builds** — Automatically discover, sort, and build Dockerfiles by dependency
|
||||
- 🚀 **Multi-Registry Push** — Ship to Docker Hub, GitLab, GitHub Container Registry, and more
|
||||
- 🔧 **Multi-Architecture** — Build for `amd64` and `arm64` with Docker Buildx
|
||||
- ⚡ **Zero Config Start** — Works out of the box, scales with your needs
|
||||
|
||||
## Installation
|
||||
|
||||
```bash
|
||||
# Global installation (recommended for CLI usage)
|
||||
npm install -g @git.zone/tsdocker
|
||||
# or for project-local installation
|
||||
|
||||
# Or project-local installation
|
||||
pnpm install --save-dev @git.zone/tsdocker
|
||||
```
|
||||
|
||||
## Quick Start
|
||||
|
||||
### 1. Configure Your Project
|
||||
### 🧪 Run Tests in Docker
|
||||
|
||||
Create an `npmextra.json` file in your project root:
|
||||
The simplest use case — run your tests in a clean container:
|
||||
|
||||
```bash
|
||||
tsdocker
|
||||
```
|
||||
|
||||
This pulls your configured base image, mounts your project, and executes your test command in isolation.
|
||||
|
||||
### 🏗️ Build Docker Images
|
||||
|
||||
Got `Dockerfile` files? Build them all with automatic dependency ordering:
|
||||
|
||||
```bash
|
||||
tsdocker build
|
||||
```
|
||||
|
||||
tsdocker will:
|
||||
1. 🔍 Discover all `Dockerfile*` files in your project
|
||||
2. 📊 Analyze `FROM` dependencies between them
|
||||
3. 🔄 Sort them topologically
|
||||
4. 🏗️ Build each image in the correct order
|
||||
|
||||
### 📤 Push to Registries
|
||||
|
||||
Ship your images to one or all configured registries:
|
||||
|
||||
```bash
|
||||
# Push to all configured registries
|
||||
tsdocker push
|
||||
|
||||
# Push to a specific registry
|
||||
tsdocker push registry.gitlab.com
|
||||
```
|
||||
|
||||
## CLI Commands
|
||||
|
||||
| Command | Description |
|
||||
|---------|-------------|
|
||||
| `tsdocker` | Run tests in a fresh Docker container |
|
||||
| `tsdocker build` | Build all Dockerfiles with dependency ordering |
|
||||
| `tsdocker push [registry]` | Push images to configured registries |
|
||||
| `tsdocker pull <registry>` | Pull images from a specific registry |
|
||||
| `tsdocker test` | Run container test scripts (test_*.sh) |
|
||||
| `tsdocker login` | Authenticate with configured registries |
|
||||
| `tsdocker list` | Display discovered Dockerfiles and their dependencies |
|
||||
| `tsdocker clean --all` | ⚠️ Aggressively clean Docker environment |
|
||||
| `tsdocker vscode` | Launch containerized VS Code in browser |
|
||||
|
||||
## Configuration
|
||||
|
||||
Configure tsdocker in your `package.json` or `npmextra.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"@git.zone/tsdocker": {
|
||||
"baseImage": "node:20",
|
||||
"command": "npm test",
|
||||
"dockerSock": false
|
||||
"dockerSock": false,
|
||||
"registries": ["registry.gitlab.com", "docker.io"],
|
||||
"registryRepoMap": {
|
||||
"registry.gitlab.com": "myorg/myproject"
|
||||
},
|
||||
"buildArgEnvMap": {
|
||||
"NODE_VERSION": "NODE_VERSION"
|
||||
},
|
||||
"platforms": ["linux/amd64", "linux/arm64"],
|
||||
"push": false,
|
||||
"testDir": "./test"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 2. Run Your Tests
|
||||
### Configuration Options
|
||||
|
||||
```bash
|
||||
tsdocker
|
||||
```
|
||||
#### Testing Options (Legacy)
|
||||
|
||||
That's it! tsdocker will:
|
||||
| Option | Type | Description |
|
||||
|--------|------|-------------|
|
||||
| `baseImage` | `string` | Docker image for test environment (default: `hosttoday/ht-docker-node:npmdocker`) |
|
||||
| `command` | `string` | Command to run inside container (default: `npmci npm test`) |
|
||||
| `dockerSock` | `boolean` | Mount Docker socket for DinD scenarios (default: `false`) |
|
||||
|
||||
1. ✅ Verify Docker is available
|
||||
2. 🏗️ Build a test container with your specified base image
|
||||
3. 📂 Mount your project directory
|
||||
4. 🚀 Execute your test command
|
||||
5. 🧹 Clean up automatically
|
||||
#### Build & Push Options
|
||||
|
||||
## Configuration Options
|
||||
| Option | Type | Description |
|
||||
|--------|------|-------------|
|
||||
| `registries` | `string[]` | Registry URLs to push to |
|
||||
| `registryRepoMap` | `object` | Map registries to different repository paths |
|
||||
| `buildArgEnvMap` | `object` | Map Docker build ARGs to environment variables |
|
||||
| `platforms` | `string[]` | Target architectures (default: `["linux/amd64"]`) |
|
||||
| `push` | `boolean` | Auto-push after build (default: `false`) |
|
||||
| `testDir` | `string` | Directory containing test scripts |
|
||||
|
||||
| Option | Type | Description |
|
||||
| ------------ | --------- | ---------------------------------------------------------------------- |
|
||||
| `baseImage` | `string` | Docker image to use as the test environment base |
|
||||
| `command` | `string` | CLI command to execute inside the container |
|
||||
| `dockerSock` | `boolean` | Whether to mount `/var/run/docker.sock` for Docker-in-Docker scenarios |
|
||||
## Registry Authentication
|
||||
|
||||
### Environment Variables
|
||||
|
||||
If you have a `qenv.yml` file in your project, tsdocker automatically loads and injects those environment variables into your test container.
|
||||
```bash
|
||||
# Pipe-delimited format (supports DOCKER_REGISTRY_1 through DOCKER_REGISTRY_10)
|
||||
export DOCKER_REGISTRY_1="registry.gitlab.com|username|password"
|
||||
export DOCKER_REGISTRY_2="docker.io|username|password"
|
||||
|
||||
Example `qenv.yml`:
|
||||
|
||||
```yaml
|
||||
demoKey: demoValue
|
||||
API_KEY: your-key-here
|
||||
# Individual registry format
|
||||
export DOCKER_REGISTRY_URL="registry.gitlab.com"
|
||||
export DOCKER_REGISTRY_USER="username"
|
||||
export DOCKER_REGISTRY_PASSWORD="password"
|
||||
```
|
||||
|
||||
## CLI Commands
|
||||
|
||||
### Standard Test Run
|
||||
### Login Command
|
||||
|
||||
```bash
|
||||
tsdocker
|
||||
tsdocker login
|
||||
```
|
||||
|
||||
Runs your configured test command in a fresh Docker container.
|
||||
|
||||
### Clean Docker Environment
|
||||
|
||||
```bash
|
||||
tsdocker clean --all
|
||||
```
|
||||
|
||||
⚠️ **WARNING**: This aggressively cleans your Docker environment by:
|
||||
|
||||
- Killing all running containers
|
||||
- Removing all stopped containers
|
||||
- Removing dangling images
|
||||
- Removing all images
|
||||
- Removing dangling volumes
|
||||
|
||||
Use with caution!
|
||||
|
||||
### VSCode in Docker
|
||||
|
||||
```bash
|
||||
tsdocker vscode
|
||||
```
|
||||
|
||||
Launches a containerized VS Code instance accessible via browser at `testing-vscode.git.zone:8443`.
|
||||
|
||||
### Speed Test
|
||||
|
||||
```bash
|
||||
tsdocker speedtest
|
||||
```
|
||||
|
||||
Runs a network speed test inside a Docker container.
|
||||
Authenticates with all configured registries.
|
||||
|
||||
## Advanced Usage
|
||||
|
||||
### Docker-in-Docker Testing
|
||||
### 🔀 Multi-Architecture Builds
|
||||
|
||||
If you need to run Docker commands inside your test container (e.g., testing Docker-related tools):
|
||||
Build for multiple platforms using Docker Buildx:
|
||||
|
||||
```json
|
||||
{
|
||||
"@git.zone/tsdocker": {
|
||||
"platforms": ["linux/amd64", "linux/arm64"]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
tsdocker automatically sets up a Buildx builder when multiple platforms are specified.
|
||||
|
||||
### 📦 Dockerfile Naming Conventions
|
||||
|
||||
tsdocker discovers files matching `Dockerfile*`:
|
||||
|
||||
| File Name | Version Tag |
|
||||
|-----------|-------------|
|
||||
| `Dockerfile` | `latest` |
|
||||
| `Dockerfile_v1.0.0` | `v1.0.0` |
|
||||
| `Dockerfile_alpine` | `alpine` |
|
||||
| `Dockerfile_##version##` | Uses `package.json` version |
|
||||
|
||||
### 🔗 Dependency-Aware Builds
|
||||
|
||||
If you have multiple Dockerfiles that depend on each other:
|
||||
|
||||
```dockerfile
|
||||
# Dockerfile_base
|
||||
FROM node:20-alpine
|
||||
RUN npm install -g typescript
|
||||
|
||||
# Dockerfile_app
|
||||
FROM myproject:base
|
||||
COPY . .
|
||||
RUN npm run build
|
||||
```
|
||||
|
||||
tsdocker automatically detects that `Dockerfile_app` depends on `Dockerfile_base` and builds them in the correct order.
|
||||
|
||||
### 🧪 Container Test Scripts
|
||||
|
||||
Create test scripts in your test directory:
|
||||
|
||||
```bash
|
||||
# test/test_latest.sh
|
||||
#!/bin/bash
|
||||
node --version
|
||||
npm --version
|
||||
echo "Container tests passed!"
|
||||
```
|
||||
|
||||
Run with:
|
||||
|
||||
```bash
|
||||
tsdocker test
|
||||
```
|
||||
|
||||
### 🔧 Build Args from Environment
|
||||
|
||||
Pass environment variables as Docker build arguments:
|
||||
|
||||
```json
|
||||
{
|
||||
"@git.zone/tsdocker": {
|
||||
"buildArgEnvMap": {
|
||||
"NPM_TOKEN": "NPM_TOKEN",
|
||||
"NODE_VERSION": "NODE_VERSION"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
```dockerfile
|
||||
ARG NPM_TOKEN
|
||||
ARG NODE_VERSION=20
|
||||
FROM node:${NODE_VERSION}
|
||||
RUN echo "//registry.npmjs.org/:_authToken=${NPM_TOKEN}" > ~/.npmrc
|
||||
```
|
||||
|
||||
### 🐳 Docker-in-Docker Testing
|
||||
|
||||
Test Docker-related tools by mounting the Docker socket:
|
||||
|
||||
```json
|
||||
{
|
||||
"@git.zone/tsdocker": {
|
||||
"baseImage": "docker:latest",
|
||||
"command": "docker run hello-world",
|
||||
"command": "docker version && docker ps",
|
||||
"dockerSock": true
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Setting `"dockerSock": true` mounts the host's Docker socket into the container.
|
||||
### 📋 Listing Dockerfiles
|
||||
|
||||
### Custom Base Images
|
||||
Inspect your project's Dockerfiles and their relationships:
|
||||
|
||||
You can use any Docker image as your base:
|
||||
```bash
|
||||
tsdocker list
|
||||
```
|
||||
|
||||
Output:
|
||||
```
|
||||
Discovered Dockerfiles:
|
||||
========================
|
||||
|
||||
1. Dockerfile_base
|
||||
Tag: myproject:base
|
||||
Base Image: node:20-alpine
|
||||
Version: base
|
||||
|
||||
2. Dockerfile_app
|
||||
Tag: myproject:app
|
||||
Base Image: myproject:base
|
||||
Version: app
|
||||
Depends on: myproject:base
|
||||
```
|
||||
|
||||
### 🗺️ Registry Repo Mapping
|
||||
|
||||
Use different repository names for different registries:
|
||||
|
||||
```json
|
||||
{
|
||||
"@git.zone/tsdocker": {
|
||||
"registries": ["registry.gitlab.com", "docker.io"],
|
||||
"registryRepoMap": {
|
||||
"registry.gitlab.com": "mygroup/myproject",
|
||||
"docker.io": "myuser/myproject"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Environment Variables
|
||||
|
||||
### qenv Integration
|
||||
|
||||
tsdocker automatically loads environment variables from `qenv.yml`:
|
||||
|
||||
```yaml
|
||||
# qenv.yml
|
||||
API_KEY: your-api-key
|
||||
DATABASE_URL: postgres://localhost/test
|
||||
```
|
||||
|
||||
These are injected into your test container automatically.
|
||||
|
||||
## Examples
|
||||
|
||||
### Basic Test Configuration
|
||||
|
||||
```json
|
||||
{
|
||||
"@git.zone/tsdocker": {
|
||||
"baseImage": "node:20",
|
||||
"command": "npm test"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Full Production Setup
|
||||
|
||||
```json
|
||||
{
|
||||
"@git.zone/tsdocker": {
|
||||
"baseImage": "node:20-alpine",
|
||||
"command": "npm test"
|
||||
"command": "pnpm test",
|
||||
"registries": ["registry.gitlab.com", "ghcr.io", "docker.io"],
|
||||
"registryRepoMap": {
|
||||
"registry.gitlab.com": "myorg/myapp",
|
||||
"ghcr.io": "myorg/myapp",
|
||||
"docker.io": "myuser/myapp"
|
||||
},
|
||||
"buildArgEnvMap": {
|
||||
"NPM_TOKEN": "NPM_TOKEN"
|
||||
},
|
||||
"platforms": ["linux/amd64", "linux/arm64"],
|
||||
"testDir": "./docker-tests"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Popular choices:
|
||||
### CI/CD Integration
|
||||
|
||||
- `node:20` — Official Node.js images
|
||||
- `node:20-alpine` — Lightweight Alpine-based images
|
||||
- `node:lts` — Long-term support Node.js version
|
||||
```yaml
|
||||
# .gitlab-ci.yml
|
||||
build:
|
||||
stage: build
|
||||
script:
|
||||
- npm install -g @git.zone/tsdocker
|
||||
- tsdocker build
|
||||
- tsdocker push
|
||||
|
||||
### CI Integration
|
||||
|
||||
tsdocker automatically detects CI environments (via `CI=true` env var) and adjusts behavior:
|
||||
|
||||
- Copies project files into container in CI (instead of mounting)
|
||||
- Optimizes for CI execution patterns
|
||||
|
||||
## Why tsdocker?
|
||||
|
||||
### The Problem
|
||||
|
||||
Local development environments drift over time. You might have:
|
||||
|
||||
- Stale global packages
|
||||
- Modified system configurations
|
||||
- Cached dependencies
|
||||
- Different Node.js versions
|
||||
|
||||
Your tests pass locally but fail in CI — or vice versa.
|
||||
|
||||
### The Solution
|
||||
|
||||
tsdocker ensures every test run happens in a **clean, reproducible environment**, just like your CI pipeline. This means:
|
||||
|
||||
✅ Consistent behavior between local and CI
|
||||
✅ No dependency pollution between test runs
|
||||
✅ Easy cross-platform testing
|
||||
✅ Reproducible bug investigations
|
||||
|
||||
## TypeScript Usage
|
||||
|
||||
tsdocker is built with TypeScript and provides full type definitions:
|
||||
|
||||
```typescript
|
||||
import type { IConfig } from '@git.zone/tsdocker/dist_ts/tsdocker.config.js';
|
||||
|
||||
const config: IConfig = {
|
||||
baseImage: 'node:20',
|
||||
command: 'npm test',
|
||||
dockerSock: false,
|
||||
keyValueObject: {
|
||||
NODE_ENV: 'test',
|
||||
},
|
||||
};
|
||||
# GitHub Actions
|
||||
- name: Build and Push
|
||||
run: |
|
||||
npm install -g @git.zone/tsdocker
|
||||
tsdocker login
|
||||
tsdocker build
|
||||
tsdocker push
|
||||
env:
|
||||
DOCKER_REGISTRY_1: "ghcr.io|${{ github.actor }}|${{ secrets.GITHUB_TOKEN }}"
|
||||
```
|
||||
|
||||
## Requirements
|
||||
|
||||
- **Docker** — Docker Engine or Docker Desktop must be installed
- **Node.js** — Version 18 or higher (ESM support required)
|
||||
- **Docker Buildx** — Required for multi-architecture builds (included in Docker Desktop)
|
||||
|
||||
## How It Works
|
||||
## Why tsdocker?
|
||||
|
||||
Under the hood, tsdocker:
|
||||
### 🎯 The Problem
|
||||
|
||||
1. 📋 Reads your `npmextra.json` configuration
|
||||
2. 🔍 Optionally loads environment variables from `qenv.yml`
|
||||
3. 🐳 Generates a temporary Dockerfile
|
||||
4. 🏗️ Builds a Docker image with your base image
|
||||
5. 📦 Mounts your project directory (unless in CI)
|
||||
6. ▶️ Runs your test command inside the container
|
||||
7. 📊 Captures the exit code
|
||||
8. 🧹 Cleans up containers and images
|
||||
9. ✅ Exits with the same code as your tests
|
||||
Managing Docker workflows manually is tedious:
|
||||
- Remembering build order for dependent images
|
||||
- Pushing to multiple registries with different credentials
|
||||
- Setting up Buildx for multi-arch builds
|
||||
- Ensuring consistent test environments
|
||||
|
||||
### ✨ The Solution
|
||||
|
||||
tsdocker automates the entire workflow:
|
||||
- **One command** to build all images in dependency order
|
||||
- **One command** to push to all registries
|
||||
- **Automatic** Buildx setup for multi-platform builds
|
||||
- **Consistent** containerized test environments
|
||||
|
||||
## TypeScript API
|
||||
|
||||
tsdocker exposes its types for programmatic use:
|
||||
|
||||
```typescript
|
||||
import type { ITsDockerConfig } from '@git.zone/tsdocker/dist_ts/interfaces/index.js';
|
||||
import { TsDockerManager } from '@git.zone/tsdocker/dist_ts/classes.tsdockermanager.js';
|
||||
|
||||
const config: ITsDockerConfig = {
|
||||
baseImage: 'node:20',
|
||||
command: 'npm test',
|
||||
dockerSock: false,
|
||||
keyValueObject: {},
|
||||
registries: ['docker.io'],
|
||||
platforms: ['linux/amd64'],
|
||||
};
|
||||
|
||||
const manager = new TsDockerManager(config);
|
||||
await manager.prepare();
|
||||
await manager.build();
|
||||
await manager.push();
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### "docker not found"

Ensure Docker is installed and the `docker` command is in your PATH:
|
||||
|
||||
```bash
|
||||
docker --version
|
||||
```
|
||||
|
||||
### Tests fail in container but work locally
|
||||
### Multi-arch build fails
|
||||
|
||||
This often indicates environment-specific issues. Check:
|
||||
|
||||
- Are all dependencies in `package.json`? (not relying on global packages)
|
||||
- Does your code have hardcoded paths?
|
||||
- Are environment variables set correctly?
|
||||
|
||||
### Permission errors with docker.sock
|
||||
|
||||
If using `dockerSock: true`, ensure your user has permissions to access `/var/run/docker.sock`:
|
||||
Make sure Docker Buildx is available:
|
||||
|
||||
```bash
|
||||
sudo usermod -aG docker $USER
|
||||
# Then log out and back in
|
||||
docker buildx version
|
||||
docker buildx create --use
|
||||
```
|
||||
|
||||
## Examples
|
||||
### Registry authentication fails
|
||||
|
||||
### Basic npm test
|
||||
Check your environment variables are set correctly:
|
||||
|
||||
```json
|
||||
{
|
||||
"@git.zone/tsdocker": {
|
||||
"baseImage": "node:20",
|
||||
"command": "npm test"
|
||||
}
|
||||
}
|
||||
```bash
|
||||
echo $DOCKER_REGISTRY_1
|
||||
tsdocker login
|
||||
```
|
||||
|
||||
### Running pnpm tests
|
||||
### Circular dependency detected
|
||||
|
||||
```json
|
||||
{
|
||||
"@git.zone/tsdocker": {
|
||||
"baseImage": "node:20",
|
||||
"command": "corepack enable && pnpm install && pnpm test"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Testing Docker-based tools
|
||||
|
||||
```json
|
||||
{
|
||||
"@git.zone/tsdocker": {
|
||||
"baseImage": "docker:latest",
|
||||
"command": "sh -c 'docker version && docker ps'",
|
||||
"dockerSock": true
|
||||
}
|
||||
}
|
||||
```
|
||||
Review your Dockerfiles' `FROM` statements — you have images depending on each other in a loop.
|
||||
|
||||
## Performance Tips
|
||||
|
||||
🚀 **Use specific base images**: `node:20-alpine` is much faster to pull than `node:latest`
|
||||
🚀 **Layer caching**: Docker caches image layers — your base image only downloads once
|
||||
🚀 **Prune regularly**: Run `docker system prune` periodically to reclaim disk space
|
||||
🚀 **Use specific tags**: `node:20-alpine` is smaller and faster than `node:latest`
|
||||
|
||||
## Migration from legacy npmdocker
|
||||
🚀 **Leverage caching**: Docker layers are cached — your builds get faster over time
|
||||
|
||||
This package was previously published under the `npmdocker` name. It is now available as `@git.zone/tsdocker` with modernized ESM support and updated dependencies.
|
||||
🚀 **Prune regularly**: `docker system prune` reclaims disk space
|
||||
|
||||
Key changes:
|
||||
- Configuration key changed from `npmdocker` to `@git.zone/tsdocker` in `npmextra.json`
|
||||
- CLI command is now `tsdocker` instead of `npmdocker`
|
||||
- Full ESM support with `.js` extensions in imports
|
||||
🚀 **Use .dockerignore**: Exclude `node_modules`, `.git`, etc. from build context
|
||||
|
||||
## Migration from Legacy
|
||||
|
||||
Previously published as `npmdocker`, now `@git.zone/tsdocker`:
|
||||
|
||||
| Old | New |
|
||||
|-----|-----|
|
||||
| `npmdocker` command | `tsdocker` command |
|
||||
| `"npmdocker"` config key | `"@git.zone/tsdocker"` config key |
|
||||
| CommonJS | ESM with `.js` imports |
|
||||
|
||||
## License and Legal Information
|
||||
|
||||
|
||||
@@ -3,6 +3,6 @@
|
||||
*/
|
||||
export const commitinfo = {
|
||||
name: '@git.zone/tsdocker',
|
||||
version: '1.3.0',
|
||||
version: '1.14.0',
|
||||
description: 'develop npm modules cross platform with docker'
|
||||
}
|
||||
|
||||
69
ts/classes.dockercontext.ts
Normal file
69
ts/classes.dockercontext.ts
Normal file
@@ -0,0 +1,69 @@
|
||||
import * as plugins from './tsdocker.plugins.js';
|
||||
import { logger } from './tsdocker.logging.js';
|
||||
import type { IDockerContextInfo } from './interfaces/index.js';
|
||||
|
||||
const smartshellInstance = new plugins.smartshell.Smartshell({ executor: 'bash' });
|
||||
|
||||
export class DockerContext {
|
||||
public contextInfo: IDockerContextInfo | null = null;
|
||||
|
||||
/** Sets DOCKER_CONTEXT env var for explicit context selection. */
|
||||
public setContext(contextName: string): void {
|
||||
process.env.DOCKER_CONTEXT = contextName;
|
||||
logger.log('info', `Docker context explicitly set to: ${contextName}`);
|
||||
}
|
||||
|
||||
/** Detects current Docker context via `docker context inspect` and rootless via `docker info`. */
|
||||
public async detect(): Promise<IDockerContextInfo> {
|
||||
let name = 'default';
|
||||
let endpoint = 'unknown';
|
||||
|
||||
const contextResult = await smartshellInstance.execSilent(
|
||||
`docker context inspect --format '{{json .}}'`
|
||||
);
|
||||
if (contextResult.exitCode === 0 && contextResult.stdout) {
|
||||
try {
|
||||
const parsed = JSON.parse(contextResult.stdout.trim());
|
||||
const data = Array.isArray(parsed) ? parsed[0] : parsed;
|
||||
name = data.Name || 'default';
|
||||
endpoint = data.Endpoints?.docker?.Host || 'unknown';
|
||||
} catch { /* fallback to defaults */ }
|
||||
}
|
||||
|
||||
let isRootless = false;
|
||||
const infoResult = await smartshellInstance.execSilent(
|
||||
`docker info --format '{{json .SecurityOptions}}'`
|
||||
);
|
||||
if (infoResult.exitCode === 0 && infoResult.stdout) {
|
||||
isRootless = infoResult.stdout.includes('name=rootless');
|
||||
}
|
||||
|
||||
this.contextInfo = { name, endpoint, isRootless, dockerHost: process.env.DOCKER_HOST };
|
||||
return this.contextInfo;
|
||||
}
|
||||
|
||||
/** Logs context info prominently. */
|
||||
public logContextInfo(): void {
|
||||
if (!this.contextInfo) return;
|
||||
const { name, endpoint, isRootless, dockerHost } = this.contextInfo;
|
||||
logger.log('info', '=== DOCKER CONTEXT ===');
|
||||
logger.log('info', `Context: ${name}`);
|
||||
logger.log('info', `Endpoint: ${endpoint}`);
|
||||
if (dockerHost) logger.log('info', `DOCKER_HOST: ${dockerHost}`);
|
||||
logger.log('info', `Rootless: ${isRootless ? 'yes' : 'no'}`);
|
||||
}
|
||||
|
||||
/** Emits rootless-specific warnings. */
|
||||
public logRootlessWarnings(): void {
|
||||
if (!this.contextInfo?.isRootless) return;
|
||||
logger.log('warn', '[rootless] network=host in buildx is namespaced by rootlesskit');
|
||||
logger.log('warn', '[rootless] Local registry may have localhost vs 127.0.0.1 resolution quirks');
|
||||
}
|
||||
|
||||
/** Returns context-aware builder name: tsdocker-builder-<context> */
|
||||
public getBuilderName(): string {
|
||||
const contextName = this.contextInfo?.name || 'default';
|
||||
const sanitized = contextName.replace(/[^a-zA-Z0-9_-]/g, '-');
|
||||
return `tsdocker-builder-${sanitized}`;
|
||||
}
|
||||
}
|
||||
743
ts/classes.dockerfile.ts
Normal file
743
ts/classes.dockerfile.ts
Normal file
@@ -0,0 +1,743 @@
|
||||
import * as plugins from './tsdocker.plugins.js';
|
||||
import * as paths from './tsdocker.paths.js';
|
||||
import { logger, formatDuration } from './tsdocker.logging.js';
|
||||
import { DockerRegistry } from './classes.dockerregistry.js';
|
||||
import type { IDockerfileOptions, ITsDockerConfig, IBuildCommandOptions } from './interfaces/index.js';
|
||||
import type { TsDockerManager } from './classes.tsdockermanager.js';
|
||||
import * as fs from 'fs';
|
||||
|
||||
// Shared shell runner for all docker CLI invocations in this module.
const smartshellInstance = new plugins.smartshell.Smartshell({
  executor: 'bash',
});

// Ephemeral registry:2 container used as a bridge so buildx builds can consume
// locally built base images (see startLocalRegistry / pushToLocalRegistry).
const LOCAL_REGISTRY_PORT = 5234;
const LOCAL_REGISTRY_HOST = `localhost:${LOCAL_REGISTRY_PORT}`;
const LOCAL_REGISTRY_CONTAINER = 'tsdocker-local-registry';
||||
|
||||
/**
|
||||
* Class Dockerfile represents a Dockerfile on disk
|
||||
*/
|
||||
export class Dockerfile {
|
||||
// STATIC METHODS
|
||||
|
||||
/**
|
||||
* Creates instances of class Dockerfile for all Dockerfiles in cwd
|
||||
*/
|
||||
public static async readDockerfiles(managerRef: TsDockerManager): Promise<Dockerfile[]> {
|
||||
const entries = await plugins.smartfs.directory(paths.cwd).filter('Dockerfile*').list();
|
||||
const fileTree = entries
|
||||
.filter(entry => entry.isFile)
|
||||
.map(entry => plugins.path.join(paths.cwd, entry.name));
|
||||
|
||||
const readDockerfilesArray: Dockerfile[] = [];
|
||||
logger.log('info', `found ${fileTree.length} Dockerfile(s):`);
|
||||
for (const filePath of fileTree) {
|
||||
logger.log('info', ` ${plugins.path.basename(filePath)}`);
|
||||
}
|
||||
|
||||
for (const dockerfilePath of fileTree) {
|
||||
const myDockerfile = new Dockerfile(managerRef, {
|
||||
filePath: dockerfilePath,
|
||||
read: true,
|
||||
});
|
||||
readDockerfilesArray.push(myDockerfile);
|
||||
}
|
||||
|
||||
return readDockerfilesArray;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sorts Dockerfiles into a build order based on dependencies (topological sort)
|
||||
*/
|
||||
public static async sortDockerfiles(dockerfiles: Dockerfile[]): Promise<Dockerfile[]> {
|
||||
logger.log('info', 'Sorting Dockerfiles based on dependencies...');
|
||||
|
||||
// Map from cleanTag to Dockerfile instance for quick lookup
|
||||
const tagToDockerfile = new Map<string, Dockerfile>();
|
||||
dockerfiles.forEach((dockerfile) => {
|
||||
tagToDockerfile.set(dockerfile.cleanTag, dockerfile);
|
||||
});
|
||||
|
||||
// Build the dependency graph
|
||||
const graph = new Map<Dockerfile, Dockerfile[]>();
|
||||
dockerfiles.forEach((dockerfile) => {
|
||||
const dependencies: Dockerfile[] = [];
|
||||
const baseImage = dockerfile.baseImage;
|
||||
|
||||
// Extract repo:version from baseImage for comparison with cleanTag
|
||||
// baseImage may include a registry prefix (e.g., "host.today/repo:version")
|
||||
// but cleanTag is just "repo:version", so we strip the registry prefix
|
||||
const baseImageKey = Dockerfile.extractRepoVersion(baseImage);
|
||||
|
||||
// Check if the baseImage is among the local Dockerfiles
|
||||
if (tagToDockerfile.has(baseImageKey)) {
|
||||
const baseDockerfile = tagToDockerfile.get(baseImageKey)!;
|
||||
dependencies.push(baseDockerfile);
|
||||
dockerfile.localBaseImageDependent = true;
|
||||
dockerfile.localBaseDockerfile = baseDockerfile;
|
||||
}
|
||||
|
||||
graph.set(dockerfile, dependencies);
|
||||
});
|
||||
|
||||
// Perform topological sort
|
||||
const sortedDockerfiles: Dockerfile[] = [];
|
||||
const visited = new Set<Dockerfile>();
|
||||
const tempMarked = new Set<Dockerfile>();
|
||||
|
||||
const visit = (dockerfile: Dockerfile) => {
|
||||
if (tempMarked.has(dockerfile)) {
|
||||
throw new Error(`Circular dependency detected involving ${dockerfile.cleanTag}`);
|
||||
}
|
||||
if (!visited.has(dockerfile)) {
|
||||
tempMarked.add(dockerfile);
|
||||
const dependencies = graph.get(dockerfile) || [];
|
||||
dependencies.forEach((dep) => visit(dep));
|
||||
tempMarked.delete(dockerfile);
|
||||
visited.add(dockerfile);
|
||||
sortedDockerfiles.push(dockerfile);
|
||||
}
|
||||
};
|
||||
|
||||
try {
|
||||
dockerfiles.forEach((dockerfile) => {
|
||||
if (!visited.has(dockerfile)) {
|
||||
visit(dockerfile);
|
||||
}
|
||||
});
|
||||
} catch (error) {
|
||||
logger.log('error', (error as Error).message);
|
||||
throw error;
|
||||
}
|
||||
|
||||
// Log the sorted order
|
||||
sortedDockerfiles.forEach((dockerfile, index) => {
|
||||
logger.log(
|
||||
'info',
|
||||
`Build order ${index + 1}: ${dockerfile.cleanTag} with base image ${dockerfile.baseImage}`
|
||||
);
|
||||
});
|
||||
|
||||
return sortedDockerfiles;
|
||||
}
|
||||
|
||||
/**
|
||||
* Maps local Dockerfiles dependencies to the corresponding Dockerfile class instances
|
||||
*/
|
||||
public static async mapDockerfiles(sortedDockerfileArray: Dockerfile[]): Promise<Dockerfile[]> {
|
||||
sortedDockerfileArray.forEach((dockerfileArg) => {
|
||||
if (dockerfileArg.localBaseImageDependent) {
|
||||
// Extract repo:version from baseImage for comparison with cleanTag
|
||||
const baseImageKey = Dockerfile.extractRepoVersion(dockerfileArg.baseImage);
|
||||
sortedDockerfileArray.forEach((dockfile2: Dockerfile) => {
|
||||
if (dockfile2.cleanTag === baseImageKey) {
|
||||
dockerfileArg.localBaseDockerfile = dockfile2;
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
return sortedDockerfileArray;
|
||||
}
|
||||
|
||||
/** Determines if a local registry is needed for buildx dependency resolution. */
|
||||
public static needsLocalRegistry(
|
||||
dockerfiles: Dockerfile[],
|
||||
options?: { platform?: string },
|
||||
): boolean {
|
||||
const hasLocalDeps = dockerfiles.some(df => df.localBaseImageDependent);
|
||||
if (!hasLocalDeps) return false;
|
||||
const config = dockerfiles[0]?.managerRef?.config;
|
||||
return !!options?.platform || !!(config?.platforms && config.platforms.length > 1);
|
||||
}
|
||||
|
||||
/** Starts a temporary registry:2 container on port 5234. */
|
||||
public static async startLocalRegistry(isRootless?: boolean): Promise<void> {
|
||||
await smartshellInstance.execSilent(
|
||||
`docker rm -f ${LOCAL_REGISTRY_CONTAINER} 2>/dev/null || true`
|
||||
);
|
||||
const result = await smartshellInstance.execSilent(
|
||||
`docker run -d --name ${LOCAL_REGISTRY_CONTAINER} -p ${LOCAL_REGISTRY_PORT}:5000 registry:2`
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Failed to start local registry: ${result.stderr || result.stdout}`);
|
||||
}
|
||||
// registry:2 starts near-instantly; brief wait for readiness
|
||||
await new Promise(resolve => setTimeout(resolve, 1000));
|
||||
logger.log('info', `Started local registry at ${LOCAL_REGISTRY_HOST} (buildx dependency bridge)`);
|
||||
if (isRootless) {
|
||||
logger.log('warn', `[rootless] Registry on port ${LOCAL_REGISTRY_PORT} — if buildx cannot reach localhost:${LOCAL_REGISTRY_PORT}, try 127.0.0.1:${LOCAL_REGISTRY_PORT}`);
|
||||
}
|
||||
}
|
||||
|
||||
/** Stops and removes the temporary local registry container. */
|
||||
public static async stopLocalRegistry(): Promise<void> {
|
||||
await smartshellInstance.execSilent(
|
||||
`docker rm -f ${LOCAL_REGISTRY_CONTAINER} 2>/dev/null || true`
|
||||
);
|
||||
logger.log('info', 'Stopped local registry');
|
||||
}
|
||||
|
||||
/** Pushes a built image to the local registry for buildx consumption. */
|
||||
public static async pushToLocalRegistry(dockerfile: Dockerfile): Promise<void> {
|
||||
const registryTag = `${LOCAL_REGISTRY_HOST}/${dockerfile.buildTag}`;
|
||||
await smartshellInstance.execSilent(`docker tag ${dockerfile.buildTag} ${registryTag}`);
|
||||
const result = await smartshellInstance.execSilent(`docker push ${registryTag}`);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Failed to push to local registry: ${result.stderr || result.stdout}`);
|
||||
}
|
||||
dockerfile.localRegistryTag = registryTag;
|
||||
logger.log('info', `Pushed ${dockerfile.buildTag} to local registry as ${registryTag}`);
|
||||
}
|
||||
|
||||
/**
|
||||
* Groups topologically sorted Dockerfiles into dependency levels.
|
||||
* Level 0 = no local dependencies; level N = depends on something in level N-1.
|
||||
* Images within the same level are independent and can build in parallel.
|
||||
*/
|
||||
public static computeLevels(sortedDockerfiles: Dockerfile[]): Dockerfile[][] {
|
||||
const levelMap = new Map<Dockerfile, number>();
|
||||
for (const df of sortedDockerfiles) {
|
||||
if (!df.localBaseImageDependent || !df.localBaseDockerfile) {
|
||||
levelMap.set(df, 0);
|
||||
} else {
|
||||
const depLevel = levelMap.get(df.localBaseDockerfile) ?? 0;
|
||||
levelMap.set(df, depLevel + 1);
|
||||
}
|
||||
}
|
||||
const maxLevel = Math.max(...Array.from(levelMap.values()), 0);
|
||||
const levels: Dockerfile[][] = [];
|
||||
for (let l = 0; l <= maxLevel; l++) {
|
||||
levels.push(sortedDockerfiles.filter(df => levelMap.get(df) === l));
|
||||
}
|
||||
return levels;
|
||||
}
|
||||
|
||||
/**
|
||||
* Runs async tasks with bounded concurrency (worker-pool pattern).
|
||||
* Fast-fail: if any task throws, Promise.all rejects immediately.
|
||||
*/
|
||||
public static async runWithConcurrency<T>(
|
||||
tasks: (() => Promise<T>)[],
|
||||
concurrency: number,
|
||||
): Promise<T[]> {
|
||||
const results: T[] = new Array(tasks.length);
|
||||
let nextIndex = 0;
|
||||
async function worker(): Promise<void> {
|
||||
while (true) {
|
||||
const idx = nextIndex++;
|
||||
if (idx >= tasks.length) break;
|
||||
results[idx] = await tasks[idx]();
|
||||
}
|
||||
}
|
||||
const workers = Array.from(
|
||||
{ length: Math.min(concurrency, tasks.length) },
|
||||
() => worker(),
|
||||
);
|
||||
await Promise.all(workers);
|
||||
return results;
|
||||
}
|
||||
|
||||
  /**
   * Builds the corresponding real docker image for each Dockerfile class instance.
   *
   * Two modes:
   * - sequential (default): builds in topological order, one image at a time;
   * - parallel (options.parallel): groups the sorted list into dependency
   *   levels via computeLevels() and builds each level's images concurrently
   *   with bounded concurrency (options.parallelConcurrency, default 4).
   *
   * When buildx is in play (platform override or multi-platform config) and
   * there are local base-image dependencies, a temporary local registry is
   * started so buildx can pull sibling images; it is always torn down in the
   * `finally` block, even on build failure.
   *
   * Returns the same (sorted) array that was passed in.
   */
  public static async buildDockerfiles(
    sortedArrayArg: Dockerfile[],
    options?: { platform?: string; timeout?: number; noCache?: boolean; verbose?: boolean; isRootless?: boolean; parallel?: boolean; parallelConcurrency?: number },
  ): Promise<Dockerfile[]> {
    const total = sortedArrayArg.length;
    const overallStart = Date.now();
    const useRegistry = Dockerfile.needsLocalRegistry(sortedArrayArg, options);

    if (useRegistry) {
      await Dockerfile.startLocalRegistry(options?.isRootless);
    }

    try {
      if (options?.parallel) {
        // === PARALLEL MODE: build independent images concurrently within each level ===
        const concurrency = options.parallelConcurrency ?? 4;
        const levels = Dockerfile.computeLevels(sortedArrayArg);

        logger.log('info', `Parallel build: ${levels.length} level(s), concurrency ${concurrency}`);
        for (let l = 0; l < levels.length; l++) {
          const level = levels[l];
          logger.log('info', `  Level ${l} (${level.length}): ${level.map(df => df.cleanTag).join(', ')}`);
        }

        let built = 0;
        for (let l = 0; l < levels.length; l++) {
          const level = levels[l];
          logger.log('info', `--- Level ${l}: building ${level.length} image(s) in parallel ---`);

          // Progress index is claimed at task-creation time, so numbering
          // reflects scheduling order rather than completion order.
          const tasks = level.map((df) => {
            const myIndex = ++built;
            return async () => {
              const progress = `(${myIndex}/${total})`;
              logger.log('info', `${progress} Building ${df.cleanTag}...`);
              const elapsed = await df.build(options);
              logger.log('ok', `${progress} Built ${df.cleanTag} in ${formatDuration(elapsed)}`);
              return df;
            };
          });

          await Dockerfile.runWithConcurrency(tasks, concurrency);

          // After the entire level completes, tag + push for dependency resolution
          // (the next level's builds FROM these images, so this must finish first).
          for (const df of level) {
            const dependentBaseImages = new Set<string>();
            for (const other of sortedArrayArg) {
              if (other.localBaseDockerfile === df && other.baseImage !== df.buildTag) {
                dependentBaseImages.add(other.baseImage);
              }
            }
            for (const fullTag of dependentBaseImages) {
              logger.log('info', `Tagging ${df.buildTag} as ${fullTag} for local dependency resolution`);
              await smartshellInstance.exec(`docker tag ${df.buildTag} ${fullTag}`);
            }
            if (useRegistry && sortedArrayArg.some(other => other.localBaseDockerfile === df)) {
              await Dockerfile.pushToLocalRegistry(df);
            }
          }
        }
      } else {
        // === SEQUENTIAL MODE: build one at a time ===
        for (let i = 0; i < total; i++) {
          const dockerfileArg = sortedArrayArg[i];
          const progress = `(${i + 1}/${total})`;
          logger.log('info', `${progress} Building ${dockerfileArg.cleanTag}...`);

          const elapsed = await dockerfileArg.build(options);
          logger.log('ok', `${progress} Built ${dockerfileArg.cleanTag} in ${formatDuration(elapsed)}`);

          // Tag in host daemon for standard docker build compatibility
          const dependentBaseImages = new Set<string>();
          for (const other of sortedArrayArg) {
            if (other.localBaseDockerfile === dockerfileArg && other.baseImage !== dockerfileArg.buildTag) {
              dependentBaseImages.add(other.baseImage);
            }
          }
          for (const fullTag of dependentBaseImages) {
            logger.log('info', `Tagging ${dockerfileArg.buildTag} as ${fullTag} for local dependency resolution`);
            await smartshellInstance.exec(`docker tag ${dockerfileArg.buildTag} ${fullTag}`);
          }

          // Push to local registry for buildx dependency resolution
          if (useRegistry && sortedArrayArg.some(other => other.localBaseDockerfile === dockerfileArg)) {
            await Dockerfile.pushToLocalRegistry(dockerfileArg);
          }
        }
      }
    } finally {
      // Always tear down the registry bridge, even when a build threw.
      if (useRegistry) {
        await Dockerfile.stopLocalRegistry();
      }
    }

    logger.log('info', `Total build time: ${formatDuration(Date.now() - overallStart)}`);
    return sortedArrayArg;
  }
|
||||
|
||||
/**
|
||||
* Tests all Dockerfiles by calling Dockerfile.test()
|
||||
*/
|
||||
public static async testDockerfiles(sortedArrayArg: Dockerfile[]): Promise<Dockerfile[]> {
|
||||
const total = sortedArrayArg.length;
|
||||
const overallStart = Date.now();
|
||||
|
||||
for (let i = 0; i < total; i++) {
|
||||
const dockerfileArg = sortedArrayArg[i];
|
||||
const progress = `(${i + 1}/${total})`;
|
||||
logger.log('info', `${progress} Testing ${dockerfileArg.cleanTag}...`);
|
||||
|
||||
const elapsed = await dockerfileArg.test();
|
||||
logger.log('ok', `${progress} Tested ${dockerfileArg.cleanTag} in ${formatDuration(elapsed)}`);
|
||||
}
|
||||
|
||||
logger.log('info', `Total test time: ${formatDuration(Date.now() - overallStart)}`);
|
||||
return sortedArrayArg;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a version for a docker file
|
||||
* Dockerfile_latest -> latest
|
||||
* Dockerfile_v1.0.0 -> v1.0.0
|
||||
* Dockerfile -> latest
|
||||
*/
|
||||
public static dockerFileVersion(
|
||||
dockerfileInstanceArg: Dockerfile,
|
||||
dockerfileNameArg: string
|
||||
): string {
|
||||
let versionString: string;
|
||||
const versionRegex = /Dockerfile_(.+)$/;
|
||||
const regexResultArray = versionRegex.exec(dockerfileNameArg);
|
||||
if (regexResultArray && regexResultArray.length === 2) {
|
||||
versionString = regexResultArray[1];
|
||||
} else {
|
||||
versionString = 'latest';
|
||||
}
|
||||
|
||||
// Replace ##version## placeholder with actual package version if available
|
||||
if (dockerfileInstanceArg.managerRef?.projectInfo?.npm?.version) {
|
||||
versionString = versionString.replace(
|
||||
'##version##',
|
||||
dockerfileInstanceArg.managerRef.projectInfo.npm.version
|
||||
);
|
||||
}
|
||||
|
||||
return versionString;
|
||||
}
|
||||
|
||||
/**
|
||||
* Extracts the base image from a Dockerfile content
|
||||
* Handles ARG substitution for variable base images
|
||||
*/
|
||||
public static dockerBaseImage(dockerfileContentArg: string): string {
|
||||
const lines = dockerfileContentArg.split(/\r?\n/);
|
||||
const args: { [key: string]: string } = {};
|
||||
|
||||
for (const line of lines) {
|
||||
const trimmedLine = line.trim();
|
||||
|
||||
// Skip empty lines and comments
|
||||
if (trimmedLine === '' || trimmedLine.startsWith('#')) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Match ARG instructions
|
||||
const argMatch = trimmedLine.match(/^ARG\s+([^\s=]+)(?:=(.*))?$/i);
|
||||
if (argMatch) {
|
||||
const argName = argMatch[1];
|
||||
const argValue = argMatch[2] !== undefined ? argMatch[2] : process.env[argName] || '';
|
||||
args[argName] = argValue;
|
||||
continue;
|
||||
}
|
||||
|
||||
// Match FROM instructions
|
||||
const fromMatch = trimmedLine.match(/^FROM\s+(.+?)(?:\s+AS\s+[^\s]+)?$/i);
|
||||
if (fromMatch) {
|
||||
let baseImage = fromMatch[1].trim();
|
||||
|
||||
// Substitute variables in the base image name
|
||||
baseImage = Dockerfile.substituteVariables(baseImage, args);
|
||||
|
||||
return baseImage;
|
||||
}
|
||||
}
|
||||
|
||||
throw new Error('No FROM instruction found in Dockerfile');
|
||||
}
|
||||
|
||||
/**
|
||||
* Substitutes variables in a string, supporting default values like ${VAR:-default}
|
||||
*/
|
||||
private static substituteVariables(str: string, vars: { [key: string]: string }): string {
|
||||
return str.replace(/\${([^}:]+)(:-([^}]+))?}/g, (_, varName, __, defaultValue) => {
|
||||
if (vars[varName] !== undefined) {
|
||||
return vars[varName];
|
||||
} else if (defaultValue !== undefined) {
|
||||
return defaultValue;
|
||||
} else {
|
||||
return '';
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Extracts the repo:version part from a full image reference, stripping any registry prefix.
|
||||
* Examples:
|
||||
* "registry.example.com/repo:version" -> "repo:version"
|
||||
* "repo:version" -> "repo:version"
|
||||
* "host.today/ht-docker-node:npmci" -> "ht-docker-node:npmci"
|
||||
*/
|
||||
private static extractRepoVersion(imageRef: string): string {
|
||||
const parts = imageRef.split('/');
|
||||
if (parts.length === 1) {
|
||||
// No registry prefix: "repo:version"
|
||||
return imageRef;
|
||||
}
|
||||
|
||||
// Check if first part looks like a registry (contains '.' or ':' or is 'localhost')
|
||||
const firstPart = parts[0];
|
||||
const looksLikeRegistry =
|
||||
firstPart.includes('.') || firstPart.includes(':') || firstPart === 'localhost';
|
||||
|
||||
if (looksLikeRegistry) {
|
||||
// Strip registry: "registry.example.com/repo:version" -> "repo:version"
|
||||
return parts.slice(1).join('/');
|
||||
}
|
||||
|
||||
// No registry prefix, could be "org/repo:version"
|
||||
return imageRef;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the docker tag string for a given registry and repo
|
||||
*/
|
||||
public static getDockerTagString(
|
||||
managerRef: TsDockerManager,
|
||||
registryArg: string,
|
||||
repoArg: string,
|
||||
versionArg: string,
|
||||
suffixArg?: string
|
||||
): string {
|
||||
// Determine whether the repo should be mapped according to the registry
|
||||
const config = managerRef.config;
|
||||
const mappedRepo = config.registryRepoMap?.[registryArg];
|
||||
const repo = mappedRepo || repoArg;
|
||||
|
||||
// Determine whether the version contains a suffix
|
||||
let version = versionArg;
|
||||
if (suffixArg) {
|
||||
version = versionArg + '_' + suffixArg;
|
||||
}
|
||||
|
||||
const tagString = `${registryArg}/${repo}:${version}`;
|
||||
return tagString;
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets build args from environment variable mapping
|
||||
*/
|
||||
public static async getDockerBuildArgs(managerRef: TsDockerManager): Promise<string> {
|
||||
logger.log('info', 'checking for env vars to be supplied to the docker build');
|
||||
let buildArgsString: string = '';
|
||||
const config = managerRef.config;
|
||||
|
||||
if (config.buildArgEnvMap) {
|
||||
for (const dockerArgKey of Object.keys(config.buildArgEnvMap)) {
|
||||
const dockerArgOuterEnvVar = config.buildArgEnvMap[dockerArgKey];
|
||||
logger.log(
|
||||
'note',
|
||||
`docker ARG "${dockerArgKey}" maps to outer env var "${dockerArgOuterEnvVar}"`
|
||||
);
|
||||
const targetValue = process.env[dockerArgOuterEnvVar];
|
||||
if (targetValue) {
|
||||
buildArgsString = `${buildArgsString} --build-arg ${dockerArgKey}="${targetValue}"`;
|
||||
}
|
||||
}
|
||||
}
|
||||
return buildArgsString;
|
||||
}
|
||||
|
||||
  // INSTANCE PROPERTIES
  public managerRef: TsDockerManager; // back-reference to the owning manager (config + project info)
  public filePath!: string; // path to the Dockerfile on disk (from options.filePath)
  public repo: string; // image repo name: npm package name without scope, or cwd basename
  public version: string; // tag version derived from the Dockerfile name (see dockerFileVersion)
  public cleanTag: string; // "repo:version" without any registry prefix
  public buildTag: string; // tag used when building; initialized to cleanTag
  public pushTag!: string; // fully-qualified push tag — assigned outside this chunk (presumably by the push flow; confirm)
  public containerName: string; // "dockerfile-<version>", set in the constructor
  public content!: string; // raw Dockerfile text (read from disk or passed as fileContents)
  public baseImage: string; // resolved FROM image, after ARG substitution (see dockerBaseImage)
  public localBaseImageDependent: boolean; // true when baseImage is built from a sibling Dockerfile (set in sortDockerfiles)
  public localBaseDockerfile!: Dockerfile; // the sibling Dockerfile this one builds FROM (set in sortDockerfiles)
  public localRegistryTag?: string; // tag in the temporary local registry (set by pushToLocalRegistry)
||||
|
||||
constructor(managerRefArg: TsDockerManager, options: IDockerfileOptions) {
|
||||
this.managerRef = managerRefArg;
|
||||
this.filePath = options.filePath!;
|
||||
|
||||
// Build repo name from project info or directory name
|
||||
const projectInfo = this.managerRef.projectInfo;
|
||||
if (projectInfo?.npm?.name) {
|
||||
// Use package name, removing scope if present
|
||||
const packageName = projectInfo.npm.name.replace(/^@[^/]+\//, '');
|
||||
this.repo = packageName;
|
||||
} else {
|
||||
// Fallback to directory name
|
||||
this.repo = plugins.path.basename(paths.cwd);
|
||||
}
|
||||
|
||||
this.version = Dockerfile.dockerFileVersion(this, plugins.path.parse(this.filePath).base);
|
||||
this.cleanTag = this.repo + ':' + this.version;
|
||||
this.buildTag = this.cleanTag;
|
||||
this.containerName = 'dockerfile-' + this.version;
|
||||
|
||||
if (options.filePath && options.read) {
|
||||
this.content = fs.readFileSync(plugins.path.resolve(options.filePath), 'utf-8');
|
||||
} else if (options.fileContents) {
|
||||
this.content = options.fileContents;
|
||||
}
|
||||
|
||||
this.baseImage = Dockerfile.dockerBaseImage(this.content);
|
||||
this.localBaseImageDependent = false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Builds the Dockerfile
|
||||
*/
|
||||
public async build(options?: { platform?: string; timeout?: number; noCache?: boolean; verbose?: boolean }): Promise<number> {
|
||||
const startTime = Date.now();
|
||||
const buildArgsString = await Dockerfile.getDockerBuildArgs(this.managerRef);
|
||||
const config = this.managerRef.config;
|
||||
const platformOverride = options?.platform;
|
||||
const timeout = options?.timeout;
|
||||
const noCacheFlag = options?.noCache ? ' --no-cache' : '';
|
||||
const verbose = options?.verbose ?? false;
|
||||
|
||||
let buildContextFlag = '';
|
||||
if (this.localBaseImageDependent && this.localBaseDockerfile) {
|
||||
const fromImage = this.baseImage;
|
||||
if (this.localBaseDockerfile.localRegistryTag) {
|
||||
// BuildKit pulls from the local registry (reachable via host network)
|
||||
const registryTag = this.localBaseDockerfile.localRegistryTag;
|
||||
buildContextFlag = ` --build-context "${fromImage}=docker-image://${registryTag}"`;
|
||||
logger.log('info', `Using local registry build context: ${fromImage} -> docker-image://${registryTag}`);
|
||||
}
|
||||
}
|
||||
|
||||
let buildCommand: string;
|
||||
|
||||
if (platformOverride) {
|
||||
// Single platform override via buildx
|
||||
buildCommand = `docker buildx build --platform ${platformOverride}${noCacheFlag}${buildContextFlag} --load -t ${this.buildTag} -f ${this.filePath} ${buildArgsString} .`;
|
||||
logger.log('info', `Build: buildx --platform ${platformOverride} --load`);
|
||||
} else if (config.platforms && config.platforms.length > 1) {
|
||||
// Multi-platform build using buildx
|
||||
const platformString = config.platforms.join(',');
|
||||
buildCommand = `docker buildx build --platform ${platformString}${noCacheFlag}${buildContextFlag} -t ${this.buildTag} -f ${this.filePath} ${buildArgsString} .`;
|
||||
|
||||
if (config.push) {
|
||||
buildCommand += ' --push';
|
||||
logger.log('info', `Build: buildx --platform ${platformString} --push`);
|
||||
} else {
|
||||
buildCommand += ' --load';
|
||||
logger.log('info', `Build: buildx --platform ${platformString} --load`);
|
||||
}
|
||||
} else {
|
||||
// Standard build
|
||||
const versionLabel = this.managerRef.projectInfo?.npm?.version || 'unknown';
|
||||
buildCommand = `docker build --label="version=${versionLabel}"${noCacheFlag} -t ${this.buildTag} -f ${this.filePath} ${buildArgsString} .`;
|
||||
logger.log('info', 'Build: docker build (standard)');
|
||||
}
|
||||
|
||||
if (timeout) {
|
||||
// Use streaming execution with timeout
|
||||
const streaming = verbose
|
||||
? await smartshellInstance.execStreaming(buildCommand)
|
||||
: await smartshellInstance.execStreamingSilent(buildCommand);
|
||||
const timeoutPromise = new Promise<never>((_, reject) => {
|
||||
setTimeout(() => {
|
||||
streaming.childProcess.kill();
|
||||
reject(new Error(`Build timed out after ${timeout}s for ${this.cleanTag}`));
|
||||
}, timeout * 1000);
|
||||
});
|
||||
const result = await Promise.race([streaming.finalPromise, timeoutPromise]);
|
||||
if (result.exitCode !== 0) {
|
||||
logger.log('error', `Build failed for ${this.cleanTag}`);
|
||||
throw new Error(`Build failed for ${this.cleanTag}`);
|
||||
}
|
||||
} else {
|
||||
const result = verbose
|
||||
? await smartshellInstance.exec(buildCommand)
|
||||
: await smartshellInstance.execSilent(buildCommand);
|
||||
if (result.exitCode !== 0) {
|
||||
logger.log('error', `Build failed for ${this.cleanTag}`);
|
||||
if (!verbose && result.stdout) {
|
||||
logger.log('error', `Build output:\n${result.stdout}`);
|
||||
}
|
||||
throw new Error(`Build failed for ${this.cleanTag}`);
|
||||
}
|
||||
}
|
||||
|
||||
return Date.now() - startTime;
|
||||
}
|
||||
|
||||
/**
|
||||
* Pushes the Dockerfile to a registry
|
||||
*/
|
||||
public async push(dockerRegistryArg: DockerRegistry, versionSuffix?: string): Promise<void> {
|
||||
this.pushTag = Dockerfile.getDockerTagString(
|
||||
this.managerRef,
|
||||
dockerRegistryArg.registryUrl,
|
||||
this.repo,
|
||||
this.version,
|
||||
versionSuffix
|
||||
);
|
||||
|
||||
await smartshellInstance.exec(`docker tag ${this.buildTag} ${this.pushTag}`);
|
||||
const pushResult = await smartshellInstance.exec(`docker push ${this.pushTag}`);
|
||||
|
||||
if (pushResult.exitCode !== 0) {
|
||||
logger.log('error', `Push failed for ${this.pushTag}`);
|
||||
throw new Error(`Push failed for ${this.pushTag}`);
|
||||
}
|
||||
|
||||
// Get image digest
|
||||
const inspectResult = await smartshellInstance.exec(
|
||||
`docker inspect --format="{{index .RepoDigests 0}}" ${this.pushTag}`
|
||||
);
|
||||
|
||||
if (inspectResult.exitCode === 0 && inspectResult.stdout.includes('@')) {
|
||||
const imageDigest = inspectResult.stdout.split('@')[1]?.trim();
|
||||
logger.log('info', `The image ${this.pushTag} has digest ${imageDigest}`);
|
||||
}
|
||||
|
||||
logger.log('ok', `Pushed ${this.pushTag}`);
|
||||
}
|
||||
|
||||
/**
|
||||
* Pulls the Dockerfile from a registry
|
||||
*/
|
||||
public async pull(registryArg: DockerRegistry, versionSuffixArg?: string): Promise<void> {
|
||||
const pullTag = Dockerfile.getDockerTagString(
|
||||
this.managerRef,
|
||||
registryArg.registryUrl,
|
||||
this.repo,
|
||||
this.version,
|
||||
versionSuffixArg
|
||||
);
|
||||
|
||||
await smartshellInstance.exec(`docker pull ${pullTag}`);
|
||||
await smartshellInstance.exec(`docker tag ${pullTag} ${this.buildTag}`);
|
||||
|
||||
logger.log('ok', `Pulled and tagged ${pullTag} as ${this.buildTag}`);
|
||||
}
|
||||
|
||||
/**
|
||||
* Tests the Dockerfile by running a test script if it exists
|
||||
*/
|
||||
public async test(): Promise<number> {
|
||||
const startTime = Date.now();
|
||||
const testDir = this.managerRef.config.testDir || plugins.path.join(paths.cwd, 'test');
|
||||
const testFile = plugins.path.join(testDir, 'test_' + this.version + '.sh');
|
||||
|
||||
const testFileExists = fs.existsSync(testFile);
|
||||
|
||||
if (testFileExists) {
|
||||
// Run tests in container
|
||||
await smartshellInstance.exec(
|
||||
`docker run --name tsdocker_test_container --entrypoint="bash" ${this.buildTag} -c "mkdir /tsdocker_test"`
|
||||
);
|
||||
await smartshellInstance.exec(`docker cp ${testFile} tsdocker_test_container:/tsdocker_test/test.sh`);
|
||||
await smartshellInstance.exec(`docker commit tsdocker_test_container tsdocker_test_image`);
|
||||
|
||||
const testResult = await smartshellInstance.exec(
|
||||
`docker run --entrypoint="bash" tsdocker_test_image -x /tsdocker_test/test.sh`
|
||||
);
|
||||
|
||||
// Cleanup
|
||||
await smartshellInstance.exec(`docker rm tsdocker_test_container`);
|
||||
await smartshellInstance.exec(`docker rmi --force tsdocker_test_image`);
|
||||
|
||||
if (testResult.exitCode !== 0) {
|
||||
throw new Error(`Tests failed for ${this.cleanTag}`);
|
||||
}
|
||||
} else {
|
||||
logger.log('warn', `Skipping tests for ${this.cleanTag} — no test file at ${testFile}`);
|
||||
}
|
||||
|
||||
return Date.now() - startTime;
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the ID of a built Docker image
|
||||
*/
|
||||
public async getId(): Promise<string> {
|
||||
const result = await smartshellInstance.exec(
|
||||
'docker inspect --type=image --format="{{.Id}}" ' + this.buildTag
|
||||
);
|
||||
return result.stdout.trim();
|
||||
}
|
||||
}
|
||||
91
ts/classes.dockerregistry.ts
Normal file
91
ts/classes.dockerregistry.ts
Normal file
@@ -0,0 +1,91 @@
|
||||
import * as plugins from './tsdocker.plugins.js';
|
||||
import { logger } from './tsdocker.logging.js';
|
||||
import type { IDockerRegistryOptions } from './interfaces/index.js';
|
||||
|
||||
const smartshellInstance = new plugins.smartshell.Smartshell({
|
||||
executor: 'bash',
|
||||
});
|
||||
|
||||
/**
|
||||
* Represents a Docker registry with authentication capabilities
|
||||
*/
|
||||
export class DockerRegistry {
|
||||
public registryUrl: string;
|
||||
public username: string;
|
||||
public password: string;
|
||||
|
||||
constructor(optionsArg: IDockerRegistryOptions) {
|
||||
this.registryUrl = optionsArg.registryUrl;
|
||||
this.username = optionsArg.username;
|
||||
this.password = optionsArg.password;
|
||||
logger.log('info', `created DockerRegistry for ${this.registryUrl}`);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a DockerRegistry instance from a pipe-delimited environment string
|
||||
* Format: "registryUrl|username|password"
|
||||
*/
|
||||
public static fromEnvString(envString: string): DockerRegistry {
|
||||
const dockerRegexResultArray = envString.split('|');
|
||||
if (dockerRegexResultArray.length !== 3) {
|
||||
logger.log('error', 'malformed docker env var...');
|
||||
throw new Error('malformed docker env var, expected format: registryUrl|username|password');
|
||||
}
|
||||
const registryUrl = dockerRegexResultArray[0].replace('https://', '').replace('http://', '');
|
||||
const username = dockerRegexResultArray[1];
|
||||
const password = dockerRegexResultArray[2];
|
||||
return new DockerRegistry({
|
||||
registryUrl: registryUrl,
|
||||
username: username,
|
||||
password: password,
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a DockerRegistry from environment variables
|
||||
* Looks for DOCKER_REGISTRY, DOCKER_REGISTRY_USER, DOCKER_REGISTRY_PASSWORD
|
||||
* Or for a specific registry: DOCKER_REGISTRY_<NAME>, etc.
|
||||
*/
|
||||
public static fromEnv(registryName?: string): DockerRegistry | null {
|
||||
const prefix = registryName ? `DOCKER_REGISTRY_${registryName.toUpperCase()}_` : 'DOCKER_REGISTRY_';
|
||||
|
||||
const registryUrl = process.env[`${prefix}URL`] || process.env['DOCKER_REGISTRY'];
|
||||
const username = process.env[`${prefix}USER`] || process.env['DOCKER_REGISTRY_USER'];
|
||||
const password = process.env[`${prefix}PASSWORD`] || process.env['DOCKER_REGISTRY_PASSWORD'];
|
||||
|
||||
if (!registryUrl || !username || !password) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return new DockerRegistry({
|
||||
registryUrl: registryUrl.replace('https://', '').replace('http://', ''),
|
||||
username,
|
||||
password,
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Logs in to the Docker registry
|
||||
*/
|
||||
public async login(): Promise<void> {
|
||||
if (this.registryUrl === 'docker.io') {
|
||||
await smartshellInstance.exec(`docker login -u ${this.username} -p ${this.password}`);
|
||||
logger.log('info', 'Logged in to standard docker hub');
|
||||
} else {
|
||||
await smartshellInstance.exec(`docker login -u ${this.username} -p ${this.password} ${this.registryUrl}`);
|
||||
}
|
||||
logger.log('ok', `docker authenticated for ${this.registryUrl}!`);
|
||||
}
|
||||
|
||||
/**
|
||||
* Logs out from the Docker registry
|
||||
*/
|
||||
public async logout(): Promise<void> {
|
||||
if (this.registryUrl === 'docker.io') {
|
||||
await smartshellInstance.exec('docker logout');
|
||||
} else {
|
||||
await smartshellInstance.exec(`docker logout ${this.registryUrl}`);
|
||||
}
|
||||
logger.log('info', `logged out from ${this.registryUrl}`);
|
||||
}
|
||||
}
|
||||
83
ts/classes.registrystorage.ts
Normal file
83
ts/classes.registrystorage.ts
Normal file
@@ -0,0 +1,83 @@
|
||||
import * as plugins from './tsdocker.plugins.js';
|
||||
import { logger } from './tsdocker.logging.js';
|
||||
import { DockerRegistry } from './classes.dockerregistry.js';
|
||||
|
||||
/**
|
||||
* Storage class for managing multiple Docker registries
|
||||
*/
|
||||
export class RegistryStorage {
|
||||
public objectMap = new plugins.lik.ObjectMap<DockerRegistry>();
|
||||
|
||||
constructor() {
|
||||
// Nothing here
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds a registry to the storage
|
||||
*/
|
||||
public addRegistry(registryArg: DockerRegistry): void {
|
||||
this.objectMap.add(registryArg);
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets a registry by its URL
|
||||
*/
|
||||
public getRegistryByUrl(registryUrlArg: string): DockerRegistry | undefined {
|
||||
return this.objectMap.findSync((registryArg) => {
|
||||
return registryArg.registryUrl === registryUrlArg;
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets all registries
|
||||
*/
|
||||
public getAllRegistries(): DockerRegistry[] {
|
||||
return this.objectMap.getArray();
|
||||
}
|
||||
|
||||
/**
|
||||
* Logs in to all registries
|
||||
*/
|
||||
public async loginAll(): Promise<void> {
|
||||
await this.objectMap.forEach(async (registryArg) => {
|
||||
await registryArg.login();
|
||||
});
|
||||
logger.log('success', 'logged in successfully into all available DockerRegistries!');
|
||||
}
|
||||
|
||||
/**
|
||||
* Logs out from all registries
|
||||
*/
|
||||
public async logoutAll(): Promise<void> {
|
||||
await this.objectMap.forEach(async (registryArg) => {
|
||||
await registryArg.logout();
|
||||
});
|
||||
logger.log('info', 'logged out from all DockerRegistries');
|
||||
}
|
||||
|
||||
/**
|
||||
* Loads registries from environment variables
|
||||
* Looks for DOCKER_REGISTRY_1, DOCKER_REGISTRY_2, etc. (pipe-delimited format)
|
||||
* Or individual registries like DOCKER_REGISTRY_GITLAB_URL, etc.
|
||||
*/
|
||||
public loadFromEnv(): void {
|
||||
// Check for numbered registry env vars (pipe-delimited format)
|
||||
for (let i = 1; i <= 10; i++) {
|
||||
const envVar = process.env[`DOCKER_REGISTRY_${i}`];
|
||||
if (envVar) {
|
||||
try {
|
||||
const registry = DockerRegistry.fromEnvString(envVar);
|
||||
this.addRegistry(registry);
|
||||
} catch (err) {
|
||||
logger.log('warn', `Failed to parse DOCKER_REGISTRY_${i}: ${(err as Error).message}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check for default registry
|
||||
const defaultRegistry = DockerRegistry.fromEnv();
|
||||
if (defaultRegistry) {
|
||||
this.addRegistry(defaultRegistry);
|
||||
}
|
||||
}
|
||||
}
|
||||
108
ts/classes.tsdockercache.ts
Normal file
108
ts/classes.tsdockercache.ts
Normal file
@@ -0,0 +1,108 @@
|
||||
import * as crypto from 'crypto';
|
||||
import * as fs from 'fs';
|
||||
import * as path from 'path';
|
||||
import * as plugins from './tsdocker.plugins.js';
|
||||
import * as paths from './tsdocker.paths.js';
|
||||
import { logger } from './tsdocker.logging.js';
|
||||
import type { ICacheData, ICacheEntry } from './interfaces/index.js';
|
||||
|
||||
const smartshellInstance = new plugins.smartshell.Smartshell({
|
||||
executor: 'bash',
|
||||
});
|
||||
|
||||
/**
|
||||
* Manages content-hash-based build caching for Dockerfiles.
|
||||
* Cache is stored in .nogit/tsdocker_support.json.
|
||||
*/
|
||||
export class TsDockerCache {
|
||||
private cacheFilePath: string;
|
||||
private data: ICacheData;
|
||||
|
||||
constructor() {
|
||||
this.cacheFilePath = path.join(paths.cwd, '.nogit', 'tsdocker_support.json');
|
||||
this.data = { version: 1, entries: {} };
|
||||
}
|
||||
|
||||
/**
|
||||
* Loads cache data from disk. Falls back to empty cache on missing/corrupt file.
|
||||
*/
|
||||
public load(): void {
|
||||
try {
|
||||
const raw = fs.readFileSync(this.cacheFilePath, 'utf-8');
|
||||
const parsed = JSON.parse(raw);
|
||||
if (parsed && parsed.version === 1 && parsed.entries) {
|
||||
this.data = parsed;
|
||||
} else {
|
||||
logger.log('warn', '[cache] Cache file has unexpected format, starting fresh');
|
||||
this.data = { version: 1, entries: {} };
|
||||
}
|
||||
} catch {
|
||||
// Missing or corrupt file — start fresh
|
||||
this.data = { version: 1, entries: {} };
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Saves cache data to disk. Creates .nogit directory if needed.
|
||||
*/
|
||||
public save(): void {
|
||||
const dir = path.dirname(this.cacheFilePath);
|
||||
fs.mkdirSync(dir, { recursive: true });
|
||||
fs.writeFileSync(this.cacheFilePath, JSON.stringify(this.data, null, 2), 'utf-8');
|
||||
}
|
||||
|
||||
/**
|
||||
* Computes SHA-256 hash of Dockerfile content.
|
||||
*/
|
||||
public computeContentHash(content: string): string {
|
||||
return crypto.createHash('sha256').update(content).digest('hex');
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks whether a build can be skipped for the given Dockerfile.
|
||||
* Logs detailed diagnostics and returns true if the build should be skipped.
|
||||
*/
|
||||
public async shouldSkipBuild(cleanTag: string, content: string): Promise<boolean> {
|
||||
const contentHash = this.computeContentHash(content);
|
||||
const entry = this.data.entries[cleanTag];
|
||||
|
||||
if (!entry) {
|
||||
logger.log('info', `[cache] ${cleanTag}: no cached entry, will build`);
|
||||
return false;
|
||||
}
|
||||
|
||||
const hashMatch = entry.contentHash === contentHash;
|
||||
logger.log('info', `[cache] ${cleanTag}: hash ${hashMatch ? 'matches' : 'changed'}`);
|
||||
|
||||
if (!hashMatch) {
|
||||
logger.log('info', `[cache] ${cleanTag}: content changed, will build`);
|
||||
return false;
|
||||
}
|
||||
|
||||
// Hash matches — verify the image still exists locally
|
||||
const inspectResult = await smartshellInstance.exec(
|
||||
`docker image inspect ${entry.imageId} > /dev/null 2>&1`
|
||||
);
|
||||
const available = inspectResult.exitCode === 0;
|
||||
|
||||
if (available) {
|
||||
logger.log('info', `[cache] ${cleanTag}: cache hit, skipping build`);
|
||||
return true;
|
||||
}
|
||||
|
||||
logger.log('info', `[cache] ${cleanTag}: image no longer available, will build`);
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Records a successful build in the cache.
|
||||
*/
|
||||
public recordBuild(cleanTag: string, content: string, imageId: string, buildTag: string): void {
|
||||
this.data.entries[cleanTag] = {
|
||||
contentHash: this.computeContentHash(content),
|
||||
imageId,
|
||||
buildTag,
|
||||
timestamp: Date.now(),
|
||||
};
|
||||
}
|
||||
}
|
||||
484
ts/classes.tsdockermanager.ts
Normal file
484
ts/classes.tsdockermanager.ts
Normal file
@@ -0,0 +1,484 @@
|
||||
import * as plugins from './tsdocker.plugins.js';
|
||||
import * as paths from './tsdocker.paths.js';
|
||||
import { logger, formatDuration } from './tsdocker.logging.js';
|
||||
import { Dockerfile } from './classes.dockerfile.js';
|
||||
import { DockerRegistry } from './classes.dockerregistry.js';
|
||||
import { RegistryStorage } from './classes.registrystorage.js';
|
||||
import { TsDockerCache } from './classes.tsdockercache.js';
|
||||
import { DockerContext } from './classes.dockercontext.js';
|
||||
import type { ITsDockerConfig, IBuildCommandOptions } from './interfaces/index.js';
|
||||
|
||||
const smartshellInstance = new plugins.smartshell.Smartshell({
|
||||
executor: 'bash',
|
||||
});
|
||||
|
||||
/**
|
||||
* Main orchestrator class for Docker operations
|
||||
*/
|
||||
export class TsDockerManager {
|
||||
public registryStorage: RegistryStorage;
|
||||
public config: ITsDockerConfig;
|
||||
public projectInfo: any;
|
||||
public dockerContext: DockerContext;
|
||||
private dockerfiles: Dockerfile[] = [];
|
||||
|
||||
constructor(config: ITsDockerConfig) {
|
||||
this.config = config;
|
||||
this.registryStorage = new RegistryStorage();
|
||||
this.dockerContext = new DockerContext();
|
||||
}
|
||||
|
||||
  /**
   * Prepares the manager by loading project info and registries.
   *
   * Runs, in order: Docker context detection (with optional explicit context),
   * best-effort project info loading, and registry discovery from environment
   * variables plus config.registries.
   *
   * @param contextArg - optional explicit Docker context name to use instead of
   *                     the auto-detected one
   */
  public async prepare(contextArg?: string): Promise<void> {
    // Detect Docker context (an explicit context overrides auto-detection)
    if (contextArg) {
      this.dockerContext.setContext(contextArg);
    }
    await this.dockerContext.detect();
    this.dockerContext.logContextInfo();
    this.dockerContext.logRootlessWarnings();

    // Load project info (best effort — projectInfo stays null on failure)
    try {
      const projectinfoInstance = new plugins.projectinfo.ProjectInfo(paths.cwd);
      this.projectInfo = {
        npm: {
          name: projectinfoInstance.npm.name,
          version: projectinfoInstance.npm.version,
        },
      };
    } catch (err) {
      logger.log('warn', 'Could not load project info');
      this.projectInfo = null;
    }

    // Load registries from environment
    this.registryStorage.loadFromEnv();

    // Add registries from config if specified
    if (this.config.registries) {
      for (const registryUrl of this.config.registries) {
        // Check if already loaded from env
        if (!this.registryStorage.getRegistryByUrl(registryUrl)) {
          // Try to load credentials for this registry from env:
          // dots in the URL become underscores, e.g. "ghcr.io" -> DOCKER_REGISTRY_GHCR_IO
          const envVarName = registryUrl.replace(/\./g, '_').toUpperCase();
          const envString = process.env[`DOCKER_REGISTRY_${envVarName}`];
          if (envString) {
            try {
              const registry = DockerRegistry.fromEnvString(envString);
              this.registryStorage.addRegistry(registry);
            } catch (err) {
              // Malformed credential string — warn and continue with the others
              logger.log('warn', `Could not load credentials for registry ${registryUrl}`);
            }
          }
        }
      }
    }

    logger.log('info', `Prepared TsDockerManager with ${this.registryStorage.getAllRegistries().length} registries`);
  }
|
||||
|
||||
/**
|
||||
* Logs in to all configured registries
|
||||
*/
|
||||
public async login(): Promise<void> {
|
||||
if (this.registryStorage.getAllRegistries().length === 0) {
|
||||
logger.log('warn', 'No registries configured');
|
||||
return;
|
||||
}
|
||||
await this.registryStorage.loginAll();
|
||||
}
|
||||
|
||||
  /**
   * Discovers and sorts Dockerfiles in the current directory.
   *
   * Pipeline: read all Dockerfiles, topologically sort them by local base-image
   * dependency, then map dependents to their base Dockerfile instances.
   * The result is cached on this.dockerfiles and also returned.
   */
  public async discoverDockerfiles(): Promise<Dockerfile[]> {
    this.dockerfiles = await Dockerfile.readDockerfiles(this);
    this.dockerfiles = await Dockerfile.sortDockerfiles(this.dockerfiles);
    this.dockerfiles = await Dockerfile.mapDockerfiles(this.dockerfiles);
    return this.dockerfiles;
  }
|
||||
|
||||
  /**
   * Builds discovered Dockerfiles in dependency order.
   * When options.patterns is provided, only matching Dockerfiles (and their dependencies) are built.
   *
   * Modes:
   *  - cached (options.cached): per-Dockerfile content-hash cache skips unchanged builds,
   *    either sequentially or — with options.parallel — level-by-level with bounded
   *    concurrency (default 4).
   *  - standard: delegates everything to Dockerfile.buildDockerfiles().
   *
   * @param options - build flags (patterns, platform, timeout, noCache, verbose,
   *                  cached, parallel, parallelConcurrency)
   * @returns the Dockerfiles that were (or would have been) built, in build order
   */
  public async build(options?: IBuildCommandOptions): Promise<Dockerfile[]> {
    if (this.dockerfiles.length === 0) {
      await this.discoverDockerfiles();
    }

    if (this.dockerfiles.length === 0) {
      logger.log('warn', 'No Dockerfiles found');
      return [];
    }

    // Determine which Dockerfiles to build
    let toBuild = this.dockerfiles;

    if (options?.patterns && options.patterns.length > 0) {
      // Filter to matching Dockerfiles (exact basename match, or simple glob with * / ?)
      const matched = this.dockerfiles.filter((df) => {
        const basename = plugins.path.basename(df.filePath);
        return options.patterns!.some((pattern) => {
          if (pattern.includes('*') || pattern.includes('?')) {
            // Convert glob pattern to regex
            const regexStr = '^' + pattern.replace(/\*/g, '.*').replace(/\?/g, '.') + '$';
            return new RegExp(regexStr).test(basename);
          }
          return basename === pattern;
        });
      });

      if (matched.length === 0) {
        logger.log('warn', `No Dockerfiles matched patterns: ${options.patterns.join(', ')}`);
        return [];
      }

      // Resolve dependency chain and preserve topological order
      toBuild = this.resolveWithDependencies(matched, this.dockerfiles);
      logger.log('info', `Matched ${matched.length} Dockerfile(s), building ${toBuild.length} (including dependencies)`);
    }

    // Check if buildx is needed (single-platform override or multi-platform config)
    const useBuildx = !!(options?.platform || (this.config.platforms && this.config.platforms.length > 1));
    if (useBuildx) {
      await this.ensureBuildx();
    }

    logger.log('info', '');
    logger.log('info', '=== BUILD PHASE ===');

    if (useBuildx) {
      const platforms = options?.platform || this.config.platforms!.join(', ');
      logger.log('info', `Build mode: buildx multi-platform [${platforms}]`);
    } else {
      logger.log('info', 'Build mode: standard docker build');
    }

    const localDeps = toBuild.filter(df => df.localBaseImageDependent);
    if (localDeps.length > 0) {
      logger.log('info', `Local dependencies: ${localDeps.map(df => `${df.cleanTag} -> ${df.localBaseDockerfile?.cleanTag}`).join(', ')}`);
    }

    if (options?.noCache) {
      logger.log('info', 'Cache: disabled (--no-cache)');
    }

    if (options?.parallel) {
      // Preview the dependency levels before building (informational only)
      const concurrency = options.parallelConcurrency ?? 4;
      const levels = Dockerfile.computeLevels(toBuild);
      logger.log('info', `Parallel build: ${levels.length} level(s), concurrency ${concurrency}`);
      for (let l = 0; l < levels.length; l++) {
        const level = levels[l];
        logger.log('info', `  Level ${l} (${level.length}): ${level.map(df => df.cleanTag).join(', ')}`);
      }
    }

    logger.log('info', `Building ${toBuild.length} Dockerfile(s)...`);

    if (options?.cached) {
      // === CACHED MODE: skip builds for unchanged Dockerfiles ===
      logger.log('info', '(cached mode active)');
      const cache = new TsDockerCache();
      cache.load();

      const total = toBuild.length;
      const overallStart = Date.now();
      const useRegistry = Dockerfile.needsLocalRegistry(toBuild, options);

      if (useRegistry) {
        await Dockerfile.startLocalRegistry(this.dockerContext.contextInfo?.isRootless);
      }

      try {
        if (options?.parallel) {
          // === PARALLEL CACHED MODE ===
          const concurrency = options.parallelConcurrency ?? 4;
          const levels = Dockerfile.computeLevels(toBuild);

          // NOTE: progress indices are assigned at task-creation time (per level),
          // so the numbers reflect scheduling order, not completion order.
          let built = 0;
          for (let l = 0; l < levels.length; l++) {
            const level = levels[l];
            logger.log('info', `--- Level ${l}: building ${level.length} image(s) in parallel ---`);

            const tasks = level.map((df) => {
              const myIndex = ++built;
              return async () => {
                const progress = `(${myIndex}/${total})`;
                const skip = await cache.shouldSkipBuild(df.cleanTag, df.content);

                if (skip) {
                  logger.log('ok', `${progress} Skipped ${df.cleanTag} (cached)`);
                } else {
                  logger.log('info', `${progress} Building ${df.cleanTag}...`);
                  const elapsed = await df.build({
                    platform: options?.platform,
                    timeout: options?.timeout,
                    noCache: options?.noCache,
                    verbose: options?.verbose,
                  });
                  logger.log('ok', `${progress} Built ${df.cleanTag} in ${formatDuration(elapsed)}`);
                  const imageId = await df.getId();
                  cache.recordBuild(df.cleanTag, df.content, imageId, df.buildTag);
                }
                return df;
              };
            });

            await Dockerfile.runWithConcurrency(tasks, concurrency);

            // After the entire level completes, tag + push for dependency resolution
            // so that the NEXT level's FROM images resolve locally.
            for (const df of level) {
              const dependentBaseImages = new Set<string>();
              for (const other of toBuild) {
                if (other.localBaseDockerfile === df && other.baseImage !== df.buildTag) {
                  dependentBaseImages.add(other.baseImage);
                }
              }
              for (const fullTag of dependentBaseImages) {
                logger.log('info', `Tagging ${df.buildTag} as ${fullTag} for local dependency resolution`);
                await smartshellInstance.exec(`docker tag ${df.buildTag} ${fullTag}`);
              }
              if (useRegistry && toBuild.some(other => other.localBaseDockerfile === df)) {
                await Dockerfile.pushToLocalRegistry(df);
              }
            }
          }
        } else {
          // === SEQUENTIAL CACHED MODE ===
          for (let i = 0; i < total; i++) {
            const dockerfileArg = toBuild[i];
            const progress = `(${i + 1}/${total})`;
            const skip = await cache.shouldSkipBuild(dockerfileArg.cleanTag, dockerfileArg.content);

            if (skip) {
              logger.log('ok', `${progress} Skipped ${dockerfileArg.cleanTag} (cached)`);
            } else {
              logger.log('info', `${progress} Building ${dockerfileArg.cleanTag}...`);
              const elapsed = await dockerfileArg.build({
                platform: options?.platform,
                timeout: options?.timeout,
                noCache: options?.noCache,
                verbose: options?.verbose,
              });
              logger.log('ok', `${progress} Built ${dockerfileArg.cleanTag} in ${formatDuration(elapsed)}`);
              const imageId = await dockerfileArg.getId();
              cache.recordBuild(dockerfileArg.cleanTag, dockerfileArg.content, imageId, dockerfileArg.buildTag);
            }

            // Tag for dependents IMMEDIATELY (not after all builds)
            const dependentBaseImages = new Set<string>();
            for (const other of toBuild) {
              if (other.localBaseDockerfile === dockerfileArg && other.baseImage !== dockerfileArg.buildTag) {
                dependentBaseImages.add(other.baseImage);
              }
            }
            for (const fullTag of dependentBaseImages) {
              logger.log('info', `Tagging ${dockerfileArg.buildTag} as ${fullTag} for local dependency resolution`);
              await smartshellInstance.exec(`docker tag ${dockerfileArg.buildTag} ${fullTag}`);
            }

            // Push to local registry for buildx (even for cache hits — image exists but registry doesn't)
            if (useRegistry && toBuild.some(other => other.localBaseDockerfile === dockerfileArg)) {
              await Dockerfile.pushToLocalRegistry(dockerfileArg);
            }
          }
        }
      } finally {
        // Always tear down the ephemeral local registry, even on build failure.
        if (useRegistry) {
          await Dockerfile.stopLocalRegistry();
        }
      }

      logger.log('info', `Total build time: ${formatDuration(Date.now() - overallStart)}`);
      cache.save();
    } else {
      // === STANDARD MODE: build all via static helper ===
      await Dockerfile.buildDockerfiles(toBuild, {
        platform: options?.platform,
        timeout: options?.timeout,
        noCache: options?.noCache,
        verbose: options?.verbose,
        isRootless: this.dockerContext.contextInfo?.isRootless,
        parallel: options?.parallel,
        parallelConcurrency: options?.parallelConcurrency,
      });
    }

    logger.log('success', 'All Dockerfiles built successfully');

    return toBuild;
  }
|
||||
|
||||
/**
|
||||
* Resolves a set of target Dockerfiles to include all their local base image dependencies,
|
||||
* preserving the original topological build order.
|
||||
*/
|
||||
private resolveWithDependencies(targets: Dockerfile[], allSorted: Dockerfile[]): Dockerfile[] {
|
||||
const needed = new Set<Dockerfile>();
|
||||
const addWithDeps = (df: Dockerfile) => {
|
||||
if (needed.has(df)) return;
|
||||
needed.add(df);
|
||||
if (df.localBaseImageDependent && df.localBaseDockerfile) {
|
||||
addWithDeps(df.localBaseDockerfile);
|
||||
}
|
||||
};
|
||||
for (const df of targets) addWithDeps(df);
|
||||
return allSorted.filter((df) => needed.has(df));
|
||||
}
|
||||
|
||||
/**
|
||||
* Ensures Docker buildx is set up for multi-architecture builds
|
||||
*/
|
||||
private async ensureBuildx(): Promise<void> {
|
||||
const builderName = this.dockerContext.getBuilderName();
|
||||
const platforms = this.config.platforms?.join(', ') || 'default';
|
||||
logger.log('info', `Setting up Docker buildx [${platforms}]...`);
|
||||
logger.log('info', `Builder: ${builderName}`);
|
||||
const inspectResult = await smartshellInstance.exec(`docker buildx inspect ${builderName} 2>/dev/null`);
|
||||
|
||||
if (inspectResult.exitCode !== 0) {
|
||||
logger.log('info', 'Creating new buildx builder with host network...');
|
||||
await smartshellInstance.exec(
|
||||
`docker buildx create --name ${builderName} --driver docker-container --driver-opt network=host --use`
|
||||
);
|
||||
await smartshellInstance.exec('docker buildx inspect --bootstrap');
|
||||
} else {
|
||||
const inspectOutput = inspectResult.stdout || '';
|
||||
if (!inspectOutput.includes('network=host')) {
|
||||
logger.log('info', 'Recreating buildx builder with host network (migration)...');
|
||||
await smartshellInstance.exec(`docker buildx rm ${builderName} 2>/dev/null`);
|
||||
await smartshellInstance.exec(
|
||||
`docker buildx create --name ${builderName} --driver docker-container --driver-opt network=host --use`
|
||||
);
|
||||
await smartshellInstance.exec('docker buildx inspect --bootstrap');
|
||||
} else {
|
||||
await smartshellInstance.exec(`docker buildx use ${builderName}`);
|
||||
}
|
||||
}
|
||||
logger.log('ok', `Docker buildx ready (builder: ${builderName}, platforms: ${platforms})`);
|
||||
}
|
||||
|
||||
/**
|
||||
* Pushes all built images to specified registries
|
||||
*/
|
||||
public async push(registryUrls?: string[]): Promise<void> {
|
||||
if (this.dockerfiles.length === 0) {
|
||||
await this.discoverDockerfiles();
|
||||
}
|
||||
|
||||
if (this.dockerfiles.length === 0) {
|
||||
logger.log('warn', 'No Dockerfiles found to push');
|
||||
return;
|
||||
}
|
||||
|
||||
// Determine which registries to push to
|
||||
let registriesToPush: DockerRegistry[] = [];
|
||||
|
||||
if (registryUrls && registryUrls.length > 0) {
|
||||
// Push to specified registries
|
||||
for (const url of registryUrls) {
|
||||
const registry = this.registryStorage.getRegistryByUrl(url);
|
||||
if (registry) {
|
||||
registriesToPush.push(registry);
|
||||
} else {
|
||||
logger.log('warn', `Registry ${url} not found in storage`);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Push to all configured registries
|
||||
registriesToPush = this.registryStorage.getAllRegistries();
|
||||
}
|
||||
|
||||
if (registriesToPush.length === 0) {
|
||||
logger.log('warn', 'No registries available to push to');
|
||||
return;
|
||||
}
|
||||
|
||||
// Push each Dockerfile to each registry
|
||||
for (const dockerfile of this.dockerfiles) {
|
||||
for (const registry of registriesToPush) {
|
||||
await dockerfile.push(registry);
|
||||
}
|
||||
}
|
||||
|
||||
logger.log('success', 'All images pushed successfully');
|
||||
}
|
||||
|
||||
/**
|
||||
* Pulls images from a specified registry
|
||||
*/
|
||||
public async pull(registryUrl: string): Promise<void> {
|
||||
if (this.dockerfiles.length === 0) {
|
||||
await this.discoverDockerfiles();
|
||||
}
|
||||
|
||||
const registry = this.registryStorage.getRegistryByUrl(registryUrl);
|
||||
if (!registry) {
|
||||
throw new Error(`Registry ${registryUrl} not found`);
|
||||
}
|
||||
|
||||
for (const dockerfile of this.dockerfiles) {
|
||||
await dockerfile.pull(registry);
|
||||
}
|
||||
|
||||
logger.log('success', 'All images pulled successfully');
|
||||
}
|
||||
|
||||
/**
|
||||
* Runs tests for all Dockerfiles
|
||||
*/
|
||||
public async test(): Promise<void> {
|
||||
if (this.dockerfiles.length === 0) {
|
||||
await this.discoverDockerfiles();
|
||||
}
|
||||
|
||||
if (this.dockerfiles.length === 0) {
|
||||
logger.log('warn', 'No Dockerfiles found to test');
|
||||
return;
|
||||
}
|
||||
|
||||
logger.log('info', '');
|
||||
logger.log('info', '=== TEST PHASE ===');
|
||||
await Dockerfile.testDockerfiles(this.dockerfiles);
|
||||
logger.log('success', 'All tests completed');
|
||||
}
|
||||
|
||||
/**
|
||||
* Lists all discovered Dockerfiles and their info
|
||||
*/
|
||||
public async list(): Promise<Dockerfile[]> {
|
||||
if (this.dockerfiles.length === 0) {
|
||||
await this.discoverDockerfiles();
|
||||
}
|
||||
|
||||
logger.log('info', '');
|
||||
logger.log('info', 'Discovered Dockerfiles:');
|
||||
logger.log('info', '========================');
|
||||
logger.log('info', '');
|
||||
|
||||
for (let i = 0; i < this.dockerfiles.length; i++) {
|
||||
const df = this.dockerfiles[i];
|
||||
logger.log('info', `${i + 1}. ${df.filePath}`);
|
||||
logger.log('info', ` Tag: ${df.cleanTag}`);
|
||||
logger.log('info', ` Base Image: ${df.baseImage}`);
|
||||
logger.log('info', ` Version: ${df.version}`);
|
||||
if (df.localBaseImageDependent) {
|
||||
logger.log('info', ` Depends on: ${df.localBaseDockerfile?.cleanTag}`);
|
||||
}
|
||||
logger.log('info', '');
|
||||
}
|
||||
|
||||
return this.dockerfiles;
|
||||
}
|
||||
|
||||
/**
 * Gets the cached Dockerfiles (after discovery).
 *
 * Returns the internal array populated by discoverDockerfiles(); it is empty
 * until discovery has run. Note: callers receive the live array, not a copy.
 */
public getDockerfiles(): Dockerfile[] {
  return this.dockerfiles;
}
|
||||
}
|
||||
104
ts/interfaces/index.ts
Normal file
104
ts/interfaces/index.ts
Normal file
@@ -0,0 +1,104 @@
|
||||
/**
 * Configuration interface for tsdocker
 * Extends legacy config with new Docker build capabilities
 */
export interface ITsDockerConfig {
  // Legacy (backward compatible)
  baseImage: string; // image the legacy test container is based on
  command: string; // command executed inside the test container
  dockerSock: boolean; // whether to mount the host Docker socket into the container
  keyValueObject: { [key: string]: any }; // key/value pairs passed into the container environment — TODO confirm consumer

  // New Docker build config
  registries?: string[]; // registry URLs that built images are pushed to
  registryRepoMap?: { [registry: string]: string }; // per-registry repository path mapping
  buildArgEnvMap?: { [dockerArg: string]: string }; // maps a docker build ARG name to the env var supplying its value — verify direction
  platforms?: string[]; // ['linux/amd64', 'linux/arm64']
  push?: boolean; // push automatically after building
  testDir?: string; // directory containing container test scripts — TODO confirm
}
|
||||
|
||||
/**
 * Options for constructing a DockerRegistry
 */
export interface IDockerRegistryOptions {
  registryUrl: string; // e.g. 'registry.gitlab.com'
  username: string; // credential used to authenticate against the registry
  password: string; // credential (password or token) used to authenticate
}
|
||||
|
||||
/**
 * Information about a discovered Dockerfile
 */
export interface IDockerfileInfo {
  filePath: string; // path to the Dockerfile on disk
  fileName: string; // file basename (e.g. 'Dockerfile_base')
  version: string; // version used for tagging — TODO confirm where it is extracted from
  baseImage: string; // image referenced by the Dockerfile's FROM line
  buildTag: string; // tag applied when the image is built
  localBaseImageDependent: boolean; // true when the base image is built from another local Dockerfile
}
|
||||
|
||||
/**
 * Options for creating a Dockerfile instance
 *
 * NOTE(review): shape suggests callers supply either filePath (with read
 * controlling whether contents are loaded from disk) or fileContents
 * directly — confirm against the Dockerfile class constructor.
 */
export interface IDockerfileOptions {
  filePath?: string;
  fileContents?: string;
  read?: boolean;
}
|
||||
|
||||
/**
 * Result from a Docker build operation
 */
export interface IBuildResult {
  success: boolean;
  tag: string; // the tag that was built
  duration?: number; // build duration — presumably milliseconds, TODO confirm
  error?: string; // populated when success is false
}

/**
 * Result from a Docker push operation
 */
export interface IPushResult {
  success: boolean;
  registry: string; // registry URL the image was pushed to
  tag: string; // image tag that was pushed
  digest?: string; // content digest reported by the registry, if available
  error?: string; // populated when success is false
}
|
||||
|
||||
/**
 * Options for the build command
 *
 * Parsed from CLI flags and consumed by TsDockerManager.build().
 */
export interface IBuildCommandOptions {
  patterns?: string[]; // Dockerfile name patterns (e.g., ['Dockerfile_base', 'Dockerfile_*'])
  platform?: string; // Single platform override (e.g., 'linux/arm64')
  timeout?: number; // Build timeout in seconds
  noCache?: boolean; // Force rebuild without Docker layer cache (--no-cache)
  cached?: boolean; // Skip builds when Dockerfile content hasn't changed
  verbose?: boolean; // Stream raw docker build output (default: silent)
  context?: string; // Explicit Docker context name (--context flag)
  parallel?: boolean; // Enable parallel builds within dependency levels
  parallelConcurrency?: number; // Max concurrent builds per level (default 4)
}
|
||||
|
||||
/**
 * A single persisted build-cache record for one Dockerfile, used by the
 * --cached flow to skip rebuilds when the Dockerfile content is unchanged.
 */
export interface ICacheEntry {
  contentHash: string; // SHA-256 hex of Dockerfile content
  imageId: string; // Docker image ID (sha256:...)
  buildTag: string; // tag the cached image was built with
  timestamp: number; // Unix ms
}

/**
 * On-disk shape of the build cache; entries are keyed by clean tag.
 */
export interface ICacheData {
  version: 1; // schema version literal — bump when the shape changes
  entries: { [cleanTag: string]: ICacheEntry };
}
|
||||
|
||||
/**
 * Detected Docker context information, produced by the DockerContext class.
 */
export interface IDockerContextInfo {
  name: string; // 'default', 'rootless', 'colima', etc.
  endpoint: string; // 'unix:///var/run/docker.sock'
  isRootless: boolean; // true when the daemon runs in rootless mode
  dockerHost?: string; // value of DOCKER_HOST env var, if set
}
|
||||
@@ -6,10 +6,15 @@ import * as ConfigModule from './tsdocker.config.js';
|
||||
import * as DockerModule from './tsdocker.docker.js';
|
||||
|
||||
import { logger, ora } from './tsdocker.logging.js';
|
||||
import { TsDockerManager } from './classes.tsdockermanager.js';
|
||||
import type { IBuildCommandOptions } from './interfaces/index.js';
|
||||
import { commitinfo } from './00_commitinfo_data.js';
|
||||
|
||||
const tsdockerCli = new plugins.smartcli.Smartcli();
|
||||
tsdockerCli.addVersion(commitinfo.version);
|
||||
|
||||
export let run = () => {
|
||||
// Default command: run tests in container (legacy behavior)
|
||||
tsdockerCli.standardCommand().subscribe(async argvArg => {
|
||||
const configArg = await ConfigModule.run().then(DockerModule.run);
|
||||
if (configArg.exitCode === 0) {
|
||||
@@ -20,6 +25,198 @@ export let run = () => {
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* Build Dockerfiles in dependency order
|
||||
* Usage: tsdocker build [Dockerfile_patterns...] [--platform=linux/arm64] [--timeout=600]
|
||||
*/
|
||||
tsdockerCli.addCommand('build').subscribe(async argvArg => {
|
||||
try {
|
||||
const config = await ConfigModule.run();
|
||||
const manager = new TsDockerManager(config);
|
||||
await manager.prepare(argvArg.context as string | undefined);
|
||||
|
||||
const buildOptions: IBuildCommandOptions = {};
|
||||
const patterns = argvArg._.slice(1) as string[];
|
||||
if (patterns.length > 0) {
|
||||
buildOptions.patterns = patterns;
|
||||
}
|
||||
if (argvArg.platform) {
|
||||
buildOptions.platform = argvArg.platform as string;
|
||||
}
|
||||
if (argvArg.timeout) {
|
||||
buildOptions.timeout = Number(argvArg.timeout);
|
||||
}
|
||||
if (argvArg.cache === false) {
|
||||
buildOptions.noCache = true;
|
||||
}
|
||||
if (argvArg.cached) {
|
||||
buildOptions.cached = true;
|
||||
}
|
||||
if (argvArg.verbose) {
|
||||
buildOptions.verbose = true;
|
||||
}
|
||||
if (argvArg.parallel) {
|
||||
buildOptions.parallel = true;
|
||||
if (typeof argvArg.parallel === 'number') {
|
||||
buildOptions.parallelConcurrency = argvArg.parallel;
|
||||
}
|
||||
}
|
||||
|
||||
await manager.build(buildOptions);
|
||||
logger.log('success', 'Build completed successfully');
|
||||
} catch (err) {
|
||||
logger.log('error', `Build failed: ${(err as Error).message}`);
|
||||
process.exit(1);
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* Push built images to configured registries
|
||||
* Usage: tsdocker push [Dockerfile_patterns...] [--platform=linux/arm64] [--timeout=600] [--registry=url]
|
||||
*/
|
||||
tsdockerCli.addCommand('push').subscribe(async argvArg => {
|
||||
try {
|
||||
const config = await ConfigModule.run();
|
||||
const manager = new TsDockerManager(config);
|
||||
await manager.prepare(argvArg.context as string | undefined);
|
||||
|
||||
// Login first
|
||||
await manager.login();
|
||||
|
||||
// Parse build options from positional args and flags
|
||||
const buildOptions: IBuildCommandOptions = {};
|
||||
const patterns = argvArg._.slice(1) as string[];
|
||||
if (patterns.length > 0) {
|
||||
buildOptions.patterns = patterns;
|
||||
}
|
||||
if (argvArg.platform) {
|
||||
buildOptions.platform = argvArg.platform as string;
|
||||
}
|
||||
if (argvArg.timeout) {
|
||||
buildOptions.timeout = Number(argvArg.timeout);
|
||||
}
|
||||
if (argvArg.cache === false) {
|
||||
buildOptions.noCache = true;
|
||||
}
|
||||
if (argvArg.verbose) {
|
||||
buildOptions.verbose = true;
|
||||
}
|
||||
if (argvArg.parallel) {
|
||||
buildOptions.parallel = true;
|
||||
if (typeof argvArg.parallel === 'number') {
|
||||
buildOptions.parallelConcurrency = argvArg.parallel;
|
||||
}
|
||||
}
|
||||
|
||||
// Build images first (if not already built)
|
||||
await manager.build(buildOptions);
|
||||
|
||||
// Get registry from --registry flag
|
||||
const registryArg = argvArg.registry as string | undefined;
|
||||
const registries = registryArg ? [registryArg] : undefined;
|
||||
|
||||
await manager.push(registries);
|
||||
logger.log('success', 'Push completed successfully');
|
||||
} catch (err) {
|
||||
logger.log('error', `Push failed: ${(err as Error).message}`);
|
||||
process.exit(1);
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* Pull images from a specified registry
|
||||
*/
|
||||
tsdockerCli.addCommand('pull').subscribe(async argvArg => {
|
||||
try {
|
||||
const registryArg = argvArg._[1]; // e.g., tsdocker pull registry.gitlab.com
|
||||
if (!registryArg) {
|
||||
logger.log('error', 'Registry URL required. Usage: tsdocker pull <registry-url>');
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
const config = await ConfigModule.run();
|
||||
const manager = new TsDockerManager(config);
|
||||
await manager.prepare(argvArg.context as string | undefined);
|
||||
|
||||
// Login first
|
||||
await manager.login();
|
||||
|
||||
await manager.pull(registryArg);
|
||||
logger.log('success', 'Pull completed successfully');
|
||||
} catch (err) {
|
||||
logger.log('error', `Pull failed: ${(err as Error).message}`);
|
||||
process.exit(1);
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* Run container tests for all Dockerfiles
|
||||
*/
|
||||
tsdockerCli.addCommand('test').subscribe(async argvArg => {
|
||||
try {
|
||||
const config = await ConfigModule.run();
|
||||
const manager = new TsDockerManager(config);
|
||||
await manager.prepare(argvArg.context as string | undefined);
|
||||
|
||||
// Build images first
|
||||
const buildOptions: IBuildCommandOptions = {};
|
||||
if (argvArg.cache === false) {
|
||||
buildOptions.noCache = true;
|
||||
}
|
||||
if (argvArg.cached) {
|
||||
buildOptions.cached = true;
|
||||
}
|
||||
if (argvArg.verbose) {
|
||||
buildOptions.verbose = true;
|
||||
}
|
||||
if (argvArg.parallel) {
|
||||
buildOptions.parallel = true;
|
||||
if (typeof argvArg.parallel === 'number') {
|
||||
buildOptions.parallelConcurrency = argvArg.parallel;
|
||||
}
|
||||
}
|
||||
await manager.build(buildOptions);
|
||||
|
||||
// Run tests
|
||||
await manager.test();
|
||||
logger.log('success', 'Tests completed successfully');
|
||||
} catch (err) {
|
||||
logger.log('error', `Tests failed: ${(err as Error).message}`);
|
||||
process.exit(1);
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* Login to configured registries
|
||||
*/
|
||||
tsdockerCli.addCommand('login').subscribe(async argvArg => {
|
||||
try {
|
||||
const config = await ConfigModule.run();
|
||||
const manager = new TsDockerManager(config);
|
||||
await manager.prepare(argvArg.context as string | undefined);
|
||||
await manager.login();
|
||||
logger.log('success', 'Login completed successfully');
|
||||
} catch (err) {
|
||||
logger.log('error', `Login failed: ${(err as Error).message}`);
|
||||
process.exit(1);
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* List discovered Dockerfiles and their dependencies
|
||||
*/
|
||||
tsdockerCli.addCommand('list').subscribe(async argvArg => {
|
||||
try {
|
||||
const config = await ConfigModule.run();
|
||||
const manager = new TsDockerManager(config);
|
||||
await manager.prepare(argvArg.context as string | undefined);
|
||||
await manager.list();
|
||||
} catch (err) {
|
||||
logger.log('error', `List failed: ${(err as Error).message}`);
|
||||
process.exit(1);
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* this command is executed inside docker and meant for use from outside docker
|
||||
*/
|
||||
@@ -62,16 +259,6 @@ export let run = () => {
|
||||
ora.finishSuccess('docker environment now is clean!');
|
||||
});
|
||||
|
||||
tsdockerCli.addCommand('speedtest').subscribe(async argvArg => {
|
||||
const smartshellInstance = new plugins.smartshell.Smartshell({
|
||||
executor: 'bash'
|
||||
});
|
||||
logger.log('ok', 'Starting speedtest');
|
||||
await smartshellInstance.exec(
|
||||
`docker pull tianon/speedtest && docker run --rm tianon/speedtest --accept-license --accept-gdpr`
|
||||
);
|
||||
});
|
||||
|
||||
tsdockerCli.addCommand('vscode').subscribe(async argvArg => {
|
||||
const smartshellInstance = new plugins.smartshell.Smartshell({
|
||||
executor: 'bash'
|
||||
|
||||
@@ -1,14 +1,12 @@
|
||||
import * as plugins from './tsdocker.plugins.js';
|
||||
import * as paths from './tsdocker.paths.js';
|
||||
import * as fs from 'fs';
|
||||
import type { ITsDockerConfig } from './interfaces/index.js';
|
||||
|
||||
export interface IConfig {
|
||||
baseImage: string;
|
||||
command: string;
|
||||
dockerSock: boolean;
|
||||
// Re-export ITsDockerConfig as IConfig for backward compatibility
|
||||
export type IConfig = ITsDockerConfig & {
|
||||
exitCode?: number;
|
||||
keyValueObject: {[key: string]: any};
|
||||
}
|
||||
};
|
||||
|
||||
const getQenvKeyValueObject = async () => {
|
||||
let qenvKeyValueObjectArray: { [key: string]: string | number };
|
||||
@@ -23,11 +21,20 @@ const getQenvKeyValueObject = async () => {
|
||||
const buildConfig = async (qenvKeyValueObjectArg: { [key: string]: string | number }) => {
|
||||
const npmextra = new plugins.npmextra.Npmextra(paths.cwd);
|
||||
const config = npmextra.dataFor<IConfig>('@git.zone/tsdocker', {
|
||||
// Legacy options (backward compatible)
|
||||
baseImage: 'hosttoday/ht-docker-node:npmdocker',
|
||||
init: 'rm -rf node_nodules/ && yarn install',
|
||||
command: 'npmci npm test',
|
||||
dockerSock: false,
|
||||
keyValueObject: qenvKeyValueObjectArg
|
||||
keyValueObject: qenvKeyValueObjectArg,
|
||||
|
||||
// New Docker build options
|
||||
registries: [],
|
||||
registryRepoMap: {},
|
||||
buildArgEnvMap: {},
|
||||
platforms: ['linux/amd64'],
|
||||
push: false,
|
||||
testDir: undefined,
|
||||
});
|
||||
return config;
|
||||
};
|
||||
|
||||
@@ -15,3 +15,12 @@ export const logger = new plugins.smartlog.Smartlog({
|
||||
logger.addLogDestination(new plugins.smartlogDestinationLocal.DestinationLocal());
|
||||
|
||||
export const ora = new plugins.smartlogSouceOra.SmartlogSourceOra();
|
||||
|
||||
export function formatDuration(ms: number): string {
|
||||
if (ms < 1000) return `${ms}ms`;
|
||||
const totalSeconds = ms / 1000;
|
||||
if (totalSeconds < 60) return `${totalSeconds.toFixed(1)}s`;
|
||||
const minutes = Math.floor(totalSeconds / 60);
|
||||
const seconds = Math.round(totalSeconds % 60);
|
||||
return `${minutes}m ${seconds}s`;
|
||||
}
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
// push.rocks scope
|
||||
import * as lik from '@push.rocks/lik';
|
||||
import * as npmextra from '@push.rocks/npmextra';
|
||||
import * as path from 'path';
|
||||
import * as projectinfo from '@push.rocks/projectinfo';
|
||||
@@ -17,6 +18,7 @@ import * as smartstring from '@push.rocks/smartstring';
|
||||
export const smartfs = new SmartFs(new SmartFsProviderNode());
|
||||
|
||||
export {
|
||||
lik,
|
||||
npmextra,
|
||||
path,
|
||||
projectinfo,
|
||||
|
||||
Reference in New Issue
Block a user