Compare commits
103 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 2ecd4e9d7c | |||
| 08dbad47bc | |||
| 15e5dedae4 | |||
| 5834721da8 | |||
| 2f31e14cbe | |||
| 5691e5fb78 | |||
| 8d043d20a8 | |||
| 6fe70e0a1d | |||
| cc9c20882e | |||
| 08af9fec14 | |||
| b8a26bf3bd | |||
| e6432b4ea9 | |||
| e9975ba7b8 | |||
| 396ce29d7a | |||
| 7c0935d585 | |||
| 52af76b7ed | |||
| 414d7dd727 | |||
| 4b1c908b89 | |||
| 6e313261e7 | |||
| 42df15a523 | |||
| 7ef2ebcf5b | |||
| 87f26b7b63 | |||
| ffdc61fb42 | |||
| 5b25704cf8 | |||
| 00e6033d8b | |||
| 453040983d | |||
| 456858bc36 | |||
| 606c82dafa | |||
| 9fc4afe4b8 | |||
| 90689c2645 | |||
| 4a1d649e5e | |||
| 66bd36dc4f | |||
| 349d711cc5 | |||
| c74a4bcd5b | |||
| ff835c4160 | |||
| 05eceeb056 | |||
| de55beda08 | |||
| 9aa2b0c7be | |||
| a283bbfba0 | |||
| 8a4e300581 | |||
| 6b0d96b745 | |||
| a08c11838f | |||
| 7c5225125c | |||
| bc4778f7db | |||
| 2e7e8ae5cf | |||
| 054585c7f5 | |||
| c0cebbe614 | |||
| 740f83114c | |||
| e48023d490 | |||
| eaaf313442 | |||
| 68b2baadae | |||
| 6743dc35e7 | |||
| bbf265716d | |||
| 3a705534fe | |||
| cbdbd32dd1 | |||
| 224004217c | |||
| e06ef454a6 | |||
| a5f4d93f50 | |||
| 9f5d2cacf1 | |||
| d9112d3e04 | |||
| 455404c3c9 | |||
| 90089625dc | |||
| 86d5cc1d47 | |||
| 6407033694 | |||
| 9dd69868d9 | |||
| dc4074340d | |||
| 001e870643 | |||
| 440eb07afb | |||
| 8d74712a97 | |||
| bbdf61e0a9 | |||
| 6f5ed697cb | |||
| cc93c296c6 | |||
| 07a4d024a8 | |||
| 192216c7ec | |||
| daa97c68d9 | |||
| 4569bffc37 | |||
| ad4e6ad206 | |||
| ddfa701391 | |||
| 3d2d1e3b1d | |||
| 01e79b8cd6 | |||
| 8b6021ff66 | |||
| 5e5bb3032c | |||
| 855e18a74f | |||
| b808a93e46 | |||
| a18166260e | |||
| cba8de348d | |||
| 30d4a7bd24 | |||
| 4ea99426fd | |||
| 19309f7f45 | |||
| 4e7d2fd637 | |||
| 1675c0c4c9 | |||
| 3a4f59ef9e | |||
| 90eac3e50a | |||
| edec48529d | |||
| e622b97097 | |||
| 23266ca459 | |||
| a91e69b6db | |||
| 015ccfad48 | |||
| 06d2fcb750 | |||
| f3e4bc0350 | |||
| 6de3abe3bf | |||
| eaa4140f2f | |||
| b21fe80109 |
66
.gitea/workflows/default_nottags.yaml
Normal file
66
.gitea/workflows/default_nottags.yaml
Normal file
@@ -0,0 +1,66 @@
|
||||
name: Default (not tags)
|
||||
|
||||
on:
|
||||
push:
|
||||
tags-ignore:
|
||||
- '**'
|
||||
|
||||
env:
|
||||
IMAGE: code.foss.global/host.today/ht-docker-node:npmci
|
||||
NPMCI_COMPUTED_REPOURL: https://${{gitea.repository_owner}}:${{secrets.GITEA_TOKEN}}@/${{gitea.repository}}.git
|
||||
NPMCI_TOKEN_NPM: ${{secrets.NPMCI_TOKEN_NPM}}
|
||||
NPMCI_TOKEN_NPM2: ${{secrets.NPMCI_TOKEN_NPM2}}
|
||||
NPMCI_GIT_GITHUBTOKEN: ${{secrets.NPMCI_GIT_GITHUBTOKEN}}
|
||||
NPMCI_URL_CLOUDLY: ${{secrets.NPMCI_URL_CLOUDLY}}
|
||||
|
||||
jobs:
|
||||
security:
|
||||
runs-on: ubuntu-latest
|
||||
continue-on-error: true
|
||||
container:
|
||||
image: ${{ env.IMAGE }}
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Install pnpm and npmci
|
||||
run: |
|
||||
pnpm install -g pnpm
|
||||
pnpm install -g @ship.zone/npmci
|
||||
|
||||
- name: Run npm prepare
|
||||
run: npmci npm prepare
|
||||
|
||||
- name: Audit production dependencies
|
||||
run: |
|
||||
npmci command npm config set registry https://registry.npmjs.org
|
||||
npmci command pnpm audit --audit-level=high --prod
|
||||
continue-on-error: true
|
||||
|
||||
- name: Audit development dependencies
|
||||
run: |
|
||||
npmci command npm config set registry https://registry.npmjs.org
|
||||
npmci command pnpm audit --audit-level=high --dev
|
||||
continue-on-error: true
|
||||
|
||||
test:
|
||||
if: ${{ always() }}
|
||||
needs: security
|
||||
runs-on: ubuntu-latest
|
||||
container:
|
||||
image: ${{ env.IMAGE }}
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Test stable
|
||||
run: |
|
||||
npmci node install stable
|
||||
npmci npm install
|
||||
npmci npm test
|
||||
|
||||
- name: Test build
|
||||
run: |
|
||||
npmci node install stable
|
||||
npmci npm install
|
||||
npmci npm build
|
||||
124
.gitea/workflows/default_tags.yaml
Normal file
124
.gitea/workflows/default_tags.yaml
Normal file
@@ -0,0 +1,124 @@
|
||||
name: Default (tags)
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- '*'
|
||||
|
||||
env:
|
||||
IMAGE: code.foss.global/host.today/ht-docker-node:npmci
|
||||
NPMCI_COMPUTED_REPOURL: https://${{gitea.repository_owner}}:${{secrets.GITEA_TOKEN}}@/${{gitea.repository}}.git
|
||||
NPMCI_TOKEN_NPM: ${{secrets.NPMCI_TOKEN_NPM}}
|
||||
NPMCI_TOKEN_NPM2: ${{secrets.NPMCI_TOKEN_NPM2}}
|
||||
NPMCI_GIT_GITHUBTOKEN: ${{secrets.NPMCI_GIT_GITHUBTOKEN}}
|
||||
NPMCI_URL_CLOUDLY: ${{secrets.NPMCI_URL_CLOUDLY}}
|
||||
|
||||
jobs:
|
||||
security:
|
||||
runs-on: ubuntu-latest
|
||||
continue-on-error: true
|
||||
container:
|
||||
image: ${{ env.IMAGE }}
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Prepare
|
||||
run: |
|
||||
pnpm install -g pnpm
|
||||
pnpm install -g @ship.zone/npmci
|
||||
npmci npm prepare
|
||||
|
||||
- name: Audit production dependencies
|
||||
run: |
|
||||
npmci command npm config set registry https://registry.npmjs.org
|
||||
npmci command pnpm audit --audit-level=high --prod
|
||||
continue-on-error: true
|
||||
|
||||
- name: Audit development dependencies
|
||||
run: |
|
||||
npmci command npm config set registry https://registry.npmjs.org
|
||||
npmci command pnpm audit --audit-level=high --dev
|
||||
continue-on-error: true
|
||||
|
||||
test:
|
||||
if: ${{ always() }}
|
||||
needs: security
|
||||
runs-on: ubuntu-latest
|
||||
container:
|
||||
image: ${{ env.IMAGE }}
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Prepare
|
||||
run: |
|
||||
pnpm install -g pnpm
|
||||
pnpm install -g @ship.zone/npmci
|
||||
npmci npm prepare
|
||||
|
||||
- name: Test stable
|
||||
run: |
|
||||
npmci node install stable
|
||||
npmci npm install
|
||||
npmci npm test
|
||||
|
||||
- name: Test build
|
||||
run: |
|
||||
npmci node install stable
|
||||
npmci npm install
|
||||
npmci npm build
|
||||
|
||||
release:
|
||||
needs: test
|
||||
if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/')
|
||||
runs-on: ubuntu-latest
|
||||
container:
|
||||
image: ${{ env.IMAGE }}
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Prepare
|
||||
run: |
|
||||
pnpm install -g pnpm
|
||||
pnpm install -g @ship.zone/npmci
|
||||
npmci npm prepare
|
||||
|
||||
- name: Release
|
||||
run: |
|
||||
npmci node install stable
|
||||
npmci npm publish
|
||||
|
||||
metadata:
|
||||
needs: test
|
||||
if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/')
|
||||
runs-on: ubuntu-latest
|
||||
container:
|
||||
image: ${{ env.IMAGE }}
|
||||
continue-on-error: true
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Prepare
|
||||
run: |
|
||||
pnpm install -g pnpm
|
||||
pnpm install -g @ship.zone/npmci
|
||||
npmci npm prepare
|
||||
|
||||
- name: Code quality
|
||||
run: |
|
||||
npmci command npm install -g typescript
|
||||
npmci npm install
|
||||
|
||||
- name: Trigger
|
||||
run: npmci trigger
|
||||
|
||||
- name: Build docs and upload artifacts
|
||||
run: |
|
||||
npmci node install stable
|
||||
npmci npm install
|
||||
pnpm install -g @git.zone/tsdoc
|
||||
npmci command tsdoc
|
||||
continue-on-error: true
|
||||
11
.gitignore
vendored
11
.gitignore
vendored
@@ -3,7 +3,6 @@
|
||||
# artifacts
|
||||
coverage/
|
||||
public/
|
||||
pages/
|
||||
|
||||
# installs
|
||||
node_modules/
|
||||
@@ -15,8 +14,10 @@ node_modules/
|
||||
|
||||
# builds
|
||||
dist/
|
||||
dist_web/
|
||||
dist_serve/
|
||||
dist_ts_web/
|
||||
dist_*/
|
||||
|
||||
# custom
|
||||
# AI
|
||||
.claude/
|
||||
.serena/
|
||||
|
||||
#------# custom
|
||||
119
.gitlab-ci.yml
119
.gitlab-ci.yml
@@ -1,119 +0,0 @@
|
||||
# gitzone ci_default
|
||||
image: registry.gitlab.com/hosttoday/ht-docker-node:npmci
|
||||
|
||||
cache:
|
||||
paths:
|
||||
- .npmci_cache/
|
||||
key: "$CI_BUILD_STAGE"
|
||||
|
||||
stages:
|
||||
- security
|
||||
- test
|
||||
- release
|
||||
- metadata
|
||||
|
||||
# ====================
|
||||
# security stage
|
||||
# ====================
|
||||
mirror:
|
||||
stage: security
|
||||
script:
|
||||
- npmci git mirror
|
||||
tags:
|
||||
- docker
|
||||
- notpriv
|
||||
|
||||
snyk:
|
||||
stage: security
|
||||
script:
|
||||
- npmci npm prepare
|
||||
- npmci command npm install -g snyk
|
||||
- npmci command npm install --ignore-scripts
|
||||
- npmci command snyk test
|
||||
tags:
|
||||
- docker
|
||||
- notpriv
|
||||
|
||||
# ====================
|
||||
# test stage
|
||||
# ====================
|
||||
|
||||
testStable:
|
||||
stage: test
|
||||
script:
|
||||
- npmci npm prepare
|
||||
- npmci node install stable
|
||||
- npmci npm install
|
||||
- npmci npm test
|
||||
coverage: /\d+.?\d+?\%\s*coverage/
|
||||
tags:
|
||||
- docker
|
||||
- priv
|
||||
|
||||
testBuild:
|
||||
stage: test
|
||||
script:
|
||||
- npmci npm prepare
|
||||
- npmci node install stable
|
||||
- npmci npm install
|
||||
- npmci command npm run build
|
||||
coverage: /\d+.?\d+?\%\s*coverage/
|
||||
tags:
|
||||
- docker
|
||||
- notpriv
|
||||
|
||||
release:
|
||||
stage: release
|
||||
script:
|
||||
- npmci node install stable
|
||||
- npmci npm publish
|
||||
only:
|
||||
- tags
|
||||
tags:
|
||||
- docker
|
||||
- notpriv
|
||||
|
||||
# ====================
|
||||
# metadata stage
|
||||
# ====================
|
||||
codequality:
|
||||
stage: metadata
|
||||
allow_failure: true
|
||||
script:
|
||||
- npmci command npm install -g tslint typescript
|
||||
- npmci npm install
|
||||
- npmci command "tslint -c tslint.json ./ts/**/*.ts"
|
||||
tags:
|
||||
- docker
|
||||
- priv
|
||||
|
||||
trigger:
|
||||
stage: metadata
|
||||
script:
|
||||
- npmci trigger
|
||||
only:
|
||||
- tags
|
||||
tags:
|
||||
- docker
|
||||
- notpriv
|
||||
|
||||
pages:
|
||||
image: hosttoday/ht-docker-dbase:npmci
|
||||
services:
|
||||
- docker:stable-dind
|
||||
stage: metadata
|
||||
script:
|
||||
- npmci command npm install -g @gitzone/tsdoc
|
||||
- npmci npm prepare
|
||||
- npmci npm install
|
||||
- npmci command tsdoc
|
||||
tags:
|
||||
- docker
|
||||
- notpriv
|
||||
only:
|
||||
- tags
|
||||
artifacts:
|
||||
expire_in: 1 week
|
||||
paths:
|
||||
- public
|
||||
allow_failure: true
|
||||
9
.snyk
9
.snyk
@@ -1,9 +0,0 @@
|
||||
# Snyk (https://snyk.io) policy file, patches or ignores known vulnerabilities.
|
||||
version: v1.13.5
|
||||
# ignores vulnerabilities until expiry date; change duration by modifying expiry date
|
||||
ignore:
|
||||
SNYK-JS-HTTPSPROXYAGENT-469131:
|
||||
- '@pushrocks/smartnetwork > speedtest-net > https-proxy-agent':
|
||||
reason: None given
|
||||
expires: '2019-11-04T13:59:28.695Z'
|
||||
patch: {}
|
||||
24
.vscode/launch.json
vendored
24
.vscode/launch.json
vendored
@@ -2,28 +2,10 @@
|
||||
"version": "0.2.0",
|
||||
"configurations": [
|
||||
{
|
||||
"name": "current file",
|
||||
"type": "node",
|
||||
"command": "npm test",
|
||||
"name": "Run npm test",
|
||||
"request": "launch",
|
||||
"args": [
|
||||
"${relativeFile}"
|
||||
],
|
||||
"runtimeArgs": ["-r", "@gitzone/tsrun"],
|
||||
"cwd": "${workspaceRoot}",
|
||||
"protocol": "inspector",
|
||||
"internalConsoleOptions": "openOnSessionStart"
|
||||
},
|
||||
{
|
||||
"name": "test.ts",
|
||||
"type": "node",
|
||||
"request": "launch",
|
||||
"args": [
|
||||
"test/test.ts"
|
||||
],
|
||||
"runtimeArgs": ["-r", "@gitzone/tsrun"],
|
||||
"cwd": "${workspaceRoot}",
|
||||
"protocol": "inspector",
|
||||
"internalConsoleOptions": "openOnSessionStart"
|
||||
"type": "node-terminal"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
26
.vscode/settings.json
vendored
Normal file
26
.vscode/settings.json
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
{
|
||||
"json.schemas": [
|
||||
{
|
||||
"fileMatch": ["/npmextra.json"],
|
||||
"schema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"npmci": {
|
||||
"type": "object",
|
||||
"description": "settings for npmci"
|
||||
},
|
||||
"gitzone": {
|
||||
"type": "object",
|
||||
"description": "settings for gitzone",
|
||||
"properties": {
|
||||
"projectType": {
|
||||
"type": "string",
|
||||
"enum": ["website", "element", "service", "npm", "wcc"]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
38
README.md
38
README.md
@@ -1,38 +0,0 @@
|
||||
# @mojoio/docker
|
||||
unofficial docker engine api abstraction package written in TypeScript
|
||||
|
||||
## Availabililty and Links
|
||||
* [npmjs.org (npm package)](https://www.npmjs.com/package/@mojoio/docker)
|
||||
* [gitlab.com (source)](https://gitlab.com/mojoio/docker)
|
||||
* [github.com (source mirror)](https://github.com/mojoio/docker)
|
||||
* [docs (typedoc)](https://mojoio.gitlab.io/docker/)
|
||||
|
||||
## Status for master
|
||||
[](https://gitlab.com/mojoio/docker/commits/master)
|
||||
[](https://gitlab.com/mojoio/docker/commits/master)
|
||||
[](https://www.npmjs.com/package/@mojoio/docker)
|
||||
[](https://snyk.io/test/npm/@mojoio/docker)
|
||||
[](https://nodejs.org/dist/latest-v10.x/docs/api/)
|
||||
[](https://nodejs.org/dist/latest-v10.x/docs/api/)
|
||||
[](https://prettier.io/)
|
||||
|
||||
## Usage
|
||||
|
||||
Use TypeScript for best in class instellisense.
|
||||
|
||||
```typescript
|
||||
import { DockerHost } from '@mojoio/docker'; // require Dockersock class
|
||||
|
||||
const run = async () => {
|
||||
const myDockerHost = new DockerHost(); // optional: you can pass a domain to the contructor, defaults to /var/run/docker.sock
|
||||
|
||||
const containers = await myDockerHost.getContainers(); // promise, resolve with an array of DockerContainers
|
||||
};
|
||||
```
|
||||
|
||||
For further information read the linked docs at the top of this readme.
|
||||
|
||||
> MIT licensed | **©** [Lossless GmbH](https://lossless.gmbh)
|
||||
| By using this npm module you agree to our [privacy policy](https://lossless.gmbH/privacy)
|
||||
|
||||
[](https://maintainedby.lossless.com)
|
||||
321
changelog.md
Normal file
321
changelog.md
Normal file
@@ -0,0 +1,321 @@
|
||||
# Changelog
|
||||
|
||||
## 2025-11-24 - 5.0.0 - BREAKING CHANGE(DockerHost)
|
||||
Rename array-returning get* methods to list* on DockerHost and related resource classes; update docs, tests and changelog
|
||||
|
||||
- Renamed public DockerHost methods: getContainers → listContainers, getNetworks → listNetworks, getServices → listServices, getImages → listImages, getSecrets → listSecrets.
|
||||
- Renamed DockerNetwork.getContainersOnNetwork → DockerNetwork.listContainersOnNetwork and updated usages (e.g. getContainersOnNetworkForService).
|
||||
- Updated internal/static method docs/comments to recommend dockerHost.list*() usage and adjusted implementations accordingly.
|
||||
- Updated README, readme.hints.md, tests (test.nonci.node+deno.ts) and changelog to reflect the new list* method names.
|
||||
- Bumped package version to 4.0.0.
|
||||
- Migration note: replace calls to get*() with list*() for methods that return multiple items (arrays). Single-item getters such as getContainerById or getNetworkByName remain unchanged.
|
||||
|
||||
## 2025-11-24 - 4.0.0 - BREAKING CHANGE: Rename list methods for consistency
|
||||
|
||||
**Breaking Changes:**
|
||||
- Renamed all "get*" methods that return arrays to "list*" methods for better clarity:
|
||||
- `getContainers()` → `listContainers()`
|
||||
- `getNetworks()` → `listNetworks()`
|
||||
- `getServices()` → `listServices()`
|
||||
- `getImages()` → `listImages()`
|
||||
- `getSecrets()` → `listSecrets()`
|
||||
- `getContainersOnNetwork()` → `listContainersOnNetwork()` (on DockerNetwork class)
|
||||
|
||||
**Migration Guide:**
|
||||
Update all method calls from `get*()` to `list*()` where the method returns an array of resources. Single-item getters like `getContainerById()`, `getNetworkByName()`, etc. remain unchanged.
|
||||
|
||||
**Rationale:**
|
||||
The `list*` naming convention more clearly indicates that these methods return multiple items (arrays), while `get*` methods are reserved for retrieving single items by ID or name. This follows standard API design patterns and improves code readability.
|
||||
|
||||
## 2025-11-24 - 3.0.2 - fix(readme)
|
||||
Update README to document 3.0.0+ changes: architecture refactor, streaming improvements, health check and circular dependency fixes
|
||||
|
||||
- Documented major refactor to a Clean OOP / Facade pattern with DockerHost as the single entry point
|
||||
- Added/clarified real-time container streaming APIs: streamLogs(), attach(), exec()
|
||||
- Clarified support for flexible descriptors (accept both string references and class instances)
|
||||
- Documented complete container lifecycle API (start, stop, remove, logs, inspect, stats)
|
||||
- Documented new ping() health check method to verify Docker daemon availability
|
||||
- Noted fix for circular dependency issues in Node.js by using type-only imports
|
||||
- Mentioned improved TypeScript definitions and expanded examples, migration guides, and real-world use cases
|
||||
|
||||
## 2025-11-24 - 3.0.1 - fix(classes.base)
|
||||
Use type-only import for DockerHost in classes.base to avoid runtime side-effects
|
||||
|
||||
- Changed the import in ts/classes.base.ts to a type-only import: import type { DockerHost } from './classes.host.js';
|
||||
- Prevents a runtime import of classes.host when only the type is needed, reducing risk of circular dependencies and unintended side-effects during module initialization.
|
||||
- No behavior changes to the public API — TypeScript-only change; intended to improve bundling and runtime stability.
|
||||
|
||||
## 2025-11-24 - 3.0.0 - BREAKING CHANGE(DockerHost)
|
||||
Refactor public API to DockerHost facade; introduce DockerResource base; make resource static methods internal; support flexible descriptors and stream compatibility
|
||||
|
||||
- Refactored architecture: DockerHost is now the single public entry point (Facade) for all operations; direct static calls like DockerImage.createFromRegistry(...) are now internal and replaced by DockerHost.createImageFromRegistry(...) and similar factory methods.
|
||||
- Introduced DockerResource abstract base class used by all resource classes (DockerContainer, DockerImage, DockerNetwork, DockerSecret, DockerService) with a required refresh() method and standardized dockerHost property.
|
||||
- Static methods on resource classes were renamed / scoped as internal (prefixed with _): _list, _fromName/_fromId, _create, _createFromRegistry, _createFromTarStream, _build, etc. Consumers should call DockerHost methods instead.
|
||||
- Creation descriptor interfaces (container, service, etc.) now accept either string identifiers or resource instances (e.g. image: string | DockerImage, networks: (string | DockerNetwork)[], secrets: (string | DockerSecret)[]). DockerHost resolves instances internally.
|
||||
- DockerImageStore imageStore has been made private on DockerHost; new public methods DockerHost.storeImage(name, stream) and DockerHost.retrieveImage(name) provide access to the image store.
|
||||
- Streaming compatibility: updated requestStreaming to convert web ReadableStreams (smartrequest v5+) to Node.js streams via smartstream.nodewebhelpers, preserving backward compatibility for existing streaming APIs (container logs, attach, exec, image import/export, events).
|
||||
- Container enhancements: added full lifecycle and streaming/interactive APIs on DockerContainer: refresh(), inspect(), start(), stop(), remove(), logs(), stats(), streamLogs(), attach(), exec().
|
||||
- Service creation updated: resolves image/network/secret descriptors (strings or instances); adds labels.version from image; improved resource handling and port/secret/network resolution.
|
||||
- Network and Secret classes updated to extend DockerResource and to expose refresh(), remove() and lookup methods via DockerHost (createNetwork/listNetworks/getNetworkByName, createSecret/listSecrets/getSecretByName/getSecretById).
|
||||
- Tests and docs updated: migration guide and examples added (readme.hints.md, README); test timeout reduced from 600s to 300s in package.json.
|
||||
- BREAKING: Public API changes require consumers to migrate away from direct resource static calls and direct imageStore access to the new DockerHost-based factory methods and storeImage/retrieveImage APIs.
|
||||
|
||||
## 2025-11-18 - 2.1.0 - feat(DockerHost)
|
||||
Add DockerHost.ping() to check Docker daemon availability and document health-check usage
|
||||
|
||||
- Add DockerHost.ping() method that issues a GET to /_ping and throws an error if the response status is not 200
|
||||
- Update README: show ping() in Quick Start, add health check examples (isDockerHealthy, waitForDocker) and mention Health Checks in Key Concepts
|
||||
|
||||
## 2025-11-18 - 2.0.0 - BREAKING CHANGE(DockerHost)
|
||||
Rename DockerHost constructor option 'dockerSockPath' to 'socketPath' and update internal socket path handling
|
||||
|
||||
- Breaking: constructor option renamed from 'dockerSockPath' to 'socketPath' — callers must update their code.
|
||||
- Constructor now reads the provided 'socketPath' option first, then falls back to DOCKER_HOST, CI, and finally the default unix socket.
|
||||
- README examples and documentation updated to use 'socketPath'.
|
||||
|
||||
## 2025-11-17 - 1.3.6 - fix(streaming)
|
||||
Convert smartrequest v5 web ReadableStreams to Node.js streams and update deps for streaming compatibility
|
||||
|
||||
- Upgrade @push.rocks/smartrequest to ^5.0.1 and bump @git.zone dev tooling (@git.zone/tsbuild, tsrun, tstest).
|
||||
- requestStreaming now uses response.stream() (web ReadableStream) and converts it to a Node.js Readable via plugins.smartstream.nodewebhelpers.convertWebReadableToNodeReadable for backward compatibility.
|
||||
- Updated consumers of streaming responses (DockerHost.getEventObservable, DockerImage.createFromTarStream, DockerImage.exportToTarStream) to work with the converted Node.js stream and preserve event/backpressure semantics (.on, .pause, .resume).
|
||||
- Added readme.hints.md documenting the smartrequest v5 migration, conversion approach, modified files, and test/build status (type errors resolved and Node.js tests passing).
|
||||
- Removed project metadata file (.serena/project.yml) from the repository.
|
||||
|
||||
## 2025-08-19 - 1.3.5 - fix(core)
|
||||
Stabilize CI/workflows and runtime: update CI images/metadata, improve streaming requests and image handling, and fix tests & package metadata
|
||||
|
||||
- Update CI workflows and images: switch workflow IMAGE to code.foss.global/host.today/ht-docker-node:npmci, fix NPMCI_COMPUTED_REPOURL placeholders, and replace @shipzone/npmci with @ship.zone/npmci in workflows
|
||||
- Update npmextra.json gitzone metadata (githost -> code.foss.global, gitscope -> apiclient.xyz, npmPackagename -> @apiclient.xyz/docker) and npmdocker.baseImage -> host.today/ht-docker-node:npmci
|
||||
- Adjust package.json repository/bugs/homepage to code.foss.global, add pnpm overrides entry and normalize package metadata
|
||||
- Improve DockerHost streaming and request handling: reduce requestStreaming timeout to 30s, enable autoDrain for streaming requests, improve response parsing for streaming vs JSON endpoints to avoid hangs
|
||||
- Enhance DockerImage and DockerImageStore stream handling and tar processing: more robust import/export parsing, safer stream-to-file writes, repackaging steps, and error handling
|
||||
- Unskip and update tests: re-enable DockerImageStore integration test, change stored image name to 'hello2', add formatting fixes and ensure cleanup stops the test DockerHost
|
||||
- Miscellaneous code and docs cleanup: numerous formatting fixes and trailing-comma normalization across README and TS sources, update commitinfo and logger newline fixes, and add local tool ignores (.claude/.serena) to .gitignore
|
||||
|
||||
## 2025-08-19 - 1.3.4 - fix(test)
|
||||
|
||||
Increase test timeout, enable DockerImageStore test, update test image name, bump smartrequest patch, and add local claude settings
|
||||
|
||||
- Increase tstest timeout from 120s to 600s in package.json to accommodate longer-running integration tests.
|
||||
- Unskip the DockerImageStore integration test and change stored image name from 'hello' to 'hello2' in test/test.nonci.node.ts.
|
||||
- Bump dependency @push.rocks/smartrequest from ^4.3.0 to ^4.3.1.
|
||||
- Add .claude/settings.local.json to allow local agent permissions for running tests and related tooling.
|
||||
|
||||
## 2025-08-19 - 1.3.3 - fix(classes.host)
|
||||
|
||||
Adjust requestStreaming timeout and autoDrain; stabilize tests
|
||||
|
||||
- Reduced requestStreaming timeout from 10 minutes to 30 seconds to avoid long-running hanging requests.
|
||||
- Enabled autoDrain for streaming requests to ensure response streams are properly drained and reduce resource issues.
|
||||
- Marked the DockerImageStore S3 integration test as skipped to avoid CI dependence on external S3 and added a cleanup test to stop the test DockerHost.
|
||||
- Added local tool settings file (.claude/settings.local.json) with local permissions (development-only).
|
||||
|
||||
## 2025-08-18 - 1.3.2 - fix(package.json)
|
||||
|
||||
Fix test script timeout typo, update dependency versions, and add typings & project configs
|
||||
|
||||
- Fix test script: correct 'tineout' -> 'timeout' for npm test command and set timeout to 120s
|
||||
- Add 'typings': 'dist_ts/index.d.ts' to package.json
|
||||
- Bump dependencies to newer compatible versions (notable packages: @push.rocks/lik, @push.rocks/smartarchive, @push.rocks/smartbucket, @push.rocks/smartfile, @push.rocks/smartlog, @push.rocks/smartpromise, @push.rocks/smartstream, rxjs)
|
||||
- Add project/config files: .serena/project.yml and .claude/settings.local.json (editor/CI metadata)
|
||||
- Include generated cache/metadata files (typescript document symbols cache) — not source changes but tooling/cache artifacts
|
||||
|
||||
## 2025-08-18 - 1.3.1 - fix(test)
|
||||
|
||||
Update test setup and devDependencies; adjust test import and add package metadata
|
||||
|
||||
- Update test script to run with additional flags: --verbose, --logfile and --tineout 120
|
||||
- Bump devDependencies: @git.zone/tsbuild -> ^2.6.7, @git.zone/tsrun -> ^1.3.3, @git.zone/tstest -> ^2.3.5, @push.rocks/qenv -> ^6.1.3
|
||||
- Change test import from @push.rocks/tapbundle to @git.zone/tstest/tapbundle
|
||||
- Add typings field (dist_ts/index.d.ts)
|
||||
- Add packageManager field for pnpm@10.14.0 with integrity hash
|
||||
|
||||
## 2024-12-23 - 1.3.0 - feat(core)
|
||||
|
||||
Initial release of Docker client with TypeScript support
|
||||
|
||||
- Provides easy communication with Docker's remote API from Node.js
|
||||
- Includes implementations for managing Docker services, networks, secrets, containers, and images
|
||||
|
||||
## 2024-12-23 - 1.2.8 - fix(core)
|
||||
|
||||
Improved the image creation process from tar stream in DockerImage class.
|
||||
|
||||
- Enhanced `DockerImage.createFromTarStream` method to handle streamed response and parse imported image details.
|
||||
- Fixed the dependency version for `@push.rocks/smartarchive` in package.json.
|
||||
|
||||
## 2024-10-13 - 1.2.7 - fix(core)
|
||||
|
||||
Prepare patch release with minor fixes and improvements
|
||||
|
||||
## 2024-10-13 - 1.2.6 - fix(core)
|
||||
|
||||
Minor refactoring and code quality improvements.
|
||||
|
||||
## 2024-10-13 - 1.2.5 - fix(dependencies)
|
||||
|
||||
Update dependencies for stability improvements
|
||||
|
||||
- Updated @push.rocks/smartstream to version ^3.0.46
|
||||
- Updated @push.rocks/tapbundle to version ^5.3.0
|
||||
- Updated @types/node to version 22.7.5
|
||||
|
||||
## 2024-10-13 - 1.2.4 - fix(core)
|
||||
|
||||
Refactored DockerImageStore constructor to remove DockerHost dependency
|
||||
|
||||
- Adjusted DockerImageStore constructor to remove dependency on DockerHost
|
||||
- Updated ts/classes.host.ts to align with DockerImageStore's new constructor signature
|
||||
|
||||
## 2024-08-21 - 1.2.3 - fix(dependencies)
|
||||
|
||||
Update dependencies to the latest versions and fix image export test
|
||||
|
||||
- Updated several dependencies to their latest versions in package.json.
|
||||
- Enabled the previously skipped 'should export images' test.
|
||||
|
||||
## 2024-06-10 - 1.2.1-1.2.2 - Core/General
|
||||
|
||||
General updates and fixes.
|
||||
|
||||
- Fix core update
|
||||
|
||||
## 2024-06-10 - 1.2.0 - Core
|
||||
|
||||
Core updates and bug fixes.
|
||||
|
||||
- Fix core update
|
||||
|
||||
## 2024-06-08 - 1.2.0 - General/Core
|
||||
|
||||
Major release with core enhancements.
|
||||
|
||||
- Processing images with extraction, retagging, repackaging, and long-term storage
|
||||
|
||||
## 2024-06-06 - 1.1.4 - General/Imagestore
|
||||
|
||||
Significant feature addition.
|
||||
|
||||
- Add feature to process images with extraction, retagging, repackaging, and long-term storage
|
||||
|
||||
## 2024-05-08 - 1.0.112 - Images
|
||||
|
||||
Add new functionality for image handling.
|
||||
|
||||
- Can now import and export images
|
||||
- Start work on local 100% JS OCI image registry
|
||||
|
||||
## 2024-06-05 - 1.1.0-1.1.3 - Core
|
||||
|
||||
Regular updates and fixes.
|
||||
|
||||
- Fix core update
|
||||
|
||||
## 2024-02-02 - 1.0.105-1.0.110 - Core
|
||||
|
||||
Routine core updates and fixes.
|
||||
|
||||
- Fix core update
|
||||
|
||||
## 2022-10-17 - 1.0.103-1.0.104 - Core
|
||||
|
||||
Routine core updates.
|
||||
|
||||
- Fix core update
|
||||
|
||||
## 2020-10-01 - 1.0.99-1.0.102 - Core
|
||||
|
||||
Routine core updates.
|
||||
|
||||
- Fix core update
|
||||
|
||||
## 2019-09-22 - 1.0.73-1.0.78 - Core
|
||||
|
||||
Routine updates and core fixes.
|
||||
|
||||
- Fix core update
|
||||
|
||||
## 2019-09-13 - 1.0.60-1.0.72 - Core
|
||||
|
||||
Routine updates and core fixes.
|
||||
|
||||
- Fix core update
|
||||
|
||||
## 2019-08-16 - 1.0.43-1.0.59 - Core
|
||||
|
||||
Routine updates and core fixes.
|
||||
|
||||
- Fix core update
|
||||
|
||||
## 2019-08-15 - 1.0.37-1.0.42 - Core
|
||||
|
||||
Routine updates and core fixes.
|
||||
|
||||
- Fix core update
|
||||
|
||||
## 2019-08-14 - 1.0.31-1.0.36 - Core
|
||||
|
||||
Routine updates and core fixes.
|
||||
|
||||
- Fix core update
|
||||
|
||||
## 2019-01-10 - 1.0.27-1.0.30 - Core
|
||||
|
||||
Routine updates and core fixes.
|
||||
|
||||
- Fix core update
|
||||
|
||||
## 2018-07-16 - 1.0.23-1.0.24 - Core
|
||||
|
||||
Routine updates and core fixes.
|
||||
|
||||
- Fix core shift to new style
|
||||
|
||||
## 2017-07-16 - 1.0.20-1.0.22 - General
|
||||
|
||||
Routine updates and fixes.
|
||||
|
||||
- Update node_modules within npmdocker
|
||||
|
||||
## 2017-04-02 - 1.0.18-1.0.19 - General
|
||||
|
||||
Routine updates and fixes.
|
||||
|
||||
- Work with npmdocker and npmts 7.x.x
|
||||
- CI updates
|
||||
|
||||
## 2016-07-31 - 1.0.17 - General
|
||||
|
||||
Enhancements and fixes.
|
||||
|
||||
- Now waiting for response to be stored before ending streaming request
|
||||
- Cosmetic fix
|
||||
|
||||
## 2016-07-29 - 1.0.14-1.0.16 - General
|
||||
|
||||
Multiple updates and features added.
|
||||
|
||||
- Fix request for change observable and add npmdocker
|
||||
- Add request typings
|
||||
|
||||
## 2016-07-28 - 1.0.13 - Core
|
||||
|
||||
Fixes and preparations.
|
||||
|
||||
- Fixed request for newer docker
|
||||
- Prepare for npmdocker
|
||||
|
||||
## 2016-06-16 - 1.0.0-1.0.2 - General
|
||||
|
||||
Initial sequence of releases, significant feature additions and CI setups.
|
||||
|
||||
- Implement container start and stop
|
||||
- Implement list containers and related functions
|
||||
- Add tests with in docker environment
|
||||
|
||||
## 2016-04-12 - unknown - Initial Commit
|
||||
|
||||
Initial project setup.
|
||||
|
||||
- Initial commit
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"npmdocker": {
|
||||
"baseImage": "hosttoday/ht-docker-node:npmci",
|
||||
"baseImage": "host.today/ht-docker-node:npmci",
|
||||
"command": "(ls -a && rm -r node_modules && yarn global add npmts && yarn install && npmts)",
|
||||
"dockerSock": true
|
||||
},
|
||||
@@ -10,13 +10,28 @@
|
||||
"npmRegistryUrl": "registry.npmjs.org"
|
||||
},
|
||||
"gitzone": {
|
||||
"projectType": "npm",
|
||||
"module": {
|
||||
"githost": "gitlab.com",
|
||||
"gitscope": "mojoio",
|
||||
"githost": "code.foss.global",
|
||||
"gitscope": "apiclient.xyz",
|
||||
"gitrepo": "docker",
|
||||
"shortDescription": "unofficial docker engine api abstraction package written in TypeScript",
|
||||
"npmPackagename": "@mojoio/docker",
|
||||
"license": "MIT"
|
||||
}
|
||||
"description": "Provides easy communication with Docker remote API from Node.js, with TypeScript support.",
|
||||
"npmPackagename": "@apiclient.xyz/docker",
|
||||
"license": "MIT",
|
||||
"keywords": [
|
||||
"Docker",
|
||||
"API",
|
||||
"Node.js",
|
||||
"TypeScript",
|
||||
"Containers",
|
||||
"Images",
|
||||
"Networks",
|
||||
"Services",
|
||||
"Secrets"
|
||||
]
|
||||
}
|
||||
},
|
||||
"tsdoc": {
|
||||
"legal": "\n## License and Legal Information\n\nThis repository contains open-source code that is licensed under the MIT License. A copy of the MIT License can be found in the [license](license) file within this repository. \n\n**Please note:** The MIT License does not grant permission to use the trade names, trademarks, service marks, or product names of the project, except as required for reasonable and customary use in describing the origin of the work and reproducing the content of the NOTICE file.\n\n### Trademarks\n\nThis project is owned and maintained by Task Venture Capital GmbH. The names and logos associated with Task Venture Capital GmbH and any related products or services are trademarks of Task Venture Capital GmbH and are not included within the scope of the MIT license granted herein. Use of these trademarks must comply with Task Venture Capital GmbH's Trademark Guidelines, and any usage must be approved in writing by Task Venture Capital GmbH.\n\n### Company Information\n\nTask Venture Capital GmbH \nRegistered at District court Bremen HRB 35230 HB, Germany\n\nFor any legal inquiries or if you require further information, please contact us via email at hello@task.vc.\n\nBy using this repository, you acknowledge that you have read this section, agree to comply with its terms, and understand that the licensing of the code does not imply endorsement by Task Venture Capital GmbH of any derivative works.\n"
|
||||
}
|
||||
}
|
||||
2043
package-lock.json
generated
2043
package-lock.json
generated
File diff suppressed because it is too large
Load Diff
97
package.json
97
package.json
@@ -1,62 +1,79 @@
|
||||
{
|
||||
"name": "@mojoio/docker",
|
||||
"version": "1.0.89",
|
||||
"description": "easy communication with docker remote api from node, TypeScript ready",
|
||||
"name": "@apiclient.xyz/docker",
|
||||
"version": "5.0.0",
|
||||
"description": "Provides easy communication with Docker remote API from Node.js, with TypeScript support.",
|
||||
"private": false,
|
||||
"main": "dist/index.js",
|
||||
"typings": "dist/index.d.ts",
|
||||
"main": "dist_ts/index.js",
|
||||
"typings": "dist_ts/index.d.ts",
|
||||
"type": "module",
|
||||
"scripts": {
|
||||
"test": "tstest test/",
|
||||
"build": "tsbuild"
|
||||
"test": "(tstest test/ --verbose --logfile --timeout 300)",
|
||||
"build": "(tsbuild --web --allowimplicitany)",
|
||||
"buildDocs": "tsdoc"
|
||||
},
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "git+ssh://git@gitlab.com/pushrocks/dockersock.git"
|
||||
"url": "https://code.foss.global/apiclient.xyz/docker.git"
|
||||
},
|
||||
"keywords": [
|
||||
"docker",
|
||||
"sock",
|
||||
"container",
|
||||
"request",
|
||||
"api"
|
||||
"Docker",
|
||||
"API",
|
||||
"Node.js",
|
||||
"TypeScript",
|
||||
"Containers",
|
||||
"Images",
|
||||
"Networks",
|
||||
"Services",
|
||||
"Secrets"
|
||||
],
|
||||
"author": "Lossless GmbH",
|
||||
"license": "MIT",
|
||||
"bugs": {
|
||||
"url": "https://gitlab.com/pushrocks/dockersock/issues"
|
||||
"url": "https://code.foss.global/apiclient.xyz/docker/issues"
|
||||
},
|
||||
"homepage": "https://gitlab.com/pushrocks/dockersock#README",
|
||||
"homepage": "https://code.foss.global/apiclient.xyz/docker#readme",
|
||||
"dependencies": {
|
||||
"@pushrocks/lik": "^3.0.11",
|
||||
"@pushrocks/smartfile": "^7.0.6",
|
||||
"@pushrocks/smartjson": "^3.0.8",
|
||||
"@pushrocks/smartlog": "^2.0.21",
|
||||
"@pushrocks/smartnetwork": "^1.1.16",
|
||||
"@pushrocks/smartpath": "^4.0.1",
|
||||
"@pushrocks/smartpromise": "^3.0.6",
|
||||
"@pushrocks/smartrequest": "^1.1.42",
|
||||
"@pushrocks/smartstring": "^3.0.14",
|
||||
"@pushrocks/smartversion": "^2.0.4",
|
||||
"rxjs": "^6.5.3"
|
||||
"@push.rocks/lik": "^6.2.2",
|
||||
"@push.rocks/smartarchive": "^4.2.2",
|
||||
"@push.rocks/smartbucket": "^3.3.10",
|
||||
"@push.rocks/smartfile": "^11.2.7",
|
||||
"@push.rocks/smartjson": "^5.2.0",
|
||||
"@push.rocks/smartlog": "^3.1.10",
|
||||
"@push.rocks/smartnetwork": "^4.4.0",
|
||||
"@push.rocks/smartpath": "^6.0.0",
|
||||
"@push.rocks/smartpromise": "^4.2.3",
|
||||
"@push.rocks/smartrequest": "^5.0.1",
|
||||
"@push.rocks/smartstream": "^3.2.5",
|
||||
"@push.rocks/smartstring": "^4.1.0",
|
||||
"@push.rocks/smartunique": "^3.0.9",
|
||||
"@push.rocks/smartversion": "^3.0.5",
|
||||
"@tsclass/tsclass": "^9.3.0",
|
||||
"rxjs": "^7.8.2"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@gitzone/tsbuild": "^2.1.17",
|
||||
"@gitzone/tsrun": "^1.2.8",
|
||||
"@gitzone/tstest": "^1.0.28",
|
||||
"@pushrocks/tapbundle": "^3.2.0",
|
||||
"@types/node": "^12.12.11",
|
||||
"tslint": "^5.20.1",
|
||||
"tslint-config-prettier": "^1.18.0"
|
||||
"@git.zone/tsbuild": "^3.1.0",
|
||||
"@git.zone/tsrun": "^2.0.0",
|
||||
"@git.zone/tstest": "^2.8.2",
|
||||
"@push.rocks/qenv": "^6.1.3",
|
||||
"@types/node": "22.7.5"
|
||||
},
|
||||
"files": [
|
||||
"ts/*",
|
||||
"ts_web/*",
|
||||
"dist/*",
|
||||
"dist_web/*",
|
||||
"dist_ts_web/*",
|
||||
"assets/*",
|
||||
"ts/**/*",
|
||||
"ts_web/**/*",
|
||||
"dist/**/*",
|
||||
"dist_*/**/*",
|
||||
"dist_ts/**/*",
|
||||
"dist_ts_web/**/*",
|
||||
"assets/**/*",
|
||||
"cli.js",
|
||||
"npmextra.json",
|
||||
"readme.md"
|
||||
]
|
||||
],
|
||||
"browserslist": [
|
||||
"last 1 chrome versions"
|
||||
],
|
||||
"packageManager": "pnpm@10.14.0+sha512.ad27a79641b49c3e481a16a805baa71817a04bbe06a38d17e60e2eaee83f6a146c6a688125f5792e48dd5ba30e7da52a5cda4c3992b9ccf333f9ce223af84748",
|
||||
"pnpm": {
|
||||
"overrides": {}
|
||||
}
|
||||
}
|
||||
|
||||
8699
pnpm-lock.yaml
generated
Normal file
8699
pnpm-lock.yaml
generated
Normal file
File diff suppressed because it is too large
Load Diff
6
qenv.yml
Normal file
6
qenv.yml
Normal file
@@ -0,0 +1,6 @@
|
||||
required:
|
||||
- S3_ENDPOINT
|
||||
- S3_ACCESSKEY
|
||||
- S3_ACCESSSECRET
|
||||
- S3_BUCKET
|
||||
|
||||
147
readme.hints.md
Normal file
147
readme.hints.md
Normal file
@@ -0,0 +1,147 @@
|
||||
# Docker Module - Development Hints
|
||||
|
||||
## OOP Refactoring - Clean Architecture (2025-11-24)
|
||||
|
||||
### Architecture Changes
|
||||
The module has been restructured to follow a clean OOP Facade pattern:
|
||||
- **DockerHost** is now the single entry point for all Docker operations
|
||||
- All resource classes extend abstract `DockerResource` base class
|
||||
- Static methods are prefixed with `_` to indicate internal use
|
||||
- Public API is exclusively through DockerHost methods
|
||||
|
||||
### Key Changes
|
||||
|
||||
**1. Factory Pattern**
|
||||
- All resource creation/retrieval goes through DockerHost:
|
||||
```typescript
|
||||
// Old (deprecated):
|
||||
const container = await DockerContainer.getContainers(dockerHost);
|
||||
const network = await DockerNetwork.createNetwork(dockerHost, descriptor);
|
||||
|
||||
// New (clean API):
|
||||
const containers = await dockerHost.listContainers();
|
||||
const network = await dockerHost.createNetwork(descriptor);
|
||||
```
|
||||
|
||||
**2. Container Management Methods Added**
|
||||
The DockerContainer class now has full CRUD and streaming operations:
|
||||
|
||||
**Lifecycle:**
|
||||
- `container.start()` - Start container
|
||||
- `container.stop(options?)` - Stop container
|
||||
- `container.remove(options?)` - Remove container
|
||||
- `container.refresh()` - Reload state
|
||||
|
||||
**Information:**
|
||||
- `container.inspect()` - Get detailed info
|
||||
- `container.logs(options)` - Get logs as string (one-shot)
|
||||
- `container.stats(options)` - Get stats
|
||||
|
||||
**Streaming & Interactive:**
|
||||
- `container.streamLogs(options)` - Stream logs continuously (follow mode)
|
||||
- `container.attach(options)` - Attach to main process (PID 1) with bidirectional stream
|
||||
- `container.exec(command, options)` - Execute commands in container interactively
|
||||
|
||||
**Example - Stream Logs:**
|
||||
```typescript
|
||||
const container = await dockerHost.getContainerById('abc123');
|
||||
const logStream = await container.streamLogs({ timestamps: true });
|
||||
|
||||
logStream.on('data', (chunk) => {
|
||||
console.log(chunk.toString());
|
||||
});
|
||||
```
|
||||
|
||||
**Example - Attach to Container:**
|
||||
```typescript
|
||||
const { stream, close } = await container.attach({
|
||||
stdin: true,
|
||||
stdout: true,
|
||||
stderr: true
|
||||
});
|
||||
|
||||
// Pipe to/from process
|
||||
process.stdin.pipe(stream);
|
||||
stream.pipe(process.stdout);
|
||||
|
||||
// Later: detach
|
||||
await close();
|
||||
```
|
||||
|
||||
**Example - Execute Command:**
|
||||
```typescript
|
||||
const { stream, close } = await container.exec('ls -la /app', {
|
||||
tty: true
|
||||
});
|
||||
|
||||
stream.on('data', (chunk) => {
|
||||
console.log(chunk.toString());
|
||||
});
|
||||
|
||||
stream.on('end', async () => {
|
||||
await close();
|
||||
});
|
||||
```
|
||||
|
||||
**3. DockerResource Base Class**
|
||||
All resource classes now extend `DockerResource`:
|
||||
- Consistent `dockerHost` property (not `dockerHostRef`)
|
||||
- Required `refresh()` method
|
||||
- Standardized constructor pattern
|
||||
|
||||
**4. ImageStore Encapsulation**
|
||||
- `dockerHost.imageStore` is now private
|
||||
- Use `dockerHost.storeImage(name, stream)` instead
|
||||
- Use `dockerHost.retrieveImage(name)` instead
|
||||
|
||||
**5. Creation Descriptors Support Both Primitives and Instances**
|
||||
Interfaces now accept both strings and class instances:
|
||||
```typescript
|
||||
// Both work:
|
||||
await dockerHost.createService({
|
||||
image: 'nginx:latest', // String
|
||||
networks: ['my-network'], // String array
|
||||
secrets: ['my-secret'] // String array
|
||||
});
|
||||
|
||||
await dockerHost.createService({
|
||||
image: imageInstance, // DockerImage instance
|
||||
networks: [networkInstance], // DockerNetwork array
|
||||
secrets: [secretInstance] // DockerSecret array
|
||||
});
|
||||
```
|
||||
|
||||
### Migration Guide
|
||||
Replace all static method calls with dockerHost methods:
|
||||
- `DockerContainer.getContainers(host)` → `dockerHost.listContainers()`
|
||||
- `DockerImage.createFromRegistry(host, opts)` → `dockerHost.createImageFromRegistry(opts)`
|
||||
- `DockerService.createService(host, desc)` → `dockerHost.createService(desc)`
|
||||
- `dockerHost.imageStore.storeImage(...)` → `dockerHost.storeImage(...)`
|
||||
|
||||
## smartrequest v5+ Migration (2025-11-17)
|
||||
|
||||
### Breaking Change
|
||||
smartrequest v5.0.0+ returns web `ReadableStream` objects (Web Streams API) instead of Node.js streams.
|
||||
|
||||
### Solution Implemented
|
||||
All streaming methods now convert web ReadableStreams to Node.js streams using:
|
||||
```typescript
|
||||
plugins.smartstream.nodewebhelpers.convertWebReadableToNodeReadable(webStream)
|
||||
```
|
||||
|
||||
### Files Modified
|
||||
- `ts/classes.host.ts`:
|
||||
- `requestStreaming()` - Converts web stream to Node.js stream before returning
|
||||
- `getEventObservable()` - Works with converted Node.js stream
|
||||
|
||||
- `ts/classes.image.ts`:
|
||||
- `createFromTarStream()` - Uses converted Node.js stream for event handling
|
||||
- `exportToTarStream()` - Uses converted Node.js stream for backpressure management
|
||||
|
||||
### Testing
|
||||
- Build: All 11 type errors resolved
|
||||
- Tests: Node.js tests pass (DockerHost, DockerContainer, DockerImage, DockerImageStore)
|
||||
|
||||
### Notes
|
||||
- The conversion maintains backward compatibility with existing code expecting Node.js stream methods (`.on()`, `.emit()`, `.pause()`, `.resume()`)
|
||||
- smartstream's `nodewebhelpers` module provides bidirectional conversion utilities between web and Node.js streams
|
||||
936
readme.md
Normal file
936
readme.md
Normal file
@@ -0,0 +1,936 @@
|
||||
# @apiclient.xyz/docker 🐳
|
||||
|
||||
> **Powerful TypeScript client for Docker Remote API** - Build, manage, and orchestrate Docker containers, images, networks, and swarm services with type-safe elegance.
|
||||
|
||||
## Issue Reporting and Security
|
||||
|
||||
For reporting bugs, issues, or security vulnerabilities, please visit [community.foss.global/](https://community.foss.global/). This is the central community hub for all issue reporting. Developers who sign and comply with our contribution agreement and go through identification can also get a [code.foss.global/](https://code.foss.global/) account to submit Pull Requests directly.
|
||||
|
||||
## 🚀 Features
|
||||
|
||||
- 🎯 **Full TypeScript Support** - Complete type definitions for all Docker API entities
|
||||
- 🔄 **Async/Await Ready** - Modern promise-based architecture for seamless async operations
|
||||
- 📦 **Container Management** - Full lifecycle control: create, start, stop, remove, inspect containers
|
||||
- 🔌 **Interactive Containers** - Stream logs, attach to processes, execute commands in real-time
|
||||
- 🖼️ **Image Handling** - Pull from registries, build from tarballs, export, and manage tags
|
||||
- 🌐 **Network Operations** - Create and manage Docker networks with full IPAM support
|
||||
- 🔐 **Secrets Management** - Handle Docker secrets securely in swarm mode
|
||||
- 🎭 **Service Orchestration** - Deploy and manage services in Docker Swarm
|
||||
- 💾 **S3 Image Storage** - Built-in support for storing/retrieving images from S3-compatible storage
|
||||
- 📊 **Event Streaming** - Real-time Docker event monitoring with RxJS observables
|
||||
- 🔧 **Registry Authentication** - Seamless authentication with Docker registries including private registries
|
||||
- 🐝 **Swarm Mode** - Full support for Docker Swarm initialization and management
|
||||
|
||||
## 📦 Installation
|
||||
|
||||
```bash
|
||||
# Using pnpm (recommended)
|
||||
pnpm add @apiclient.xyz/docker
|
||||
|
||||
# Using npm
|
||||
npm install @apiclient.xyz/docker
|
||||
|
||||
# Using yarn
|
||||
yarn add @apiclient.xyz/docker
|
||||
```
|
||||
|
||||
## 🎯 Quick Start
|
||||
|
||||
```typescript
|
||||
import { DockerHost } from '@apiclient.xyz/docker';
|
||||
|
||||
// Connect to local Docker daemon (default: /var/run/docker.sock)
|
||||
const docker = new DockerHost({});
|
||||
await docker.start();
|
||||
|
||||
// Check if Docker is accessible
|
||||
await docker.ping();
|
||||
console.log('✅ Docker is running');
|
||||
|
||||
// List all containers
|
||||
const containers = await docker.listContainers();
|
||||
console.log(`Found ${containers.length} containers`);
|
||||
|
||||
// Get a specific container and interact with it
|
||||
const container = await docker.getContainerById('abc123');
|
||||
await container.start();
|
||||
|
||||
// Stream logs in real-time
|
||||
const logStream = await container.streamLogs({ follow: true });
|
||||
logStream.on('data', (chunk) => console.log(chunk.toString()));
|
||||
|
||||
// Don't forget to clean up
|
||||
await docker.stop();
|
||||
```
|
||||
|
||||
## 🏗️ Clean Architecture
|
||||
|
||||
The module follows a **Facade pattern** with `DockerHost` as the single entry point:
|
||||
|
||||
```typescript
|
||||
const docker = new DockerHost({});
|
||||
|
||||
// All operations go through DockerHost
|
||||
const containers = await docker.listContainers(); // List containers
|
||||
const container = await docker.getContainerById('id'); // Get specific container
|
||||
const network = await docker.createNetwork({ Name: 'my-net' }); // Create network
|
||||
const service = await docker.createService(descriptor); // Deploy service
|
||||
const image = await docker.createImageFromRegistry({ imageUrl: 'nginx' });
|
||||
|
||||
// Resources support both strings and instances
|
||||
await docker.createService({
|
||||
image: 'nginx:latest', // String works!
|
||||
networks: ['my-network'], // String array works!
|
||||
secrets: [secretInstance] // Or use actual instances
|
||||
});
|
||||
```
|
||||
|
||||
## 🔌 Socket Path Configuration
|
||||
|
||||
The library determines which Docker socket to use in the following priority order:
|
||||
|
||||
1. **Constructor option** - `socketPath` parameter (highest priority)
|
||||
2. **Environment variable** - `DOCKER_HOST` environment variable
|
||||
3. **CI environment** - If `CI` env var is set, uses `http://docker:2375/`
|
||||
4. **Default** - Falls back to `http://unix:/var/run/docker.sock:`
|
||||
|
||||
```typescript
|
||||
// Explicit socket path (highest priority)
|
||||
const docker1 = new DockerHost({
|
||||
socketPath: 'tcp://remote-host:2375',
|
||||
});
|
||||
|
||||
// Uses DOCKER_HOST environment variable if set
|
||||
const docker2 = new DockerHost({});
|
||||
|
||||
// Custom image store directory
|
||||
const docker3 = new DockerHost({
|
||||
imageStoreDir: '/custom/path/to/image-store',
|
||||
});
|
||||
```
|
||||
|
||||
## 📚 Complete API Guide
|
||||
|
||||
### 🐳 DockerHost - Your Gateway to Docker
|
||||
|
||||
The `DockerHost` class is your primary interface to interact with the Docker daemon.
|
||||
|
||||
```typescript
|
||||
import { DockerHost } from '@apiclient.xyz/docker';
|
||||
|
||||
// Initialize with options
|
||||
const docker = new DockerHost({
|
||||
socketPath: '/var/run/docker.sock', // Optional: custom socket path
|
||||
imageStoreDir: './docker-images', // Optional: custom image store location
|
||||
});
|
||||
|
||||
// Start the docker host (initializes image store)
|
||||
await docker.start();
|
||||
|
||||
// ... perform operations ...
|
||||
|
||||
// Stop and clean up
|
||||
await docker.stop();
|
||||
```
|
||||
|
||||
#### Health Check / Ping Docker
|
||||
|
||||
Check if the Docker daemon is running and accessible:
|
||||
|
||||
```typescript
|
||||
// Ping Docker daemon
|
||||
try {
|
||||
await docker.ping();
|
||||
console.log('✅ Docker is running and accessible');
|
||||
} catch (error) {
|
||||
console.error('❌ Docker is not accessible:', error.message);
|
||||
}
|
||||
|
||||
// Use in health check function
|
||||
async function isDockerHealthy(): Promise<boolean> {
|
||||
try {
|
||||
await docker.ping();
|
||||
return true;
|
||||
} catch (error) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// Example: Wait for Docker to be ready
|
||||
async function waitForDocker(timeoutMs = 10000): Promise<void> {
|
||||
const startTime = Date.now();
|
||||
|
||||
while (Date.now() - startTime < timeoutMs) {
|
||||
try {
|
||||
await docker.ping();
|
||||
console.log('✅ Docker is ready');
|
||||
return;
|
||||
} catch (error) {
|
||||
console.log('⏳ Waiting for Docker...');
|
||||
await new Promise(resolve => setTimeout(resolve, 1000));
|
||||
}
|
||||
}
|
||||
|
||||
throw new Error('Docker did not become available within timeout');
|
||||
}
|
||||
```
|
||||
|
||||
### 📦 Container Management
|
||||
|
||||
#### List All Containers
|
||||
|
||||
```typescript
|
||||
// Get all containers (running and stopped)
|
||||
const containers = await docker.listContainers();
|
||||
|
||||
containers.forEach((container) => {
|
||||
console.log(`Container: ${container.Names[0]}`);
|
||||
console.log(` ID: ${container.Id}`);
|
||||
console.log(` Status: ${container.Status}`);
|
||||
console.log(` Image: ${container.Image}`);
|
||||
console.log(` State: ${container.State}`);
|
||||
});
|
||||
```
|
||||
|
||||
#### Get Container by ID
|
||||
|
||||
```typescript
|
||||
const container = await docker.getContainerById('abc123');
|
||||
if (container) {
|
||||
console.log(`Found: ${container.Names[0]}`);
|
||||
console.log(`Running: ${container.State === 'running'}`);
|
||||
}
|
||||
```
|
||||
|
||||
#### Container Lifecycle Operations
|
||||
|
||||
```typescript
|
||||
// Get a container
|
||||
const container = await docker.getContainerById('abc123');
|
||||
|
||||
// Start the container
|
||||
await container.start();
|
||||
console.log('Container started');
|
||||
|
||||
// Stop the container (with optional timeout)
|
||||
await container.stop({ t: 10 }); // 10 seconds graceful stop
|
||||
console.log('Container stopped');
|
||||
|
||||
// Restart by starting again
|
||||
await container.start();
|
||||
|
||||
// Remove the container
|
||||
await container.remove({ force: true, v: true }); // force + remove volumes
|
||||
console.log('Container removed');
|
||||
```
|
||||
|
||||
#### Inspect Container Details
|
||||
|
||||
```typescript
|
||||
const container = await docker.getContainerById('abc123');
|
||||
|
||||
// Get detailed information
|
||||
const details = await container.inspect();
|
||||
console.log('Container details:', details);
|
||||
|
||||
// Or just refresh the container state
|
||||
await container.refresh();
|
||||
console.log('Updated state:', container.State);
|
||||
```
|
||||
|
||||
#### Get Container Logs
|
||||
|
||||
```typescript
|
||||
// Get logs as a string (one-shot)
|
||||
const logs = await container.logs({
|
||||
stdout: true,
|
||||
stderr: true,
|
||||
timestamps: true,
|
||||
tail: 100, // Last 100 lines
|
||||
});
|
||||
console.log(logs);
|
||||
```
|
||||
|
||||
#### Stream Logs in Real-Time 🔥
|
||||
|
||||
```typescript
|
||||
// Stream logs continuously (follow mode)
|
||||
const logStream = await container.streamLogs({
|
||||
stdout: true,
|
||||
stderr: true,
|
||||
timestamps: true,
|
||||
tail: 50, // Start with last 50 lines, then follow
|
||||
});
|
||||
|
||||
logStream.on('data', (chunk) => {
|
||||
console.log(chunk.toString());
|
||||
});
|
||||
|
||||
logStream.on('error', (err) => {
|
||||
console.error('Stream error:', err);
|
||||
});
|
||||
|
||||
// Stop streaming when done
|
||||
// logStream.destroy();
|
||||
```
|
||||
|
||||
#### Attach to Container Process 🔥
|
||||
|
||||
Attach to the container's main process (PID 1) for interactive session:
|
||||
|
||||
```typescript
|
||||
const { stream, close } = await container.attach({
|
||||
stdin: true,
|
||||
stdout: true,
|
||||
stderr: true,
|
||||
logs: true, // Include previous logs
|
||||
});
|
||||
|
||||
// Pipe to/from process streams
|
||||
process.stdin.pipe(stream);
|
||||
stream.pipe(process.stdout);
|
||||
|
||||
// Handle stream events
|
||||
stream.on('end', () => {
|
||||
console.log('Attachment ended');
|
||||
});
|
||||
|
||||
// Later: detach cleanly
|
||||
await close();
|
||||
```
|
||||
|
||||
#### Execute Commands in Container 🔥
|
||||
|
||||
Run commands inside a running container:
|
||||
|
||||
```typescript
|
||||
// Execute a command
|
||||
const { stream, close } = await container.exec('ls -la /app', {
|
||||
tty: true,
|
||||
user: 'root',
|
||||
workingDir: '/app',
|
||||
env: ['DEBUG=true'],
|
||||
});
|
||||
|
||||
// Handle output
|
||||
stream.on('data', (chunk) => {
|
||||
console.log(chunk.toString());
|
||||
});
|
||||
|
||||
stream.on('end', async () => {
|
||||
console.log('Command finished');
|
||||
await close();
|
||||
});
|
||||
|
||||
// Execute with array of arguments
|
||||
const { stream: stream2, close: close2 } = await container.exec(
|
||||
['bash', '-c', 'echo "Hello from container"'],
|
||||
{ tty: true }
|
||||
);
|
||||
```
|
||||
|
||||
#### Get Container Stats
|
||||
|
||||
```typescript
|
||||
// Get stats (one-shot)
|
||||
const stats = await container.stats({ stream: false });
|
||||
console.log('CPU Usage:', stats.cpu_stats);
|
||||
console.log('Memory Usage:', stats.memory_stats);
|
||||
```
|
||||
|
||||
#### Create Containers
|
||||
|
||||
```typescript
|
||||
const newContainer = await docker.createContainer({
|
||||
Hostname: 'my-app',
|
||||
Domainname: 'local',
|
||||
networks: ['my-network'], // Can use string or DockerNetwork instance
|
||||
});
|
||||
console.log(`Container created: ${newContainer.Id}`);
|
||||
```
|
||||
|
||||
### 🖼️ Image Management
|
||||
|
||||
#### Pull Images from Registry
|
||||
|
||||
```typescript
|
||||
// Pull from Docker Hub
|
||||
const image = await docker.createImageFromRegistry({
|
||||
imageUrl: 'nginx',
|
||||
imageTag: 'alpine', // Optional, defaults to 'latest'
|
||||
});
|
||||
|
||||
console.log(`Image pulled: ${image.RepoTags[0]}`);
|
||||
console.log(`Size: ${(image.Size / 1024 / 1024).toFixed(2)} MB`);
|
||||
|
||||
// Pull from private registry
|
||||
const privateImage = await docker.createImageFromRegistry({
|
||||
imageUrl: 'registry.example.com/my-app',
|
||||
imageTag: 'v2.0.0',
|
||||
});
|
||||
```
|
||||
|
||||
#### Import Images from Tar Stream
|
||||
|
||||
```typescript
|
||||
import * as fs from 'fs';
|
||||
|
||||
// Import from a tar file
|
||||
const tarStream = fs.createReadStream('./my-image.tar');
|
||||
const importedImage = await docker.createImageFromTarStream(tarStream, {
|
||||
imageUrl: 'my-app',
|
||||
imageTag: 'v1.0.0',
|
||||
});
|
||||
|
||||
console.log(`Imported: ${importedImage.RepoTags[0]}`);
|
||||
```
|
||||
|
||||
#### Export Images to Tar Stream
|
||||
|
||||
```typescript
|
||||
// Get image by name
|
||||
const image = await docker.getImageByName('nginx:alpine');
|
||||
|
||||
// Export to tar stream
|
||||
const exportStream = await image.exportToTarStream();
|
||||
|
||||
// Save to file
|
||||
const writeStream = fs.createWriteStream('./nginx-export.tar');
|
||||
exportStream.pipe(writeStream);
|
||||
|
||||
writeStream.on('finish', () => {
|
||||
console.log('Image exported successfully');
|
||||
});
|
||||
```
|
||||
|
||||
#### List All Images
|
||||
|
||||
```typescript
|
||||
const images = await docker.listImages();
|
||||
|
||||
images.forEach((img) => {
|
||||
console.log(`Image: ${img.RepoTags ? img.RepoTags.join(', ') : '<none>'}`);
|
||||
console.log(` ID: ${img.Id}`);
|
||||
console.log(` Size: ${(img.Size / 1024 / 1024).toFixed(2)} MB`);
|
||||
console.log(` Created: ${new Date(img.Created * 1000).toISOString()}`);
|
||||
});
|
||||
```
|
||||
|
||||
#### Remove Images
|
||||
|
||||
```typescript
|
||||
const image = await docker.getImageByName('nginx:alpine');
|
||||
await image.remove({ force: true });
|
||||
console.log('Image removed');
|
||||
```
|
||||
|
||||
### 🌐 Network Management
|
||||
|
||||
#### Create Custom Networks
|
||||
|
||||
```typescript
|
||||
// Create an overlay network (for swarm)
|
||||
const network = await docker.createNetwork({
|
||||
Name: 'my-app-network',
|
||||
Driver: 'overlay',
|
||||
EnableIPv6: false,
|
||||
Attachable: true,
|
||||
});
|
||||
|
||||
console.log(`Network created: ${network.Name} (${network.Id})`);
|
||||
```
|
||||
|
||||
#### List and Inspect Networks
|
||||
|
||||
```typescript
|
||||
// Get all networks
|
||||
const networks = await docker.listNetworks();
|
||||
|
||||
networks.forEach((net) => {
|
||||
console.log(`Network: ${net.Name} (${net.Driver})`);
|
||||
console.log(` Scope: ${net.Scope}`);
|
||||
console.log(` Internal: ${net.Internal}`);
|
||||
});
|
||||
|
||||
// Get specific network by name
|
||||
const appNetwork = await docker.getNetworkByName('my-app-network');
|
||||
|
||||
// Get containers connected to this network
|
||||
const containers = await appNetwork.listContainersOnNetwork();
|
||||
console.log(`Containers on network: ${containers.length}`);
|
||||
```
|
||||
|
||||
#### Remove a Network
|
||||
|
||||
```typescript
|
||||
const network = await docker.getNetworkByName('my-app-network');
|
||||
await network.remove();
|
||||
console.log('Network removed');
|
||||
```
|
||||
|
||||
### 🎭 Service Management (Swarm Mode)
|
||||
|
||||
#### Activate Swarm Mode
|
||||
|
||||
```typescript
|
||||
// Initialize swarm mode first
|
||||
await docker.activateSwarm('192.168.1.100'); // Optional: advertisement IP
|
||||
console.log('Swarm mode activated');
|
||||
```
|
||||
|
||||
#### Deploy Services
|
||||
|
||||
```typescript
|
||||
// Create prerequisites
|
||||
const network = await docker.createNetwork({
|
||||
Name: 'app-network',
|
||||
Driver: 'overlay', // Use overlay for swarm
|
||||
});
|
||||
|
||||
const image = await docker.createImageFromRegistry({
|
||||
imageUrl: 'nginx',
|
||||
imageTag: 'latest',
|
||||
});
|
||||
|
||||
const secret = await docker.createSecret({
|
||||
name: 'api-key',
|
||||
version: '1.0.0',
|
||||
contentArg: 'super-secret-key',
|
||||
labels: { app: 'my-app' },
|
||||
});
|
||||
|
||||
// Create a service (supports both strings and instances!)
|
||||
const service = await docker.createService({
|
||||
name: 'web-api',
|
||||
image: image, // Or use string: 'nginx:latest'
|
||||
labels: {
|
||||
app: 'api',
|
||||
version: '1.0.0',
|
||||
},
|
||||
networks: [network], // Or use strings: ['app-network']
|
||||
networkAlias: 'api',
|
||||
secrets: [secret], // Or use strings: ['api-key']
|
||||
ports: ['80:3000'], // host:container
|
||||
resources: {
|
||||
memorySizeMB: 512,
|
||||
},
|
||||
});
|
||||
|
||||
console.log(`Service deployed: ${service.ID}`);
|
||||
```
|
||||
|
||||
#### List and Manage Services
|
||||
|
||||
```typescript
|
||||
// List all services
|
||||
const services = await docker.listServices();
|
||||
|
||||
services.forEach((service) => {
|
||||
console.log(`Service: ${service.Spec.Name}`);
|
||||
console.log(` Image: ${service.Spec.TaskTemplate.ContainerSpec.Image}`);
|
||||
});
|
||||
|
||||
// Get service by name
|
||||
const myService = await docker.getServiceByName('web-api');
|
||||
|
||||
// Refresh service state
|
||||
await myService.refresh();
|
||||
|
||||
// Check if service needs update
|
||||
const needsUpdate = await myService.needsUpdate();
|
||||
if (needsUpdate) {
|
||||
console.log('⚠️ Service configuration has changed, update needed');
|
||||
}
|
||||
|
||||
// Remove service
|
||||
await myService.remove();
|
||||
console.log('Service removed');
|
||||
```
|
||||
|
||||
### 🔐 Secrets Management
|
||||
|
||||
Secrets are only available in Docker Swarm mode.
|
||||
|
||||
```typescript
|
||||
// Create a secret
|
||||
const secret = await docker.createSecret({
|
||||
name: 'database-password',
|
||||
version: '1.0.0',
|
||||
contentArg: 'my-super-secret-password',
|
||||
labels: {
|
||||
app: 'my-app',
|
||||
type: 'credential',
|
||||
},
|
||||
});
|
||||
|
||||
console.log(`Secret created: ${secret.ID}`);
|
||||
|
||||
// List all secrets
|
||||
const secrets = await docker.listSecrets();
|
||||
secrets.forEach((s) => {
|
||||
console.log(`Secret: ${s.Spec.Name}`);
|
||||
console.log(` Labels:`, s.Spec.Labels);
|
||||
});
|
||||
|
||||
// Get secret by name
|
||||
const dbSecret = await docker.getSecretByName('database-password');
|
||||
|
||||
// Update secret content
|
||||
await dbSecret.update('new-password-value');
|
||||
|
||||
// Remove secret
|
||||
await dbSecret.remove();
|
||||
console.log('Secret removed');
|
||||
```
|
||||
|
||||
### 💾 Image Storage
|
||||
|
||||
Store and retrieve Docker images from local storage or S3:
|
||||
|
||||
```typescript
|
||||
// Store image to local storage
|
||||
const imageStream = fs.createReadStream('./my-app.tar');
|
||||
await docker.storeImage('my-app-v1', imageStream);
|
||||
console.log('Image stored locally');
|
||||
|
||||
// Retrieve image from storage
|
||||
const storedImageStream = await docker.retrieveImage('my-app-v1');
|
||||
storedImageStream.pipe(fs.createWriteStream('./restored-image.tar'));
|
||||
|
||||
// Configure S3 storage (optional)
|
||||
await docker.addS3Storage({
|
||||
endpoint: 's3.amazonaws.com',
|
||||
accessKey: 'AKIAIOSFODNN7EXAMPLE',
|
||||
accessSecret: 'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY',
|
||||
bucketName: 'my-docker-images',
|
||||
});
|
||||
```
|
||||
|
||||
### 📊 Event Monitoring
|
||||
|
||||
Monitor Docker events in real-time using RxJS observables:
|
||||
|
||||
```typescript
|
||||
// Subscribe to Docker events
|
||||
const eventObservable = await docker.getEventObservable();
|
||||
|
||||
const subscription = eventObservable.subscribe({
|
||||
next: (event) => {
|
||||
console.log(`📡 Event: ${event.Type} - ${event.Action}`);
|
||||
console.log(` Actor: ${event.Actor.ID}`);
|
||||
console.log(` Time: ${new Date(event.time * 1000).toISOString()}`);
|
||||
|
||||
if (event.Type === 'container') {
|
||||
console.log(` Container: ${event.Actor.Attributes.name}`);
|
||||
}
|
||||
},
|
||||
error: (err) => console.error('❌ Event stream error:', err),
|
||||
complete: () => console.log('Event stream completed'),
|
||||
});
|
||||
|
||||
// Unsubscribe when done
|
||||
// subscription.unsubscribe();
|
||||
```
|
||||
|
||||
### 🔧 Registry Authentication
|
||||
|
||||
Authenticate with Docker registries to pull private images:
|
||||
|
||||
```typescript
|
||||
// Authenticate with a registry
|
||||
await docker.auth({
|
||||
username: 'your-username',
|
||||
password: 'your-password',
|
||||
serveraddress: 'https://index.docker.io/v1/', // Docker Hub
|
||||
});
|
||||
|
||||
console.log('✅ Authenticated with registry');
|
||||
|
||||
// Or read credentials from Docker config file
|
||||
await docker.getAuthTokenFromDockerConfig('registry.example.com');
|
||||
|
||||
// Now you can pull private images
|
||||
const privateImage = await docker.createImageFromRegistry({
|
||||
imageUrl: 'registry.example.com/private/app',
|
||||
imageTag: 'latest',
|
||||
});
|
||||
```
|
||||
|
||||
## 🏗️ Advanced Examples
|
||||
|
||||
### Complete Application Stack with Swarm
|
||||
|
||||
Deploy a complete multi-service application stack:
|
||||
|
||||
```typescript
|
||||
import { DockerHost } from '@apiclient.xyz/docker';
|
||||
|
||||
async function deployStack() {
|
||||
const docker = new DockerHost({});
|
||||
await docker.start();
|
||||
|
||||
// Initialize swarm
|
||||
await docker.activateSwarm();
|
||||
console.log('✅ Swarm initialized');
|
||||
|
||||
// Create overlay network for service communication
|
||||
const network = await docker.createNetwork({
|
||||
Name: 'app-network',
|
||||
Driver: 'overlay',
|
||||
Attachable: true,
|
||||
});
|
||||
console.log('✅ Network created');
|
||||
|
||||
// Create secrets
|
||||
const dbPassword = await docker.createSecret({
|
||||
name: 'db-password',
|
||||
version: '1.0.0',
|
||||
contentArg: 'strong-database-password',
|
||||
labels: { app: 'stack' },
|
||||
});
|
||||
console.log('✅ Secrets created');
|
||||
|
||||
// Deploy database service
|
||||
const dbService = await docker.createService({
|
||||
name: 'postgres-db',
|
||||
image: 'postgres:14-alpine', // Using string for convenience
|
||||
labels: { tier: 'database' },
|
||||
networks: ['app-network'], // Using string array
|
||||
networkAlias: 'postgres',
|
||||
secrets: ['db-password'], // Using string array
|
||||
ports: [],
|
||||
resources: {
|
||||
memorySizeMB: 1024,
|
||||
},
|
||||
});
|
||||
console.log('✅ Database service deployed');
|
||||
|
||||
// Deploy application service
|
||||
const appService = await docker.createService({
|
||||
name: 'web-app',
|
||||
image: 'my-app:latest',
|
||||
labels: { tier: 'application' },
|
||||
networks: ['app-network'],
|
||||
networkAlias: 'app',
|
||||
secrets: ['db-password'],
|
||||
ports: ['80:3000'],
|
||||
resources: {
|
||||
memorySizeMB: 512,
|
||||
},
|
||||
});
|
||||
console.log('✅ Application service deployed');
|
||||
|
||||
console.log('🚀 Stack deployment complete!');
|
||||
}
|
||||
|
||||
deployStack().catch(console.error);
|
||||
```
|
||||
|
||||
### Container Debugging Session
|
||||
|
||||
Interactive debugging session with a running container:
|
||||
|
||||
```typescript
|
||||
async function debugContainer(containerId: string) {
|
||||
const docker = new DockerHost({});
|
||||
await docker.start();
|
||||
|
||||
const container = await docker.getContainerById(containerId);
|
||||
|
||||
// First, check container state
|
||||
await container.inspect();
|
||||
console.log(`Container: ${container.Names[0]}`);
|
||||
console.log(`State: ${container.State}`);
|
||||
|
||||
// Get recent logs
|
||||
const logs = await container.logs({ tail: 50 });
|
||||
console.log('Recent logs:', logs);
|
||||
|
||||
// Stream live logs in one terminal
|
||||
console.log('\n--- Live Logs ---');
|
||||
const logStream = await container.streamLogs({ timestamps: true });
|
||||
logStream.on('data', (chunk) => {
|
||||
process.stdout.write(chunk);
|
||||
});
|
||||
|
||||
// Execute diagnostic commands
|
||||
console.log('\n--- Running Diagnostics ---');
|
||||
const { stream, close } = await container.exec('ps aux', { tty: true });
|
||||
|
||||
stream.on('data', (chunk) => {
|
||||
console.log(chunk.toString());
|
||||
});
|
||||
|
||||
stream.on('end', async () => {
|
||||
console.log('\nDiagnostics complete');
|
||||
await close();
|
||||
await docker.stop();
|
||||
});
|
||||
}
|
||||
```
|
||||
|
||||
### Image Pipeline: Pull, Tag, Export
|
||||
|
||||
```typescript
|
||||
async function imagePipeline() {
|
||||
const docker = new DockerHost({});
|
||||
await docker.start();
|
||||
|
||||
// Pull latest image
|
||||
const image = await docker.createImageFromRegistry({
|
||||
imageUrl: 'node',
|
||||
imageTag: '18-alpine',
|
||||
});
|
||||
console.log('✅ Image pulled');
|
||||
|
||||
// Export to tar for backup/transfer
|
||||
const exportStream = await image.exportToTarStream();
|
||||
const writeStream = fs.createWriteStream('./node-18-alpine.tar');
|
||||
|
||||
exportStream.pipe(writeStream);
|
||||
|
||||
await new Promise((resolve, reject) => {
|
||||
writeStream.on('finish', resolve);
|
||||
writeStream.on('error', reject);
|
||||
});
|
||||
console.log('✅ Image exported to tar');
|
||||
|
||||
// Store in image store (with S3 backup if configured)
|
||||
const tarStream = fs.createReadStream('./node-18-alpine.tar');
|
||||
await docker.storeImage('node-18-alpine-backup', tarStream);
|
||||
console.log('✅ Image stored in image store');
|
||||
|
||||
await docker.stop();
|
||||
}
|
||||
```
|
||||
|
||||
## 🔍 TypeScript Support
|
||||
|
||||
Full TypeScript definitions for all Docker API entities:
|
||||
|
||||
```typescript
|
||||
import type {
|
||||
IDockerHostConstructorOptions,
|
||||
IImageCreationDescriptor,
|
||||
IServiceCreationDescriptor,
|
||||
ISecretCreationDescriptor,
|
||||
IContainerCreationDescriptor,
|
||||
INetworkCreationDescriptor,
|
||||
TLabels,
|
||||
TPorts,
|
||||
DockerResource,
|
||||
} from '@apiclient.xyz/docker';
|
||||
|
||||
// Full IntelliSense support
|
||||
const options: IDockerHostConstructorOptions = {
|
||||
socketPath: '/var/run/docker.sock',
|
||||
imageStoreDir: '/tmp/docker-images',
|
||||
};
|
||||
|
||||
const imageConfig: IImageCreationDescriptor = {
|
||||
imageUrl: 'nginx',
|
||||
imageTag: 'alpine',
|
||||
};
|
||||
|
||||
const labels: TLabels = {
|
||||
app: 'my-app',
|
||||
environment: 'production',
|
||||
};
|
||||
```
|
||||
|
||||
## 🎯 Real-World Use Cases
|
||||
|
||||
### CI/CD Pipeline Integration
|
||||
|
||||
```typescript
|
||||
// In your CI/CD pipeline
|
||||
const docker = new DockerHost({
|
||||
socketPath: process.env.DOCKER_HOST || '/var/run/docker.sock',
|
||||
});
|
||||
|
||||
await docker.start();
|
||||
|
||||
// Build and push process
|
||||
const buildStream = fs.createReadStream('./build-artifact.tar');
|
||||
const image = await docker.createImageFromTarStream(buildStream, {
|
||||
imageUrl: 'my-app',
|
||||
imageTag: process.env.CI_COMMIT_SHA,
|
||||
});
|
||||
|
||||
console.log(`✅ Image built: my-app:${process.env.CI_COMMIT_SHA}`);
|
||||
```
|
||||
|
||||
### Health Check Service
|
||||
|
||||
```typescript
|
||||
async function healthCheckService() {
|
||||
const docker = new DockerHost({});
|
||||
|
||||
try {
|
||||
await docker.ping();
|
||||
const containers = await docker.listContainers();
|
||||
|
||||
const unhealthy = containers.filter(c => c.State !== 'running');
|
||||
if (unhealthy.length > 0) {
|
||||
console.warn(`⚠️ ${unhealthy.length} containers not running`);
|
||||
// Send alerts, restart services, etc.
|
||||
}
|
||||
|
||||
return { healthy: true, containers: containers.length };
|
||||
} catch (error) {
|
||||
console.error('❌ Docker health check failed:', error);
|
||||
return { healthy: false, error: error.message };
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## 📖 API Documentation
|
||||
|
||||
- **Package Repository**: [https://code.foss.global/apiclient.xyz/docker](https://code.foss.global/apiclient.xyz/docker)
|
||||
- **npm Package**: [https://www.npmjs.com/package/@apiclient.xyz/docker](https://www.npmjs.com/package/@apiclient.xyz/docker)
|
||||
- **Docker Engine API Reference**: [https://docs.docker.com/engine/api/latest/](https://docs.docker.com/engine/api/latest/)
|
||||
|
||||
## 🔑 Key Concepts
|
||||
|
||||
- **DockerHost**: Main entry point - all operations flow through this facade
|
||||
- **Flexible Descriptors**: Accept both string references and class instances
|
||||
- **Health Checks**: Use `ping()` method to verify Docker daemon accessibility
|
||||
- **Socket Path Priority**: Constructor option → `DOCKER_HOST` env → CI mode → default socket
|
||||
- **Swarm Mode Required**: Services and secrets require Docker Swarm to be activated
|
||||
- **Type Safety**: Full TypeScript support with comprehensive interfaces
|
||||
- **Streaming Support**: Real-time log streaming, event monitoring, and container attachment
|
||||
- **Interactive Containers**: Attach to processes, execute commands, stream logs
|
||||
- **Clean Architecture**: Facade pattern with internal delegation for maintainability
|
||||
|
||||
## 🆕 Recent Updates
|
||||
|
||||
### Version 3.0.0+ - Architecture & Stability
|
||||
|
||||
- ✨ **Clean OOP Architecture**: Refactored to Facade pattern with DockerHost as single entry point
|
||||
- ✨ **Container Streaming**: Real-time `streamLogs()`, `attach()`, and `exec()` methods for interactive containers
|
||||
- ✨ **Flexible Descriptors**: Support both string references and class instances in all creation methods
|
||||
- ✨ **Complete Container API**: Full lifecycle methods (start, stop, remove, logs, inspect, stats)
|
||||
- ✨ **DockerResource Base Class**: Consistent patterns and type safety across all resources
|
||||
- ✨ **Health Check Support**: New `ping()` method to verify Docker daemon availability
|
||||
- 🐛 **Fixed Circular Dependencies**: Resolved Node.js module loading issues with type-only imports
|
||||
- 🔧 **Improved Type Safety**: Better TypeScript definitions and interfaces throughout
|
||||
- 📚 **Enhanced Documentation**: Comprehensive examples, migration guides, and real-world use cases
|
||||
|
||||
## License and Legal Information
|
||||
|
||||
This repository contains open-source code that is licensed under the MIT License. A copy of the MIT License can be found in the [license](license) file within this repository.
|
||||
|
||||
**Please note:** The MIT License does not grant permission to use the trade names, trademarks, service marks, or product names of the project, except as required for reasonable and customary use in describing the origin of the work and reproducing the content of the NOTICE file.
|
||||
|
||||
### Trademarks
|
||||
|
||||
This project is owned and maintained by Task Venture Capital GmbH. The names and logos associated with Task Venture Capital GmbH and any related products or services are trademarks of Task Venture Capital GmbH and are not included within the scope of the MIT license granted herein. Use of these trademarks must comply with Task Venture Capital GmbH's Trademark Guidelines, and any usage must be approved in writing by Task Venture Capital GmbH.
|
||||
|
||||
### Company Information
|
||||
|
||||
Task Venture Capital GmbH
|
||||
Registered at District court Bremen HRB 35230 HB, Germany
|
||||
|
||||
For any legal inquiries or if you require further information, please contact us via email at hello@task.vc.
|
||||
|
||||
By using this repository, you acknowledge that you have read this section, agree to comply with its terms, and understand that the licensing of the code does not imply endorsement by Task Venture Capital GmbH of any derivative works.
|
||||
@@ -1,5 +1,5 @@
|
||||
import * as docker from '../ts';
|
||||
import * as smartstring from '@pushrocks/smartstring';
|
||||
import * as smartstring from '@push.rocks/smartstring';
|
||||
|
||||
const run = async () => {
|
||||
const dockerHost = new docker.DockerHost();
|
||||
|
||||
40
test-stream.js
Normal file
40
test-stream.js
Normal file
@@ -0,0 +1,40 @@
|
||||
const { SmartRequest } = require('@push.rocks/smartrequest');
|
||||
|
||||
async function test() {
|
||||
try {
|
||||
const response = await SmartRequest.create()
|
||||
.url('http://unix:/run/user/1000/docker.sock:/images/hello-world:latest/get')
|
||||
.header('Host', 'docker.sock')
|
||||
.get();
|
||||
|
||||
console.log('Response status:', response.status);
|
||||
console.log('Response type:', typeof response);
|
||||
|
||||
const stream = response.streamNode();
|
||||
console.log('Stream type:', typeof stream);
|
||||
console.log('Has on method:', typeof stream.on);
|
||||
|
||||
if (stream) {
|
||||
let chunks = 0;
|
||||
stream.on('data', (chunk) => {
|
||||
chunks++;
|
||||
if (chunks <= 3) console.log('Got chunk', chunks, chunk.length);
|
||||
});
|
||||
stream.on('end', () => {
|
||||
console.log('Stream ended, total chunks:', chunks);
|
||||
process.exit(0);
|
||||
});
|
||||
stream.on('error', (err) => {
|
||||
console.error('Stream error:', err);
|
||||
process.exit(1);
|
||||
});
|
||||
} else {
|
||||
console.log('No stream available');
|
||||
}
|
||||
} catch (error) {
|
||||
console.error('Error:', error);
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
test();
|
||||
46
test-stream.mjs
Normal file
46
test-stream.mjs
Normal file
@@ -0,0 +1,46 @@
|
||||
import { SmartRequest } from '@push.rocks/smartrequest';
|
||||
|
||||
async function test() {
|
||||
try {
|
||||
const response = await SmartRequest.create()
|
||||
.url('http://unix:/run/user/1000/docker.sock:/images/hello-world:latest/get')
|
||||
.header('Host', 'docker.sock')
|
||||
.get();
|
||||
|
||||
console.log('Response status:', response.status);
|
||||
console.log('Response type:', typeof response);
|
||||
|
||||
const stream = response.streamNode();
|
||||
console.log('Stream type:', typeof stream);
|
||||
console.log('Has on method:', typeof stream.on);
|
||||
|
||||
if (stream) {
|
||||
let chunks = 0;
|
||||
stream.on('data', (chunk) => {
|
||||
chunks++;
|
||||
if (chunks <= 3) console.log('Got chunk', chunks, chunk.length);
|
||||
});
|
||||
stream.on('end', () => {
|
||||
console.log('Stream ended, total chunks:', chunks);
|
||||
process.exit(0);
|
||||
});
|
||||
stream.on('error', (err) => {
|
||||
console.error('Stream error:', err);
|
||||
process.exit(1);
|
||||
});
|
||||
|
||||
// Set a timeout in case stream doesn't end
|
||||
setTimeout(() => {
|
||||
console.log('Timeout after 5 seconds');
|
||||
process.exit(1);
|
||||
}, 5000);
|
||||
} else {
|
||||
console.log('No stream available');
|
||||
}
|
||||
} catch (error) {
|
||||
console.error('Error:', error);
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
test();
|
||||
309
test/test.nonci.node+deno.ts
Normal file
309
test/test.nonci.node+deno.ts
Normal file
@@ -0,0 +1,309 @@
|
||||
import { expect, tap } from '@git.zone/tstest/tapbundle';
|
||||
import { Qenv } from '@push.rocks/qenv';
|
||||
|
||||
const testQenv = new Qenv('./', './.nogit/');
|
||||
|
||||
import * as plugins from '../ts/plugins.js';
|
||||
import * as paths from '../ts/paths.js';
|
||||
|
||||
import * as docker from '../ts/index.js';
|
||||
|
||||
let testDockerHost: docker.DockerHost;
|
||||
|
||||
tap.test('should create a new Dockersock instance', async () => {
|
||||
testDockerHost = new docker.DockerHost({});
|
||||
await testDockerHost.start();
|
||||
return expect(testDockerHost).toBeInstanceOf(docker.DockerHost);
|
||||
});
|
||||
|
||||
tap.test('should create a docker swarm', async () => {
|
||||
await testDockerHost.activateSwarm();
|
||||
});
|
||||
|
||||
// Containers
|
||||
tap.test('should list containers', async () => {
|
||||
const containers = await testDockerHost.listContainers();
|
||||
console.log(containers);
|
||||
});
|
||||
|
||||
// Networks
|
||||
tap.test('should list networks', async () => {
|
||||
const networks = await testDockerHost.listNetworks();
|
||||
console.log(networks);
|
||||
});
|
||||
|
||||
tap.test('should create a network', async () => {
|
||||
const newNetwork = await testDockerHost.createNetwork({
|
||||
Name: 'webgateway',
|
||||
});
|
||||
expect(newNetwork).toBeInstanceOf(docker.DockerNetwork);
|
||||
expect(newNetwork.Name).toEqual('webgateway');
|
||||
});
|
||||
|
||||
tap.test('should remove a network', async () => {
|
||||
const webgateway = await testDockerHost.getNetworkByName('webgateway');
|
||||
await webgateway.remove();
|
||||
});
|
||||
|
||||
// Images
|
||||
tap.test('should pull an image from imagetag', async () => {
|
||||
const image = await testDockerHost.createImageFromRegistry({
|
||||
imageUrl: 'hosttoday/ht-docker-node',
|
||||
imageTag: 'alpine',
|
||||
});
|
||||
expect(image).toBeInstanceOf(docker.DockerImage);
|
||||
console.log(image);
|
||||
});
|
||||
|
||||
tap.test('should return a change Observable', async (tools) => {
|
||||
const testObservable = await testDockerHost.getEventObservable();
|
||||
const subscription = testObservable.subscribe((changeObject) => {
|
||||
console.log(changeObject);
|
||||
});
|
||||
await tools.delayFor(2000);
|
||||
subscription.unsubscribe();
|
||||
});
|
||||
|
||||
// SECRETS
|
||||
tap.test('should create a secret', async () => {
|
||||
const mySecret = await testDockerHost.createSecret({
|
||||
name: 'testSecret',
|
||||
version: '1.0.3',
|
||||
contentArg: `{ "hi": "wow"}`,
|
||||
labels: {},
|
||||
});
|
||||
console.log(mySecret);
|
||||
});
|
||||
|
||||
tap.test('should remove a secret by name', async () => {
|
||||
const mySecret = await testDockerHost.getSecretByName('testSecret');
|
||||
await mySecret.remove();
|
||||
});
|
||||
|
||||
// SERVICES
|
||||
tap.test('should activate swarm mode', async () => {
|
||||
await testDockerHost.activateSwarm();
|
||||
});
|
||||
|
||||
tap.test('should list all services', async (tools) => {
|
||||
const services = await testDockerHost.listServices();
|
||||
console.log(services);
|
||||
});
|
||||
|
||||
tap.test('should create a service', async () => {
|
||||
const testNetwork = await testDockerHost.createNetwork({
|
||||
Name: 'testNetwork',
|
||||
});
|
||||
const testSecret = await testDockerHost.createSecret({
|
||||
name: 'testSecret',
|
||||
version: '0.0.1',
|
||||
labels: {},
|
||||
contentArg: '{"hi": "wow"}',
|
||||
});
|
||||
const testImage = await testDockerHost.createImageFromRegistry({
|
||||
imageUrl: 'code.foss.global/host.today/ht-docker-node:latest',
|
||||
});
|
||||
const testService = await testDockerHost.createService({
|
||||
image: testImage,
|
||||
labels: {},
|
||||
name: 'testService',
|
||||
networks: [testNetwork],
|
||||
networkAlias: 'testService',
|
||||
secrets: [testSecret],
|
||||
ports: ['3000:80'],
|
||||
});
|
||||
|
||||
await testService.remove();
|
||||
await testNetwork.remove();
|
||||
await testSecret.remove();
|
||||
});
|
||||
|
||||
tap.test('should export images', async (toolsArg) => {
|
||||
const done = toolsArg.defer();
|
||||
const testImage = await testDockerHost.createImageFromRegistry({
|
||||
imageUrl: 'code.foss.global/host.today/ht-docker-node:latest',
|
||||
});
|
||||
const fsWriteStream = plugins.smartfile.fsStream.createWriteStream(
|
||||
plugins.path.join(paths.nogitDir, 'testimage.tar'),
|
||||
);
|
||||
const exportStream = await testImage.exportToTarStream();
|
||||
exportStream.pipe(fsWriteStream).on('finish', () => {
|
||||
done.resolve();
|
||||
});
|
||||
await done.promise;
|
||||
});
|
||||
|
||||
tap.test('should import images', async () => {
|
||||
const fsReadStream = plugins.smartfile.fsStream.createReadStream(
|
||||
plugins.path.join(paths.nogitDir, 'testimage.tar'),
|
||||
);
|
||||
const importedImage = await testDockerHost.createImageFromTarStream(
|
||||
fsReadStream,
|
||||
{
|
||||
imageUrl: 'code.foss.global/host.today/ht-docker-node:latest',
|
||||
},
|
||||
);
|
||||
expect(importedImage).toBeInstanceOf(docker.DockerImage);
|
||||
});
|
||||
|
||||
tap.test('should expose a working DockerImageStore', async () => {
|
||||
// lets first add am s3 target
|
||||
const s3Descriptor = {
|
||||
endpoint: await testQenv.getEnvVarOnDemand('S3_ENDPOINT'),
|
||||
accessKey: await testQenv.getEnvVarOnDemand('S3_ACCESSKEY'),
|
||||
accessSecret: await testQenv.getEnvVarOnDemand('S3_ACCESSSECRET'),
|
||||
bucketName: await testQenv.getEnvVarOnDemand('S3_BUCKET'),
|
||||
};
|
||||
await testDockerHost.addS3Storage(s3Descriptor);
|
||||
|
||||
// Use the new public API instead of direct imageStore access
|
||||
await testDockerHost.storeImage(
|
||||
'hello2',
|
||||
plugins.smartfile.fsStream.createReadStream(
|
||||
plugins.path.join(paths.nogitDir, 'testimage.tar'),
|
||||
),
|
||||
);
|
||||
});
|
||||
|
||||
// CONTAINER STREAMING FEATURES
|
||||
let testContainer: docker.DockerContainer;
|
||||
|
||||
tap.test('should get an existing container for streaming tests', async () => {
|
||||
const containers = await testDockerHost.listContainers();
|
||||
|
||||
// Use the first running container we find
|
||||
testContainer = containers.find((c) => c.State === 'running');
|
||||
|
||||
if (!testContainer) {
|
||||
throw new Error('No running containers found for streaming tests');
|
||||
}
|
||||
|
||||
expect(testContainer).toBeInstanceOf(docker.DockerContainer);
|
||||
console.log('Using existing container for tests:', testContainer.Names[0], testContainer.Id);
|
||||
});
|
||||
|
||||
tap.test('should stream container logs', async (tools) => {
|
||||
const done = tools.defer();
|
||||
const logStream = await testContainer.streamLogs({
|
||||
stdout: true,
|
||||
stderr: true,
|
||||
timestamps: true,
|
||||
});
|
||||
|
||||
let receivedData = false;
|
||||
|
||||
logStream.on('data', (chunk) => {
|
||||
console.log('Received log chunk:', chunk.toString().slice(0, 100));
|
||||
receivedData = true;
|
||||
});
|
||||
|
||||
logStream.on('error', (error) => {
|
||||
console.error('Stream error:', error);
|
||||
done.resolve();
|
||||
});
|
||||
|
||||
// Wait for 2 seconds to collect logs, then close
|
||||
await tools.delayFor(2000);
|
||||
logStream.destroy();
|
||||
done.resolve();
|
||||
|
||||
await done.promise;
|
||||
console.log('Log streaming test completed. Received data:', receivedData);
|
||||
});
|
||||
|
||||
tap.test('should get container logs (one-shot)', async () => {
|
||||
const logs = await testContainer.logs({
|
||||
stdout: true,
|
||||
stderr: true,
|
||||
tail: 10,
|
||||
});
|
||||
|
||||
expect(typeof logs).toEqual('string');
|
||||
console.log('Container logs (last 10 lines):', logs.slice(0, 200));
|
||||
});
|
||||
|
||||
tap.test('should execute command in container', async (tools) => {
|
||||
const done = tools.defer();
|
||||
const { stream, close } = await testContainer.exec('echo "Hello from exec"', {
|
||||
tty: false,
|
||||
attachStdout: true,
|
||||
attachStderr: true,
|
||||
});
|
||||
|
||||
let output = '';
|
||||
|
||||
stream.on('data', (chunk) => {
|
||||
output += chunk.toString();
|
||||
console.log('Exec output:', chunk.toString());
|
||||
});
|
||||
|
||||
stream.on('end', async () => {
|
||||
await close();
|
||||
console.log('Exec completed. Full output:', output);
|
||||
done.resolve();
|
||||
});
|
||||
|
||||
stream.on('error', async (error) => {
|
||||
console.error('Exec error:', error);
|
||||
await close();
|
||||
done.resolve();
|
||||
});
|
||||
|
||||
await done.promise;
|
||||
expect(output.length).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
tap.test('should attach to container', async (tools) => {
|
||||
const done = tools.defer();
|
||||
const { stream, close } = await testContainer.attach({
|
||||
stream: true,
|
||||
stdout: true,
|
||||
stderr: true,
|
||||
stdin: false,
|
||||
});
|
||||
|
||||
let receivedData = false;
|
||||
|
||||
stream.on('data', (chunk) => {
|
||||
console.log('Attach received:', chunk.toString().slice(0, 100));
|
||||
receivedData = true;
|
||||
});
|
||||
|
||||
stream.on('error', async (error) => {
|
||||
console.error('Attach error:', error);
|
||||
await close();
|
||||
done.resolve();
|
||||
});
|
||||
|
||||
// Monitor for 2 seconds then detach
|
||||
await tools.delayFor(2000);
|
||||
await close();
|
||||
done.resolve();
|
||||
|
||||
await done.promise;
|
||||
console.log('Attach test completed. Received data:', receivedData);
|
||||
});
|
||||
|
||||
tap.test('should get container stats', async () => {
|
||||
const stats = await testContainer.stats({ stream: false });
|
||||
expect(stats).toBeInstanceOf(Object);
|
||||
console.log('Container stats keys:', Object.keys(stats));
|
||||
});
|
||||
|
||||
tap.test('should inspect container', async () => {
|
||||
const inspection = await testContainer.inspect();
|
||||
expect(inspection).toBeInstanceOf(Object);
|
||||
expect(inspection.Id).toEqual(testContainer.Id);
|
||||
console.log('Container state:', inspection.State?.Status);
|
||||
});
|
||||
|
||||
tap.test('should complete container tests', async () => {
|
||||
// Using existing container, no cleanup needed
|
||||
console.log('Container streaming tests completed');
|
||||
});
|
||||
|
||||
tap.test('cleanup', async () => {
|
||||
await testDockerHost.stop();
|
||||
});
|
||||
|
||||
export default tap.start();
|
||||
113
test/test.ts
113
test/test.ts
@@ -1,113 +0,0 @@
|
||||
import { expect, tap } from '@pushrocks/tapbundle';
|
||||
import * as docker from '../ts/index';
|
||||
|
||||
let testDockerHost: docker.DockerHost;
|
||||
|
||||
tap.test('should create a new Dockersock instance', async () => {
|
||||
testDockerHost = new docker.DockerHost('http://unix:/var/run/docker.sock:');
|
||||
return expect(testDockerHost).to.be.instanceof(docker.DockerHost);
|
||||
});
|
||||
|
||||
tap.test('should create a docker swarm', async () => {
|
||||
await testDockerHost.activateSwarm();
|
||||
});
|
||||
|
||||
// Containers
|
||||
tap.test('should list containers', async () => {
|
||||
const containers = await testDockerHost.getContainers();
|
||||
console.log(containers);
|
||||
});
|
||||
|
||||
// Networks
|
||||
tap.test('should list networks', async () => {
|
||||
const networks = await testDockerHost.getNetworks();
|
||||
console.log(networks);
|
||||
});
|
||||
|
||||
tap.test('should create a network', async () => {
|
||||
const newNetwork = await docker.DockerNetwork.createNetwork(testDockerHost, {
|
||||
Name: 'webgateway'
|
||||
});
|
||||
expect(newNetwork).to.be.instanceOf(docker.DockerNetwork);
|
||||
expect(newNetwork.Name).to.equal('webgateway');
|
||||
});
|
||||
|
||||
tap.test('should remove a network', async () => {
|
||||
const webgateway = await docker.DockerNetwork.getNetworkByName(testDockerHost, 'webgateway');
|
||||
await webgateway.remove();
|
||||
});
|
||||
|
||||
// Images
|
||||
tap.test('should pull an image from imagetag', async () => {
|
||||
const image = await docker.DockerImage.createFromRegistry(testDockerHost, {
|
||||
imageUrl: 'hosttoday/ht-docker-node',
|
||||
imageTag: 'alpine'
|
||||
});
|
||||
expect(image).to.be.instanceOf(docker.DockerImage);
|
||||
console.log(image);
|
||||
});
|
||||
|
||||
tap.test('should return a change Observable', async tools => {
|
||||
const testObservable = await testDockerHost.getEventObservable();
|
||||
const subscription = testObservable.subscribe(changeObject => {
|
||||
console.log(changeObject);
|
||||
});
|
||||
await tools.delayFor(2000);
|
||||
subscription.unsubscribe();
|
||||
});
|
||||
|
||||
// SECRETS
|
||||
tap.test('should create a secret', async () => {
|
||||
const mySecret = await docker.DockerSecret.createSecret(testDockerHost, {
|
||||
name: 'testSecret',
|
||||
version: '1.0.3',
|
||||
contentArg: `{ "hi": "wow"}`,
|
||||
labels: {}
|
||||
});
|
||||
console.log(mySecret);
|
||||
});
|
||||
|
||||
tap.test('should remove a secret by name', async () => {
|
||||
const mySecret = await docker.DockerSecret.getSecretByName(testDockerHost, 'testSecret');
|
||||
await mySecret.remove();
|
||||
});
|
||||
|
||||
// SERVICES
|
||||
tap.test('should activate swarm mode', async () => {
|
||||
await testDockerHost.activateSwarm();
|
||||
});
|
||||
|
||||
tap.test('should list all services', async tools => {
|
||||
const services = await testDockerHost.getServices();
|
||||
console.log(services);
|
||||
});
|
||||
|
||||
tap.test('should create a service', async () => {
|
||||
const testNetwork = await docker.DockerNetwork.createNetwork(testDockerHost, {
|
||||
Name: 'testNetwork'
|
||||
});
|
||||
const testSecret = await docker.DockerSecret.createSecret(testDockerHost, {
|
||||
name: 'testSecret',
|
||||
version: '0.0.1',
|
||||
labels: {},
|
||||
contentArg: '{"hi": "wow"}'
|
||||
});
|
||||
const testImage = await docker.DockerImage.createFromRegistry(testDockerHost, {
|
||||
imageUrl: 'registry.gitlab.com/hosttoday/ht-docker-static'
|
||||
});
|
||||
const testService = await docker.DockerService.createService(testDockerHost, {
|
||||
image: testImage,
|
||||
labels: {},
|
||||
name: 'testService',
|
||||
networks: [testNetwork],
|
||||
networkAlias: 'testService',
|
||||
secrets: [testSecret],
|
||||
ports: ['3000:80']
|
||||
});
|
||||
|
||||
await testService.remove();
|
||||
await testNetwork.remove();
|
||||
await testSecret.remove();
|
||||
});
|
||||
|
||||
tap.start();
|
||||
8
ts/00_commitinfo_data.ts
Normal file
8
ts/00_commitinfo_data.ts
Normal file
@@ -0,0 +1,8 @@
|
||||
/**
|
||||
* autocreated commitinfo by @push.rocks/commitinfo
|
||||
*/
|
||||
export const commitinfo = {
|
||||
name: '@apiclient.xyz/docker',
|
||||
version: '5.0.0',
|
||||
description: 'Provides easy communication with Docker remote API from Node.js, with TypeScript support.'
|
||||
}
|
||||
27
ts/classes.base.ts
Normal file
27
ts/classes.base.ts
Normal file
@@ -0,0 +1,27 @@
|
||||
import type { DockerHost } from './classes.host.js';
|
||||
|
||||
/**
|
||||
* Abstract base class for all Docker resources.
|
||||
* Provides standardized patterns for resource management and lifecycle.
|
||||
*/
|
||||
export abstract class DockerResource {
|
||||
/**
|
||||
* Reference to the DockerHost that manages this resource.
|
||||
* All API operations go through this host instance.
|
||||
*/
|
||||
protected readonly dockerHost: DockerHost;
|
||||
|
||||
/**
|
||||
* Creates a new Docker resource instance.
|
||||
* @param dockerHost The DockerHost instance that manages this resource
|
||||
*/
|
||||
constructor(dockerHost: DockerHost) {
|
||||
this.dockerHost = dockerHost;
|
||||
}
|
||||
|
||||
/**
|
||||
* Refreshes this resource's state from the Docker daemon.
|
||||
* Implementations should fetch current data and update instance properties.
|
||||
*/
|
||||
abstract refresh(): Promise<void>;
|
||||
}
|
||||
393
ts/classes.container.ts
Normal file
393
ts/classes.container.ts
Normal file
@@ -0,0 +1,393 @@
|
||||
import * as plugins from './plugins.js';
|
||||
import * as interfaces from './interfaces/index.js';
|
||||
|
||||
import { DockerHost } from './classes.host.js';
|
||||
import { DockerResource } from './classes.base.js';
|
||||
import { logger } from './logger.js';
|
||||
|
||||
export class DockerContainer extends DockerResource {
|
||||
// STATIC (Internal - prefixed with _ to indicate internal use)
|
||||
|
||||
/**
|
||||
* Internal: Get all containers
|
||||
* Public API: Use dockerHost.listContainers() instead
|
||||
*/
|
||||
public static async _list(
|
||||
dockerHostArg: DockerHost,
|
||||
): Promise<DockerContainer[]> {
|
||||
const result: DockerContainer[] = [];
|
||||
const response = await dockerHostArg.request('GET', '/containers/json');
|
||||
|
||||
// TODO: Think about getting the config by inspecting the container
|
||||
for (const containerResult of response.body) {
|
||||
result.push(new DockerContainer(dockerHostArg, containerResult));
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* Internal: Get a container by ID
|
||||
* Public API: Use dockerHost.getContainerById(id) instead
|
||||
*/
|
||||
public static async _fromId(
|
||||
dockerHostArg: DockerHost,
|
||||
containerId: string,
|
||||
): Promise<DockerContainer> {
|
||||
const response = await dockerHostArg.request('GET', `/containers/${containerId}/json`);
|
||||
return new DockerContainer(dockerHostArg, response.body);
|
||||
}
|
||||
|
||||
/**
|
||||
* Internal: Create a container
|
||||
* Public API: Use dockerHost.createContainer(descriptor) instead
|
||||
*/
|
||||
public static async _create(
|
||||
dockerHost: DockerHost,
|
||||
containerCreationDescriptor: interfaces.IContainerCreationDescriptor,
|
||||
): Promise<DockerContainer> {
|
||||
// Check for unique hostname
|
||||
const existingContainers = await DockerContainer._list(dockerHost);
|
||||
const sameHostNameContainer = existingContainers.find((container) => {
|
||||
// TODO implement HostName Detection;
|
||||
return false;
|
||||
});
|
||||
|
||||
const response = await dockerHost.request('POST', '/containers/create', {
|
||||
Hostname: containerCreationDescriptor.Hostname,
|
||||
Domainname: containerCreationDescriptor.Domainname,
|
||||
User: 'root',
|
||||
});
|
||||
|
||||
if (response.statusCode < 300) {
|
||||
logger.log('info', 'Container created successfully');
|
||||
// Return the created container instance
|
||||
return await DockerContainer._fromId(dockerHost, response.body.Id);
|
||||
} else {
|
||||
logger.log('error', 'There has been a problem when creating the container');
|
||||
throw new Error(`Failed to create container: ${response.statusCode}`);
|
||||
}
|
||||
}
|
||||
|
||||
// INSTANCE PROPERTIES
|
||||
public Id: string;
|
||||
public Names: string[];
|
||||
public Image: string;
|
||||
public ImageID: string;
|
||||
public Command: string;
|
||||
public Created: number;
|
||||
public Ports: interfaces.TPorts;
|
||||
public Labels: interfaces.TLabels;
|
||||
public State: string;
|
||||
public Status: string;
|
||||
public HostConfig: any;
|
||||
public NetworkSettings: {
|
||||
Networks: {
|
||||
[key: string]: {
|
||||
IPAMConfig: any;
|
||||
Links: any;
|
||||
Aliases: any;
|
||||
NetworkID: string;
|
||||
EndpointID: string;
|
||||
Gateway: string;
|
||||
IPAddress: string;
|
||||
IPPrefixLen: number;
|
||||
IPv6Gateway: string;
|
||||
GlobalIPv6Address: string;
|
||||
GlobalIPv6PrefixLen: number;
|
||||
MacAddress: string;
|
||||
DriverOpts: any;
|
||||
};
|
||||
};
|
||||
};
|
||||
public Mounts: any;
|
||||
|
||||
constructor(dockerHostArg: DockerHost, dockerContainerObjectArg: any) {
|
||||
super(dockerHostArg);
|
||||
Object.keys(dockerContainerObjectArg).forEach((keyArg) => {
|
||||
this[keyArg] = dockerContainerObjectArg[keyArg];
|
||||
});
|
||||
}
|
||||
|
||||
// INSTANCE METHODS
|
||||
|
||||
/**
|
||||
* Refreshes this container's state from the Docker daemon
|
||||
*/
|
||||
public async refresh(): Promise<void> {
|
||||
const updated = await DockerContainer._fromId(this.dockerHost, this.Id);
|
||||
Object.assign(this, updated);
|
||||
}
|
||||
|
||||
/**
|
||||
* Inspects the container and returns detailed information
|
||||
*/
|
||||
public async inspect(): Promise<any> {
|
||||
const response = await this.dockerHost.request('GET', `/containers/${this.Id}/json`);
|
||||
// Update instance with fresh data
|
||||
Object.assign(this, response.body);
|
||||
return response.body;
|
||||
}
|
||||
|
||||
/**
|
||||
* Starts the container
|
||||
*/
|
||||
public async start(): Promise<void> {
|
||||
const response = await this.dockerHost.request('POST', `/containers/${this.Id}/start`);
|
||||
if (response.statusCode >= 300) {
|
||||
throw new Error(`Failed to start container: ${response.statusCode}`);
|
||||
}
|
||||
await this.refresh();
|
||||
}
|
||||
|
||||
/**
|
||||
* Stops the container
|
||||
* @param options Options for stopping (e.g., timeout in seconds)
|
||||
*/
|
||||
public async stop(options?: { t?: number }): Promise<void> {
|
||||
const queryParams = options?.t ? `?t=${options.t}` : '';
|
||||
const response = await this.dockerHost.request('POST', `/containers/${this.Id}/stop${queryParams}`);
|
||||
if (response.statusCode >= 300) {
|
||||
throw new Error(`Failed to stop container: ${response.statusCode}`);
|
||||
}
|
||||
await this.refresh();
|
||||
}
|
||||
|
||||
/**
|
||||
* Removes the container
|
||||
* @param options Options for removal (force, remove volumes, remove link)
|
||||
*/
|
||||
public async remove(options?: { force?: boolean; v?: boolean; link?: boolean }): Promise<void> {
|
||||
const queryParams = new URLSearchParams();
|
||||
if (options?.force) queryParams.append('force', '1');
|
||||
if (options?.v) queryParams.append('v', '1');
|
||||
if (options?.link) queryParams.append('link', '1');
|
||||
|
||||
const queryString = queryParams.toString();
|
||||
const response = await this.dockerHost.request(
|
||||
'DELETE',
|
||||
`/containers/${this.Id}${queryString ? '?' + queryString : ''}`,
|
||||
);
|
||||
|
||||
if (response.statusCode >= 300) {
|
||||
throw new Error(`Failed to remove container: ${response.statusCode}`);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets container logs
|
||||
* @param options Log options (stdout, stderr, timestamps, tail, since, follow)
|
||||
*/
|
||||
public async logs(options?: {
|
||||
stdout?: boolean;
|
||||
stderr?: boolean;
|
||||
timestamps?: boolean;
|
||||
tail?: number | 'all';
|
||||
since?: number;
|
||||
follow?: boolean;
|
||||
}): Promise<string> {
|
||||
const queryParams = new URLSearchParams();
|
||||
queryParams.append('stdout', options?.stdout !== false ? '1' : '0');
|
||||
queryParams.append('stderr', options?.stderr !== false ? '1' : '0');
|
||||
if (options?.timestamps) queryParams.append('timestamps', '1');
|
||||
if (options?.tail) queryParams.append('tail', options.tail.toString());
|
||||
if (options?.since) queryParams.append('since', options.since.toString());
|
||||
if (options?.follow) queryParams.append('follow', '1');
|
||||
|
||||
const response = await this.dockerHost.request('GET', `/containers/${this.Id}/logs?${queryParams.toString()}`);
|
||||
|
||||
// Docker returns logs with a special format (8 bytes header + payload)
|
||||
// For simplicity, we'll return the raw body as string
|
||||
return response.body.toString();
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets container stats
|
||||
* @param options Stats options (stream for continuous stats)
|
||||
*/
|
||||
public async stats(options?: { stream?: boolean }): Promise<any> {
|
||||
const queryParams = new URLSearchParams();
|
||||
queryParams.append('stream', options?.stream ? '1' : '0');
|
||||
|
||||
const response = await this.dockerHost.request('GET', `/containers/${this.Id}/stats?${queryParams.toString()}`);
|
||||
return response.body;
|
||||
}
|
||||
|
||||
/**
|
||||
* Streams container logs continuously (follow mode)
|
||||
* Returns a readable stream that emits log data as it's produced
|
||||
* @param options Log streaming options
|
||||
*/
|
||||
public async streamLogs(options?: {
|
||||
stdout?: boolean;
|
||||
stderr?: boolean;
|
||||
timestamps?: boolean;
|
||||
tail?: number | 'all';
|
||||
since?: number;
|
||||
}): Promise<plugins.smartstream.stream.Readable> {
|
||||
const queryParams = new URLSearchParams();
|
||||
queryParams.append('stdout', options?.stdout !== false ? '1' : '0');
|
||||
queryParams.append('stderr', options?.stderr !== false ? '1' : '0');
|
||||
queryParams.append('follow', '1'); // Always follow for streaming
|
||||
if (options?.timestamps) queryParams.append('timestamps', '1');
|
||||
if (options?.tail) queryParams.append('tail', options.tail.toString());
|
||||
if (options?.since) queryParams.append('since', options.since.toString());
|
||||
|
||||
const response = await this.dockerHost.requestStreaming(
|
||||
'GET',
|
||||
`/containers/${this.Id}/logs?${queryParams.toString()}`
|
||||
);
|
||||
|
||||
// requestStreaming returns Node.js stream
|
||||
return response as plugins.smartstream.stream.Readable;
|
||||
}
|
||||
|
||||
/**
|
||||
* Attaches to the container's main process (PID 1)
|
||||
* Returns a duplex stream for bidirectional communication
|
||||
* @param options Attach options
|
||||
*/
|
||||
public async attach(options?: {
|
||||
stream?: boolean;
|
||||
stdin?: boolean;
|
||||
stdout?: boolean;
|
||||
stderr?: boolean;
|
||||
logs?: boolean;
|
||||
}): Promise<{
|
||||
stream: plugins.smartstream.stream.Duplex;
|
||||
close: () => Promise<void>;
|
||||
}> {
|
||||
const queryParams = new URLSearchParams();
|
||||
queryParams.append('stream', options?.stream !== false ? '1' : '0');
|
||||
queryParams.append('stdin', options?.stdin ? '1' : '0');
|
||||
queryParams.append('stdout', options?.stdout !== false ? '1' : '0');
|
||||
queryParams.append('stderr', options?.stderr !== false ? '1' : '0');
|
||||
if (options?.logs) queryParams.append('logs', '1');
|
||||
|
||||
const response = await this.dockerHost.requestStreaming(
|
||||
'POST',
|
||||
`/containers/${this.Id}/attach?${queryParams.toString()}`
|
||||
);
|
||||
|
||||
// Create a duplex stream for bidirectional communication
|
||||
const nodeStream = response as plugins.smartstream.stream.Readable;
|
||||
|
||||
// Convert to duplex by wrapping in SmartDuplex
|
||||
const duplexStream = new plugins.smartstream.SmartDuplex({
|
||||
writeFunction: async (chunk) => {
|
||||
// Write data is sent to the container's stdin
|
||||
return chunk;
|
||||
},
|
||||
readableObjectMode: false,
|
||||
writableObjectMode: false,
|
||||
});
|
||||
|
||||
// Pipe container output to our duplex readable side
|
||||
nodeStream.on('data', (chunk) => {
|
||||
duplexStream.push(chunk);
|
||||
});
|
||||
|
||||
nodeStream.on('end', () => {
|
||||
duplexStream.push(null); // Signal end of stream
|
||||
});
|
||||
|
||||
nodeStream.on('error', (error) => {
|
||||
duplexStream.destroy(error);
|
||||
});
|
||||
|
||||
// Helper function to close the attachment
|
||||
const close = async () => {
|
||||
duplexStream.end();
|
||||
if (nodeStream.destroy) {
|
||||
nodeStream.destroy();
|
||||
}
|
||||
};
|
||||
|
||||
return {
|
||||
stream: duplexStream,
|
||||
close,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Executes a command in the container
|
||||
* Returns a duplex stream for command interaction
|
||||
* @param command Command to execute (string or array of strings)
|
||||
* @param options Exec options
|
||||
*/
|
||||
public async exec(
|
||||
command: string | string[],
|
||||
options?: {
|
||||
tty?: boolean;
|
||||
attachStdin?: boolean;
|
||||
attachStdout?: boolean;
|
||||
attachStderr?: boolean;
|
||||
env?: string[];
|
||||
workingDir?: string;
|
||||
user?: string;
|
||||
}
|
||||
): Promise<{
|
||||
stream: plugins.smartstream.stream.Duplex;
|
||||
close: () => Promise<void>;
|
||||
}> {
|
||||
// Step 1: Create exec instance
|
||||
const createResponse = await this.dockerHost.request('POST', `/containers/${this.Id}/exec`, {
|
||||
Cmd: typeof command === 'string' ? ['/bin/sh', '-c', command] : command,
|
||||
AttachStdin: options?.attachStdin !== false,
|
||||
AttachStdout: options?.attachStdout !== false,
|
||||
AttachStderr: options?.attachStderr !== false,
|
||||
Tty: options?.tty || false,
|
||||
Env: options?.env || [],
|
||||
WorkingDir: options?.workingDir,
|
||||
User: options?.user,
|
||||
});
|
||||
|
||||
const execId = createResponse.body.Id;
|
||||
|
||||
// Step 2: Start exec instance with streaming response
|
||||
const startResponse = await this.dockerHost.requestStreaming(
|
||||
'POST',
|
||||
`/exec/${execId}/start`,
|
||||
undefined, // no stream input
|
||||
{
|
||||
Detach: false,
|
||||
Tty: options?.tty || false,
|
||||
}
|
||||
);
|
||||
|
||||
const nodeStream = startResponse as plugins.smartstream.stream.Readable;
|
||||
|
||||
// Create duplex stream for bidirectional communication
|
||||
const duplexStream = new plugins.smartstream.SmartDuplex({
|
||||
writeFunction: async (chunk) => {
|
||||
return chunk;
|
||||
},
|
||||
readableObjectMode: false,
|
||||
writableObjectMode: false,
|
||||
});
|
||||
|
||||
// Pipe exec output to duplex readable side
|
||||
nodeStream.on('data', (chunk) => {
|
||||
duplexStream.push(chunk);
|
||||
});
|
||||
|
||||
nodeStream.on('end', () => {
|
||||
duplexStream.push(null);
|
||||
});
|
||||
|
||||
nodeStream.on('error', (error) => {
|
||||
duplexStream.destroy(error);
|
||||
});
|
||||
|
||||
const close = async () => {
|
||||
duplexStream.end();
|
||||
if (nodeStream.destroy) {
|
||||
nodeStream.destroy();
|
||||
}
|
||||
};
|
||||
|
||||
return {
|
||||
stream: duplexStream,
|
||||
close,
|
||||
};
|
||||
}
|
||||
}
|
||||
560
ts/classes.host.ts
Normal file
560
ts/classes.host.ts
Normal file
@@ -0,0 +1,560 @@
|
||||
import * as plugins from './plugins.js';
|
||||
import * as paths from './paths.js';
|
||||
import * as interfaces from './interfaces/index.js';
|
||||
import { DockerContainer } from './classes.container.js';
|
||||
import { DockerNetwork } from './classes.network.js';
|
||||
import { DockerService } from './classes.service.js';
|
||||
import { DockerSecret } from './classes.secret.js';
|
||||
import { logger } from './logger.js';
|
||||
import { DockerImageStore } from './classes.imagestore.js';
|
||||
import { DockerImage } from './classes.image.js';
|
||||
|
||||
/**
 * Credentials for authenticating against a Docker registry.
 * Passed to DockerHost.auth(), which POSTs it to the daemon's /auth endpoint.
 */
export interface IAuthData {
  serveraddress: string; // registry address, e.g. 'registry.gitlab.com'
  username: string;
  password: string;
}
|
||||
|
||||
/**
 * Construction options for DockerHost.
 */
export interface IDockerHostConstructorOptions {
  socketPath?: string; // explicit daemon address; when absent, DOCKER_HOST env, then CI default, then the local unix socket are used
  imageStoreDir?: string; // local directory backing the DockerImageStore; defaults to a temp dir under the nogit path
}
|
||||
|
||||
export class DockerHost {
|
||||
public options: IDockerHostConstructorOptions;
|
||||
|
||||
/**
|
||||
* the path where the docker sock can be found
|
||||
*/
|
||||
public socketPath: string;
|
||||
private registryToken: string = '';
|
||||
private imageStore: DockerImageStore; // Now private - use storeImage/retrieveImage instead
|
||||
public smartBucket: plugins.smartbucket.SmartBucket;
|
||||
|
||||
/**
|
||||
* the constructor to instantiate a new docker sock instance
|
||||
* @param pathArg
|
||||
*/
|
||||
constructor(optionsArg: IDockerHostConstructorOptions) {
|
||||
this.options = {
|
||||
...{
|
||||
imageStoreDir: plugins.path.join(
|
||||
paths.nogitDir,
|
||||
'temp-docker-image-store',
|
||||
),
|
||||
},
|
||||
...optionsArg,
|
||||
};
|
||||
let pathToUse: string;
|
||||
if (optionsArg.socketPath) {
|
||||
pathToUse = optionsArg.socketPath;
|
||||
} else if (process.env.DOCKER_HOST) {
|
||||
pathToUse = process.env.DOCKER_HOST;
|
||||
} else if (process.env.CI) {
|
||||
pathToUse = 'http://docker:2375/';
|
||||
} else {
|
||||
pathToUse = 'http://unix:/var/run/docker.sock:';
|
||||
}
|
||||
if (pathToUse.startsWith('unix:///')) {
|
||||
pathToUse = pathToUse.replace('unix://', 'http://unix:');
|
||||
}
|
||||
if (pathToUse.endsWith('.sock')) {
|
||||
pathToUse = pathToUse.replace('.sock', '.sock:');
|
||||
}
|
||||
console.log(`using docker sock at ${pathToUse}`);
|
||||
this.socketPath = pathToUse;
|
||||
this.imageStore = new DockerImageStore({
|
||||
bucketDir: null,
|
||||
localDirPath: this.options.imageStoreDir,
|
||||
});
|
||||
}
|
||||
|
||||
public async start() {
|
||||
await this.imageStore.start();
|
||||
}
|
||||
public async stop() {
|
||||
await this.imageStore.stop();
|
||||
}
|
||||
|
||||
/**
|
||||
* Ping the Docker daemon to check if it's running and accessible
|
||||
* @returns Promise that resolves if Docker is available, rejects otherwise
|
||||
* @throws Error if Docker ping fails
|
||||
*/
|
||||
public async ping(): Promise<void> {
|
||||
const response = await this.request('GET', '/_ping');
|
||||
if (response.statusCode !== 200) {
|
||||
throw new Error(`Docker ping failed with status ${response.statusCode}`);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* authenticate against a registry
|
||||
* @param userArg
|
||||
* @param passArg
|
||||
*/
|
||||
public async auth(authData: IAuthData) {
|
||||
const response = await this.request('POST', '/auth', authData);
|
||||
if (response.body.Status !== 'Login Succeeded') {
|
||||
console.log(`Login failed with ${response.body.Status}`);
|
||||
throw new Error(response.body.Status);
|
||||
}
|
||||
console.log(response.body.Status);
|
||||
this.registryToken = plugins.smartstring.base64.encode(
|
||||
plugins.smartjson.stringify(authData),
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* gets the token from the .docker/config.json file for GitLab registry
|
||||
*/
|
||||
public async getAuthTokenFromDockerConfig(registryUrlArg: string) {
|
||||
const dockerConfigPath = plugins.smartpath.get.home(
|
||||
'~/.docker/config.json',
|
||||
);
|
||||
const configObject = plugins.smartfile.fs.toObjectSync(dockerConfigPath);
|
||||
const gitlabAuthBase64 = configObject.auths[registryUrlArg].auth;
|
||||
const gitlabAuth: string =
|
||||
plugins.smartstring.base64.decode(gitlabAuthBase64);
|
||||
const gitlabAuthArray = gitlabAuth.split(':');
|
||||
await this.auth({
|
||||
username: gitlabAuthArray[0],
|
||||
password: gitlabAuthArray[1],
|
||||
serveraddress: registryUrlArg,
|
||||
});
|
||||
}
|
||||
|
||||
// ==============
|
||||
// NETWORKS - Public Factory API
|
||||
// ==============
|
||||
|
||||
/**
|
||||
* Lists all networks
|
||||
*/
|
||||
public async listNetworks() {
|
||||
return await DockerNetwork._list(this);
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets a network by name
|
||||
*/
|
||||
public async getNetworkByName(networkNameArg: string) {
|
||||
return await DockerNetwork._fromName(this, networkNameArg);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a network
|
||||
*/
|
||||
public async createNetwork(
|
||||
descriptor: interfaces.INetworkCreationDescriptor,
|
||||
) {
|
||||
return await DockerNetwork._create(this, descriptor);
|
||||
}
|
||||
|
||||
// ==============
|
||||
// CONTAINERS - Public Factory API
|
||||
// ==============
|
||||
|
||||
/**
|
||||
* Lists all containers
|
||||
*/
|
||||
public async listContainers() {
|
||||
return await DockerContainer._list(this);
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets a container by ID
|
||||
*/
|
||||
public async getContainerById(containerId: string) {
|
||||
return await DockerContainer._fromId(this, containerId);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a container
|
||||
*/
|
||||
public async createContainer(
|
||||
descriptor: interfaces.IContainerCreationDescriptor,
|
||||
) {
|
||||
return await DockerContainer._create(this, descriptor);
|
||||
}
|
||||
|
||||
// ==============
|
||||
// SERVICES - Public Factory API
|
||||
// ==============
|
||||
|
||||
/**
|
||||
* Lists all services
|
||||
*/
|
||||
public async listServices() {
|
||||
return await DockerService._list(this);
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets a service by name
|
||||
*/
|
||||
public async getServiceByName(serviceName: string) {
|
||||
return await DockerService._fromName(this, serviceName);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a service
|
||||
*/
|
||||
public async createService(
|
||||
descriptor: interfaces.IServiceCreationDescriptor,
|
||||
) {
|
||||
return await DockerService._create(this, descriptor);
|
||||
}
|
||||
|
||||
// ==============
|
||||
// IMAGES - Public Factory API
|
||||
// ==============
|
||||
|
||||
/**
|
||||
* Lists all images
|
||||
*/
|
||||
public async listImages() {
|
||||
return await DockerImage._list(this);
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets an image by name
|
||||
*/
|
||||
public async getImageByName(imageNameArg: string) {
|
||||
return await DockerImage._fromName(this, imageNameArg);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates an image from a registry
|
||||
*/
|
||||
public async createImageFromRegistry(
|
||||
descriptor: interfaces.IImageCreationDescriptor,
|
||||
) {
|
||||
return await DockerImage._createFromRegistry(this, {
|
||||
creationObject: descriptor,
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates an image from a tar stream
|
||||
*/
|
||||
public async createImageFromTarStream(
|
||||
tarStream: plugins.smartstream.stream.Readable,
|
||||
descriptor: interfaces.IImageCreationDescriptor,
|
||||
) {
|
||||
return await DockerImage._createFromTarStream(this, {
|
||||
creationObject: descriptor,
|
||||
tarStream: tarStream,
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Builds an image from a Dockerfile
|
||||
*/
|
||||
public async buildImage(imageTag: string) {
|
||||
return await DockerImage._build(this, imageTag);
|
||||
}
|
||||
|
||||
// ==============
|
||||
// SECRETS - Public Factory API
|
||||
// ==============
|
||||
|
||||
/**
|
||||
* Lists all secrets
|
||||
*/
|
||||
public async listSecrets() {
|
||||
return await DockerSecret._list(this);
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets a secret by name
|
||||
*/
|
||||
public async getSecretByName(secretName: string) {
|
||||
return await DockerSecret._fromName(this, secretName);
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets a secret by ID
|
||||
*/
|
||||
public async getSecretById(secretId: string) {
|
||||
return await DockerSecret._fromId(this, secretId);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a secret
|
||||
*/
|
||||
public async createSecret(
|
||||
descriptor: interfaces.ISecretCreationDescriptor,
|
||||
) {
|
||||
return await DockerSecret._create(this, descriptor);
|
||||
}
|
||||
|
||||
// ==============
|
||||
// IMAGE STORE - Public API
|
||||
// ==============
|
||||
|
||||
/**
|
||||
* Stores an image in the local image store
|
||||
*/
|
||||
public async storeImage(
|
||||
imageName: string,
|
||||
tarStream: plugins.smartstream.stream.Readable,
|
||||
): Promise<void> {
|
||||
return await this.imageStore.storeImage(imageName, tarStream);
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieves an image from the local image store
|
||||
*/
|
||||
public async retrieveImage(
|
||||
imageName: string,
|
||||
): Promise<plugins.smartstream.stream.Readable> {
|
||||
return await this.imageStore.getImage(imageName);
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
*/
|
||||
public async getEventObservable(): Promise<plugins.rxjs.Observable<any>> {
|
||||
const response = await this.requestStreaming('GET', '/events');
|
||||
|
||||
// requestStreaming now returns Node.js stream, not web stream
|
||||
const nodeStream = response as plugins.smartstream.stream.Readable;
|
||||
|
||||
return plugins.rxjs.Observable.create((observer) => {
|
||||
nodeStream.on('data', (data) => {
|
||||
const eventString = data.toString();
|
||||
try {
|
||||
const eventObject = JSON.parse(eventString);
|
||||
observer.next(eventObject);
|
||||
} catch (e) {
|
||||
console.log(e);
|
||||
}
|
||||
});
|
||||
return () => {
|
||||
nodeStream.emit('end');
|
||||
};
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* activates docker swarm
|
||||
*/
|
||||
public async activateSwarm(addvertisementIpArg?: string) {
|
||||
// determine advertisement address
|
||||
let addvertisementIp: string;
|
||||
if (addvertisementIpArg) {
|
||||
addvertisementIp = addvertisementIpArg;
|
||||
} else {
|
||||
const smartnetworkInstance = new plugins.smartnetwork.SmartNetwork();
|
||||
const defaultGateway = await smartnetworkInstance.getDefaultGateway();
|
||||
if (defaultGateway) {
|
||||
addvertisementIp = defaultGateway.ipv4.address;
|
||||
}
|
||||
}
|
||||
|
||||
const response = await this.request('POST', '/swarm/init', {
|
||||
ListenAddr: '0.0.0.0:2377',
|
||||
AdvertiseAddr: addvertisementIp,
|
||||
DataPathPort: 4789,
|
||||
DefaultAddrPool: ['10.10.0.0/8', '20.20.0.0/8'],
|
||||
SubnetSize: 24,
|
||||
ForceNewCluster: false,
|
||||
});
|
||||
if (response.statusCode === 200) {
|
||||
logger.log('info', 'created Swam succesfully');
|
||||
} else {
|
||||
logger.log('error', 'could not initiate swarm');
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* fire a request
|
||||
*/
|
||||
public async request(methodArg: string, routeArg: string, dataArg = {}) {
|
||||
const requestUrl = `${this.socketPath}${routeArg}`;
|
||||
|
||||
// Build the request using the fluent API
|
||||
const smartRequest = plugins.smartrequest.SmartRequest.create()
|
||||
.url(requestUrl)
|
||||
.header('Content-Type', 'application/json')
|
||||
.header('X-Registry-Auth', this.registryToken)
|
||||
.header('Host', 'docker.sock')
|
||||
.options({ keepAlive: false });
|
||||
|
||||
// Add body for methods that support it
|
||||
if (dataArg && Object.keys(dataArg).length > 0) {
|
||||
smartRequest.json(dataArg);
|
||||
}
|
||||
|
||||
// Execute the request based on method
|
||||
let response;
|
||||
switch (methodArg.toUpperCase()) {
|
||||
case 'GET':
|
||||
response = await smartRequest.get();
|
||||
break;
|
||||
case 'POST':
|
||||
response = await smartRequest.post();
|
||||
break;
|
||||
case 'PUT':
|
||||
response = await smartRequest.put();
|
||||
break;
|
||||
case 'DELETE':
|
||||
response = await smartRequest.delete();
|
||||
break;
|
||||
default:
|
||||
throw new Error(`Unsupported HTTP method: ${methodArg}`);
|
||||
}
|
||||
|
||||
// Parse the response body based on content type
|
||||
let body;
|
||||
const contentType = response.headers['content-type'] || '';
|
||||
|
||||
// Docker's streaming endpoints (like /images/create) return newline-delimited JSON
|
||||
// which can't be parsed as a single JSON object
|
||||
const isStreamingEndpoint =
|
||||
routeArg.includes('/images/create') ||
|
||||
routeArg.includes('/images/load') ||
|
||||
routeArg.includes('/build');
|
||||
|
||||
if (contentType.includes('application/json') && !isStreamingEndpoint) {
|
||||
body = await response.json();
|
||||
} else {
|
||||
body = await response.text();
|
||||
// Try to parse as JSON if it looks like JSON and is not a streaming response
|
||||
if (
|
||||
!isStreamingEndpoint &&
|
||||
body &&
|
||||
(body.startsWith('{') || body.startsWith('['))
|
||||
) {
|
||||
try {
|
||||
body = JSON.parse(body);
|
||||
} catch {
|
||||
// Keep as text if parsing fails
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Create a response object compatible with existing code
|
||||
const legacyResponse = {
|
||||
statusCode: response.status,
|
||||
body: body,
|
||||
headers: response.headers,
|
||||
};
|
||||
|
||||
if (response.status !== 200) {
|
||||
console.log(body);
|
||||
}
|
||||
|
||||
return legacyResponse;
|
||||
}
|
||||
|
||||
public async requestStreaming(
|
||||
methodArg: string,
|
||||
routeArg: string,
|
||||
readStream?: plugins.smartstream.stream.Readable,
|
||||
jsonData?: any,
|
||||
) {
|
||||
const requestUrl = `${this.socketPath}${routeArg}`;
|
||||
|
||||
// Build the request using the fluent API
|
||||
const smartRequest = plugins.smartrequest.SmartRequest.create()
|
||||
.url(requestUrl)
|
||||
.header('Content-Type', 'application/json')
|
||||
.header('X-Registry-Auth', this.registryToken)
|
||||
.header('Host', 'docker.sock')
|
||||
.timeout(30000)
|
||||
.options({ keepAlive: false, autoDrain: true }); // Disable auto-drain for streaming
|
||||
|
||||
// If we have JSON data, add it to the request
|
||||
if (jsonData && Object.keys(jsonData).length > 0) {
|
||||
smartRequest.json(jsonData);
|
||||
}
|
||||
|
||||
// If we have a readStream, use the new stream method with logging
|
||||
if (readStream) {
|
||||
let counter = 0;
|
||||
const smartduplex = new plugins.smartstream.SmartDuplex({
|
||||
writeFunction: async (chunkArg) => {
|
||||
if (counter % 1000 === 0) {
|
||||
console.log(`posting chunk ${counter}`);
|
||||
}
|
||||
counter++;
|
||||
return chunkArg;
|
||||
},
|
||||
});
|
||||
|
||||
// Pipe through the logging duplex stream
|
||||
const loggedStream = readStream.pipe(smartduplex);
|
||||
|
||||
// Use the new stream method to stream the data
|
||||
smartRequest.stream(loggedStream, 'application/octet-stream');
|
||||
}
|
||||
|
||||
// Execute the request based on method
|
||||
let response: plugins.smartrequest.ICoreResponse;
|
||||
switch (methodArg.toUpperCase()) {
|
||||
case 'GET':
|
||||
response = await smartRequest.get();
|
||||
break;
|
||||
case 'POST':
|
||||
response = await smartRequest.post();
|
||||
break;
|
||||
case 'PUT':
|
||||
response = await smartRequest.put();
|
||||
break;
|
||||
case 'DELETE':
|
||||
response = await smartRequest.delete();
|
||||
break;
|
||||
default:
|
||||
throw new Error(`Unsupported HTTP method: ${methodArg}`);
|
||||
}
|
||||
|
||||
console.log(response.status);
|
||||
|
||||
// For streaming responses, get the web stream
|
||||
const webStream = response.stream();
|
||||
|
||||
if (!webStream) {
|
||||
// If no stream is available, consume the body as text
|
||||
const body = await response.text();
|
||||
console.log(body);
|
||||
|
||||
// Return a compatible response object
|
||||
return {
|
||||
statusCode: response.status,
|
||||
body: body,
|
||||
headers: response.headers,
|
||||
};
|
||||
}
|
||||
|
||||
// Convert web ReadableStream to Node.js stream for backward compatibility
|
||||
const nodeStream = plugins.smartstream.nodewebhelpers.convertWebReadableToNodeReadable(webStream);
|
||||
|
||||
// Add properties for compatibility
|
||||
(nodeStream as any).statusCode = response.status;
|
||||
(nodeStream as any).body = ''; // For compatibility
|
||||
|
||||
return nodeStream;
|
||||
}
|
||||
|
||||
/**
|
||||
* add s3 storage
|
||||
* @param optionsArg
|
||||
*/
|
||||
public async addS3Storage(optionsArg: plugins.tsclass.storage.IS3Descriptor) {
|
||||
this.smartBucket = new plugins.smartbucket.SmartBucket(optionsArg);
|
||||
if (!optionsArg.bucketName) {
|
||||
throw new Error('bucketName is required');
|
||||
}
|
||||
const bucket = await this.smartBucket.getBucketByName(
|
||||
optionsArg.bucketName,
|
||||
);
|
||||
let wantedDirectory = await bucket.getBaseDirectory();
|
||||
if (optionsArg.directoryPath) {
|
||||
wantedDirectory = await wantedDirectory.getSubDirectoryByName(
|
||||
optionsArg.directoryPath,
|
||||
);
|
||||
}
|
||||
this.imageStore.options.bucketDir = wantedDirectory;
|
||||
}
|
||||
}
|
||||
345
ts/classes.image.ts
Normal file
345
ts/classes.image.ts
Normal file
@@ -0,0 +1,345 @@
|
||||
import * as plugins from './plugins.js';
|
||||
import * as interfaces from './interfaces/index.js';
|
||||
import { DockerHost } from './classes.host.js';
|
||||
import { DockerResource } from './classes.base.js';
|
||||
import { logger } from './logger.js';
|
||||
|
||||
/**
|
||||
* represents a docker image on the remote docker host
|
||||
*/
|
||||
export class DockerImage extends DockerResource {
|
||||
// STATIC (Internal - prefixed with _ to indicate internal use)
|
||||
|
||||
/**
|
||||
* Internal: Get all images
|
||||
* Public API: Use dockerHost.listImages() instead
|
||||
*/
|
||||
public static async _list(dockerHost: DockerHost) {
|
||||
const images: DockerImage[] = [];
|
||||
const response = await dockerHost.request('GET', '/images/json');
|
||||
for (const imageObject of response.body) {
|
||||
images.push(new DockerImage(dockerHost, imageObject));
|
||||
}
|
||||
return images;
|
||||
}
|
||||
|
||||
/**
|
||||
* Internal: Get image by name
|
||||
* Public API: Use dockerHost.getImageByName(name) instead
|
||||
*/
|
||||
public static async _fromName(
|
||||
dockerHost: DockerHost,
|
||||
imageNameArg: string,
|
||||
) {
|
||||
const images = await this._list(dockerHost);
|
||||
const result = images.find((image) => {
|
||||
if (image.RepoTags) {
|
||||
return image.RepoTags.includes(imageNameArg);
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
});
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* Internal: Create image from registry
|
||||
* Public API: Use dockerHost.createImageFromRegistry(descriptor) instead
|
||||
*/
|
||||
public static async _createFromRegistry(
|
||||
dockerHostArg: DockerHost,
|
||||
optionsArg: {
|
||||
creationObject: interfaces.IImageCreationDescriptor;
|
||||
},
|
||||
): Promise<DockerImage> {
|
||||
// lets create a sanatized imageUrlObject
|
||||
const imageUrlObject: {
|
||||
imageUrl: string;
|
||||
imageTag: string;
|
||||
imageOriginTag: string;
|
||||
} = {
|
||||
imageUrl: optionsArg.creationObject.imageUrl,
|
||||
imageTag: optionsArg.creationObject.imageTag,
|
||||
imageOriginTag: null,
|
||||
};
|
||||
if (imageUrlObject.imageUrl.includes(':')) {
|
||||
const imageUrl = imageUrlObject.imageUrl.split(':')[0];
|
||||
const imageTag = imageUrlObject.imageUrl.split(':')[1];
|
||||
if (imageUrlObject.imageTag) {
|
||||
throw new Error(
|
||||
`imageUrl ${imageUrlObject.imageUrl} can't be tagged with ${imageUrlObject.imageTag} because it is already tagged with ${imageTag}`,
|
||||
);
|
||||
} else {
|
||||
imageUrlObject.imageUrl = imageUrl;
|
||||
imageUrlObject.imageTag = imageTag;
|
||||
}
|
||||
} else if (!imageUrlObject.imageTag) {
|
||||
imageUrlObject.imageTag = 'latest';
|
||||
}
|
||||
imageUrlObject.imageOriginTag = `${imageUrlObject.imageUrl}:${imageUrlObject.imageTag}`;
|
||||
|
||||
// lets actually create the image
|
||||
const response = await dockerHostArg.request(
|
||||
'POST',
|
||||
`/images/create?fromImage=${encodeURIComponent(
|
||||
imageUrlObject.imageUrl,
|
||||
)}&tag=${encodeURIComponent(imageUrlObject.imageTag)}`,
|
||||
);
|
||||
if (response.statusCode < 300) {
|
||||
logger.log(
|
||||
'info',
|
||||
`Successfully pulled image ${imageUrlObject.imageUrl} from the registry`,
|
||||
);
|
||||
const image = await DockerImage._fromName(
|
||||
dockerHostArg,
|
||||
imageUrlObject.imageOriginTag,
|
||||
);
|
||||
return image;
|
||||
} else {
|
||||
logger.log('error', `Failed at the attempt of creating a new image`);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Internal: Create image from tar stream
|
||||
* Public API: Use dockerHost.createImageFromTarStream(stream, descriptor) instead
|
||||
*/
|
||||
public static async _createFromTarStream(
|
||||
dockerHostArg: DockerHost,
|
||||
optionsArg: {
|
||||
creationObject: interfaces.IImageCreationDescriptor;
|
||||
tarStream: plugins.smartstream.stream.Readable;
|
||||
},
|
||||
): Promise<DockerImage> {
|
||||
// Start the request for importing an image
|
||||
const response = await dockerHostArg.requestStreaming(
|
||||
'POST',
|
||||
'/images/load',
|
||||
optionsArg.tarStream,
|
||||
);
|
||||
|
||||
// requestStreaming now returns Node.js stream
|
||||
const nodeStream = response as plugins.smartstream.stream.Readable;
|
||||
|
||||
/**
|
||||
* Docker typically returns lines like:
|
||||
* {"stream":"Loaded image: myrepo/myimage:latest"}
|
||||
*
|
||||
* So we will collect those lines and parse out the final image name.
|
||||
*/
|
||||
let rawOutput = '';
|
||||
nodeStream.on('data', (chunk) => {
|
||||
rawOutput += chunk.toString();
|
||||
});
|
||||
|
||||
// Wrap the end event in a Promise for easier async/await usage
|
||||
await new Promise<void>((resolve, reject) => {
|
||||
nodeStream.on('end', () => {
|
||||
resolve();
|
||||
});
|
||||
nodeStream.on('error', (err) => {
|
||||
reject(err);
|
||||
});
|
||||
});
|
||||
|
||||
// Attempt to parse each line to find something like "Loaded image: ..."
|
||||
let loadedImageTag: string | undefined;
|
||||
const lines = rawOutput.trim().split('\n').filter(Boolean);
|
||||
|
||||
for (const line of lines) {
|
||||
try {
|
||||
const jsonLine = JSON.parse(line);
|
||||
if (
|
||||
jsonLine.stream &&
|
||||
(jsonLine.stream.startsWith('Loaded image:') ||
|
||||
jsonLine.stream.startsWith('Loaded image ID:'))
|
||||
) {
|
||||
// Examples:
|
||||
// "Loaded image: your-image:latest"
|
||||
// "Loaded image ID: sha256:...."
|
||||
loadedImageTag = jsonLine.stream
|
||||
.replace('Loaded image: ', '')
|
||||
.replace('Loaded image ID: ', '')
|
||||
.trim();
|
||||
}
|
||||
} catch {
|
||||
// not valid JSON, ignore
|
||||
}
|
||||
}
|
||||
|
||||
if (!loadedImageTag) {
|
||||
throw new Error(
|
||||
`Could not parse the loaded image info from Docker response.\nResponse was:\n${rawOutput}`,
|
||||
);
|
||||
}
|
||||
|
||||
// Now try to look up that image by the "loadedImageTag".
|
||||
// Depending on Docker's response, it might be something like:
|
||||
// "myrepo/myimage:latest" OR "sha256:someHash..."
|
||||
// If Docker gave you an ID (e.g. "sha256:..."), you may need a separate
|
||||
// DockerImage.getImageById method; or if you prefer, you can treat it as a name.
|
||||
const newlyImportedImage = await DockerImage._fromName(
|
||||
dockerHostArg,
|
||||
loadedImageTag,
|
||||
);
|
||||
|
||||
if (!newlyImportedImage) {
|
||||
throw new Error(
|
||||
`Image load succeeded, but no local reference found for "${loadedImageTag}".`,
|
||||
);
|
||||
}
|
||||
|
||||
logger.log('info', `Successfully imported image "${loadedImageTag}".`);
|
||||
|
||||
return newlyImportedImage;
|
||||
}
|
||||
|
||||
public static async tagImageByIdOrName(
|
||||
dockerHost: DockerHost,
|
||||
idOrNameArg: string,
|
||||
newTagArg: string,
|
||||
) {
|
||||
const response = await dockerHost.request(
|
||||
'POST',
|
||||
`/images/${encodeURIComponent(idOrNameArg)}/${encodeURIComponent(newTagArg)}`,
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Internal: Build image from Dockerfile
|
||||
* Public API: Use dockerHost.buildImage(tag) instead
|
||||
*/
|
||||
public static async _build(dockerHostArg: DockerHost, dockerImageTag) {
|
||||
// TODO: implement building an image
|
||||
}
|
||||
|
||||
// INSTANCE PROPERTIES
|
||||
/**
|
||||
* the tags for an image
|
||||
*/
|
||||
public Containers: number;
|
||||
public Created: number;
|
||||
public Id: string;
|
||||
public Labels: interfaces.TLabels;
|
||||
public ParentId: string;
|
||||
public RepoDigests: string[];
|
||||
public RepoTags: string[];
|
||||
public SharedSize: number;
|
||||
public Size: number;
|
||||
public VirtualSize: number;
|
||||
|
||||
constructor(dockerHostArg: DockerHost, dockerImageObjectArg: any) {
|
||||
super(dockerHostArg);
|
||||
Object.keys(dockerImageObjectArg).forEach((keyArg) => {
|
||||
this[keyArg] = dockerImageObjectArg[keyArg];
|
||||
});
|
||||
}
|
||||
|
||||
// INSTANCE METHODS
|
||||
|
||||
/**
|
||||
* Refreshes this image's state from the Docker daemon
|
||||
*/
|
||||
public async refresh(): Promise<void> {
|
||||
if (!this.RepoTags || this.RepoTags.length === 0) {
|
||||
throw new Error('Cannot refresh image without RepoTags');
|
||||
}
|
||||
const updated = await DockerImage._fromName(this.dockerHost, this.RepoTags[0]);
|
||||
if (updated) {
|
||||
Object.assign(this, updated);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* tag an image
|
||||
* @param newTag
|
||||
*/
|
||||
public async tagImage(newTag) {
|
||||
throw new Error('.tagImage is not yet implemented');
|
||||
}
|
||||
|
||||
/**
|
||||
* pulls the latest version from the registry
|
||||
*/
|
||||
public async pullLatestImageFromRegistry(): Promise<boolean> {
|
||||
const updatedImage = await DockerImage._createFromRegistry(this.dockerHost, {
|
||||
creationObject: {
|
||||
imageUrl: this.RepoTags[0],
|
||||
},
|
||||
});
|
||||
Object.assign(this, updatedImage);
|
||||
// TODO: Compare image digists before and after
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Removes this image from the Docker daemon
|
||||
*/
|
||||
public async remove(options?: { force?: boolean; noprune?: boolean }): Promise<void> {
|
||||
const queryParams = new URLSearchParams();
|
||||
if (options?.force) queryParams.append('force', '1');
|
||||
if (options?.noprune) queryParams.append('noprune', '1');
|
||||
|
||||
const queryString = queryParams.toString();
|
||||
const response = await this.dockerHost.request(
|
||||
'DELETE',
|
||||
`/images/${encodeURIComponent(this.Id)}${queryString ? '?' + queryString : ''}`,
|
||||
);
|
||||
|
||||
if (response.statusCode >= 300) {
|
||||
throw new Error(`Failed to remove image: ${response.statusCode}`);
|
||||
}
|
||||
}
|
||||
|
||||
// get stuff
|
||||
public async getVersion() {
|
||||
if (this.Labels && this.Labels.version) {
|
||||
return this.Labels.version;
|
||||
} else {
|
||||
return '0.0.0';
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* exports an image to a tar ball
|
||||
*/
|
||||
public async exportToTarStream(): Promise<plugins.smartstream.stream.Readable> {
|
||||
logger.log('info', `Exporting image ${this.RepoTags[0]} to tar stream.`);
|
||||
const response = await this.dockerHost.requestStreaming(
|
||||
'GET',
|
||||
`/images/${encodeURIComponent(this.RepoTags[0])}/get`,
|
||||
);
|
||||
|
||||
// requestStreaming now returns Node.js stream
|
||||
const nodeStream = response as plugins.smartstream.stream.Readable;
|
||||
|
||||
let counter = 0;
|
||||
const webduplexStream = new plugins.smartstream.SmartDuplex({
|
||||
writeFunction: async (chunk, tools) => {
|
||||
if (counter % 1000 === 0) console.log(`Got chunk: ${counter}`);
|
||||
counter++;
|
||||
return chunk;
|
||||
},
|
||||
});
|
||||
|
||||
nodeStream.on('data', (chunk) => {
|
||||
if (!webduplexStream.write(chunk)) {
|
||||
nodeStream.pause();
|
||||
webduplexStream.once('drain', () => {
|
||||
nodeStream.resume();
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
nodeStream.on('end', () => {
|
||||
webduplexStream.end();
|
||||
});
|
||||
|
||||
nodeStream.on('error', (error) => {
|
||||
logger.log('error', `Error during image export: ${error.message}`);
|
||||
webduplexStream.destroy(error);
|
||||
});
|
||||
|
||||
return webduplexStream;
|
||||
}
|
||||
}
|
||||
163
ts/classes.imagestore.ts
Normal file
163
ts/classes.imagestore.ts
Normal file
@@ -0,0 +1,163 @@
|
||||
import * as plugins from './plugins.js';
|
||||
import * as paths from './paths.js';
|
||||
import { logger } from './logger.js';
|
||||
import type { DockerHost } from './classes.host.js';
|
||||
|
||||
/**
 * Constructor options for DockerImageStore.
 */
export interface IDockerImageStoreConstructorOptions {
  /**
   * used for preparing images for longer term storage
   * (local scratch directory: download, extraction and repackaging happen here)
   */
  localDirPath: string;
  /**
   * a smartbucket dir for longer term storage.
   * (S3-backed target directory that receives the processed tarballs)
   */
  bucketDir: plugins.smartbucket.Directory;
}
|
||||
|
||||
/**
 * Stores docker image tarballs: an incoming tar stream is saved locally,
 * extracted, rewritten to carry the wanted image name, repackaged and
 * uploaded to the configured S3 bucket directory.
 */
export class DockerImageStore {
  // constructor options; bucketDir may be (re)assigned later, e.g. by addS3Storage
  public options: IDockerImageStoreConstructorOptions;

  constructor(optionsArg: IDockerImageStoreConstructorOptions) {
    this.options = optionsArg;
  }

  // Method to store tar stream
  /**
   * Consumes a docker image tar stream, rewrites its metadata files
   * (index.json, manifest.json, oci-layout, repositories) so the archive is
   * addressed by `imageName`, repackages it and uploads it to the bucket as
   * `${imageName}.tar`. All local intermediate artifacts are deleted.
   * @param imageName name the stored image should carry
   * @param tarStream readable stream of the docker-save style tarball
   */
  public async storeImage(
    imageName: string,
    tarStream: plugins.smartstream.stream.Readable,
  ): Promise<void> {
    logger.log('info', `Storing image ${imageName}...`);
    // unique id isolates concurrent storeImage calls in the scratch dir
    const uniqueProcessingId = plugins.smartunique.shortId();

    const initialTarDownloadPath = plugins.path.join(
      this.options.localDirPath,
      `${uniqueProcessingId}.tar`,
    );
    const extractionDir = plugins.path.join(
      this.options.localDirPath,
      uniqueProcessingId,
    );
    // Create a write stream to store the tar file
    const writeStream = plugins.smartfile.fsStream.createWriteStream(
      initialTarDownloadPath,
    );

    // lets wait for the write stream to finish
    await new Promise((resolve, reject) => {
      tarStream.pipe(writeStream);
      writeStream.on('finish', resolve);
      writeStream.on('error', reject);
    });
    logger.log(
      'info',
      `Image ${imageName} stored locally for processing. Extracting...`,
    );

    // lets process the image
    const tarArchive = await plugins.smartarchive.SmartArchive.fromArchiveFile(
      initialTarDownloadPath,
    );
    await tarArchive.exportToFs(extractionDir);
    logger.log('info', `Image ${imageName} extracted.`);
    await plugins.smartfile.fs.remove(initialTarDownloadPath);
    logger.log('info', `deleted original tar to save space.`);
    logger.log('info', `now repackaging for s3...`);
    // load the four metadata files that reference the image name
    const smartfileIndexJson = await plugins.smartfile.SmartFile.fromFilePath(
      plugins.path.join(extractionDir, 'index.json'),
    );
    const smartfileManifestJson =
      await plugins.smartfile.SmartFile.fromFilePath(
        plugins.path.join(extractionDir, 'manifest.json'),
      );
    const smartfileOciLayoutJson =
      await plugins.smartfile.SmartFile.fromFilePath(
        plugins.path.join(extractionDir, 'oci-layout'),
      );
    const smartfileRepositoriesJson =
      await plugins.smartfile.SmartFile.fromFilePath(
        plugins.path.join(extractionDir, 'repositories'),
      );
    const indexJson = JSON.parse(smartfileIndexJson.contents.toString());
    const manifestJson = JSON.parse(smartfileManifestJson.contents.toString());
    const ociLayoutJson = JSON.parse(
      smartfileOciLayoutJson.contents.toString(),
    );
    const repositoriesJson = JSON.parse(
      smartfileRepositoriesJson.contents.toString(),
    );

    // rewrite the archive's single image entry to the wanted name
    // (assumes a single-image archive: only manifests[0] / the first repo
    //  key are rewritten — TODO confirm for multi-image tarballs)
    indexJson.manifests[0].annotations['io.containerd.image.name'] = imageName;
    manifestJson[0].RepoTags[0] = imageName;
    const repoFirstKey = Object.keys(repositoriesJson)[0];
    const repoFirstValue = repositoriesJson[repoFirstKey];
    repositoriesJson[imageName] = repoFirstValue;
    delete repositoriesJson[repoFirstKey];

    // persist the rewritten metadata back into the extraction dir
    smartfileIndexJson.contents = Buffer.from(
      JSON.stringify(indexJson, null, 2),
    );
    smartfileManifestJson.contents = Buffer.from(
      JSON.stringify(manifestJson, null, 2),
    );
    smartfileOciLayoutJson.contents = Buffer.from(
      JSON.stringify(ociLayoutJson, null, 2),
    );
    smartfileRepositoriesJson.contents = Buffer.from(
      JSON.stringify(repositoriesJson, null, 2),
    );
    await Promise.all([
      smartfileIndexJson.write(),
      smartfileManifestJson.write(),
      smartfileOciLayoutJson.write(),
      smartfileRepositoriesJson.write(),
    ]);

    logger.log('info', 'repackaging archive for s3...');
    const tartools = new plugins.smartarchive.TarTools();
    const newTarPack = await tartools.packDirectory(extractionDir);
    const finalTarName = `${uniqueProcessingId}.processed.tar`;
    const finalTarPath = plugins.path.join(
      this.options.localDirPath,
      finalTarName,
    );
    const finalWriteStream =
      plugins.smartfile.fsStream.createWriteStream(finalTarPath);
    await new Promise((resolve, reject) => {
      newTarPack.finalize();
      newTarPack.pipe(finalWriteStream);
      finalWriteStream.on('finish', resolve);
      finalWriteStream.on('error', reject);
    });
    logger.log('ok', `Repackaged image ${imageName} for s3.`);
    // clean up local scratch artifacts after the upload source is created
    await plugins.smartfile.fs.remove(extractionDir);
    const finalTarReadStream =
      plugins.smartfile.fsStream.createReadStream(finalTarPath);
    await this.options.bucketDir.fastPutStream({
      stream: finalTarReadStream,
      path: `${imageName}.tar`,
    });
    await plugins.smartfile.fs.remove(finalTarPath);
  }

  /**
   * Prepares the local scratch directory (created empty).
   */
  public async start() {
    await plugins.smartfile.fs.ensureEmptyDir(this.options.localDirPath);
  }

  public async stop() {}

  // Method to retrieve tar stream
  /**
   * Returns a read stream for a locally stored image tarball.
   * NOTE(review): this reads from localDirPath, but storeImage() uploads to
   * the bucket and deletes all local tar files — so this only finds images
   * placed locally by some other path; verify against callers.
   * @throws Error when `${imageName}.tar` is not present locally
   */
  public async getImage(
    imageName: string,
  ): Promise<plugins.smartstream.stream.Readable> {
    const imagePath = plugins.path.join(
      this.options.localDirPath,
      `${imageName}.tar`,
    );

    if (!(await plugins.smartfile.fs.fileExists(imagePath))) {
      throw new Error(`Image ${imageName} does not exist.`);
    }

    return plugins.smartfile.fsStream.createReadStream(imagePath);
  }
}
|
||||
160
ts/classes.network.ts
Normal file
160
ts/classes.network.ts
Normal file
@@ -0,0 +1,160 @@
|
||||
import * as plugins from './plugins.js';
|
||||
import * as interfaces from './interfaces/index.js';
|
||||
|
||||
import { DockerHost } from './classes.host.js';
|
||||
import { DockerResource } from './classes.base.js';
|
||||
import { DockerService } from './classes.service.js';
|
||||
import { logger } from './logger.js';
|
||||
|
||||
export class DockerNetwork extends DockerResource {
|
||||
// STATIC (Internal - prefixed with _ to indicate internal use)
|
||||
|
||||
/**
|
||||
* Internal: Get all networks
|
||||
* Public API: Use dockerHost.getNetworks() instead
|
||||
*/
|
||||
public static async _list(
|
||||
dockerHost: DockerHost,
|
||||
): Promise<DockerNetwork[]> {
|
||||
const dockerNetworks: DockerNetwork[] = [];
|
||||
const response = await dockerHost.request('GET', '/networks');
|
||||
for (const networkObject of response.body) {
|
||||
const dockerNetwork = new DockerNetwork(dockerHost);
|
||||
Object.assign(dockerNetwork, networkObject);
|
||||
dockerNetworks.push(dockerNetwork);
|
||||
}
|
||||
return dockerNetworks;
|
||||
}
|
||||
|
||||
/**
|
||||
* Internal: Get network by name
|
||||
* Public API: Use dockerHost.getNetworkByName(name) instead
|
||||
*/
|
||||
public static async _fromName(
|
||||
dockerHost: DockerHost,
|
||||
dockerNetworkNameArg: string,
|
||||
) {
|
||||
const networks = await DockerNetwork._list(dockerHost);
|
||||
return networks.find(
|
||||
(dockerNetwork) => dockerNetwork.Name === dockerNetworkNameArg,
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Internal: Create a network
|
||||
* Public API: Use dockerHost.createNetwork(descriptor) instead
|
||||
*/
|
||||
public static async _create(
|
||||
dockerHost: DockerHost,
|
||||
networkCreationDescriptor: interfaces.INetworkCreationDescriptor,
|
||||
): Promise<DockerNetwork> {
|
||||
const response = await dockerHost.request('POST', '/networks/create', {
|
||||
Name: networkCreationDescriptor.Name,
|
||||
CheckDuplicate: true,
|
||||
Driver: 'overlay',
|
||||
EnableIPv6: false,
|
||||
/* IPAM: {
|
||||
Driver: 'default',
|
||||
Config: [
|
||||
{
|
||||
Subnet: `172.20.${networkCreationDescriptor.NetworkNumber}.0/16`,
|
||||
IPRange: `172.20.${networkCreationDescriptor.NetworkNumber}.0/24`,
|
||||
Gateway: `172.20.${networkCreationDescriptor.NetworkNumber}.11`
|
||||
}
|
||||
]
|
||||
}, */
|
||||
Internal: false,
|
||||
Attachable: true,
|
||||
Ingress: false,
|
||||
});
|
||||
if (response.statusCode < 300) {
|
||||
logger.log('info', 'Created network successfully');
|
||||
return await DockerNetwork._fromName(
|
||||
dockerHost,
|
||||
networkCreationDescriptor.Name,
|
||||
);
|
||||
} else {
|
||||
logger.log(
|
||||
'error',
|
||||
'There has been an error creating the wanted network',
|
||||
);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
// INSTANCE PROPERTIES
|
||||
public Name: string;
|
||||
public Id: string;
|
||||
public Created: string;
|
||||
public Scope: string;
|
||||
public Driver: string;
|
||||
public EnableIPv6: boolean;
|
||||
public Internal: boolean;
|
||||
public Attachable: boolean;
|
||||
public Ingress: false;
|
||||
public IPAM: {
|
||||
Driver: 'default' | 'bridge' | 'overlay';
|
||||
Config: [
|
||||
{
|
||||
Subnet: string;
|
||||
IPRange: string;
|
||||
Gateway: string;
|
||||
},
|
||||
];
|
||||
};
|
||||
|
||||
constructor(dockerHostArg: DockerHost) {
|
||||
super(dockerHostArg);
|
||||
}
|
||||
|
||||
// INSTANCE METHODS
|
||||
|
||||
/**
|
||||
* Refreshes this network's state from the Docker daemon
|
||||
*/
|
||||
public async refresh(): Promise<void> {
|
||||
const updated = await DockerNetwork._fromName(this.dockerHost, this.Name);
|
||||
if (updated) {
|
||||
Object.assign(this, updated);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Removes the network
|
||||
*/
|
||||
public async remove() {
|
||||
const response = await this.dockerHost.request(
|
||||
'DELETE',
|
||||
`/networks/${this.Id}`,
|
||||
);
|
||||
}
|
||||
|
||||
public async listContainersOnNetwork(): Promise<
|
||||
Array<{
|
||||
Name: string;
|
||||
EndpointID: string;
|
||||
MacAddress: string;
|
||||
IPv4Address: string;
|
||||
IPv6Address: string;
|
||||
}>
|
||||
> {
|
||||
const returnArray = [];
|
||||
const response = await this.dockerHost.request(
|
||||
'GET',
|
||||
`/networks/${this.Id}`,
|
||||
);
|
||||
for (const key of Object.keys(response.body.Containers)) {
|
||||
returnArray.push(response.body.Containers[key]);
|
||||
}
|
||||
|
||||
return returnArray;
|
||||
}
|
||||
|
||||
public async getContainersOnNetworkForService(serviceArg: DockerService) {
|
||||
const containersOnNetwork = await this.listContainersOnNetwork();
|
||||
const containersOfService = containersOnNetwork.filter((container) => {
|
||||
return container.Name.startsWith(serviceArg.Spec.Name);
|
||||
});
|
||||
return containersOfService;
|
||||
}
|
||||
}
|
||||
129
ts/classes.secret.ts
Normal file
129
ts/classes.secret.ts
Normal file
@@ -0,0 +1,129 @@
|
||||
import * as plugins from './plugins.js';
|
||||
import { DockerHost } from './classes.host.js';
|
||||
import { DockerResource } from './classes.base.js';
|
||||
|
||||
// interfaces
|
||||
import * as interfaces from './interfaces/index.js';
|
||||
|
||||
export class DockerSecret extends DockerResource {
|
||||
// STATIC (Internal - prefixed with _ to indicate internal use)
|
||||
|
||||
/**
|
||||
* Internal: Get all secrets
|
||||
* Public API: Use dockerHost.listSecrets() instead
|
||||
*/
|
||||
public static async _list(dockerHostArg: DockerHost) {
|
||||
const response = await dockerHostArg.request('GET', '/secrets');
|
||||
const secrets: DockerSecret[] = [];
|
||||
for (const secret of response.body) {
|
||||
const dockerSecretInstance = new DockerSecret(dockerHostArg);
|
||||
Object.assign(dockerSecretInstance, secret);
|
||||
secrets.push(dockerSecretInstance);
|
||||
}
|
||||
return secrets;
|
||||
}
|
||||
|
||||
/**
|
||||
* Internal: Get secret by ID
|
||||
* Public API: Use dockerHost.getSecretById(id) instead
|
||||
*/
|
||||
public static async _fromId(dockerHostArg: DockerHost, idArg: string) {
|
||||
const secrets = await this._list(dockerHostArg);
|
||||
return secrets.find((secret) => secret.ID === idArg);
|
||||
}
|
||||
|
||||
/**
|
||||
* Internal: Get secret by name
|
||||
* Public API: Use dockerHost.getSecretByName(name) instead
|
||||
*/
|
||||
public static async _fromName(
|
||||
dockerHostArg: DockerHost,
|
||||
nameArg: string,
|
||||
) {
|
||||
const secrets = await this._list(dockerHostArg);
|
||||
return secrets.find((secret) => secret.Spec.Name === nameArg);
|
||||
}
|
||||
|
||||
/**
|
||||
* Internal: Create a secret
|
||||
* Public API: Use dockerHost.createSecret(descriptor) instead
|
||||
*/
|
||||
public static async _create(
|
||||
dockerHostArg: DockerHost,
|
||||
secretDescriptor: interfaces.ISecretCreationDescriptor,
|
||||
) {
|
||||
const labels: interfaces.TLabels = {
|
||||
...secretDescriptor.labels,
|
||||
version: secretDescriptor.version,
|
||||
};
|
||||
const response = await dockerHostArg.request('POST', '/secrets/create', {
|
||||
Name: secretDescriptor.name,
|
||||
Labels: labels,
|
||||
Data: plugins.smartstring.base64.encode(secretDescriptor.contentArg),
|
||||
});
|
||||
|
||||
const newSecretInstance = new DockerSecret(dockerHostArg);
|
||||
Object.assign(newSecretInstance, response.body);
|
||||
Object.assign(
|
||||
newSecretInstance,
|
||||
await DockerSecret._fromId(dockerHostArg, newSecretInstance.ID),
|
||||
);
|
||||
return newSecretInstance;
|
||||
}
|
||||
|
||||
// INSTANCE PROPERTIES
|
||||
public ID: string;
|
||||
public Spec: {
|
||||
Name: string;
|
||||
Labels: interfaces.TLabels;
|
||||
};
|
||||
public Version: {
|
||||
Index: string;
|
||||
};
|
||||
|
||||
constructor(dockerHostArg: DockerHost) {
|
||||
super(dockerHostArg);
|
||||
}
|
||||
|
||||
// INSTANCE METHODS
|
||||
|
||||
/**
|
||||
* Refreshes this secret's state from the Docker daemon
|
||||
*/
|
||||
public async refresh(): Promise<void> {
|
||||
const updated = await DockerSecret._fromId(this.dockerHost, this.ID);
|
||||
if (updated) {
|
||||
Object.assign(this, updated);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Updates a secret
|
||||
*/
|
||||
public async update(contentArg: string) {
|
||||
const route = `/secrets/${this.ID}/update?=version=${this.Version.Index}`;
|
||||
const response = await this.dockerHost.request(
|
||||
'POST',
|
||||
`/secrets/${this.ID}/update?version=${this.Version.Index}`,
|
||||
{
|
||||
Name: this.Spec.Name,
|
||||
Labels: this.Spec.Labels,
|
||||
Data: plugins.smartstring.base64.encode(contentArg),
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Removes this secret from the Docker daemon
|
||||
*/
|
||||
public async remove() {
|
||||
await this.dockerHost.request('DELETE', `/secrets/${this.ID}`);
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the version label of this secret
|
||||
*/
|
||||
public async getVersion() {
|
||||
return this.Spec.Labels.version;
|
||||
}
|
||||
}
|
||||
330
ts/classes.service.ts
Normal file
330
ts/classes.service.ts
Normal file
@@ -0,0 +1,330 @@
|
||||
import * as plugins from './plugins.js';
|
||||
import * as interfaces from './interfaces/index.js';
|
||||
|
||||
import { DockerHost } from './classes.host.js';
|
||||
import { DockerResource } from './classes.base.js';
|
||||
import { DockerImage } from './classes.image.js';
|
||||
import { DockerSecret } from './classes.secret.js';
|
||||
import { logger } from './logger.js';
|
||||
|
||||
export class DockerService extends DockerResource {
|
||||
// STATIC (Internal - prefixed with _ to indicate internal use)
|
||||
|
||||
/**
|
||||
* Internal: Get all services
|
||||
* Public API: Use dockerHost.listServices() instead
|
||||
*/
|
||||
public static async _list(dockerHost: DockerHost) {
|
||||
const services: DockerService[] = [];
|
||||
const response = await dockerHost.request('GET', '/services');
|
||||
for (const serviceObject of response.body) {
|
||||
const dockerService = new DockerService(dockerHost);
|
||||
Object.assign(dockerService, serviceObject);
|
||||
services.push(dockerService);
|
||||
}
|
||||
return services;
|
||||
}
|
||||
|
||||
/**
|
||||
* Internal: Get service by name
|
||||
* Public API: Use dockerHost.getServiceByName(name) instead
|
||||
*/
|
||||
public static async _fromName(
|
||||
dockerHost: DockerHost,
|
||||
networkName: string,
|
||||
): Promise<DockerService> {
|
||||
const allServices = await DockerService._list(dockerHost);
|
||||
const wantedService = allServices.find((service) => {
|
||||
return service.Spec.Name === networkName;
|
||||
});
|
||||
return wantedService;
|
||||
}
|
||||
|
||||
/**
|
||||
* Internal: Create a service
|
||||
* Public API: Use dockerHost.createService(descriptor) instead
|
||||
*/
|
||||
public static async _create(
|
||||
dockerHost: DockerHost,
|
||||
serviceCreationDescriptor: interfaces.IServiceCreationDescriptor,
|
||||
): Promise<DockerService> {
|
||||
logger.log(
|
||||
'info',
|
||||
`now creating service ${serviceCreationDescriptor.name}`,
|
||||
);
|
||||
|
||||
// Resolve image (support both string and DockerImage instance)
|
||||
let imageInstance: DockerImage;
|
||||
if (typeof serviceCreationDescriptor.image === 'string') {
|
||||
imageInstance = await DockerImage._fromName(dockerHost, serviceCreationDescriptor.image);
|
||||
if (!imageInstance) {
|
||||
throw new Error(`Image not found: ${serviceCreationDescriptor.image}`);
|
||||
}
|
||||
} else {
|
||||
imageInstance = serviceCreationDescriptor.image;
|
||||
}
|
||||
|
||||
const serviceVersion = await imageInstance.getVersion();
|
||||
|
||||
const labels: interfaces.TLabels = {
|
||||
...serviceCreationDescriptor.labels,
|
||||
version: serviceVersion,
|
||||
};
|
||||
|
||||
const mounts: Array<{
|
||||
/**
|
||||
* the target inside the container
|
||||
*/
|
||||
Target: string;
|
||||
/**
|
||||
* The Source from which to mount the data (Volume or host path)
|
||||
*/
|
||||
Source: string;
|
||||
Type: 'bind' | 'volume' | 'tmpfs' | 'npipe';
|
||||
ReadOnly: boolean;
|
||||
Consistency: 'default' | 'consistent' | 'cached' | 'delegated';
|
||||
}> = [];
|
||||
if (serviceCreationDescriptor.accessHostDockerSock) {
|
||||
mounts.push({
|
||||
Target: '/var/run/docker.sock',
|
||||
Source: '/var/run/docker.sock',
|
||||
Consistency: 'default',
|
||||
ReadOnly: false,
|
||||
Type: 'bind',
|
||||
});
|
||||
}
|
||||
|
||||
if (
|
||||
serviceCreationDescriptor.resources &&
|
||||
serviceCreationDescriptor.resources.volumeMounts
|
||||
) {
|
||||
for (const volumeMount of serviceCreationDescriptor.resources
|
||||
.volumeMounts) {
|
||||
mounts.push({
|
||||
Target: volumeMount.containerFsPath,
|
||||
Source: volumeMount.hostFsPath,
|
||||
Consistency: 'default',
|
||||
ReadOnly: false,
|
||||
Type: 'bind',
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Resolve networks (support both string[] and DockerNetwork[])
|
||||
const networkArray: Array<{
|
||||
Target: string;
|
||||
Aliases: string[];
|
||||
}> = [];
|
||||
|
||||
for (const network of serviceCreationDescriptor.networks) {
|
||||
// Skip null networks (can happen if network creation fails)
|
||||
if (!network) {
|
||||
logger.log('warn', 'Skipping null network in service creation');
|
||||
continue;
|
||||
}
|
||||
|
||||
// Resolve network name
|
||||
const networkName = typeof network === 'string' ? network : network.Name;
|
||||
networkArray.push({
|
||||
Target: networkName,
|
||||
Aliases: [serviceCreationDescriptor.networkAlias],
|
||||
});
|
||||
}
|
||||
|
||||
const ports = [];
|
||||
for (const port of serviceCreationDescriptor.ports) {
|
||||
const portArray = port.split(':');
|
||||
const hostPort = portArray[0];
|
||||
const containerPort = portArray[1];
|
||||
ports.push({
|
||||
Protocol: 'tcp',
|
||||
PublishedPort: parseInt(hostPort, 10),
|
||||
TargetPort: parseInt(containerPort, 10),
|
||||
});
|
||||
}
|
||||
|
||||
// Resolve secrets (support both string[] and DockerSecret[])
|
||||
const secretArray: any[] = [];
|
||||
for (const secret of serviceCreationDescriptor.secrets) {
|
||||
// Resolve secret instance
|
||||
let secretInstance: DockerSecret;
|
||||
if (typeof secret === 'string') {
|
||||
secretInstance = await DockerSecret._fromName(dockerHost, secret);
|
||||
if (!secretInstance) {
|
||||
throw new Error(`Secret not found: ${secret}`);
|
||||
}
|
||||
} else {
|
||||
secretInstance = secret;
|
||||
}
|
||||
|
||||
secretArray.push({
|
||||
File: {
|
||||
Name: 'secret.json', // TODO: make sure that works with multiple secrets
|
||||
UID: '33',
|
||||
GID: '33',
|
||||
Mode: 384,
|
||||
},
|
||||
SecretID: secretInstance.ID,
|
||||
SecretName: secretInstance.Spec.Name,
|
||||
});
|
||||
}
|
||||
|
||||
// lets configure limits
|
||||
|
||||
const memoryLimitMB =
|
||||
serviceCreationDescriptor.resources &&
|
||||
serviceCreationDescriptor.resources.memorySizeMB
|
||||
? serviceCreationDescriptor.resources.memorySizeMB
|
||||
: 1000;
|
||||
|
||||
const limits = {
|
||||
MemoryBytes: memoryLimitMB * 1000000,
|
||||
};
|
||||
|
||||
if (serviceCreationDescriptor.resources) {
|
||||
limits.MemoryBytes =
|
||||
serviceCreationDescriptor.resources.memorySizeMB * 1000000;
|
||||
}
|
||||
|
||||
const response = await dockerHost.request('POST', '/services/create', {
|
||||
Name: serviceCreationDescriptor.name,
|
||||
TaskTemplate: {
|
||||
ContainerSpec: {
|
||||
Image: imageInstance.RepoTags[0],
|
||||
Labels: labels,
|
||||
Secrets: secretArray,
|
||||
Mounts: mounts,
|
||||
/* DNSConfig: {
|
||||
Nameservers: ['1.1.1.1']
|
||||
} */
|
||||
},
|
||||
UpdateConfig: {
|
||||
Parallelism: 0,
|
||||
Delay: 0,
|
||||
FailureAction: 'pause',
|
||||
Monitor: 15000000000,
|
||||
MaxFailureRatio: 0.15,
|
||||
},
|
||||
ForceUpdate: 1,
|
||||
Resources: {
|
||||
Limits: limits,
|
||||
},
|
||||
LogDriver: {
|
||||
Name: 'json-file',
|
||||
Options: {
|
||||
'max-file': '3',
|
||||
'max-size': '10M',
|
||||
},
|
||||
},
|
||||
},
|
||||
Labels: labels,
|
||||
Networks: networkArray,
|
||||
EndpointSpec: {
|
||||
Ports: ports,
|
||||
},
|
||||
});
|
||||
|
||||
const createdService = await DockerService._fromName(
|
||||
dockerHost,
|
||||
serviceCreationDescriptor.name,
|
||||
);
|
||||
return createdService;
|
||||
}
|
||||
|
||||
  // INSTANCE PROPERTIES
  // Note: dockerHost (not dockerHostRef) for consistency with base class

  // Fields mirror the Docker engine's service-inspect payload verbatim,
  // hence the PascalCase names.
  public ID: string;
  public Version: { Index: number };
  public CreatedAt: string;
  public UpdatedAt: string;
  // Subset of the engine's service Spec that this class actually reads
  // (e.g. Spec.Name, Spec.Labels.version, Spec.TaskTemplate.ContainerSpec.Image).
  public Spec: {
    Name: string;
    Labels: interfaces.TLabels;
    TaskTemplate: {
      ContainerSpec: {
        Image: string;
        Isolation: string;
        Secrets: Array<{
          File: {
            Name: string;
            UID: string;
            GID: string;
            Mode: number;
          };
          SecretID: string;
          SecretName: string;
        }>;
      };
      ForceUpdate: 0;
    };
    Mode: {};
    Networks: [any[]];
  };
  public Endpoint: { Spec: {}; VirtualIPs: [any[]] };
|
||||
|
||||
  /**
   * Creates a service handle bound to the given docker host.
   * State (ID, Spec, ...) is populated afterwards, e.g. via refresh().
   */
  constructor(dockerHostArg: DockerHost) {
    super(dockerHostArg);
  }
|
||||
|
||||
// INSTANCE METHODS
|
||||
|
||||
/**
|
||||
* Refreshes this service's state from the Docker daemon
|
||||
*/
|
||||
public async refresh(): Promise<void> {
|
||||
const updated = await DockerService._fromName(this.dockerHost, this.Spec.Name);
|
||||
if (updated) {
|
||||
Object.assign(this, updated);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Removes this service from the Docker daemon
|
||||
*/
|
||||
public async remove() {
|
||||
await this.dockerHost.request('DELETE', `/services/${this.ID}`);
|
||||
}
|
||||
|
||||
/**
|
||||
* Re-reads service data from Docker engine
|
||||
* @deprecated Use refresh() instead
|
||||
*/
|
||||
public async reReadFromDockerEngine() {
|
||||
const dockerData = await this.dockerHost.request(
|
||||
'GET',
|
||||
`/services/${this.ID}`,
|
||||
);
|
||||
// TODO: Better assign: Object.assign(this, dockerData);
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks if this service needs an update based on image version
|
||||
*/
|
||||
public async needsUpdate(): Promise<boolean> {
|
||||
// TODO: implement digest based update recognition
|
||||
|
||||
await this.reReadFromDockerEngine();
|
||||
const dockerImage = await DockerImage._createFromRegistry(
|
||||
this.dockerHost,
|
||||
{
|
||||
creationObject: {
|
||||
imageUrl: this.Spec.TaskTemplate.ContainerSpec.Image,
|
||||
},
|
||||
},
|
||||
);
|
||||
|
||||
const imageVersion = new plugins.smartversion.SmartVersion(
|
||||
dockerImage.Labels.version,
|
||||
);
|
||||
const serviceVersion = new plugins.smartversion.SmartVersion(
|
||||
this.Spec.Labels.version,
|
||||
);
|
||||
if (imageVersion.greaterThan(serviceVersion)) {
|
||||
console.log(`service ${this.Spec.Name} needs to be updated`);
|
||||
return true;
|
||||
} else {
|
||||
console.log(`service ${this.Spec.Name} is up to date.`);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,101 +0,0 @@
|
||||
import * as plugins from './docker.plugins';
|
||||
import * as interfaces from './interfaces';
|
||||
|
||||
import { DockerHost } from './docker.classes.host';
|
||||
|
||||
export class DockerContainer {
|
||||
// STATIC
|
||||
|
||||
/**
|
||||
* get all containers
|
||||
*/
|
||||
public static async getContainers(dockerHostArg: DockerHost): Promise<DockerContainer[]> {
|
||||
const result: DockerContainer[] = [];
|
||||
const response = await dockerHostArg.request('GET', '/containers/json');
|
||||
|
||||
// TODO: Think about getting the config by inpsecting the container
|
||||
for (const containerResult of response.body) {
|
||||
result.push(new DockerContainer(dockerHostArg, containerResult));
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* gets an container by Id
|
||||
* @param containerId
|
||||
*/
|
||||
public static async getContainerById(containerId: string) {
|
||||
// TODO: implement get container by id
|
||||
}
|
||||
|
||||
/**
|
||||
* create a container
|
||||
*/
|
||||
public static async create(
|
||||
dockerHost: DockerHost,
|
||||
containerCreationDescriptor: interfaces.IContainerCreationDescriptor
|
||||
) {
|
||||
// check for unique hostname
|
||||
const existingContainers = await DockerContainer.getContainers(dockerHost);
|
||||
const sameHostNameContainer = existingContainers.find(container => {
|
||||
// TODO implement HostName Detection;
|
||||
return false;
|
||||
});
|
||||
const response = await dockerHost.request('POST', '/containers/create', {
|
||||
Hostname: containerCreationDescriptor.Hostname,
|
||||
Domainname: containerCreationDescriptor.Domainname,
|
||||
User: 'root'
|
||||
});
|
||||
if (response.statusCode < 300) {
|
||||
plugins.smartlog.defaultLogger.log('info', 'Container created successfully');
|
||||
} else {
|
||||
plugins.smartlog.defaultLogger.log(
|
||||
'error',
|
||||
'There has been a problem when creating the container'
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// INSTANCE
|
||||
// references
|
||||
public dockerHost: DockerHost;
|
||||
|
||||
// properties
|
||||
public Id: string;
|
||||
public Names: string[];
|
||||
public Image: string;
|
||||
public ImageID: string;
|
||||
public Command: string;
|
||||
public Created: number;
|
||||
public Ports: interfaces.TPorts;
|
||||
public Labels: interfaces.TLabels;
|
||||
public State: string;
|
||||
public Status: string;
|
||||
public HostConfig: any;
|
||||
public NetworkSettings: {
|
||||
Networks: {
|
||||
[key: string]: {
|
||||
IPAMConfig: any;
|
||||
Links: any;
|
||||
Aliases: any;
|
||||
NetworkID: string;
|
||||
EndpointID: string;
|
||||
Gateway: string;
|
||||
IPAddress: string;
|
||||
IPPrefixLen: number;
|
||||
IPv6Gateway: string;
|
||||
GlobalIPv6Address: string;
|
||||
GlobalIPv6PrefixLen: number;
|
||||
MacAddress: string;
|
||||
DriverOpts: any;
|
||||
};
|
||||
};
|
||||
};
|
||||
public Mounts: any;
|
||||
constructor(dockerHostArg: DockerHost, dockerContainerObjectArg: any) {
|
||||
this.dockerHost = dockerHostArg;
|
||||
Object.keys(dockerContainerObjectArg).forEach(keyArg => {
|
||||
this[keyArg] = dockerContainerObjectArg[keyArg];
|
||||
});
|
||||
}
|
||||
}
|
||||
@@ -1,188 +0,0 @@
|
||||
import * as plugins from './docker.plugins';
|
||||
import { DockerContainer } from './docker.classes.container';
|
||||
import { DockerNetwork } from './docker.classes.network';
|
||||
import { DockerService } from './docker.classes.service';
|
||||
|
||||
/**
 * Credentials for authenticating against a Docker registry.
 */
export interface IAuthData {
  // registry server address, e.g. 'registry.gitlab.com'
  serveraddress: string;
  username: string;
  password: string;
}
|
||||
|
||||
export class DockerHost {
  /**
   * the path where the docker sock can be found
   */
  public socketPath: string;

  // base64-encoded auth payload; sent as X-Registry-Auth on every request,
  // empty string until auth() succeeds
  private registryToken: string = '';

  /**
   * the constructor to instantiate a new docker sock instance
   * @param pathArg optional endpoint override; defaults to http://docker:2375/
   *   when CI is set in the environment, otherwise the local unix socket
   */
  constructor(pathArg?: string) {
    let pathToUse: string;
    if (pathArg) {
      pathToUse = pathArg;
    } else if (process.env.CI) {
      pathToUse = 'http://docker:2375/';
    } else {
      pathToUse = 'http://unix:/var/run/docker.sock:';
    }
    this.socketPath = pathToUse;
  }

  /**
   * authenticate against a registry
   * @param authData credentials including the registry server address
   * @throws Error when the engine does not report 'Login Succeeded'
   */
  public async auth(authData: IAuthData) {
    const response = await this.request('POST', '/auth', authData);
    if (response.body.Status !== 'Login Succeeded') {
      console.log(`Login failed with ${response.body.Status}`);
      throw new Error(response.body.Status);
    }
    console.log(response.body.Status);
    // cache the credentials for subsequent requests (X-Registry-Auth header)
    this.registryToken = plugins.smartstring.base64.encode(
      plugins.smartjson.Smartjson.stringify(authData, {})
    );
  }

  /**
   * gets the token from the .docker/config.json file for GitLab registry
   * and authenticates this host with it
   */
  public async getGitlabComTokenFromDockerConfig() {
    const dockerConfigPath = plugins.smartpath.get.home('~/.docker/config.json');
    const configObject = plugins.smartfile.fs.toObjectSync(dockerConfigPath);
    // docker config stores 'user:pass' base64-encoded under auths[registry].auth
    const gitlabAuthBase64 = configObject.auths['registry.gitlab.com'].auth;
    const gitlabAuth: string = plugins.smartstring.base64.decode(gitlabAuthBase64);
    const gitlabAuthArray = gitlabAuth.split(':');
    await this.auth({
      username: gitlabAuthArray[0],
      password: gitlabAuthArray[1],
      serveraddress: 'registry.gitlab.com'
    });
  }

  /**
   * gets all networks
   */
  public async getNetworks() {
    return await DockerNetwork.getNetworks(this);
  }

  /**
   * gets all containers
   */
  public async getContainers() {
    const containerArray = await DockerContainer.getContainers(this);
    return containerArray;
  }

  /**
   * gets all services
   */
  public async getServices() {
    const serviceArray = await DockerService.getServices(this);
    return serviceArray;
  }

  /**
   * subscribes to the docker event stream
   * @returns an Observable that emits one parsed JSON object per event;
   *   unparsable chunks are logged and skipped
   */
  public async getEventObservable(): Promise<plugins.rxjs.Observable<any>> {
    const response = await this.requestStreaming('GET', '/events');
    return plugins.rxjs.Observable.create(observer => {
      response.on('data', data => {
        const eventString = data.toString();
        try {
          const eventObject = JSON.parse(eventString);
          observer.next(eventObject);
        } catch (e) {
          console.log(e);
        }
      });
      // teardown: end the underlying stream when the subscriber unsubscribes
      return () => {
        response.emit('end');
      };
    });
  }

  /**
   * activates docker swarm
   * @param addvertisementIpArg optional IP to advertise; falls back to the
   *   default gateway's IPv4 address
   */
  public async activateSwarm(addvertisementIpArg?: string) {
    // determine advertisement address
    let addvertisementIp: string;
    if (addvertisementIpArg) {
      addvertisementIp = addvertisementIpArg;
    } else {
      const smartnetworkInstance = new plugins.smartnetwork.SmartNetwork();
      const defaultGateway = await smartnetworkInstance.getDefaultGateway();
      if (defaultGateway) {
        addvertisementIp = defaultGateway.ipv4.address;
      }
      // NOTE(review): addvertisementIp stays undefined when no default gateway
      // is found, so the swarm init below would advertise undefined — verify.
    }

    const response = await this.request('POST', '/swarm/init', {
      ListenAddr: '0.0.0.0:2377',
      AdvertiseAddr: addvertisementIp,
      DataPathPort: 4789,
      DefaultAddrPool: ['10.10.0.0/8', '20.20.0.0/8'],
      SubnetSize: 24,
      ForceNewCluster: false
    });
    if (response.statusCode === 200) {
      plugins.smartlog.defaultLogger.log('info', 'created Swam succesfully');
    } else {
      plugins.smartlog.defaultLogger.log('error', 'could not initiate swarm');
    }
  }

  /**
   * fire a request against the docker remote API
   * @param methodArg HTTP method
   * @param routeArg route appended to the socket path
   * @param dataArg JSON request body
   * @returns the full response; non-200 responses are logged but still returned
   */
  public async request(methodArg: string, routeArg: string, dataArg = {}) {
    const requestUrl = `${this.socketPath}${routeArg}`;
    const response = await plugins.smartrequest.request(requestUrl, {
      method: methodArg,
      headers: {
        'Content-Type': 'application/json',
        'X-Registry-Auth': this.registryToken,
        Host: 'docker.sock'
      },
      requestBody: dataArg,
      keepAlive: false
    });
    if (response.statusCode !== 200) {
      console.log(response.body);
    }
    return response;
  }

  /**
   * fire a streaming request (response kept open, e.g. for /events)
   * @param dataArg accepted for signature symmetry with request();
   *   note the body actually sent is null
   */
  public async requestStreaming(methodArg: string, routeArg: string, dataArg = {}) {
    const requestUrl = `${this.socketPath}${routeArg}`;
    const response = await plugins.smartrequest.request(
      requestUrl,
      {
        method: methodArg,
        headers: {
          'Content-Type': 'application/json',
          'X-Registry-Auth': this.registryToken,
          Host: 'docker.sock'
        },
        requestBody: null,
        keepAlive: false
      },
      true
    );
    console.log(response.statusCode);
    console.log(response.body);
    return response;
  }
}
|
||||
@@ -1,146 +0,0 @@
|
||||
import * as plugins from './docker.plugins';
|
||||
import * as interfaces from './interfaces';
|
||||
import { DockerHost } from './docker.classes.host';
|
||||
|
||||
export class DockerImage {
|
||||
// STATIC
|
||||
public static async getImages(dockerHost: DockerHost) {
|
||||
const images: DockerImage[] = [];
|
||||
const response = await dockerHost.request('GET', '/images/json');
|
||||
for (const imageObject of response.body) {
|
||||
images.push(new DockerImage(dockerHost, imageObject));
|
||||
}
|
||||
return images;
|
||||
}
|
||||
|
||||
public static async findImageByName(dockerHost: DockerHost, imageNameArg: string) {
|
||||
const images = await this.getImages(dockerHost);
|
||||
const result = images.find(image => {
|
||||
if (image.RepoTags) {
|
||||
return image.RepoTags.includes(imageNameArg);
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
});
|
||||
return result;
|
||||
}
|
||||
|
||||
public static async createFromRegistry(
|
||||
dockerHostArg: DockerHost,
|
||||
creationObject: interfaces.IImageCreationDescriptor
|
||||
): Promise<DockerImage> {
|
||||
// lets create a sanatized imageUrlObject
|
||||
const imageUrlObject: {
|
||||
imageUrl: string;
|
||||
imageTag: string;
|
||||
imageOriginTag: string;
|
||||
} = {
|
||||
imageUrl: creationObject.imageUrl,
|
||||
imageTag: creationObject.imageTag,
|
||||
imageOriginTag: null
|
||||
};
|
||||
if (imageUrlObject.imageUrl.includes(':')) {
|
||||
const imageUrl = imageUrlObject.imageUrl.split(':')[0];
|
||||
const imageTag = imageUrlObject.imageUrl.split(':')[1];
|
||||
if (imageUrlObject.imageTag) {
|
||||
throw new Error(
|
||||
`imageUrl ${imageUrlObject.imageUrl} can't be tagged with ${imageUrlObject.imageTag} because it is already tagged with ${imageTag}`
|
||||
);
|
||||
} else {
|
||||
imageUrlObject.imageUrl = imageUrl;
|
||||
imageUrlObject.imageTag = imageTag;
|
||||
}
|
||||
} else if (!imageUrlObject.imageTag) {
|
||||
imageUrlObject.imageTag = 'latest';
|
||||
}
|
||||
imageUrlObject.imageOriginTag = `${imageUrlObject.imageUrl}:${imageUrlObject.imageTag}`;
|
||||
|
||||
// lets actually create the image
|
||||
const response = await dockerHostArg.request(
|
||||
'POST',
|
||||
`/images/create?fromImage=${encodeURIComponent(
|
||||
imageUrlObject.imageUrl
|
||||
)}&tag=${encodeURIComponent(imageUrlObject.imageTag)}`
|
||||
);
|
||||
if (response.statusCode < 300) {
|
||||
plugins.smartlog.defaultLogger.log(
|
||||
'info',
|
||||
`Successfully pulled image ${imageUrlObject.imageUrl} from the registry`
|
||||
);
|
||||
const image = await DockerImage.findImageByName(dockerHostArg, imageUrlObject.imageOriginTag);
|
||||
return image;
|
||||
} else {
|
||||
plugins.smartlog.defaultLogger.log('error', `Failed at the attempt of creating a new image`);
|
||||
}
|
||||
}
|
||||
|
||||
public static async tagImageByIdOrName(
|
||||
dockerHost: DockerHost,
|
||||
idOrNameArg: string,
|
||||
newTagArg: string
|
||||
) {
|
||||
const response = await dockerHost.request(
|
||||
'POST',
|
||||
`/images/${encodeURIComponent(idOrNameArg)}/${encodeURIComponent(newTagArg)}`
|
||||
);
|
||||
}
|
||||
|
||||
public static async buildImage(dockerHostArg: DockerHost, dockerImageTag) {
|
||||
// TODO: implement building an image
|
||||
}
|
||||
|
||||
// INSTANCE
|
||||
// references
|
||||
public dockerHost: DockerHost;
|
||||
|
||||
// properties
|
||||
/**
|
||||
* the tags for an image
|
||||
*/
|
||||
public Containers: number;
|
||||
public Created: number;
|
||||
public Id: string;
|
||||
public Labels: interfaces.TLabels;
|
||||
public ParentId: string;
|
||||
public RepoDigests: string[];
|
||||
public RepoTags: string[];
|
||||
public SharedSize: number;
|
||||
public Size: number;
|
||||
public VirtualSize: number;
|
||||
|
||||
constructor(dockerHostArg, dockerImageObjectArg: any) {
|
||||
this.dockerHost = dockerHostArg;
|
||||
Object.keys(dockerImageObjectArg).forEach(keyArg => {
|
||||
this[keyArg] = dockerImageObjectArg[keyArg];
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* tag an image
|
||||
* @param newTag
|
||||
*/
|
||||
public async tagImage(newTag) {
|
||||
throw new Error('.tagImage is not yet implemented');
|
||||
}
|
||||
|
||||
/**
|
||||
* pulls the latest version from the registry
|
||||
*/
|
||||
public async pullLatestImageFromRegistry(): Promise<boolean> {
|
||||
const updatedImage = await DockerImage.createFromRegistry(this.dockerHost, {
|
||||
imageUrl: this.RepoTags[0]
|
||||
});
|
||||
Object.assign(this, updatedImage);
|
||||
// TODO: Compare image digists before and after
|
||||
return true;
|
||||
}
|
||||
|
||||
// get stuff
|
||||
public async getVersion() {
|
||||
if (this.Labels && this.Labels.version) {
|
||||
return this.Labels.version;
|
||||
} else {
|
||||
return '0.0.0';
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,119 +0,0 @@
|
||||
import * as plugins from './docker.plugins';
|
||||
import * as interfaces from './interfaces';
|
||||
|
||||
import { DockerHost } from './docker.classes.host';
|
||||
import { DockerService } from './docker.classes.service';
|
||||
|
||||
export class DockerNetwork {
|
||||
public static async getNetworks(dockerHost: DockerHost): Promise<DockerNetwork[]> {
|
||||
const dockerNetworks: DockerNetwork[] = [];
|
||||
const response = await dockerHost.request('GET', '/networks');
|
||||
for (const networkObject of response.body) {
|
||||
const dockerNetwork = new DockerNetwork(dockerHost);
|
||||
Object.assign(dockerNetwork, networkObject);
|
||||
dockerNetworks.push(dockerNetwork);
|
||||
}
|
||||
return dockerNetworks;
|
||||
}
|
||||
|
||||
public static async getNetworkByName(dockerHost: DockerHost, dockerNetworkNameArg: string) {
|
||||
const networks = await DockerNetwork.getNetworks(dockerHost);
|
||||
return networks.find(dockerNetwork => dockerNetwork.Name === dockerNetworkNameArg);
|
||||
}
|
||||
|
||||
public static async createNetwork(
|
||||
dockerHost: DockerHost,
|
||||
networkCreationDescriptor: interfaces.INetworkCreationDescriptor
|
||||
): Promise<DockerNetwork> {
|
||||
const response = await dockerHost.request('POST', '/networks/create', {
|
||||
Name: networkCreationDescriptor.Name,
|
||||
CheckDuplicate: true,
|
||||
Driver: 'overlay',
|
||||
EnableIPv6: false,
|
||||
/* IPAM: {
|
||||
Driver: 'default',
|
||||
Config: [
|
||||
{
|
||||
Subnet: `172.20.${networkCreationDescriptor.NetworkNumber}.0/16`,
|
||||
IPRange: `172.20.${networkCreationDescriptor.NetworkNumber}.0/24`,
|
||||
Gateway: `172.20.${networkCreationDescriptor.NetworkNumber}.11`
|
||||
}
|
||||
]
|
||||
}, */
|
||||
Internal: false,
|
||||
Attachable: true,
|
||||
Ingress: false
|
||||
});
|
||||
if (response.statusCode < 300) {
|
||||
plugins.smartlog.defaultLogger.log('info', 'Created network successfully');
|
||||
return await DockerNetwork.getNetworkByName(dockerHost, networkCreationDescriptor.Name);
|
||||
} else {
|
||||
plugins.smartlog.defaultLogger.log(
|
||||
'error',
|
||||
'There has been an error creating the wanted network'
|
||||
);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
// INSTANCE
|
||||
// references
|
||||
public dockerHost: DockerHost;
|
||||
|
||||
// properties
|
||||
public Name: string;
|
||||
public Id: string;
|
||||
public Created: string;
|
||||
public Scope: string;
|
||||
public Driver: string;
|
||||
public EnableIPv6: boolean;
|
||||
public Internal: boolean;
|
||||
public Attachable: boolean;
|
||||
public Ingress: false;
|
||||
public IPAM: {
|
||||
Driver: 'default' | 'bridge' | 'overlay';
|
||||
Config: [
|
||||
{
|
||||
Subnet: string;
|
||||
IPRange: string;
|
||||
Gateway: string;
|
||||
}
|
||||
];
|
||||
};
|
||||
|
||||
constructor(dockerHostArg: DockerHost) {
|
||||
this.dockerHost = dockerHostArg;
|
||||
}
|
||||
|
||||
/**
|
||||
* removes the network
|
||||
*/
|
||||
public async remove() {
|
||||
const response = await this.dockerHost.request('DELETE', `/networks/${this.Id}`);
|
||||
}
|
||||
|
||||
public async getContainersOnNetwork(): Promise<Array<{
|
||||
Name: string;
|
||||
EndpointID: string;
|
||||
MacAddress: string;
|
||||
IPv4Address: string;
|
||||
IPv6Address: string;
|
||||
}>> {
|
||||
const returnArray = [];
|
||||
const response = await this.dockerHost.request('GET', `/networks/${this.Id}`);
|
||||
for (const key of Object.keys(response.body.Containers)) {
|
||||
returnArray.push(response.body.Containers[key]);
|
||||
}
|
||||
|
||||
return returnArray;
|
||||
|
||||
}
|
||||
|
||||
public async getContainersOnNetworkForService(serviceArg: DockerService) {
|
||||
const containersOnNetwork = await this.getContainersOnNetwork();
|
||||
const containersOfService = containersOnNetwork.filter(container => {
|
||||
return container.Name.startsWith(serviceArg.Spec.Name);
|
||||
});
|
||||
return containersOfService;
|
||||
}
|
||||
}
|
||||
@@ -1,92 +0,0 @@
|
||||
import * as plugins from './docker.plugins';
|
||||
import { DockerHost } from './docker.classes.host';
|
||||
|
||||
// interfaces
|
||||
import * as interfaces from './interfaces';
|
||||
|
||||
export class DockerSecret {
|
||||
// STATIC
|
||||
public static async getSecrets(dockerHostArg: DockerHost) {
|
||||
const response = await dockerHostArg.request('GET', '/secrets');
|
||||
const secrets: DockerSecret[] = [];
|
||||
for (const secret of response.body) {
|
||||
const dockerSecretInstance = new DockerSecret(dockerHostArg);
|
||||
Object.assign(dockerSecretInstance, secret);
|
||||
secrets.push(dockerSecretInstance);
|
||||
}
|
||||
return secrets;
|
||||
}
|
||||
|
||||
public static async getSecretByID(dockerHostArg: DockerHost, idArg: string) {
|
||||
const secrets = await this.getSecrets(dockerHostArg);
|
||||
return secrets.find(secret => secret.ID === idArg);
|
||||
}
|
||||
|
||||
public static async getSecretByName(dockerHostArg: DockerHost, nameArg: string) {
|
||||
const secrets = await this.getSecrets(dockerHostArg);
|
||||
return secrets.find(secret => secret.Spec.Name === nameArg);
|
||||
}
|
||||
|
||||
public static async createSecret(
|
||||
dockerHostArg: DockerHost,
|
||||
secretDescriptor: interfaces.ISecretCreationDescriptor
|
||||
) {
|
||||
const labels: interfaces.TLabels = {
|
||||
...secretDescriptor.labels,
|
||||
version: secretDescriptor.version
|
||||
};
|
||||
const response = await dockerHostArg.request('POST', '/secrets/create', {
|
||||
Name: secretDescriptor.name,
|
||||
Labels: labels,
|
||||
Data: plugins.smartstring.base64.encode(secretDescriptor.contentArg)
|
||||
});
|
||||
|
||||
const newSecretInstance = new DockerSecret(dockerHostArg);
|
||||
Object.assign(newSecretInstance, response.body);
|
||||
Object.assign(
|
||||
newSecretInstance,
|
||||
await DockerSecret.getSecretByID(dockerHostArg, newSecretInstance.ID)
|
||||
);
|
||||
return newSecretInstance;
|
||||
}
|
||||
|
||||
// INSTANCE
|
||||
public ID: string;
|
||||
public Spec: {
|
||||
Name: string;
|
||||
Labels: interfaces.TLabels;
|
||||
};
|
||||
public Version: {
|
||||
Index: string;
|
||||
};
|
||||
|
||||
public dockerHost: DockerHost;
|
||||
constructor(dockerHostArg: DockerHost) {
|
||||
this.dockerHost = dockerHostArg;
|
||||
}
|
||||
|
||||
/**
|
||||
* updates a secret
|
||||
*/
|
||||
public async update(contentArg: string) {
|
||||
const route = `/secrets/${this.ID}/update?=version=${this.Version.Index}`;
|
||||
const response = await this.dockerHost.request(
|
||||
'POST',
|
||||
`/secrets/${this.ID}/update?version=${this.Version.Index}`,
|
||||
{
|
||||
Name: this.Spec.Name,
|
||||
Labels: this.Spec.Labels,
|
||||
Data: plugins.smartstring.base64.encode(contentArg)
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
public async remove() {
|
||||
await this.dockerHost.request('DELETE', `/secrets/${this.ID}`);
|
||||
}
|
||||
|
||||
// get things
|
||||
public async getVersion() {
|
||||
return this.Spec.Labels.version;
|
||||
}
|
||||
}
|
||||
@@ -1,237 +0,0 @@
|
||||
import * as plugins from './docker.plugins';
|
||||
import * as interfaces from './interfaces';
|
||||
|
||||
import { DockerHost } from './docker.classes.host';
|
||||
import { DockerImage } from './docker.classes.image';
|
||||
import { DockerSecret } from './docker.classes.secret';
|
||||
|
||||
export class DockerService {
|
||||
// STATIC
|
||||
public static async getServices(dockerHost: DockerHost) {
|
||||
const services: DockerService[] = [];
|
||||
const response = await dockerHost.request('GET', '/services');
|
||||
for (const serviceObject of response.body) {
|
||||
const dockerService = new DockerService(dockerHost);
|
||||
Object.assign(dockerService, serviceObject);
|
||||
services.push(dockerService);
|
||||
}
|
||||
return services;
|
||||
}
|
||||
|
||||
public static async getServiceByName(
|
||||
dockerHost: DockerHost,
|
||||
networkName: string
|
||||
): Promise<DockerService> {
|
||||
const allServices = await DockerService.getServices(dockerHost);
|
||||
const wantedService = allServices.find(service => {
|
||||
return service.Spec.Name === networkName;
|
||||
});
|
||||
return wantedService;
|
||||
}
|
||||
|
||||
/**
|
||||
* creates a service
|
||||
*/
|
||||
public static async createService(
|
||||
dockerHost: DockerHost,
|
||||
serviceCreationDescriptor: interfaces.IServiceCreationDescriptor
|
||||
): Promise<DockerService> {
|
||||
// lets get the image
|
||||
plugins.smartlog.defaultLogger.log(
|
||||
'info',
|
||||
`now creating service ${serviceCreationDescriptor.name}`
|
||||
);
|
||||
|
||||
// await serviceCreationDescriptor.image.pullLatestImageFromRegistry();
|
||||
const serviceVersion = await serviceCreationDescriptor.image.getVersion();
|
||||
|
||||
const labels: interfaces.TLabels = {
|
||||
...serviceCreationDescriptor.labels,
|
||||
version: serviceVersion
|
||||
};
|
||||
|
||||
const mounts: Array<{
|
||||
/**
|
||||
* the target inside the container
|
||||
*/
|
||||
Target: string;
|
||||
/**
|
||||
* The Source from which to mount the data (Volume or host path)
|
||||
*/
|
||||
Source: string;
|
||||
Type: 'bind' | 'volume' | 'tmpfs' | 'npipe';
|
||||
ReadOnly: boolean;
|
||||
Consistency: 'default' | 'consistent' | 'cached' | 'delegated';
|
||||
}> = [];
|
||||
if (serviceCreationDescriptor.accessHostDockerSock) {
|
||||
mounts.push({
|
||||
Target: '/var/run/docker.sock',
|
||||
Source: '/var/run/docker.sock',
|
||||
Consistency: 'default',
|
||||
ReadOnly: false,
|
||||
Type: 'bind'
|
||||
});
|
||||
}
|
||||
|
||||
const networkArray: Array<{
|
||||
Target: string;
|
||||
Aliases: string[];
|
||||
}> = [];
|
||||
|
||||
for (const network of serviceCreationDescriptor.networks) {
|
||||
networkArray.push({
|
||||
Target: network.Name,
|
||||
Aliases: [serviceCreationDescriptor.networkAlias]
|
||||
});
|
||||
}
|
||||
|
||||
const ports = [];
|
||||
for (const port of serviceCreationDescriptor.ports) {
|
||||
const portArray = port.split(':');
|
||||
const hostPort = portArray[0];
|
||||
const containerPort = portArray[1];
|
||||
ports.push({
|
||||
Protocol: 'tcp',
|
||||
PublishedPort: parseInt(hostPort, 10),
|
||||
TargetPort: parseInt(containerPort, 10)
|
||||
});
|
||||
}
|
||||
|
||||
// lets configure secrets
|
||||
const secretArray: any[] = [];
|
||||
for (const secret of serviceCreationDescriptor.secrets) {
|
||||
secretArray.push({
|
||||
File: {
|
||||
Name: 'secret.json', // TODO: make sure that works with multiple secrets
|
||||
UID: '33',
|
||||
GID: '33',
|
||||
Mode: 384
|
||||
},
|
||||
SecretID: secret.ID,
|
||||
SecretName: secret.Spec.Name
|
||||
});
|
||||
}
|
||||
|
||||
// lets configure limits
|
||||
|
||||
const memoryLimitMB =
|
||||
serviceCreationDescriptor.resources && serviceCreationDescriptor.resources.memorySizeMB
|
||||
? serviceCreationDescriptor.resources.memorySizeMB
|
||||
: 1000;
|
||||
|
||||
const limits = {
|
||||
MemoryBytes: memoryLimitMB * 1000000
|
||||
};
|
||||
|
||||
if (serviceCreationDescriptor.resources) {
|
||||
limits.MemoryBytes = serviceCreationDescriptor.resources.memorySizeMB * 1000000;
|
||||
}
|
||||
|
||||
const response = await dockerHost.request('POST', '/services/create', {
|
||||
Name: serviceCreationDescriptor.name,
|
||||
TaskTemplate: {
|
||||
ContainerSpec: {
|
||||
Image: serviceCreationDescriptor.image.RepoTags[0],
|
||||
Labels: labels,
|
||||
Secrets: secretArray,
|
||||
Mounts: mounts
|
||||
/* DNSConfig: {
|
||||
Nameservers: ['1.1.1.1']
|
||||
} */
|
||||
},
|
||||
UpdateConfig: {
|
||||
Parallelism: 0,
|
||||
Delay: 0,
|
||||
FailureAction: 'pause',
|
||||
Monitor: 15000000000,
|
||||
MaxFailureRatio: 0.15
|
||||
},
|
||||
ForceUpdate: 1,
|
||||
Resources: {
|
||||
Limits: limits
|
||||
},
|
||||
LogDriver: {
|
||||
Name: 'json-file',
|
||||
Options: {
|
||||
'max-file': '3',
|
||||
'max-size': '10M'
|
||||
}
|
||||
}
|
||||
},
|
||||
Labels: labels,
|
||||
Networks: networkArray,
|
||||
EndpointSpec: {
|
||||
Ports: ports
|
||||
}
|
||||
});
|
||||
|
||||
const createdService = await DockerService.getServiceByName(
|
||||
dockerHost,
|
||||
serviceCreationDescriptor.name
|
||||
);
|
||||
return createdService;
|
||||
}
|
||||
|
||||
// INSTANCE
|
||||
public dockerHostRef: DockerHost;
|
||||
|
||||
public ID: string;
|
||||
public Version: { Index: number };
|
||||
public CreatedAt: string;
|
||||
public UpdatedAt: string;
|
||||
public Spec: {
|
||||
Name: string;
|
||||
Labels: interfaces.TLabels;
|
||||
TaskTemplate: {
|
||||
ContainerSpec: {
|
||||
Image: string;
|
||||
Isolation: string;
|
||||
Secrets: Array<{
|
||||
File: {
|
||||
Name: string;
|
||||
UID: string;
|
||||
GID: string;
|
||||
Mode: number;
|
||||
};
|
||||
SecretID: string;
|
||||
SecretName: string;
|
||||
}>;
|
||||
};
|
||||
ForceUpdate: 0;
|
||||
};
|
||||
Mode: {};
|
||||
Networks: [any[]];
|
||||
};
|
||||
public Endpoint: { Spec: {}; VirtualIPs: [any[]] };
|
||||
|
||||
constructor(dockerHostArg: DockerHost) {
|
||||
this.dockerHostRef = dockerHostArg;
|
||||
}
|
||||
|
||||
public async remove() {
|
||||
await this.dockerHostRef.request('DELETE', `/services/${this.ID}`);
|
||||
}
|
||||
|
||||
public async reReadFromDockerEngine() {
|
||||
const dockerData = await this.dockerHostRef.request('GET', `/services/${this.ID}`);
|
||||
// TODO: Better assign: Object.assign(this, dockerData);
|
||||
}
|
||||
|
||||
public async needsUpdate(): Promise<boolean> {
|
||||
// TODO: implement digest based update recognition
|
||||
|
||||
await this.reReadFromDockerEngine();
|
||||
const dockerImage = await DockerImage.createFromRegistry(this.dockerHostRef, {
|
||||
imageUrl: this.Spec.TaskTemplate.ContainerSpec.Image
|
||||
});
|
||||
|
||||
const imageVersion = new plugins.smartversion.SmartVersion(dockerImage.Labels.version);
|
||||
const serviceVersion = new plugins.smartversion.SmartVersion(this.Spec.Labels.version);
|
||||
if (imageVersion.greaterThan(serviceVersion)) {
|
||||
console.log(`service ${this.Spec.Name} needs to be updated`);
|
||||
return true;
|
||||
} else {
|
||||
console.log(`service ${this.Spec.Name} is up to date.`);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,36 +0,0 @@
|
||||
// Barrel module: re-exports every dependency under one namespace so the rest
// of the package can `import * as plugins from './docker.plugins'`.

// node native path
import * as path from 'path';

export { path };

// @pushrocks scope
import * as lik from '@pushrocks/lik';
import * as smartfile from '@pushrocks/smartfile';
import * as smartjson from '@pushrocks/smartjson';
import * as smartlog from '@pushrocks/smartlog';
import * as smartnetwork from '@pushrocks/smartnetwork';
import * as smartpath from '@pushrocks/smartpath';
import * as smartpromise from '@pushrocks/smartpromise';
import * as smartrequest from '@pushrocks/smartrequest';
import * as smartstring from '@pushrocks/smartstring';
import * as smartversion from '@pushrocks/smartversion';

// module-load side effect: route the default logger's output to the console
smartlog.defaultLogger.enableConsole();

export {
  lik,
  smartfile,
  smartjson,
  smartlog,
  smartnetwork,
  smartpath,
  smartpromise,
  smartrequest,
  smartstring,
  smartversion
};

// third party
import * as rxjs from 'rxjs';

export { rxjs };
|
||||
14
ts/index.ts
14
ts/index.ts
@@ -1,6 +1,8 @@
|
||||
export * from './docker.classes.host';
|
||||
export * from './docker.classes.container';
|
||||
export * from './docker.classes.image';
|
||||
export * from './docker.classes.network';
|
||||
export * from './docker.classes.secret';
|
||||
export * from './docker.classes.service';
|
||||
export * from './classes.base.js';
|
||||
export * from './classes.host.js';
|
||||
export * from './classes.container.js';
|
||||
export * from './classes.image.js';
|
||||
export * from './classes.imagestore.js';
|
||||
export * from './classes.network.js';
|
||||
export * from './classes.secret.js';
|
||||
export * from './classes.service.js';
|
||||
|
||||
@@ -1,7 +1,12 @@
|
||||
import { DockerNetwork } from '../docker.classes.network';
|
||||
import { DockerNetwork } from '../classes.network.js';
|
||||
|
||||
/**
|
||||
* Container creation descriptor supporting both string references and class instances.
|
||||
* Strings will be resolved to resources internally.
|
||||
*/
|
||||
export interface IContainerCreationDescriptor {
|
||||
Hostname: string;
|
||||
Domainname: string;
|
||||
networks?: DockerNetwork[];
|
||||
/** Network names (strings) or DockerNetwork instances */
|
||||
networks?: (string | DockerNetwork)[];
|
||||
}
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
export * from './container';
|
||||
export * from './image';
|
||||
export * from './label';
|
||||
export * from './network';
|
||||
export * from './port';
|
||||
export * from './secret';
|
||||
export * from './service';
|
||||
export * from './container.js';
|
||||
export * from './image.js';
|
||||
export * from './label.js';
|
||||
export * from './network.js';
|
||||
export * from './port.js';
|
||||
export * from './secret.js';
|
||||
export * from './service.js';
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import * as interfaces from './';
|
||||
import * as interfaces from './index.js';
|
||||
|
||||
export interface ISecretCreationDescriptor {
|
||||
name: string;
|
||||
|
||||
@@ -1,18 +1,28 @@
|
||||
import * as interfaces from './';
|
||||
import { DockerNetwork } from '../docker.classes.network';
|
||||
import { DockerSecret } from '../docker.classes.secret';
|
||||
import { DockerImage } from '../docker.classes.image';
|
||||
import * as plugins from '../plugins.js';
|
||||
|
||||
import * as interfaces from './index.js';
|
||||
import { DockerNetwork } from '../classes.network.js';
|
||||
import { DockerSecret } from '../classes.secret.js';
|
||||
import { DockerImage } from '../classes.image.js';
|
||||
|
||||
/**
|
||||
* Service creation descriptor supporting both string references and class instances.
|
||||
* Strings will be resolved to resources internally.
|
||||
*/
|
||||
export interface IServiceCreationDescriptor {
|
||||
name: string;
|
||||
image: DockerImage;
|
||||
/** Image tag (string) or DockerImage instance */
|
||||
image: string | DockerImage;
|
||||
labels: interfaces.TLabels;
|
||||
networks: DockerNetwork[];
|
||||
/** Network names (strings) or DockerNetwork instances */
|
||||
networks: (string | DockerNetwork)[];
|
||||
networkAlias: string;
|
||||
secrets: DockerSecret[];
|
||||
/** Secret names (strings) or DockerSecret instances */
|
||||
secrets: (string | DockerSecret)[];
|
||||
ports: string[];
|
||||
accessHostDockerSock?: boolean;
|
||||
resources?: {
|
||||
memorySizeMB: number
|
||||
memorySizeMB?: number;
|
||||
volumeMounts?: plugins.tsclass.container.IVolumeMount[];
|
||||
};
|
||||
}
|
||||
|
||||
5
ts/logger.ts
Normal file
5
ts/logger.ts
Normal file
@@ -0,0 +1,5 @@
|
||||
import * as plugins from './plugins.js';
|
||||
import { commitinfo } from './00_commitinfo_data.js';
|
||||
|
||||
export const logger = plugins.smartlog.Smartlog.createForCommitinfo(commitinfo);
|
||||
logger.enableConsole();
|
||||
9
ts/paths.ts
Normal file
9
ts/paths.ts
Normal file
@@ -0,0 +1,9 @@
|
||||
import * as plugins from './plugins.js';
|
||||
|
||||
export const packageDir = plugins.path.resolve(
|
||||
plugins.smartpath.get.dirnameFromImportMetaUrl(import.meta.url),
|
||||
'../',
|
||||
);
|
||||
|
||||
export const nogitDir = plugins.path.resolve(packageDir, '.nogit/');
|
||||
plugins.smartfile.fs.ensureDir(nogitDir);
|
||||
47
ts/plugins.ts
Normal file
47
ts/plugins.ts
Normal file
@@ -0,0 +1,47 @@
|
||||
// node native path
|
||||
import * as path from 'node:path';
|
||||
|
||||
export { path };
|
||||
|
||||
// @pushrocks scope
|
||||
import * as lik from '@push.rocks/lik';
|
||||
import * as smartarchive from '@push.rocks/smartarchive';
|
||||
import * as smartbucket from '@push.rocks/smartbucket';
|
||||
import * as smartfile from '@push.rocks/smartfile';
|
||||
import * as smartjson from '@push.rocks/smartjson';
|
||||
import * as smartlog from '@push.rocks/smartlog';
|
||||
import * as smartnetwork from '@push.rocks/smartnetwork';
|
||||
import * as smartpath from '@push.rocks/smartpath';
|
||||
import * as smartpromise from '@push.rocks/smartpromise';
|
||||
import * as smartrequest from '@push.rocks/smartrequest';
|
||||
import * as smartstring from '@push.rocks/smartstring';
|
||||
import * as smartstream from '@push.rocks/smartstream';
|
||||
import * as smartunique from '@push.rocks/smartunique';
|
||||
import * as smartversion from '@push.rocks/smartversion';
|
||||
|
||||
export {
|
||||
lik,
|
||||
smartarchive,
|
||||
smartbucket,
|
||||
smartfile,
|
||||
smartjson,
|
||||
smartlog,
|
||||
smartnetwork,
|
||||
smartpath,
|
||||
smartpromise,
|
||||
smartrequest,
|
||||
smartstring,
|
||||
smartstream,
|
||||
smartunique,
|
||||
smartversion,
|
||||
};
|
||||
|
||||
// @tsclass scope
|
||||
import * as tsclass from '@tsclass/tsclass';
|
||||
|
||||
export { tsclass };
|
||||
|
||||
// third party
|
||||
import * as rxjs from 'rxjs';
|
||||
|
||||
export { rxjs };
|
||||
14
tsconfig.json
Normal file
14
tsconfig.json
Normal file
@@ -0,0 +1,14 @@
|
||||
{
|
||||
"compilerOptions": {
|
||||
"experimentalDecorators": true,
|
||||
"useDefineForClassFields": false,
|
||||
"target": "ES2022",
|
||||
"module": "NodeNext",
|
||||
"moduleResolution": "NodeNext",
|
||||
"esModuleInterop": true,
|
||||
"verbatimModuleSyntax": true,
|
||||
"baseUrl": ".",
|
||||
"paths": {}
|
||||
},
|
||||
"exclude": ["dist_*/**/*.d.ts"]
|
||||
}
|
||||
17
tslint.json
17
tslint.json
@@ -1,17 +0,0 @@
|
||||
{
|
||||
"extends": ["tslint:latest", "tslint-config-prettier"],
|
||||
"rules": {
|
||||
"semicolon": [true, "always"],
|
||||
"no-console": false,
|
||||
"ordered-imports": false,
|
||||
"object-literal-sort-keys": false,
|
||||
"member-ordering": {
|
||||
"options":{
|
||||
"order": [
|
||||
"static-method"
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"defaultSeverity": "warning"
|
||||
}
|
||||
Reference in New Issue
Block a user