Compare commits
199 Commits
SHA1 | Author | Date |
---|---|---|
606c82dafa | |||
9fc4afe4b8 | |||
90689c2645 | |||
4a1d649e5e | |||
66bd36dc4f | |||
349d711cc5 | |||
c74a4bcd5b | |||
ff835c4160 | |||
05eceeb056 | |||
de55beda08 | |||
9aa2b0c7be | |||
a283bbfba0 | |||
8a4e300581 | |||
6b0d96b745 | |||
a08c11838f | |||
7c5225125c | |||
bc4778f7db | |||
2e7e8ae5cf | |||
054585c7f5 | |||
c0cebbe614 | |||
740f83114c | |||
e48023d490 | |||
eaaf313442 | |||
68b2baadae | |||
6743dc35e7 | |||
bbf265716d | |||
3a705534fe | |||
cbdbd32dd1 | |||
224004217c | |||
e06ef454a6 | |||
a5f4d93f50 | |||
9f5d2cacf1 | |||
d9112d3e04 | |||
455404c3c9 | |||
90089625dc | |||
86d5cc1d47 | |||
6407033694 | |||
9dd69868d9 | |||
dc4074340d | |||
001e870643 | |||
440eb07afb | |||
8d74712a97 | |||
bbdf61e0a9 | |||
6f5ed697cb | |||
cc93c296c6 | |||
07a4d024a8 | |||
192216c7ec | |||
daa97c68d9 | |||
4569bffc37 | |||
ad4e6ad206 | |||
ddfa701391 | |||
3d2d1e3b1d | |||
01e79b8cd6 | |||
8b6021ff66 | |||
5e5bb3032c | |||
855e18a74f | |||
b808a93e46 | |||
a18166260e | |||
cba8de348d | |||
30d4a7bd24 | |||
4ea99426fd | |||
19309f7f45 | |||
4e7d2fd637 | |||
1675c0c4c9 | |||
3a4f59ef9e | |||
90eac3e50a | |||
edec48529d | |||
e622b97097 | |||
23266ca459 | |||
a91e69b6db | |||
015ccfad48 | |||
06d2fcb750 | |||
f3e4bc0350 | |||
6de3abe3bf | |||
eaa4140f2f | |||
b21fe80109 | |||
96a2992432 | |||
870b5f2c07 | |||
212edf1db7 | |||
46dbd81bcc | |||
8f5678502d | |||
959d7aaed1 | |||
5aa10653b6 | |||
e120d6527e | |||
c80da05fbb | |||
b9c3475b86 | |||
de2d7e647b | |||
d9348bd016 | |||
034fbc3994 | |||
a33a6a1f7f | |||
9dd403821b | |||
601d82ea74 | |||
784bb22511 | |||
71c89ac9bc | |||
0b3e3b68c9 | |||
f3779faaaf | |||
73476c2c39 | |||
942f65268d | |||
a965647c1f | |||
db88c7f86c | |||
3f18cb68bf | |||
dae3b59e3b | |||
53062e70d4 | |||
3e70dc465b | |||
49445d93c6 | |||
4f838837f8 | |||
c76968bbe8 | |||
6c5e5644b1 | |||
5cf80944fe | |||
cdb69c5f17 | |||
178c1d2df1 | |||
43d9da808b | |||
15f5c38eb0 | |||
225c1be14c | |||
44f2aab2f6 | |||
b69315f1d3 | |||
7d20804986 | |||
0aab639fbd | |||
794bb60dfc | |||
b182a379af | |||
5c6c06dee6 | |||
a48e1e035e | |||
8836c06b56 | |||
7af8e0739b | |||
684185e951 | |||
21e6fff3fb | |||
83c49a6234 | |||
ad67849d45 | |||
0e4e07a912 | |||
1fbc09f557 | |||
d6201b864c | |||
ea5e552192 | |||
1afe5c6e16 | |||
eb0dc96dbd | |||
55f45b1c3a | |||
87ff0f01bb | |||
dd1939d7b2 | |||
5a2a5f1248 | |||
9767b8767a | |||
546e139b46 | |||
28d70bb49f | |||
b71f134abd | |||
968b3c7449 | |||
d9558f7843 | |||
19e67ffdcc | |||
e983b66c28 | |||
c79f6a698f | |||
0ef098e9c8 | |||
d56350ff28 | |||
a4dc4e7950 | |||
424e911804 | |||
b5c4727bae | |||
b6f3fbf8a9 | |||
7241e7a8fd | |||
ae37148ece | |||
65c37bdd6f | |||
6acbe30e2e | |||
eb6f7889d0 | |||
e39da5fee9 | |||
b07628bb0b | |||
5815f9b202 | |||
846ea9997e | |||
de54db33ad | |||
314cb692ac | |||
73f8ded3fe | |||
a28b10ac51 | |||
927e2e0acc | |||
c496405818 | |||
020737e21b | |||
fe3560caac | |||
b2a7e67868 | |||
f772ca15ef | |||
71cfad146f | |||
43b1c13256 | |||
2c8b17f029 | |||
c6521d9160 | |||
72c74e44b5 | |||
2fb628213d | |||
373a4e2eac | |||
a202d05e9c | |||
6e97a7d83c | |||
04bb3b9ed0 | |||
29e502a32e | |||
75a118183e | |||
aa1fe594e8 | |||
cf761e19bf | |||
af7590c8de | |||
ee2e4fb856 | |||
ff5cecb07e | |||
e50b028c00 | |||
867dda8e7c | |||
d12d595f21 | |||
6117228ea4 | |||
b6b584f808 | |||
48fe6ce0a8 | |||
55acb39071 | |||
f7b17a4684 | |||
d95f6c8e7f | |||
001cdadb6b |
.gitea/workflows/default_nottags.yaml | 66 lines (new file)
@@ -0,0 +1,66 @@
name: Default (not tags)

on:
  push:
    tags-ignore:
      - '**'

env:
  IMAGE: registry.gitlab.com/hosttoday/ht-docker-node:npmci
  NPMCI_COMPUTED_REPOURL: https://${{gitea.repository_owner}}:${{secrets.GITEA_TOKEN}}@gitea.lossless.digital/${{gitea.repository}}.git
  NPMCI_TOKEN_NPM: ${{secrets.NPMCI_TOKEN_NPM}}
  NPMCI_TOKEN_NPM2: ${{secrets.NPMCI_TOKEN_NPM2}}
  NPMCI_GIT_GITHUBTOKEN: ${{secrets.NPMCI_GIT_GITHUBTOKEN}}
  NPMCI_URL_CLOUDLY: ${{secrets.NPMCI_URL_CLOUDLY}}

jobs:
  security:
    runs-on: ubuntu-latest
    continue-on-error: true
    container:
      image: ${{ env.IMAGE }}

    steps:
      - uses: actions/checkout@v3

      - name: Install pnpm and npmci
        run: |
          pnpm install -g pnpm
          pnpm install -g @shipzone/npmci

      - name: Run npm prepare
        run: npmci npm prepare

      - name: Audit production dependencies
        run: |
          npmci command npm config set registry https://registry.npmjs.org
          npmci command pnpm audit --audit-level=high --prod
        continue-on-error: true

      - name: Audit development dependencies
        run: |
          npmci command npm config set registry https://registry.npmjs.org
          npmci command pnpm audit --audit-level=high --dev
        continue-on-error: true

  test:
    if: ${{ always() }}
    needs: security
    runs-on: ubuntu-latest
    container:
      image: ${{ env.IMAGE }}

    steps:
      - uses: actions/checkout@v3

      - name: Test stable
        run: |
          npmci node install stable
          npmci npm install
          npmci npm test

      - name: Test build
        run: |
          npmci node install stable
          npmci npm install
          npmci npm build
.gitea/workflows/default_tags.yaml | 124 lines (new file)
@@ -0,0 +1,124 @@
name: Default (tags)

on:
  push:
    tags:
      - '*'

env:
  IMAGE: registry.gitlab.com/hosttoday/ht-docker-node:npmci
  NPMCI_COMPUTED_REPOURL: https://${{gitea.repository_owner}}:${{secrets.GITEA_TOKEN}}@gitea.lossless.digital/${{gitea.repository}}.git
  NPMCI_TOKEN_NPM: ${{secrets.NPMCI_TOKEN_NPM}}
  NPMCI_TOKEN_NPM2: ${{secrets.NPMCI_TOKEN_NPM2}}
  NPMCI_GIT_GITHUBTOKEN: ${{secrets.NPMCI_GIT_GITHUBTOKEN}}
  NPMCI_URL_CLOUDLY: ${{secrets.NPMCI_URL_CLOUDLY}}

jobs:
  security:
    runs-on: ubuntu-latest
    continue-on-error: true
    container:
      image: ${{ env.IMAGE }}

    steps:
      - uses: actions/checkout@v3

      - name: Prepare
        run: |
          pnpm install -g pnpm
          pnpm install -g @shipzone/npmci
          npmci npm prepare

      - name: Audit production dependencies
        run: |
          npmci command npm config set registry https://registry.npmjs.org
          npmci command pnpm audit --audit-level=high --prod
        continue-on-error: true

      - name: Audit development dependencies
        run: |
          npmci command npm config set registry https://registry.npmjs.org
          npmci command pnpm audit --audit-level=high --dev
        continue-on-error: true

  test:
    if: ${{ always() }}
    needs: security
    runs-on: ubuntu-latest
    container:
      image: ${{ env.IMAGE }}

    steps:
      - uses: actions/checkout@v3

      - name: Prepare
        run: |
          pnpm install -g pnpm
          pnpm install -g @shipzone/npmci
          npmci npm prepare

      - name: Test stable
        run: |
          npmci node install stable
          npmci npm install
          npmci npm test

      - name: Test build
        run: |
          npmci node install stable
          npmci npm install
          npmci npm build

  release:
    needs: test
    if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/')
    runs-on: ubuntu-latest
    container:
      image: ${{ env.IMAGE }}

    steps:
      - uses: actions/checkout@v3

      - name: Prepare
        run: |
          pnpm install -g pnpm
          pnpm install -g @shipzone/npmci
          npmci npm prepare

      - name: Release
        run: |
          npmci node install stable
          npmci npm publish

  metadata:
    needs: test
    if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/')
    runs-on: ubuntu-latest
    container:
      image: ${{ env.IMAGE }}
    continue-on-error: true

    steps:
      - uses: actions/checkout@v3

      - name: Prepare
        run: |
          pnpm install -g pnpm
          pnpm install -g @shipzone/npmci
          npmci npm prepare

      - name: Code quality
        run: |
          npmci command npm install -g typescript
          npmci npm install

      - name: Trigger
        run: npmci trigger

      - name: Build docs and upload artifacts
        run: |
          npmci node install stable
          npmci npm install
          pnpm install -g @git.zone/tsdoc
          npmci command tsdoc
        continue-on-error: true
.gitignore | 20 lines (vendored)
@@ -1,4 +1,20 @@
.yarn/
node_modules/
.nogit/

# artifacts
coverage/
public/
pages/

# installs
node_modules/

# caches
.yarn/
.cache/
.rpt2_cache

# builds
dist/
dist_*/

# custom
.gitlab-ci.yml | 139 lines (deleted)
@@ -1,139 +0,0 @@
# gitzone standard
image: hosttoday/ht-docker-node:npmci

cache:
  paths:
    - .npmci_cache/
  key: "$CI_BUILD_STAGE"

stages:
  - security
  - test
  - release
  - metadata

# ====================
# security stage
# ====================
mirror:
  stage: security
  script:
    - npmci git mirror
  tags:
    - docker
    - notpriv

snyk:
  stage: security
  script:
    - npmci npm prepare
    - npmci command npm install -g snyk
    - npmci command npm install --ignore-scripts
    - npmci command snyk test
  tags:
    - docker
    - notpriv

sast:
  stage: security
  image: registry.gitlab.com/hosttoday/ht-docker-dbase:npmci
  variables:
    DOCKER_DRIVER: overlay2
  allow_failure: true
  services:
    - docker:stable-dind
  script:
    - npmci npm prepare
    - npmci npm install
    - npmci command npm run build
    - export SP_VERSION=$(echo "$CI_SERVER_VERSION" | sed 's/^\([0-9]*\)\.\([0-9]*\).*/\1-\2-stable/')
    - docker run
      --env SAST_CONFIDENCE_LEVEL="${SAST_CONFIDENCE_LEVEL:-3}"
      --volume "$PWD:/code"
      --volume /var/run/docker.sock:/var/run/docker.sock
      "registry.gitlab.com/gitlab-org/security-products/sast:$SP_VERSION" /app/bin/run /code
  artifacts:
    reports:
      sast: gl-sast-report.json
  tags:
    - docker
    - priv

# ====================
# test stage
# ====================

testSTABLE:
  image: hosttoday/ht-docker-dbase:npmci
  stage: test
  script:
    - npmci npm prepare
    - npmci node install stable
    - npmci npm install
    - npmci npm test
  coverage: /\d+.?\d+?\%\s*coverage/
  tags:
    - docker
    - priv

release:
  stage: release
  script:
    - npmci node install stable
    - npmci npm publish
  only:
    - tags
  tags:
    - docker
    - notpriv

# ====================
# metadata stage
# ====================
codequality:
  stage: metadata
  image: docker:stable
  allow_failure: true
  services:
    - docker:stable-dind
  script:
    - export SP_VERSION=$(echo "$CI_SERVER_VERSION" | sed 's/^\([0-9]*\)\.\([0-9]*\).*/\1-\2-stable/')
    - docker run
      --env SOURCE_CODE="$PWD"
      --volume "$PWD":/code
      --volume /var/run/docker.sock:/var/run/docker.sock
      "registry.gitlab.com/gitlab-org/security-products/codequality:$SP_VERSION" /code
  artifacts:
    paths: [codeclimate.json]
  tags:
    - docker
    - priv

trigger:
  stage: metadata
  script:
    - npmci trigger
  only:
    - tags
  tags:
    - docker
    - notpriv

pages:
  image: hosttoday/ht-docker-node:npmci
  stage: metadata
  script:
    - npmci command npm install -g typedoc typescript
    - npmci npm prepare
    - npmci npm install
    - npmci command typedoc --module "commonjs" --target "ES2016" --out public/ ts/
  tags:
    - docker
    - notpriv
  only:
    - tags
  artifacts:
    expire_in: 1 week
    paths:
      - public
  allow_failure: true
.vscode/launch.json | 11 lines (vendored, new file)
@@ -0,0 +1,11 @@
{
  "version": "0.2.0",
  "configurations": [
    {
      "command": "npm test",
      "name": "Run npm test",
      "request": "launch",
      "type": "node-terminal"
    }
  ]
}
.vscode/settings.json | 26 lines (vendored, new file)
@@ -0,0 +1,26 @@
{
  "json.schemas": [
    {
      "fileMatch": ["/npmextra.json"],
      "schema": {
        "type": "object",
        "properties": {
          "npmci": {
            "type": "object",
            "description": "settings for npmci"
          },
          "gitzone": {
            "type": "object",
            "description": "settings for gitzone",
            "properties": {
              "projectType": {
                "type": "string",
                "enum": ["website", "element", "service", "npm", "wcc"]
              }
            }
          }
        }
      }
    }
  ]
}
README.md | 52 lines (deleted)
@@ -1,52 +0,0 @@
# @mojoio/dockersock

easy communication with docker remote api from node, TypeScript ready

## Availabililty

[](https://www.npmjs.com/package/dockersock)
[](https://GitLab.com/mojoio/dockersock)
[](https://github.com/mojoio/dockersock)
[](https://mojoio.gitlab.io/dockersock/)

## Status for master

[](https://GitLab.com/mojoio/dockersock/commits/master)
[](https://GitLab.com/mojoio/dockersock/commits/master)
[](https://www.npmjs.com/package/dockersock)
[](https://david-dm.org/mojoio/dockersock)
[](https://www.bithound.io/github/mojoio/dockersock/master/dependencies/npm)
[](https://www.bithound.io/github/mojoio/dockersock)
[](https://nodejs.org/dist/latest-v6.x/docs/api/)
[](https://nodejs.org/dist/latest-v6.x/docs/api/)
[](http://standardjs.com/)

## Usage

Use TypeScript for best in class instellisense.

```TypeScript
import {Dockersock} from "dockersock"; // require Dockersock class

let myDockersock = new Dockersock(); // optional: you can pass a domain to the contructor, defaults to /var/run/docker.sock

myDockersock.listContainers() // promise, resolve gets container data
myDockersock.listContainersDetailed() // promise, resolve gets more detailed container data (by combining several requests internally)
myDockersock.listContainersRunning() // promise, resolve gets container data for currently running containers
myDockersock.listContainersStopped() // promise, resolve gets container data for stopped containers

myDockersock.startContainer({ // starts a already present container
  name: "somecontainername"
})

myDockersock.newContainer({ // start new Container, equals "docker run" shell command
  image: "someimagetag"
})
```

For further information read the linked docs at the top of this README.

> MIT licensed | **©** [Lossless GmbH](https://lossless.gmbh)
> | By using this npm module you agree to our [privacy policy](https://lossless.gmbH/privacy.html)

[](https://mojo.io)
changelog.md | 164 lines (new file)
@@ -0,0 +1,164 @@
# Changelog

## 2024-12-23 - 1.3.0 - feat(core)
Initial release of Docker client with TypeScript support

- Provides easy communication with Docker's remote API from Node.js
- Includes implementations for managing Docker services, networks, secrets, containers, and images

## 2024-12-23 - 1.2.8 - fix(core)
Improved the image creation process from tar stream in DockerImage class.

- Enhanced `DockerImage.createFromTarStream` method to handle streamed response and parse imported image details.
- Fixed the dependency version for `@push.rocks/smartarchive` in package.json.

## 2024-10-13 - 1.2.7 - fix(core)
Prepare patch release with minor fixes and improvements

## 2024-10-13 - 1.2.6 - fix(core)
Minor refactoring and code quality improvements.

## 2024-10-13 - 1.2.5 - fix(dependencies)
Update dependencies for stability improvements

- Updated @push.rocks/smartstream to version ^3.0.46
- Updated @push.rocks/tapbundle to version ^5.3.0
- Updated @types/node to version 22.7.5

## 2024-10-13 - 1.2.4 - fix(core)
Refactored DockerImageStore constructor to remove DockerHost dependency

- Adjusted DockerImageStore constructor to remove dependency on DockerHost
- Updated ts/classes.host.ts to align with DockerImageStore's new constructor signature

## 2024-08-21 - 1.2.3 - fix(dependencies)
Update dependencies to the latest versions and fix image export test

- Updated several dependencies to their latest versions in package.json.
- Enabled the previously skipped 'should export images' test.

## 2024-06-10 - 1.2.1-1.2.2 - Core/General
General updates and fixes.

- Fix core update

## 2024-06-10 - 1.2.0 - Core
Core updates and bug fixes.

- Fix core update

## 2024-06-08 - 1.2.0 - General/Core
Major release with core enhancements.

- Processing images with extraction, retagging, repackaging, and long-term storage

## 2024-06-06 - 1.1.4 - General/Imagestore
Significant feature addition.

- Add feature to process images with extraction, retagging, repackaging, and long-term storage

## 2024-05-08 - 1.0.112 - Images
Add new functionality for image handling.

- Can now import and export images
- Start work on local 100% JS OCI image registry

## 2024-06-05 - 1.1.0-1.1.3 - Core
Regular updates and fixes.

- Fix core update

## 2024-02-02 - 1.0.105-1.0.110 - Core
Routine core updates and fixes.

- Fix core update

## 2022-10-17 - 1.0.103-1.0.104 - Core
Routine core updates.

- Fix core update

## 2020-10-01 - 1.0.99-1.0.102 - Core
Routine core updates.

- Fix core update

## 2019-09-22 - 1.0.73-1.0.78 - Core
Routine updates and core fixes.

- Fix core update

## 2019-09-13 - 1.0.60-1.0.72 - Core
Routine updates and core fixes.

- Fix core update

## 2019-08-16 - 1.0.43-1.0.59 - Core
Routine updates and core fixes.

- Fix core update

## 2019-08-15 - 1.0.37-1.0.42 - Core
Routine updates and core fixes.

- Fix core update

## 2019-08-14 - 1.0.31-1.0.36 - Core
Routine updates and core fixes.

- Fix core update

## 2019-01-10 - 1.0.27-1.0.30 - Core
Routine updates and core fixes.

- Fix core update

## 2018-07-16 - 1.0.23-1.0.24 - Core
Routine updates and core fixes.

- Fix core shift to new style

## 2017-07-16 - 1.0.20-1.0.22 - General
Routine updates and fixes.

- Update node_modules within npmdocker

## 2017-04-02 - 1.0.18-1.0.19 - General
Routine updates and fixes.

- Work with npmdocker and npmts 7.x.x
- CI updates

## 2016-07-31 - 1.0.17 - General
Enhancements and fixes.

- Now waiting for response to be stored before ending streaming request
- Cosmetic fix

## 2016-07-29 - 1.0.14-1.0.16 - General
Multiple updates and features added.

- Fix request for change observable and add npmdocker
- Add request typings

## 2016-07-28 - 1.0.13 - Core
Fixes and preparations.

- Fixed request for newer docker
- Prepare for npmdocker

## 2016-06-16 - 1.0.0-1.0.2 - General
Initial sequence of releases, significant feature additions and CI setups.

- Implement container start and stop
- Implement list containers and related functions
- Add tests with in docker environment

## 2016-04-12 - unknown - Initial Commit
Initial project setup.

- Initial commit
npmextra.json
@@ -1,15 +1,37 @@
{
  "npmts": {
    "mode": "default",
    "coverageTreshold": 10
  },
  "npmdocker": {
    "baseImage": "hosttoday/ht-docker-node:npmci",
    "command":
      "(ls -a && rm -r node_modules && yarn global add npmts && yarn install && npmts)",
    "command": "(ls -a && rm -r node_modules && yarn global add npmts && yarn install && npmts)",
    "dockerSock": true
  },
  "npmci": {
    "globalNpmTools": ["npmts", "npmdocker"]
    "npmGlobalTools": [],
    "npmAccessLevel": "public",
    "npmRegistryUrl": "registry.npmjs.org"
  },
  "gitzone": {
    "projectType": "npm",
    "module": {
      "githost": "gitlab.com",
      "gitscope": "mojoio",
      "gitrepo": "docker",
      "description": "Provides easy communication with Docker remote API from Node.js, with TypeScript support.",
      "npmPackagename": "@mojoio/docker",
      "license": "MIT",
      "keywords": [
        "Docker",
        "API",
        "Node.js",
        "TypeScript",
        "Containers",
        "Images",
        "Networks",
        "Services",
        "Secrets"
      ]
    }
  },
  "tsdoc": {
    "legal": "\n## License and Legal Information\n\nThis repository contains open-source code that is licensed under the MIT License. A copy of the MIT License can be found in the [license](license) file within this repository. \n\n**Please note:** The MIT License does not grant permission to use the trade names, trademarks, service marks, or product names of the project, except as required for reasonable and customary use in describing the origin of the work and reproducing the content of the NOTICE file.\n\n### Trademarks\n\nThis project is owned and maintained by Task Venture Capital GmbH. The names and logos associated with Task Venture Capital GmbH and any related products or services are trademarks of Task Venture Capital GmbH and are not included within the scope of the MIT license granted herein. Use of these trademarks must comply with Task Venture Capital GmbH's Trademark Guidelines, and any usage must be approved in writing by Task Venture Capital GmbH.\n\n### Company Information\n\nTask Venture Capital GmbH \nRegistered at District court Bremen HRB 35230 HB, Germany\n\nFor any legal inquiries or if you require further information, please contact us via email at hello@task.vc.\n\nBy using this repository, you acknowledge that you have read this section, agree to comply with its terms, and understand that the licensing of the code does not imply endorsement by Task Venture Capital GmbH of any derivative works.\n"
  }
}
}
package-lock.json | 1527 lines (generated) — file diff suppressed because it is too large
package.json | 85 lines
@@ -1,43 +1,76 @@
{
  "name": "@mojoio/docker",
  "version": "1.0.27",
  "description": "easy communication with docker remote api from node, TypeScript ready",
  "main": "dist/index.js",
  "typings": "dist/index.d.ts",
  "name": "@apiclient.xyz/docker",
  "version": "1.3.0",
  "description": "Provides easy communication with Docker remote API from Node.js, with TypeScript support.",
  "private": false,
  "main": "dist_ts/index.js",
  "typings": "dist_ts/index.d.ts",
  "type": "module",
  "scripts": {
    "test": "tsrun test/test.ts",
    "build": "(npmts --notest && npmdocker)"
    "test": "(tstest test/ --web)",
    "build": "(tsbuild --web --allowimplicitany)",
    "buildDocs": "tsdoc"
  },
  "repository": {
    "type": "git",
    "url": "git+ssh://git@gitlab.com/pushrocks/dockersock.git"
    "url": "git+https://gitlab.com/mojoio/docker.git"
  },
  "keywords": [
    "docker",
    "sock",
    "container",
    "request",
    "api"
    "Docker",
    "API",
    "Node.js",
    "TypeScript",
    "Containers",
    "Images",
    "Networks",
    "Services",
    "Secrets"
  ],
  "author": "Lossless GmbH",
  "license": "MIT",
  "bugs": {
    "url": "https://gitlab.com/pushrocks/dockersock/issues"
    "url": "https://gitlab.com/mojoio/docker/issues"
  },
  "homepage": "https://gitlab.com/pushrocks/dockersock#README",
  "homepage": "https://gitlab.com/mojoio/docker#readme",
  "dependencies": {
    "@pushrocks/lik": "^3.0.4",
    "@pushrocks/smartlog": "^2.0.9",
    "@pushrocks/smartpromise": "^2.0.5",
    "@pushrocks/smartrequest": "^1.1.14",
    "rxjs": "^6.3.3"
    "@push.rocks/lik": "^6.0.15",
    "@push.rocks/smartarchive": "^4.0.39",
    "@push.rocks/smartbucket": "^3.0.22",
    "@push.rocks/smartfile": "^11.0.21",
    "@push.rocks/smartjson": "^5.0.20",
    "@push.rocks/smartlog": "^3.0.7",
    "@push.rocks/smartnetwork": "^3.0.0",
    "@push.rocks/smartpath": "^5.0.18",
    "@push.rocks/smartpromise": "^4.0.4",
    "@push.rocks/smartrequest": "^2.0.22",
    "@push.rocks/smartstream": "^3.0.46",
    "@push.rocks/smartstring": "^4.0.15",
    "@push.rocks/smartunique": "^3.0.9",
    "@push.rocks/smartversion": "^3.0.5",
    "@tsclass/tsclass": "^4.1.2",
    "rxjs": "^7.5.7"
  },
  "devDependencies": {
    "@gitzone/tsrun": "^1.1.17",
    "@pushrocks/tapbundle": "^3.0.7",
    "@types/node": "^10.12.18",
    "tslint": "^5.12.0",
    "tslint-config-prettier": "^1.17.0"
    "@git.zone/tsbuild": "^2.1.84",
    "@git.zone/tsrun": "^1.2.49",
    "@git.zone/tstest": "^1.0.90",
    "@push.rocks/qenv": "^6.0.5",
    "@push.rocks/tapbundle": "^5.3.0",
    "@types/node": "22.7.5"
  },
  "private": false
  "files": [
    "ts/**/*",
    "ts_web/**/*",
    "dist/**/*",
    "dist_*/**/*",
    "dist_ts/**/*",
    "dist_ts_web/**/*",
    "assets/**/*",
    "cli.js",
    "npmextra.json",
    "readme.md"
  ],
  "browserslist": [
    "last 1 chrome versions"
  ]
}
pnpm-lock.yaml | 6992 lines (generated, new file) — file diff suppressed because it is too large
qenv.yml | 6 lines (new file)
@@ -0,0 +1,6 @@
required:
  - S3_ENDPOINT
  - S3_ACCESSKEY
  - S3_ACCESSSECRET
  - S3_BUCKET
readme.md
Normal file
152
readme.md
Normal file
@ -0,0 +1,152 @@
|
||||
# @apiclient.xyz/docker
|
||||
|
||||
easy communication with docker remote api from node, TypeScript ready
|
||||
|
||||
## Install
|
||||
|
||||
To install @apiclient.xyz/docker, you can use npm (npm package manager). Run the following command in your terminal:
|
||||
|
||||
```bash
|
||||
npm install @apiclient.xyz/docker --save
|
||||
```
|
||||
|
||||
This command installs the package and adds it as a dependency to your project's `package.json` file.
|
||||
|
||||
## Usage
|
||||
|
||||
The `@apiclient.xyz/docker` package provides a TypeScript-ready interface for interacting with Docker's Remote API directly from Node.js applications. It leverages TypeScript for strong type definitions, ensuring more reliable and maintainable code.
|
||||
|
||||
### Prerequisites
|
||||
|
||||
Before you begin, ensure:
|
||||
|
||||
- You have Docker installed and running on your machine or a remote server.
|
||||
- You are familiar with TypeScript and have it set up in your development environment.
|
||||
|
||||
### Getting Started
|
||||
|
||||
First, import the required classes from the package:
|
||||
|
||||
```typescript
|
||||
import { DockerHost, DockerContainer, DockerService, DockerNetwork } from '@apiclient.xyz/docker';
|
||||
```
|
||||
|
||||
### Instantiate DockerHost
|
||||
|
||||
Start by creating a `DockerHost` instance. This class is the entry point to communicate with the Docker Remote API.
|
||||
|
||||
```typescript
|
||||
// Connect to local Docker instance
|
||||
const localDockerHost = new DockerHost();
|
||||
|
||||
// Or specify a custom path or URL to a Docker host
|
||||
const remoteDockerHost = new DockerHost('tcp://<REMOTE_DOCKER_HOST>:2375');
|
||||
```
|
||||
|
||||
### Working with Containers
|
||||
|
||||
#### List All Containers
|
||||
|
||||
```typescript
|
||||
async function listAllContainers() {
|
||||
const containers = await localDockerHost.getContainers();
|
||||
console.log(containers);
|
||||
}
|
||||
|
||||
listAllContainers();
|
||||
```
|
||||
|
||||
#### Create and Remove a Container
|
||||
|
||||
```typescript
|
||||
import { IContainerCreationDescriptor } from '@apiclient.xyz/docker';
|
||||
|
||||
async function createAndRemoveContainer() {
|
||||
const containerDescriptor: IContainerCreationDescriptor = {
|
||||
Hostname: 'test-container',
|
||||
Domainname: '',
|
||||
// Additional settings here
|
||||
};
|
||||
|
||||
// Create container
|
||||
const container = await DockerContainer.create(localDockerHost, containerDescriptor);
|
||||
console.log(`Container Created: ${container.Id}`);
|
||||
|
||||
// Remove container
|
||||
await container.remove();
|
||||
console.log(`Container Removed: ${container.Id}`);
|
||||
}
|
||||
|
||||
createAndRemoveContainer();
|
||||
```
|
||||
|
||||
### Working with Docker Services
|
||||
|
||||
#### Create a Docker Service
|
||||
|
||||
```typescript
|
||||
import { IServiceCreationDescriptor } from '@apiclient.xyz/docker';
|
||||
|
||||
async function createDockerService() {
|
||||
const serviceDescriptor: IServiceCreationDescriptor = {
|
||||
name: 'my-service',
|
||||
image: 'nginx:latest', // Docker Image
|
||||
// Additional settings
|
||||
};
|
||||
|
||||
const service = await DockerService.createService(localDockerHost, serviceDescriptor);
|
||||
console.log(`Service Created: ${service.Id}`);
|
||||
}
|
||||
|
||||
createDockerService();
|
||||
```
|
||||
|
||||
### Working with Docker Networks
|
||||
|
||||
#### Listing and Creating Networks
|
||||
|
||||
```typescript
|
||||
async function listAndCreateNetwork() {
|
||||
// List all networks
|
||||
const networks = await localDockerHost.getNetworks();
|
||||
console.log(networks);
|
||||
|
||||
// Create a new network
|
||||
const network = await DockerNetwork.createNetwork(localDockerHost, {
|
||||
Name: 'my-network'
|
||||
// Additional settings
|
||||
});
|
||||
console.log(`Network Created: ${network.Id}`);
|
||||
}
|
||||
|
||||
listAndCreateNetwork();
|
||||
```
|
||||
|
||||
### Advanced Usage
|
||||
|
||||
You can leverage the full potential of the Docker Remote API with `@apiclient.xyz/docker`. This includes managing images, volumes, swarms, and more. The package's design is consistent and intuitive, making it easy to extend your usage as needed.
|
||||
|
||||
Remember, the Docker Remote API offers extensive capabilities. Always refer to the [Docker API documentation](https://docs.docker.com/engine/api/latest/) for a comprehensive list of endpoints and actions you can perform.
|
||||
|
||||
### Conclusion
|
||||
|
||||
`@apiclient.xyz/docker` simplifies interaction with Docker's Remote API in TypeScript projects, providing strong typing and asynchronous operations. Whether you're managing containers, images, services or networks, it offers a comprehensive toolset to perform these tasks seamlessly.
|
||||
|
||||
## License and Legal Information
|
||||
|
||||
This repository contains open-source code that is licensed under the MIT License. A copy of the MIT License can be found in the [license](license) file within this repository.
|
||||
|
||||
**Please note:** The MIT License does not grant permission to use the trade names, trademarks, service marks, or product names of the project, except as required for reasonable and customary use in describing the origin of the work and reproducing the content of the NOTICE file.
|
||||
|
||||
### Trademarks
|
||||
|
||||
This project is owned and maintained by Task Venture Capital GmbH. The names and logos associated with Task Venture Capital GmbH and any related products or services are trademarks of Task Venture Capital GmbH and are not included within the scope of the MIT license granted herein. Use of these trademarks must comply with Task Venture Capital GmbH's Trademark Guidelines, and any usage must be approved in writing by Task Venture Capital GmbH.
|
||||
|
||||
### Company Information
|
||||
|
||||
Task Venture Capital GmbH
|
||||
Registered at District court Bremen HRB 35230 HB, Germany
|
||||
|
||||
For any legal inquiries or if you require further information, please contact us via email at hello@task.vc.
|
||||
|
||||
By using this repository, you acknowledge that you have read this section, agree to comply with its terms, and understand that the licensing of the code does not imply endorsement by Task Venture Capital GmbH of any derivative works.
|
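As a complement to the readme examples above, here is a minimal sketch of subscribing to the Docker event stream via `getEventObservable` (defined in `ts/classes.host.ts` further down in this diff); the five-second window and the `Type`/`Action` fields printed are illustrative assumptions:

```typescript
import { DockerHost } from '@apiclient.xyz/docker';

const dockerHost = new DockerHost({});
await dockerHost.start();

// getEventObservable() wraps the streaming /events endpoint in an RxJS Observable.
const events = await dockerHost.getEventObservable();
const subscription = events.subscribe((event) => {
  console.log(`${event.Type}: ${event.Action}`);
});

// Stop listening after five seconds.
setTimeout(() => subscription.unsubscribe(), 5000);
```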
scripts/testauth.ts | 11 lines (new file)
@@ -0,0 +1,11 @@
import * as docker from '../ts';
import * as smartstring from '@push.rocks/smartstring';

const run = async () => {
  const dockerHost = new docker.DockerHost();
  await docker.DockerImage.createFromRegistry(dockerHost, {
    imageUrl: 'registry.gitlab.com/servezone/private/cloudly:latest'
  });
};

run();
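The script above assumes the Docker host is already authenticated against the private registry. A hedged sketch of authenticating first, using the `auth` and `getAuthTokenFromDockerConfig` helpers from `ts/classes.host.ts` in this diff and the `creationObject` signature from `ts/classes.image.ts`; the credential values are placeholders:

```typescript
import * as docker from '../ts';

const run = async () => {
  const dockerHost = new docker.DockerHost({});

  // Option 1: authenticate explicitly (placeholder credentials).
  await dockerHost.auth({
    serveraddress: 'registry.gitlab.com',
    username: 'my-user',
    password: 'my-token',
  });

  // Option 2: reuse the token already stored in ~/.docker/config.json.
  // await dockerHost.getAuthTokenFromDockerConfig('registry.gitlab.com');

  await docker.DockerImage.createFromRegistry(dockerHost, {
    creationObject: { imageUrl: 'registry.gitlab.com/servezone/private/cloudly:latest' },
  });
};

run();
```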
test/test.nonci.node.ts | 169 lines (new file)
@@ -0,0 +1,169 @@
import { expect, tap } from '@push.rocks/tapbundle';
import { Qenv } from '@push.rocks/qenv';

const testQenv = new Qenv('./', './.nogit/');

import * as plugins from '../ts/plugins.js';
import * as paths from '../ts/paths.js';

import * as docker from '../ts/index.js';

let testDockerHost: docker.DockerHost;

tap.test('should create a new Dockersock instance', async () => {
  testDockerHost = new docker.DockerHost({});
  await testDockerHost.start();
  return expect(testDockerHost).toBeInstanceOf(docker.DockerHost);
});

tap.test('should create a docker swarm', async () => {
  await testDockerHost.activateSwarm();
});

// Containers
tap.test('should list containers', async () => {
  const containers = await testDockerHost.getContainers();
  console.log(containers);
});

// Networks
tap.test('should list networks', async () => {
  const networks = await testDockerHost.getNetworks();
  console.log(networks);
});

tap.test('should create a network', async () => {
  const newNetwork = await docker.DockerNetwork.createNetwork(testDockerHost, {
    Name: 'webgateway',
  });
  expect(newNetwork).toBeInstanceOf(docker.DockerNetwork);
  expect(newNetwork.Name).toEqual('webgateway');
});

tap.test('should remove a network', async () => {
  const webgateway = await docker.DockerNetwork.getNetworkByName(testDockerHost, 'webgateway');
  await webgateway.remove();
});

// Images
tap.test('should pull an image from imagetag', async () => {
  const image = await docker.DockerImage.createFromRegistry(testDockerHost, {
    creationObject: {
      imageUrl: 'hosttoday/ht-docker-node',
      imageTag: 'alpine',
    },
  });
  expect(image).toBeInstanceOf(docker.DockerImage);
  console.log(image);
});

tap.test('should return a change Observable', async (tools) => {
  const testObservable = await testDockerHost.getEventObservable();
  const subscription = testObservable.subscribe((changeObject) => {
    console.log(changeObject);
  });
  await tools.delayFor(2000);
  subscription.unsubscribe();
});

// SECRETS
tap.test('should create a secret', async () => {
  const mySecret = await docker.DockerSecret.createSecret(testDockerHost, {
    name: 'testSecret',
    version: '1.0.3',
    contentArg: `{ "hi": "wow"}`,
    labels: {},
  });
  console.log(mySecret);
});

tap.test('should remove a secret by name', async () => {
  const mySecret = await docker.DockerSecret.getSecretByName(testDockerHost, 'testSecret');
  await mySecret.remove();
});

// SERVICES
tap.test('should activate swarm mode', async () => {
  await testDockerHost.activateSwarm();
});

tap.test('should list all services', async (tools) => {
  const services = await testDockerHost.getServices();
  console.log(services);
});

tap.test('should create a service', async () => {
  const testNetwork = await docker.DockerNetwork.createNetwork(testDockerHost, {
    Name: 'testNetwork',
  });
  const testSecret = await docker.DockerSecret.createSecret(testDockerHost, {
    name: 'testSecret',
    version: '0.0.1',
    labels: {},
    contentArg: '{"hi": "wow"}',
  });
  const testImage = await docker.DockerImage.createFromRegistry(testDockerHost, {
    creationObject: {
      imageUrl: 'code.foss.global/host.today/ht-docker-node:latest',
    }
  });
  const testService = await docker.DockerService.createService(testDockerHost, {
    image: testImage,
    labels: {},
    name: 'testService',
    networks: [testNetwork],
    networkAlias: 'testService',
    secrets: [testSecret],
    ports: ['3000:80'],
  });

  await testService.remove();
  await testNetwork.remove();
  await testSecret.remove();
});

tap.test('should export images', async (toolsArg) => {
  const done = toolsArg.defer();
  const testImage = await docker.DockerImage.createFromRegistry(testDockerHost, {
    creationObject: {
      imageUrl: 'code.foss.global/host.today/ht-docker-node:latest',
    }
  });
  const fsWriteStream = plugins.smartfile.fsStream.createWriteStream(
    plugins.path.join(paths.nogitDir, 'testimage.tar')
  );
  const exportStream = await testImage.exportToTarStream();
  exportStream.pipe(fsWriteStream).on('finish', () => {
    done.resolve();
  });
  await done.promise;
});

tap.test('should import images', async (toolsArg) => {
  const done = toolsArg.defer();
  const fsReadStream = plugins.smartfile.fsStream.createReadStream(
    plugins.path.join(paths.nogitDir, 'testimage.tar')
  );
  await docker.DockerImage.createFromTarStream(testDockerHost, {
    tarStream: fsReadStream,
    creationObject: {
      imageUrl: 'code.foss.global/host.today/ht-docker-node:latest',
    }
  })
});

tap.test('should expose a working DockerImageStore', async () => {
  // lets first add am s3 target
  const s3Descriptor = {
    endpoint: await testQenv.getEnvVarOnDemand('S3_ENDPOINT'),
    accessKey: await testQenv.getEnvVarOnDemand('S3_ACCESSKEY'),
    accessSecret: await testQenv.getEnvVarOnDemand('S3_ACCESSSECRET'),
    bucketName: await testQenv.getEnvVarOnDemand('S3_BUCKET'),
  };
  await testDockerHost.addS3Storage(s3Descriptor);

  //
  await testDockerHost.imageStore.storeImage('hello', plugins.smartfile.fsStream.createReadStream(plugins.path.join(paths.nogitDir, 'testimage.tar')));
})

export default tap.start();
test/test.ts | 29 lines (deleted)
@@ -1,29 +0,0 @@
import { expect, tap } from '@pushrocks/tapbundle';
import { DockerHost } from '../ts/index';

let testDockerHost: DockerHost;

tap.test('should create a new Dockersock instance', async () => {
  testDockerHost = new DockerHost();
  return expect(testDockerHost).to.be.instanceof(DockerHost);
});

tap.test('should list containers', async () => {
  const containers = await testDockerHost.getContainers();
  console.log(containers);
});

tap.skip.test('should pull an image from imagetag', async () => {
  // await testDockerHost.pullImage('hosttoday/ht-docker-node:npmci');
});

tap.test('should return a change Objservable', async tools => {
  const testObservable = await testDockerHost.getEventObservable();
  const subscription = testObservable.subscribe(changeObject => {
    console.log(changeObject);
  });
  await tools.delayFor(2000);
  subscription.unsubscribe();
});

tap.start();
ts/00_commitinfo_data.ts | 8 lines (new file)
@@ -0,0 +1,8 @@
/**
 * autocreated commitinfo by @push.rocks/commitinfo
 */
export const commitinfo = {
  name: '@apiclient.xyz/docker',
  version: '1.3.0',
  description: 'Provides easy communication with Docker remote API from Node.js, with TypeScript support.'
}
ts/classes.container.ts | 99 lines (new file)
@@ -0,0 +1,99 @@
import * as plugins from './plugins.js';
import * as interfaces from './interfaces/index.js';

import { DockerHost } from './classes.host.js';
import { logger } from './logger.js';

export class DockerContainer {
  // STATIC

  /**
   * get all containers
   */
  public static async getContainers(dockerHostArg: DockerHost): Promise<DockerContainer[]> {
    const result: DockerContainer[] = [];
    const response = await dockerHostArg.request('GET', '/containers/json');

    // TODO: Think about getting the config by inpsecting the container
    for (const containerResult of response.body) {
      result.push(new DockerContainer(dockerHostArg, containerResult));
    }
    return result;
  }

  /**
   * gets an container by Id
   * @param containerId
   */
  public static async getContainerById(containerId: string) {
    // TODO: implement get container by id
  }

  /**
   * create a container
   */
  public static async create(
    dockerHost: DockerHost,
    containerCreationDescriptor: interfaces.IContainerCreationDescriptor
  ) {
    // check for unique hostname
    const existingContainers = await DockerContainer.getContainers(dockerHost);
    const sameHostNameContainer = existingContainers.find((container) => {
      // TODO implement HostName Detection;
      return false;
    });
    const response = await dockerHost.request('POST', '/containers/create', {
      Hostname: containerCreationDescriptor.Hostname,
      Domainname: containerCreationDescriptor.Domainname,
      User: 'root',
    });
    if (response.statusCode < 300) {
      logger.log('info', 'Container created successfully');
    } else {
      logger.log('error', 'There has been a problem when creating the container');
    }
  }

  // INSTANCE
  // references
  public dockerHost: DockerHost;

  // properties
  public Id: string;
  public Names: string[];
  public Image: string;
  public ImageID: string;
  public Command: string;
  public Created: number;
  public Ports: interfaces.TPorts;
  public Labels: interfaces.TLabels;
  public State: string;
  public Status: string;
  public HostConfig: any;
  public NetworkSettings: {
    Networks: {
      [key: string]: {
        IPAMConfig: any;
        Links: any;
        Aliases: any;
        NetworkID: string;
        EndpointID: string;
        Gateway: string;
        IPAddress: string;
        IPPrefixLen: number;
        IPv6Gateway: string;
        GlobalIPv6Address: string;
        GlobalIPv6PrefixLen: number;
        MacAddress: string;
        DriverOpts: any;
      };
    };
  };
  public Mounts: any;
  constructor(dockerHostArg: DockerHost, dockerContainerObjectArg: any) {
    this.dockerHost = dockerHostArg;
    Object.keys(dockerContainerObjectArg).forEach((keyArg) => {
      this[keyArg] = dockerContainerObjectArg[keyArg];
    });
  }
}
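A short usage sketch for the class above: listing containers through a `DockerHost` and creating one with the two descriptor fields the current `create()` implementation forwards (`Hostname`, `Domainname`); the hostname value is a placeholder.

```typescript
import { DockerHost, DockerContainer } from '@apiclient.xyz/docker';

const dockerHost = new DockerHost({});
await dockerHost.start();

// List all containers known to the daemon and print a few fields.
const containers = await DockerContainer.getContainers(dockerHost);
for (const container of containers) {
  console.log(container.Id, container.Names, container.State);
}

// Create a container; at this stage only Hostname/Domainname are forwarded by create().
await DockerContainer.create(dockerHost, {
  Hostname: 'example-host',
  Domainname: '',
});
```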
ts/classes.host.ts | 295 lines (new file)
@@ -0,0 +1,295 @@
import * as plugins from './plugins.js';
import * as paths from './paths.js';
import { DockerContainer } from './classes.container.js';
import { DockerNetwork } from './classes.network.js';
import { DockerService } from './classes.service.js';
import { logger } from './logger.js';
import path from 'path';
import { DockerImageStore } from './classes.imagestore.js';
import { DockerImage } from './classes.image.js';

export interface IAuthData {
  serveraddress: string;
  username: string;
  password: string;
}

export interface IDockerHostConstructorOptions {
  dockerSockPath?: string;
  imageStoreDir?: string;
}

export class DockerHost {
  public options: IDockerHostConstructorOptions;

  /**
   * the path where the docker sock can be found
   */
  public socketPath: string;
  private registryToken: string = '';
  public imageStore: DockerImageStore;
  public smartBucket: plugins.smartbucket.SmartBucket;

  /**
   * the constructor to instantiate a new docker sock instance
   * @param pathArg
   */
  constructor(optionsArg: IDockerHostConstructorOptions) {
    this.options = {
      ...{
        imageStoreDir: plugins.path.join(paths.nogitDir, 'temp-docker-image-store'),
      },
      ...optionsArg,
    }
    let pathToUse: string;
    if (optionsArg.dockerSockPath) {
      pathToUse = optionsArg.dockerSockPath;
    } else if (process.env.DOCKER_HOST) {
      pathToUse = process.env.DOCKER_HOST;
    } else if (process.env.CI) {
      pathToUse = 'http://docker:2375/';
    } else {
      pathToUse = 'http://unix:/var/run/docker.sock:';
    }
    if (pathToUse.startsWith('unix:///')) {
      pathToUse = pathToUse.replace('unix://', 'http://unix:');
    }
    if (pathToUse.endsWith('.sock')) {
      pathToUse = pathToUse.replace('.sock', '.sock:');
    }
    console.log(`using docker sock at ${pathToUse}`);
    this.socketPath = pathToUse;
    this.imageStore = new DockerImageStore({
      bucketDir: null,
      localDirPath: this.options.imageStoreDir,
    })
  }

  public async start() {
    await this.imageStore.start();
  }
  public async stop() {
    await this.imageStore.stop();
  }

  /**
   * authenticate against a registry
   * @param userArg
   * @param passArg
   */
  public async auth(authData: IAuthData) {
    const response = await this.request('POST', '/auth', authData);
    if (response.body.Status !== 'Login Succeeded') {
      console.log(`Login failed with ${response.body.Status}`);
      throw new Error(response.body.Status);
    }
    console.log(response.body.Status);
    this.registryToken = plugins.smartstring.base64.encode(plugins.smartjson.stringify(authData));
  }

  /**
   * gets the token from the .docker/config.json file for GitLab registry
   */
  public async getAuthTokenFromDockerConfig(registryUrlArg: string) {
    const dockerConfigPath = plugins.smartpath.get.home('~/.docker/config.json');
    const configObject = plugins.smartfile.fs.toObjectSync(dockerConfigPath);
    const gitlabAuthBase64 = configObject.auths[registryUrlArg].auth;
    const gitlabAuth: string = plugins.smartstring.base64.decode(gitlabAuthBase64);
    const gitlabAuthArray = gitlabAuth.split(':');
    await this.auth({
      username: gitlabAuthArray[0],
      password: gitlabAuthArray[1],
      serveraddress: registryUrlArg,
    });
  }

  // ==============
  // NETWORKS
  // ==============
  /**
   * gets all networks
   */
  public async getNetworks() {
    return await DockerNetwork.getNetworks(this);
  }

  /**
   * create a network
   */
  public async createNetwork(optionsArg: Parameters<typeof DockerNetwork.createNetwork>[1]) {
    return await DockerNetwork.createNetwork(this, optionsArg);
  }

  /**
   * get a network by name
   */
  public async getNetworkByName(networkNameArg: string) {
    return await DockerNetwork.getNetworkByName(this, networkNameArg);
  }

  // ==============
  // CONTAINERS
  // ==============
  /**
   * gets all containers
   */
  public async getContainers() {
    const containerArray = await DockerContainer.getContainers(this);
    return containerArray;
  }

  // ==============
  // SERVICES
  // ==============

  /**
   * gets all services
   */
  public async getServices() {
    const serviceArray = await DockerService.getServices(this);
    return serviceArray;
  }

  // ==============
  // IMAGES
  // ==============

  /**
   * get all images
   */
  public async getImages() {
    return await DockerImage.getImages(this);
  }

  /**
   * get an image by name
   */
  public async getImageByName(imageNameArg: string) {
    return await DockerImage.getImageByName(this, imageNameArg);
  }

  /**
   *
   */
  public async getEventObservable(): Promise<plugins.rxjs.Observable<any>> {
    const response = await this.requestStreaming('GET', '/events');
    return plugins.rxjs.Observable.create((observer) => {
      response.on('data', (data) => {
        const eventString = data.toString();
        try {
          const eventObject = JSON.parse(eventString);
          observer.next(eventObject);
        } catch (e) {
          console.log(e);
        }
      });
      return () => {
        response.emit('end');
      };
    });
  }

  /**
   * activates docker swarm
   */
  public async activateSwarm(addvertisementIpArg?: string) {
    // determine advertisement address
    let addvertisementIp: string;
    if (addvertisementIpArg) {
      addvertisementIp = addvertisementIpArg;
    } else {
      const smartnetworkInstance = new plugins.smartnetwork.SmartNetwork();
      const defaultGateway = await smartnetworkInstance.getDefaultGateway();
      if (defaultGateway) {
        addvertisementIp = defaultGateway.ipv4.address;
      }
    }

    const response = await this.request('POST', '/swarm/init', {
      ListenAddr: '0.0.0.0:2377',
      AdvertiseAddr: addvertisementIp,
      DataPathPort: 4789,
      DefaultAddrPool: ['10.10.0.0/8', '20.20.0.0/8'],
      SubnetSize: 24,
      ForceNewCluster: false,
    });
    if (response.statusCode === 200) {
      logger.log('info', 'created Swam succesfully');
    } else {
      logger.log('error', 'could not initiate swarm');
    }
  }

  /**
   * fire a request
   */
  public async request(methodArg: string, routeArg: string, dataArg = {}) {
    const requestUrl = `${this.socketPath}${routeArg}`;
    const response = await plugins.smartrequest.request(requestUrl, {
      method: methodArg,
      headers: {
        'Content-Type': 'application/json',
        'X-Registry-Auth': this.registryToken,
        Host: 'docker.sock',
      },
      requestBody: dataArg,
      keepAlive: false,
    });
    if (response.statusCode !== 200) {
      console.log(response.body);
    }
    return response;
  }

  public async requestStreaming(methodArg: string, routeArg: string, readStream?: plugins.smartstream.stream.Readable) {
    const requestUrl = `${this.socketPath}${routeArg}`;
    const response = await plugins.smartrequest.request(
      requestUrl,
      {
        method: methodArg,
        headers: {
          'Content-Type': 'application/json',
          'X-Registry-Auth': this.registryToken,
          Host: 'docker.sock',
        },
        requestBody: null,
        keepAlive: false,
      },
      true,
      (readStream ? reqArg => {
        let counter = 0;
        const smartduplex = new plugins.smartstream.SmartDuplex({
          writeFunction: async (chunkArg) => {
            if (counter % 1000 === 0) {
              console.log(`posting chunk ${counter}`);
            }
            counter++;
            return chunkArg;
          }
        });
        readStream.pipe(smartduplex).pipe(reqArg);
      } : null),
    );
    console.log(response.statusCode);
    console.log(response.body);
    return response;
  }

  /**
   * add s3 storage
   * @param optionsArg
   */
  public async addS3Storage(optionsArg: plugins.tsclass.storage.IS3Descriptor) {
    this.smartBucket = new plugins.smartbucket.SmartBucket(optionsArg);
    if (!optionsArg.bucketName) {
      throw new Error('bucketName is required');
    }
    const bucket = await this.smartBucket.getBucketByName(optionsArg.bucketName);
    let wantedDirectory = await bucket.getBaseDirectory();
    if (optionsArg.directoryPath) {
      wantedDirectory = await wantedDirectory.getSubDirectoryByName(optionsArg.directoryPath);
    }
    this.imageStore.options.bucketDir = wantedDirectory;
  }
}
275
ts/classes.image.ts
Normal file
275
ts/classes.image.ts
Normal file
@ -0,0 +1,275 @@
|
||||
import * as plugins from './plugins.js';
|
||||
import * as interfaces from './interfaces/index.js';
|
||||
import { DockerHost } from './classes.host.js';
|
||||
import { logger } from './logger.js';
|
||||
|
||||
/**
|
||||
* represents a docker image on the remote docker host
|
||||
*/
|
||||
export class DockerImage {
|
||||
// STATIC
|
||||
public static async getImages(dockerHost: DockerHost) {
|
||||
const images: DockerImage[] = [];
|
||||
const response = await dockerHost.request('GET', '/images/json');
|
||||
for (const imageObject of response.body) {
|
||||
images.push(new DockerImage(dockerHost, imageObject));
|
||||
}
|
||||
return images;
|
||||
}
|
||||
|
||||
public static async getImageByName(dockerHost: DockerHost, imageNameArg: string) {
|
||||
const images = await this.getImages(dockerHost);
|
||||
const result = images.find((image) => {
|
||||
if (image.RepoTags) {
|
||||
return image.RepoTags.includes(imageNameArg);
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
});
|
||||
return result;
|
||||
}
|
||||
|
||||
public static async createFromRegistry(
|
||||
dockerHostArg: DockerHost,
|
||||
optionsArg: {
|
||||
creationObject: interfaces.IImageCreationDescriptor
|
||||
}
|
||||
): Promise<DockerImage> {
|
||||
// lets create a sanatized imageUrlObject
|
||||
const imageUrlObject: {
|
||||
imageUrl: string;
|
||||
imageTag: string;
|
||||
imageOriginTag: string;
|
||||
} = {
|
||||
imageUrl: optionsArg.creationObject.imageUrl,
|
||||
imageTag: optionsArg.creationObject.imageTag,
|
||||
imageOriginTag: null,
|
||||
};
|
||||
if (imageUrlObject.imageUrl.includes(':')) {
|
||||
const imageUrl = imageUrlObject.imageUrl.split(':')[0];
|
||||
const imageTag = imageUrlObject.imageUrl.split(':')[1];
|
||||
if (imageUrlObject.imageTag) {
|
||||
throw new Error(
|
||||
`imageUrl ${imageUrlObject.imageUrl} can't be tagged with ${imageUrlObject.imageTag} because it is already tagged with ${imageTag}`
|
||||
);
|
||||
} else {
|
||||
imageUrlObject.imageUrl = imageUrl;
|
||||
imageUrlObject.imageTag = imageTag;
|
||||
}
|
||||
} else if (!imageUrlObject.imageTag) {
|
||||
imageUrlObject.imageTag = 'latest';
|
||||
}
|
||||
imageUrlObject.imageOriginTag = `${imageUrlObject.imageUrl}:${imageUrlObject.imageTag}`;
|
||||
|
||||
// lets actually create the image
|
||||
const response = await dockerHostArg.request(
|
||||
'POST',
|
||||
`/images/create?fromImage=${encodeURIComponent(
|
||||
imageUrlObject.imageUrl
|
||||
)}&tag=${encodeURIComponent(imageUrlObject.imageTag)}`
|
||||
);
|
||||
if (response.statusCode < 300) {
|
||||
logger.log('info', `Successfully pulled image ${imageUrlObject.imageUrl} from the registry`);
|
||||
const image = await DockerImage.getImageByName(dockerHostArg, imageUrlObject.imageOriginTag);
|
||||
return image;
|
||||
} else {
|
||||
logger.log('error', `Failed to pull image ${imageUrlObject.imageUrl} from the registry`);
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * creates an image from a tar stream (e.g. one produced by exportToTarStream)
 * @param dockerHostArg
 * @param optionsArg
 */
|
||||
public static async createFromTarStream(
|
||||
dockerHostArg: DockerHost,
|
||||
optionsArg: {
|
||||
creationObject: interfaces.IImageCreationDescriptor;
|
||||
tarStream: plugins.smartstream.stream.Readable;
|
||||
}
|
||||
): Promise<DockerImage> {
|
||||
// Start the request for importing an image
|
||||
const response = await dockerHostArg.requestStreaming(
|
||||
'POST',
|
||||
'/images/load',
|
||||
optionsArg.tarStream
|
||||
);
|
||||
|
||||
/**
|
||||
* Docker typically returns lines like:
|
||||
* {"stream":"Loaded image: myrepo/myimage:latest"}
|
||||
*
|
||||
* So we will collect those lines and parse out the final image name.
|
||||
*/
|
||||
let rawOutput = '';
|
||||
response.on('data', (chunk) => {
|
||||
rawOutput += chunk.toString();
|
||||
});
|
||||
|
||||
// Wrap the end event in a Promise for easier async/await usage
|
||||
await new Promise<void>((resolve, reject) => {
|
||||
response.on('end', () => {
|
||||
resolve();
|
||||
});
|
||||
response.on('error', (err) => {
|
||||
reject(err);
|
||||
});
|
||||
});
|
||||
|
||||
// Attempt to parse each line to find something like "Loaded image: ..."
|
||||
let loadedImageTag: string | undefined;
|
||||
const lines = rawOutput.trim().split('\n').filter(Boolean);
|
||||
|
||||
for (const line of lines) {
|
||||
try {
|
||||
const jsonLine = JSON.parse(line);
|
||||
if (
|
||||
jsonLine.stream &&
|
||||
(jsonLine.stream.startsWith('Loaded image:') ||
|
||||
jsonLine.stream.startsWith('Loaded image ID:'))
|
||||
) {
|
||||
// Examples:
|
||||
// "Loaded image: your-image:latest"
|
||||
// "Loaded image ID: sha256:...."
|
||||
loadedImageTag = jsonLine.stream
|
||||
.replace('Loaded image: ', '')
|
||||
.replace('Loaded image ID: ', '')
|
||||
.trim();
|
||||
}
|
||||
} catch {
|
||||
// not valid JSON, ignore
|
||||
}
|
||||
}
|
||||
|
||||
if (!loadedImageTag) {
|
||||
throw new Error(
|
||||
`Could not parse the loaded image info from Docker response.\nResponse was:\n${rawOutput}`
|
||||
);
|
||||
}
|
||||
|
||||
// Now try to look up that image by the "loadedImageTag".
|
||||
// Depending on Docker’s response, it might be something like:
|
||||
// "myrepo/myimage:latest" OR "sha256:someHash..."
|
||||
// If Docker gave you an ID (e.g. "sha256:..."), you may need a separate
|
||||
// DockerImage.getImageById method; or if you prefer, you can treat it as a name.
|
||||
const newlyImportedImage = await DockerImage.getImageByName(dockerHostArg, loadedImageTag);
|
||||
|
||||
if (!newlyImportedImage) {
|
||||
throw new Error(
|
||||
`Image load succeeded, but no local reference found for "${loadedImageTag}".`
|
||||
);
|
||||
}
|
||||
|
||||
logger.log(
|
||||
'info',
|
||||
`Successfully imported image "${loadedImageTag}".`
|
||||
);
|
||||
|
||||
return newlyImportedImage;
|
||||
}
|
||||
|
||||
|
||||
public static async tagImageByIdOrName(
  dockerHost: DockerHost,
  idOrNameArg: string,
  newTagArg: string
) {
  // the docker engine expects the new tag as repo/tag query parameters on /images/{name}/tag
  const [repo, tag = 'latest'] = newTagArg.split(':');
  const response = await dockerHost.request(
    'POST',
    `/images/${encodeURIComponent(idOrNameArg)}/tag?repo=${encodeURIComponent(repo)}&tag=${encodeURIComponent(tag)}`
  );
  return response;
}
|
||||
|
||||
public static async buildImage(dockerHostArg: DockerHost, dockerImageTag: string) {
|
||||
// TODO: implement building an image
|
||||
}
|
||||
|
||||
// INSTANCE
|
||||
// references
|
||||
public dockerHost: DockerHost;
|
||||
|
||||
// properties
|
||||
/**
 * image properties as returned by the docker engine
 */
|
||||
public Containers: number;
|
||||
public Created: number;
|
||||
public Id: string;
|
||||
public Labels: interfaces.TLabels;
|
||||
public ParentId: string;
|
||||
public RepoDigests: string[];
|
||||
public RepoTags: string[];
|
||||
public SharedSize: number;
|
||||
public Size: number;
|
||||
public VirtualSize: number;
|
||||
|
||||
constructor(dockerHostArg: DockerHost, dockerImageObjectArg: any) {
|
||||
this.dockerHost = dockerHostArg;
|
||||
Object.keys(dockerImageObjectArg).forEach((keyArg) => {
|
||||
this[keyArg] = dockerImageObjectArg[keyArg];
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* tag an image
|
||||
* @param newTag
|
||||
*/
|
||||
public async tagImage(newTag: string) {
|
||||
throw new Error('.tagImage is not yet implemented');
|
||||
}
|
||||
|
||||
/**
|
||||
* pulls the latest version from the registry
|
||||
*/
|
||||
public async pullLatestImageFromRegistry(): Promise<boolean> {
|
||||
const updatedImage = await DockerImage.createFromRegistry(this.dockerHost, {
|
||||
creationObject: {
|
||||
imageUrl: this.RepoTags[0],
|
||||
},
|
||||
});
|
||||
Object.assign(this, updatedImage);
|
||||
// TODO: compare image digests before and after
|
||||
return true;
|
||||
}
|
||||
|
||||
// get stuff
|
||||
public async getVersion() {
|
||||
if (this.Labels && this.Labels.version) {
|
||||
return this.Labels.version;
|
||||
} else {
|
||||
return '0.0.0';
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* exports an image to a tar ball
|
||||
*/
|
||||
public async exportToTarStream(): Promise<plugins.smartstream.stream.Readable> {
|
||||
logger.log('info', `Exporting image ${this.RepoTags[0]} to tar stream.`);
|
||||
const response = await this.dockerHost.requestStreaming('GET', `/images/${encodeURIComponent(this.RepoTags[0])}/get`);
|
||||
let counter = 0;
|
||||
const webduplexStream = new plugins.smartstream.SmartDuplex({
|
||||
writeFunction: async (chunk, tools) => {
|
||||
if (counter % 1000 === 0)
|
||||
console.log(`Got chunk: ${counter}`);
|
||||
counter++;
|
||||
return chunk;
|
||||
}
|
||||
});
|
||||
response.on('data', (chunk) => {
  if (!webduplexStream.write(chunk)) {
    // backpressure: pause the source until the duplex has drained
    response.pause();
    webduplexStream.once('drain', () => {
      response.resume();
    });
  }
});
response.on('end', () => {
  webduplexStream.end();
});
|
||||
return webduplexStream;
|
||||
}
|
||||
}
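A short usage sketch tying the static helpers above together; it assumes a DockerHost instance named dockerHost already exists, and the image names are purely illustrative.

// pull an image, export it as a tar stream, then load that stream back in
const image = await DockerImage.createFromRegistry(dockerHost, {
  creationObject: { imageUrl: 'nginx', imageTag: 'latest' },
});
const tarStream = await image.exportToTarStream();
const reloaded = await DockerImage.createFromTarStream(dockerHost, {
  creationObject: { imageUrl: 'nginx' },
  tarStream,
});
console.log(reloaded.RepoTags);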
|
114 ts/classes.imagestore.ts Normal file
@@ -0,0 +1,114 @@
|
||||
import * as plugins from './plugins.js';
|
||||
import * as paths from './paths.js';
|
||||
import { logger } from './logger.js';
|
||||
import type { DockerHost } from './classes.host.js';
|
||||
|
||||
export interface IDockerImageStoreConstructorOptions {
|
||||
/**
|
||||
* used for preparing images for longer term storage
|
||||
*/
|
||||
localDirPath: string;
|
||||
/**
|
||||
* a smartbucket dir for longer term storage.
|
||||
*/
|
||||
bucketDir: plugins.smartbucket.Directory;
|
||||
}
|
||||
|
||||
export class DockerImageStore {
|
||||
public options: IDockerImageStoreConstructorOptions;
|
||||
|
||||
constructor(optionsArg: IDockerImageStoreConstructorOptions) {
|
||||
this.options = optionsArg;
|
||||
}
|
||||
|
||||
// Method to store tar stream
|
||||
public async storeImage(imageName: string, tarStream: plugins.smartstream.stream.Readable): Promise<void> {
|
||||
logger.log('info', `Storing image ${imageName}...`);
|
||||
const uniqueProcessingId = plugins.smartunique.shortId();
|
||||
|
||||
const initialTarDownloadPath = plugins.path.join(this.options.localDirPath, `${uniqueProcessingId}.tar`);
|
||||
const extractionDir = plugins.path.join(this.options.localDirPath, uniqueProcessingId);
|
||||
// Create a write stream to store the tar file
|
||||
const writeStream = plugins.smartfile.fsStream.createWriteStream(initialTarDownloadPath);
|
||||
|
||||
// lets wait for the write stream to finish
|
||||
await new Promise((resolve, reject) => {
|
||||
tarStream.pipe(writeStream);
|
||||
writeStream.on('finish', resolve);
|
||||
writeStream.on('error', reject);
|
||||
});
|
||||
logger.log('info', `Image ${imageName} stored locally for processing. Extracting...`);
|
||||
|
||||
// lets process the image
|
||||
const tarArchive = await plugins.smartarchive.SmartArchive.fromArchiveFile(initialTarDownloadPath);
|
||||
await tarArchive.exportToFs(extractionDir);
|
||||
logger.log('info', `Image ${imageName} extracted.`);
|
||||
await plugins.smartfile.fs.remove(initialTarDownloadPath);
|
||||
logger.log('info', `deleted original tar to save space.`);
|
||||
logger.log('info', `now repackaging for s3...`);
|
||||
const smartfileIndexJson = await plugins.smartfile.SmartFile.fromFilePath(plugins.path.join(extractionDir, 'index.json'));
|
||||
const smartfileManifestJson = await plugins.smartfile.SmartFile.fromFilePath(plugins.path.join(extractionDir, 'manifest.json'));
|
||||
const smartfileOciLayoutJson = await plugins.smartfile.SmartFile.fromFilePath(plugins.path.join(extractionDir, 'oci-layout'));
|
||||
const smartfileRepositoriesJson = await plugins.smartfile.SmartFile.fromFilePath(plugins.path.join(extractionDir, 'repositories'));
|
||||
const indexJson = JSON.parse(smartfileIndexJson.contents.toString());
|
||||
const manifestJson = JSON.parse(smartfileManifestJson.contents.toString());
|
||||
const ociLayoutJson = JSON.parse(smartfileOciLayoutJson.contents.toString());
|
||||
const repositoriesJson = JSON.parse(smartfileRepositoriesJson.contents.toString());
|
||||
|
||||
indexJson.manifests[0].annotations['io.containerd.image.name'] = imageName;
|
||||
manifestJson[0].RepoTags[0] = imageName;
|
||||
const repoFirstKey = Object.keys(repositoriesJson)[0];
|
||||
const repoFirstValue = repositoriesJson[repoFirstKey];
|
||||
repositoriesJson[imageName] = repoFirstValue;
|
||||
delete repositoriesJson[repoFirstKey];
|
||||
|
||||
smartfileIndexJson.contents = Buffer.from(JSON.stringify(indexJson, null, 2));
|
||||
smartfileManifestJson.contents = Buffer.from(JSON.stringify(manifestJson, null, 2));
|
||||
smartfileOciLayoutJson.contents = Buffer.from(JSON.stringify(ociLayoutJson, null, 2));
|
||||
smartfileRepositoriesJson.contents = Buffer.from(JSON.stringify(repositoriesJson, null, 2));
|
||||
await Promise.all([
|
||||
smartfileIndexJson.write(),
|
||||
smartfileManifestJson.write(),
|
||||
smartfileOciLayoutJson.write(),
|
||||
smartfileRepositoriesJson.write(),
|
||||
]);
|
||||
|
||||
logger.log('info', 'repackaging archive for s3...');
|
||||
const tartools = new plugins.smartarchive.TarTools();
|
||||
const newTarPack = await tartools.packDirectory(extractionDir);
|
||||
const finalTarName = `${uniqueProcessingId}.processed.tar`;
|
||||
const finalTarPath = plugins.path.join(this.options.localDirPath, finalTarName);
|
||||
const finalWriteStream = plugins.smartfile.fsStream.createWriteStream(finalTarPath);
|
||||
await new Promise((resolve, reject) => {
|
||||
newTarPack.finalize();
|
||||
newTarPack.pipe(finalWriteStream);
|
||||
finalWriteStream.on('finish', resolve);
|
||||
finalWriteStream.on('error', reject);
|
||||
});
|
||||
logger.log('ok', `Repackaged image ${imageName} for s3.`);
|
||||
await plugins.smartfile.fs.remove(extractionDir);
|
||||
const finalTarReadStream = plugins.smartfile.fsStream.createReadStream(finalTarPath);
|
||||
await this.options.bucketDir.fastPutStream({
|
||||
stream: finalTarReadStream,
|
||||
path: `${imageName}.tar`,
|
||||
});
|
||||
await plugins.smartfile.fs.remove(finalTarPath);
|
||||
}
|
||||
|
||||
public async start() {
|
||||
await plugins.smartfile.fs.ensureEmptyDir(this.options.localDirPath);
|
||||
}
|
||||
|
||||
public async stop() {}
|
||||
|
||||
// Method to retrieve tar stream
|
||||
public async getImage(imageName: string): Promise<plugins.smartstream.stream.Readable> {
|
||||
const imagePath = plugins.path.join(this.options.localDirPath, `${imageName}.tar`);
|
||||
|
||||
if (!(await plugins.smartfile.fs.fileExists(imagePath))) {
|
||||
throw new Error(`Image ${imageName} does not exist.`);
|
||||
}
|
||||
|
||||
return plugins.smartfile.fsStream.createReadStream(imagePath);
|
||||
}
|
||||
}
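A minimal sketch of driving the store directly; in the code above it is normally owned by DockerHost and wired to S3 via addS3Storage. The directory values and the bucketDir variable are illustrative assumptions.

const imageStore = new DockerImageStore({
  localDirPath: '/tmp/docker-image-store', // scratch space for extraction and repackaging
  bucketDir: myBucketDirectory,            // a plugins.smartbucket.Directory obtained elsewhere
});
await imageStore.start(); // ensures an empty local working directory
await imageStore.storeImage('myregistry/myimage:v1.2.3', exportedTarStream);

Note that getImage() reads from localDirPath, so it only serves tar files that are still present locally.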
|
118 ts/classes.network.ts Normal file
@@ -0,0 +1,118 @@
|
||||
import * as plugins from './plugins.js';
|
||||
import * as interfaces from './interfaces/index.js';
|
||||
|
||||
import { DockerHost } from './classes.host.js';
|
||||
import { DockerService } from './classes.service.js';
|
||||
import { logger } from './logger.js';
|
||||
|
||||
export class DockerNetwork {
|
||||
public static async getNetworks(dockerHost: DockerHost): Promise<DockerNetwork[]> {
|
||||
const dockerNetworks: DockerNetwork[] = [];
|
||||
const response = await dockerHost.request('GET', '/networks');
|
||||
for (const networkObject of response.body) {
|
||||
const dockerNetwork = new DockerNetwork(dockerHost);
|
||||
Object.assign(dockerNetwork, networkObject);
|
||||
dockerNetworks.push(dockerNetwork);
|
||||
}
|
||||
return dockerNetworks;
|
||||
}
|
||||
|
||||
public static async getNetworkByName(dockerHost: DockerHost, dockerNetworkNameArg: string) {
|
||||
const networks = await DockerNetwork.getNetworks(dockerHost);
|
||||
return networks.find((dockerNetwork) => dockerNetwork.Name === dockerNetworkNameArg);
|
||||
}
|
||||
|
||||
public static async createNetwork(
|
||||
dockerHost: DockerHost,
|
||||
networkCreationDescriptor: interfaces.INetworkCreationDescriptor
|
||||
): Promise<DockerNetwork> {
|
||||
const response = await dockerHost.request('POST', '/networks/create', {
|
||||
Name: networkCreationDescriptor.Name,
|
||||
CheckDuplicate: true,
|
||||
Driver: 'overlay',
|
||||
EnableIPv6: false,
|
||||
/* IPAM: {
|
||||
Driver: 'default',
|
||||
Config: [
|
||||
{
|
||||
Subnet: `172.20.${networkCreationDescriptor.NetworkNumber}.0/16`,
|
||||
IPRange: `172.20.${networkCreationDescriptor.NetworkNumber}.0/24`,
|
||||
Gateway: `172.20.${networkCreationDescriptor.NetworkNumber}.11`
|
||||
}
|
||||
]
|
||||
}, */
|
||||
Internal: false,
|
||||
Attachable: true,
|
||||
Ingress: false,
|
||||
});
|
||||
if (response.statusCode < 300) {
|
||||
logger.log('info', 'Created network successfully');
|
||||
return await DockerNetwork.getNetworkByName(dockerHost, networkCreationDescriptor.Name);
|
||||
} else {
|
||||
logger.log('error', 'There has been an error creating the wanted network');
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
// INSTANCE
|
||||
// references
|
||||
public dockerHost: DockerHost;
|
||||
|
||||
// properties
|
||||
public Name: string;
|
||||
public Id: string;
|
||||
public Created: string;
|
||||
public Scope: string;
|
||||
public Driver: string;
|
||||
public EnableIPv6: boolean;
|
||||
public Internal: boolean;
|
||||
public Attachable: boolean;
|
||||
public Ingress: boolean;
|
||||
public IPAM: {
|
||||
Driver: 'default' | 'bridge' | 'overlay';
|
||||
Config: [
|
||||
{
|
||||
Subnet: string;
|
||||
IPRange: string;
|
||||
Gateway: string;
|
||||
}
|
||||
];
|
||||
};
|
||||
|
||||
constructor(dockerHostArg: DockerHost) {
|
||||
this.dockerHost = dockerHostArg;
|
||||
}
|
||||
|
||||
/**
|
||||
* removes the network
|
||||
*/
|
||||
public async remove() {
|
||||
const response = await this.dockerHost.request('DELETE', `/networks/${this.Id}`);
|
||||
}
|
||||
|
||||
public async getContainersOnNetwork(): Promise<
|
||||
Array<{
|
||||
Name: string;
|
||||
EndpointID: string;
|
||||
MacAddress: string;
|
||||
IPv4Address: string;
|
||||
IPv6Address: string;
|
||||
}>
|
||||
> {
|
||||
const returnArray = [];
|
||||
const response = await this.dockerHost.request('GET', `/networks/${this.Id}`);
|
||||
for (const key of Object.keys(response.body.Containers)) {
|
||||
returnArray.push(response.body.Containers[key]);
|
||||
}
|
||||
|
||||
return returnArray;
|
||||
}
|
||||
|
||||
public async getContainersOnNetworkForService(serviceArg: DockerService) {
|
||||
const containersOnNetwork = await this.getContainersOnNetwork();
|
||||
const containersOfService = containersOnNetwork.filter((container) => {
|
||||
return container.Name.startsWith(serviceArg.Spec.Name);
|
||||
});
|
||||
return containersOfService;
|
||||
}
|
||||
}
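A brief usage sketch for the network helpers above, assuming an existing dockerHost; the network name is illustrative.

// create an attachable overlay network, inspect its attachments, then remove it
const network = await DockerNetwork.createNetwork(dockerHost, { Name: 'my-overlay-net' });
const attached = await network.getContainersOnNetwork();
console.log(attached.map((entry) => entry.Name));
await network.remove();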
|
92 ts/classes.secret.ts Normal file
@@ -0,0 +1,92 @@
|
||||
import * as plugins from './plugins.js';
|
||||
import { DockerHost } from './classes.host.js';
|
||||
|
||||
// interfaces
|
||||
import * as interfaces from './interfaces/index.js';
|
||||
|
||||
export class DockerSecret {
|
||||
// STATIC
|
||||
public static async getSecrets(dockerHostArg: DockerHost) {
|
||||
const response = await dockerHostArg.request('GET', '/secrets');
|
||||
const secrets: DockerSecret[] = [];
|
||||
for (const secret of response.body) {
|
||||
const dockerSecretInstance = new DockerSecret(dockerHostArg);
|
||||
Object.assign(dockerSecretInstance, secret);
|
||||
secrets.push(dockerSecretInstance);
|
||||
}
|
||||
return secrets;
|
||||
}
|
||||
|
||||
public static async getSecretByID(dockerHostArg: DockerHost, idArg: string) {
|
||||
const secrets = await this.getSecrets(dockerHostArg);
|
||||
return secrets.find((secret) => secret.ID === idArg);
|
||||
}
|
||||
|
||||
public static async getSecretByName(dockerHostArg: DockerHost, nameArg: string) {
|
||||
const secrets = await this.getSecrets(dockerHostArg);
|
||||
return secrets.find((secret) => secret.Spec.Name === nameArg);
|
||||
}
|
||||
|
||||
public static async createSecret(
|
||||
dockerHostArg: DockerHost,
|
||||
secretDescriptor: interfaces.ISecretCreationDescriptor
|
||||
) {
|
||||
const labels: interfaces.TLabels = {
|
||||
...secretDescriptor.labels,
|
||||
version: secretDescriptor.version,
|
||||
};
|
||||
const response = await dockerHostArg.request('POST', '/secrets/create', {
|
||||
Name: secretDescriptor.name,
|
||||
Labels: labels,
|
||||
Data: plugins.smartstring.base64.encode(secretDescriptor.contentArg),
|
||||
});
|
||||
|
||||
const newSecretInstance = new DockerSecret(dockerHostArg);
|
||||
Object.assign(newSecretInstance, response.body);
|
||||
Object.assign(
|
||||
newSecretInstance,
|
||||
await DockerSecret.getSecretByID(dockerHostArg, newSecretInstance.ID)
|
||||
);
|
||||
return newSecretInstance;
|
||||
}
|
||||
|
||||
// INSTANCE
|
||||
public ID: string;
|
||||
public Spec: {
|
||||
Name: string;
|
||||
Labels: interfaces.TLabels;
|
||||
};
|
||||
public Version: {
|
||||
Index: string;
|
||||
};
|
||||
|
||||
public dockerHost: DockerHost;
|
||||
constructor(dockerHostArg: DockerHost) {
|
||||
this.dockerHost = dockerHostArg;
|
||||
}
|
||||
|
||||
/**
|
||||
* updates a secret
|
||||
*/
|
||||
public async update(contentArg: string) {
  const route = `/secrets/${this.ID}/update?version=${this.Version.Index}`;
  await this.dockerHost.request('POST', route, {
    Name: this.Spec.Name,
    Labels: this.Spec.Labels,
    Data: plugins.smartstring.base64.encode(contentArg),
  });
}
|
||||
|
||||
public async remove() {
|
||||
await this.dockerHost.request('DELETE', `/secrets/${this.ID}`);
|
||||
}
|
||||
|
||||
// get things
|
||||
public async getVersion() {
|
||||
return this.Spec.Labels.version;
|
||||
}
|
||||
}
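A small usage sketch for the secret helpers above, assuming an existing dockerHost; all values are illustrative.

// create a versioned secret, rotate its content, then remove it
const secret = await DockerSecret.createSecret(dockerHost, {
  name: 'myapp-config',
  version: '1.0.0',
  contentArg: JSON.stringify({ apiKey: 'example' }),
  labels: {},
});
await secret.update(JSON.stringify({ apiKey: 'rotated' }));
await secret.remove();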
|
249 ts/classes.service.ts Normal file
@@ -0,0 +1,249 @@
|
||||
import * as plugins from './plugins.js';
|
||||
import * as interfaces from './interfaces/index.js';
|
||||
|
||||
import { DockerHost } from './classes.host.js';
|
||||
import { DockerImage } from './classes.image.js';
|
||||
import { DockerSecret } from './classes.secret.js';
|
||||
import { logger } from './logger.js';
|
||||
|
||||
export class DockerService {
|
||||
// STATIC
|
||||
public static async getServices(dockerHost: DockerHost) {
|
||||
const services: DockerService[] = [];
|
||||
const response = await dockerHost.request('GET', '/services');
|
||||
for (const serviceObject of response.body) {
|
||||
const dockerService = new DockerService(dockerHost);
|
||||
Object.assign(dockerService, serviceObject);
|
||||
services.push(dockerService);
|
||||
}
|
||||
return services;
|
||||
}
|
||||
|
||||
public static async getServiceByName(
  dockerHost: DockerHost,
  serviceName: string
): Promise<DockerService> {
  const allServices = await DockerService.getServices(dockerHost);
  const wantedService = allServices.find((service) => {
    return service.Spec.Name === serviceName;
  });
  return wantedService;
}
|
||||
|
||||
/**
|
||||
* creates a service
|
||||
*/
|
||||
public static async createService(
|
||||
dockerHost: DockerHost,
|
||||
serviceCreationDescriptor: interfaces.IServiceCreationDescriptor
|
||||
): Promise<DockerService> {
|
||||
// lets get the image
|
||||
logger.log('info', `now creating service ${serviceCreationDescriptor.name}`);
|
||||
|
||||
// await serviceCreationDescriptor.image.pullLatestImageFromRegistry();
|
||||
const serviceVersion = await serviceCreationDescriptor.image.getVersion();
|
||||
|
||||
const labels: interfaces.TLabels = {
|
||||
...serviceCreationDescriptor.labels,
|
||||
version: serviceVersion,
|
||||
};
|
||||
|
||||
const mounts: Array<{
|
||||
/**
|
||||
* the target inside the container
|
||||
*/
|
||||
Target: string;
|
||||
/**
|
||||
* The Source from which to mount the data (Volume or host path)
|
||||
*/
|
||||
Source: string;
|
||||
Type: 'bind' | 'volume' | 'tmpfs' | 'npipe';
|
||||
ReadOnly: boolean;
|
||||
Consistency: 'default' | 'consistent' | 'cached' | 'delegated';
|
||||
}> = [];
|
||||
if (serviceCreationDescriptor.accessHostDockerSock) {
|
||||
mounts.push({
|
||||
Target: '/var/run/docker.sock',
|
||||
Source: '/var/run/docker.sock',
|
||||
Consistency: 'default',
|
||||
ReadOnly: false,
|
||||
Type: 'bind',
|
||||
});
|
||||
}
|
||||
|
||||
if (serviceCreationDescriptor.resources && serviceCreationDescriptor.resources.volumeMounts) {
|
||||
for (const volumeMount of serviceCreationDescriptor.resources.volumeMounts) {
|
||||
mounts.push({
|
||||
Target: volumeMount.containerFsPath,
|
||||
Source: volumeMount.hostFsPath,
|
||||
Consistency: 'default',
|
||||
ReadOnly: false,
|
||||
Type: 'bind',
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
const networkArray: Array<{
|
||||
Target: string;
|
||||
Aliases: string[];
|
||||
}> = [];
|
||||
|
||||
for (const network of serviceCreationDescriptor.networks) {
|
||||
networkArray.push({
|
||||
Target: network.Name,
|
||||
Aliases: [serviceCreationDescriptor.networkAlias],
|
||||
});
|
||||
}
|
||||
|
||||
const ports = [];
|
||||
for (const port of serviceCreationDescriptor.ports) {
|
||||
const portArray = port.split(':');
|
||||
const hostPort = portArray[0];
|
||||
const containerPort = portArray[1];
|
||||
ports.push({
|
||||
Protocol: 'tcp',
|
||||
PublishedPort: parseInt(hostPort, 10),
|
||||
TargetPort: parseInt(containerPort, 10),
|
||||
});
|
||||
}
|
||||
|
||||
// lets configure secrets
|
||||
const secretArray: any[] = [];
|
||||
for (const secret of serviceCreationDescriptor.secrets) {
|
||||
secretArray.push({
|
||||
File: {
|
||||
Name: 'secret.json', // TODO: make sure that works with multiple secrets
|
||||
UID: '33',
|
||||
GID: '33',
|
||||
Mode: 384,
|
||||
},
|
||||
SecretID: secret.ID,
|
||||
SecretName: secret.Spec.Name,
|
||||
});
|
||||
}
|
||||
|
||||
// lets configure limits
|
||||
|
||||
const memoryLimitMB =
|
||||
serviceCreationDescriptor.resources && serviceCreationDescriptor.resources.memorySizeMB
|
||||
? serviceCreationDescriptor.resources.memorySizeMB
|
||||
: 1000;
|
||||
|
||||
const limits = {
|
||||
MemoryBytes: memoryLimitMB * 1000000,
|
||||
};
|
||||
|
||||
if (serviceCreationDescriptor.resources && serviceCreationDescriptor.resources.memorySizeMB) {
  limits.MemoryBytes = serviceCreationDescriptor.resources.memorySizeMB * 1000000;
}
|
||||
|
||||
const response = await dockerHost.request('POST', '/services/create', {
|
||||
Name: serviceCreationDescriptor.name,
|
||||
TaskTemplate: {
|
||||
ContainerSpec: {
|
||||
Image: serviceCreationDescriptor.image.RepoTags[0],
|
||||
Labels: labels,
|
||||
Secrets: secretArray,
|
||||
Mounts: mounts,
|
||||
/* DNSConfig: {
|
||||
Nameservers: ['1.1.1.1']
|
||||
} */
|
||||
},
|
||||
UpdateConfig: {
|
||||
Parallelism: 0,
|
||||
Delay: 0,
|
||||
FailureAction: 'pause',
|
||||
Monitor: 15000000000,
|
||||
MaxFailureRatio: 0.15,
|
||||
},
|
||||
ForceUpdate: 1,
|
||||
Resources: {
|
||||
Limits: limits,
|
||||
},
|
||||
LogDriver: {
|
||||
Name: 'json-file',
|
||||
Options: {
|
||||
'max-file': '3',
|
||||
'max-size': '10M',
|
||||
},
|
||||
},
|
||||
},
|
||||
Labels: labels,
|
||||
Networks: networkArray,
|
||||
EndpointSpec: {
|
||||
Ports: ports,
|
||||
},
|
||||
});
|
||||
|
||||
const createdService = await DockerService.getServiceByName(
|
||||
dockerHost,
|
||||
serviceCreationDescriptor.name
|
||||
);
|
||||
return createdService;
|
||||
}
|
||||
|
||||
// INSTANCE
|
||||
public dockerHostRef: DockerHost;
|
||||
|
||||
public ID: string;
|
||||
public Version: { Index: number };
|
||||
public CreatedAt: string;
|
||||
public UpdatedAt: string;
|
||||
public Spec: {
|
||||
Name: string;
|
||||
Labels: interfaces.TLabels;
|
||||
TaskTemplate: {
|
||||
ContainerSpec: {
|
||||
Image: string;
|
||||
Isolation: string;
|
||||
Secrets: Array<{
|
||||
File: {
|
||||
Name: string;
|
||||
UID: string;
|
||||
GID: string;
|
||||
Mode: number;
|
||||
};
|
||||
SecretID: string;
|
||||
SecretName: string;
|
||||
}>;
|
||||
};
|
||||
ForceUpdate: number;
|
||||
};
|
||||
Mode: {};
|
||||
Networks: [any[]];
|
||||
};
|
||||
public Endpoint: { Spec: {}; VirtualIPs: [any[]] };
|
||||
|
||||
constructor(dockerHostArg: DockerHost) {
|
||||
this.dockerHostRef = dockerHostArg;
|
||||
}
|
||||
|
||||
public async remove() {
|
||||
await this.dockerHostRef.request('DELETE', `/services/${this.ID}`);
|
||||
}
|
||||
|
||||
public async reReadFromDockerEngine() {
|
||||
const dockerData = await this.dockerHostRef.request('GET', `/services/${this.ID}`);
|
||||
// TODO: Better assign: Object.assign(this, dockerData);
|
||||
}
|
||||
|
||||
public async needsUpdate(): Promise<boolean> {
|
||||
// TODO: implement digest based update recognition
|
||||
|
||||
await this.reReadFromDockerEngine();
|
||||
const dockerImage = await DockerImage.createFromRegistry(this.dockerHostRef, {
|
||||
creationObject: {
|
||||
imageUrl: this.Spec.TaskTemplate.ContainerSpec.Image,
|
||||
}
|
||||
});
|
||||
|
||||
const imageVersion = new plugins.smartversion.SmartVersion(dockerImage.Labels.version);
|
||||
const serviceVersion = new plugins.smartversion.SmartVersion(this.Spec.Labels.version);
|
||||
if (imageVersion.greaterThan(serviceVersion)) {
|
||||
console.log(`service ${this.Spec.Name} needs to be updated`);
|
||||
return true;
|
||||
} else {
  console.log(`service ${this.Spec.Name} is up to date.`);
  return false;
}
|
||||
}
|
||||
}
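Finally, a sketch of how the classes in this diff compose into a swarm service, assuming an existing dockerHost; names, ports and sizes are illustrative.

const image = await DockerImage.createFromRegistry(dockerHost, {
  creationObject: { imageUrl: 'nginx', imageTag: 'latest' },
});
const network = await DockerNetwork.createNetwork(dockerHost, { Name: 'webnet' });
const secret = await DockerSecret.createSecret(dockerHost, {
  name: 'nginx-config',
  version: '1.0.0',
  contentArg: JSON.stringify({}),
  labels: {},
});
const service = await DockerService.createService(dockerHost, {
  name: 'my-nginx',
  image,
  labels: {},
  networks: [network],
  networkAlias: 'nginx',
  secrets: [secret],
  ports: ['8080:80'],
  resources: { memorySizeMB: 256 },
});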
|
@@ -1 +0,0 @@
|
||||
import * as plugins from './dockersock.plugins';
|
@@ -1,71 +0,0 @@
|
||||
import * as plugins from './dockersock.plugins';
|
||||
import * as interfaces from './interfaces';
|
||||
|
||||
import { DockerHost } from './docker.classes.host';
|
||||
|
||||
export class DockerContainer {
|
||||
// ======
|
||||
// STATIC
|
||||
// ======
|
||||
|
||||
/**
|
||||
* get all containers
|
||||
*/
|
||||
static async getContainers(dockerHostArg: DockerHost): Promise<DockerContainer[]> {
|
||||
const result: DockerContainer[] = [];
|
||||
const response = await dockerHostArg.request('GET', '/containers/json');
|
||||
for (const containerResult of response.body) {
|
||||
result.push(new DockerContainer(containerResult));
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
* @param containerId
|
||||
*/
|
||||
static async getContainerById(containerId: string) {}
|
||||
static async create() {}
|
||||
|
||||
// ========
|
||||
// INSTANCE
|
||||
// ========
|
||||
|
||||
constructor(dockerContainerObjectArg: any) {
|
||||
Object.keys(dockerContainerObjectArg).forEach(keyArg => {
|
||||
this[keyArg] = dockerContainerObjectArg[keyArg];
|
||||
});
|
||||
}
|
||||
|
||||
Id: string;
|
||||
Names: string[];
|
||||
Image: string;
|
||||
ImageID: string;
|
||||
Command: string;
|
||||
Created: number;
|
||||
Ports: interfaces.TPorts;
|
||||
Labels: interfaces.TLabels;
|
||||
State: string;
|
||||
Status: string;
|
||||
HostConfig: any;
|
||||
NetworkSettings: {
|
||||
Networks: {
|
||||
[key: string]: {
|
||||
IPAMConfig: any;
|
||||
Links: any;
|
||||
Aliases: any;
|
||||
NetworkID: string;
|
||||
EndpointID: string;
|
||||
Gateway: string;
|
||||
IPAddress: string;
|
||||
IPPrefixLen: number;
|
||||
IPv6Gateway: string;
|
||||
GlobalIPv6Address: string;
|
||||
GlobalIPv6PrefixLen: number;
|
||||
MacAddress: string;
|
||||
DriverOpts: any;
|
||||
};
|
||||
};
|
||||
};
|
||||
Mounts: any;
|
||||
}
|
@@ -1,88 +0,0 @@
|
||||
import * as plugins from './dockersock.plugins';
|
||||
import { DockerContainer } from './docker.classes.container';
|
||||
|
||||
export class DockerHost {
|
||||
/**
|
||||
* the path where the docker sock can be found
|
||||
*/
|
||||
sockPath: string;
|
||||
|
||||
/**
|
||||
* keeping track of currently active requests to safely end this module at any time
|
||||
*/
|
||||
requestObjectmap = new plugins.lik.Objectmap<any>();
|
||||
|
||||
/**
|
||||
* the constructor to instantiate a new docker sock instance
|
||||
* @param pathArg
|
||||
*/
|
||||
constructor(pathArg: string = 'http://unix:/var/run/docker.sock:') {
|
||||
this.sockPath = pathArg;
|
||||
}
|
||||
|
||||
/**
|
||||
* authenticate against a registry
|
||||
* @param userArg
|
||||
* @param passArg
|
||||
*/
|
||||
auth(registryArg: string, userArg: string, passArg: string) {
|
||||
let done = plugins.smartpromise.defer();
|
||||
this.request('POST', '');
|
||||
return done.promise;
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
*/
|
||||
async getContainers() {
|
||||
const containerArray = await DockerContainer.getContainers(this);
|
||||
return containerArray;
|
||||
}
|
||||
|
||||
async getEventObservable(): Promise<plugins.rxjs.Observable<any>> {
|
||||
const response = await this.requestStreaming('GET', '/events');
|
||||
return plugins.rxjs.Observable.create(observer => {
|
||||
response.on('data', data => {
|
||||
observer.next(data.toString());
|
||||
});
|
||||
return () => {
|
||||
response.emit('end');
|
||||
};
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* fire a request
|
||||
*/
|
||||
async request(methodArg: string, routeArg: string, dataArg = {}) {
|
||||
const requestUrl = `${this.sockPath}${routeArg}`;
|
||||
const response = await plugins.smartrequest.request(requestUrl, {
|
||||
method: methodArg,
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
Host: 'docker.sock'
|
||||
},
|
||||
requestBody: dataArg
|
||||
});
|
||||
return response;
|
||||
}
|
||||
|
||||
async requestStreaming(methodArg: string, routeArg: string, dataArg = {}) {
|
||||
const requestUrl = `${this.sockPath}${routeArg}`;
|
||||
const response = await plugins.smartrequest.request(
|
||||
requestUrl,
|
||||
{
|
||||
method: methodArg,
|
||||
headers: {
|
||||
// 'Content-Type': 'application/json',
|
||||
Host: 'docker.sock'
|
||||
},
|
||||
requestBody: null
|
||||
},
|
||||
true
|
||||
);
|
||||
console.log(response.statusCode);
|
||||
console.log(response.body);
|
||||
return response;
|
||||
}
|
||||
}
|
@@ -1,14 +0,0 @@
|
||||
import * as plugins from './dockersock.plugins';
|
||||
import { DockerHost } from './docker.classes.host';
|
||||
|
||||
export class DockerImage {
|
||||
/**
|
||||
* the tags for an image
|
||||
*/
|
||||
tags: string[] = [];
|
||||
|
||||
static async createFromRegsitry(dockerHostArg: DockerHost): Promise<DockerImage> {
|
||||
const resultingImage = new DockerImage();
|
||||
return resultingImage;
|
||||
}
|
||||
}
|
@@ -1,12 +0,0 @@
|
||||
// @pushrocks scope
|
||||
import * as lik from '@pushrocks/lik';
|
||||
import * as smartlog from '@pushrocks/smartlog';
|
||||
import * as smartpromise from '@pushrocks/smartpromise';
|
||||
import * as smartrequest from '@pushrocks/smartrequest';
|
||||
|
||||
export { lik, smartlog, smartpromise, smartrequest };
|
||||
|
||||
// third party
|
||||
import * as rxjs from 'rxjs';
|
||||
|
||||
export { rxjs };
|
@@ -1 +1,7 @@
|
||||
export { DockerHost } from './docker.classes.host';
|
||||
export * from './classes.host.js';
|
||||
export * from './classes.container.js';
|
||||
export * from './classes.image.js';
|
||||
export * from './classes.imagestore.js';
|
||||
export * from './classes.network.js';
|
||||
export * from './classes.secret.js';
|
||||
export * from './classes.service.js';
|
||||
|
7 ts/interfaces/container.ts Normal file
@@ -0,0 +1,7 @@
|
||||
import { DockerNetwork } from '../classes.network.js';
|
||||
|
||||
export interface IContainerCreationDescriptor {
|
||||
Hostname: string;
|
||||
Domainname: string;
|
||||
networks?: DockerNetwork[];
|
||||
}
|
4 ts/interfaces/image.ts Normal file
@@ -0,0 +1,4 @@
|
||||
export interface IImageCreationDescriptor {
|
||||
imageUrl: string;
|
||||
imageTag?: string;
|
||||
}
|
@@ -1,2 +1,7 @@
|
||||
export * from './label';
|
||||
export * from './port';
|
||||
export * from './container.js';
|
||||
export * from './image.js';
|
||||
export * from './label.js';
|
||||
export * from './network.js';
|
||||
export * from './port.js';
|
||||
export * from './secret.js';
|
||||
export * from './service.js';
|
||||
|
6 ts/interfaces/network.ts Normal file
@@ -0,0 +1,6 @@
|
||||
/**
|
||||
* creates a new Network
|
||||
*/
|
||||
export interface INetworkCreationDescriptor {
|
||||
Name: string;
|
||||
}
|
8 ts/interfaces/secret.ts Normal file
@@ -0,0 +1,8 @@
|
||||
import * as interfaces from './index.js';
|
||||
|
||||
export interface ISecretCreationDescriptor {
|
||||
name: string;
|
||||
version: string;
|
||||
contentArg: any;
|
||||
labels: interfaces.TLabels;
|
||||
}
|
21 ts/interfaces/service.ts Normal file
@@ -0,0 +1,21 @@
|
||||
import * as plugins from '../plugins.js';
|
||||
|
||||
import * as interfaces from './index.js';
|
||||
import { DockerNetwork } from '../classes.network.js';
|
||||
import { DockerSecret } from '../classes.secret.js';
|
||||
import { DockerImage } from '../classes.image.js';
|
||||
|
||||
export interface IServiceCreationDescriptor {
|
||||
name: string;
|
||||
image: DockerImage;
|
||||
labels: interfaces.TLabels;
|
||||
networks: DockerNetwork[];
|
||||
networkAlias: string;
|
||||
secrets: DockerSecret[];
|
||||
ports: string[];
|
||||
accessHostDockerSock?: boolean;
|
||||
resources?: {
|
||||
memorySizeMB?: number;
|
||||
volumeMounts?: plugins.tsclass.container.IVolumeMount[];
|
||||
};
|
||||
}
|
5 ts/logger.ts Normal file
@@ -0,0 +1,5 @@
|
||||
import * as plugins from './plugins.js';
|
||||
import { commitinfo } from './00_commitinfo_data.js';
|
||||
|
||||
export const logger = plugins.smartlog.Smartlog.createForCommitinfo(commitinfo);
|
||||
logger.enableConsole();
|
9 ts/paths.ts Normal file
@@ -0,0 +1,9 @@
|
||||
import * as plugins from './plugins.js';
|
||||
|
||||
export const packageDir = plugins.path.resolve(
|
||||
plugins.smartpath.get.dirnameFromImportMetaUrl(import.meta.url),
|
||||
'../'
|
||||
);
|
||||
|
||||
export const nogitDir = plugins.path.resolve(packageDir, '.nogit/');
|
||||
plugins.smartfile.fs.ensureDir(nogitDir);
|
47 ts/plugins.ts Normal file
@@ -0,0 +1,47 @@
|
||||
// node native path
|
||||
import * as path from 'path';
|
||||
|
||||
export { path };
|
||||
|
||||
// @pushrocks scope
|
||||
import * as lik from '@push.rocks/lik';
|
||||
import * as smartarchive from '@push.rocks/smartarchive';
|
||||
import * as smartbucket from '@push.rocks/smartbucket';
|
||||
import * as smartfile from '@push.rocks/smartfile';
|
||||
import * as smartjson from '@push.rocks/smartjson';
|
||||
import * as smartlog from '@push.rocks/smartlog';
|
||||
import * as smartnetwork from '@push.rocks/smartnetwork';
|
||||
import * as smartpath from '@push.rocks/smartpath';
|
||||
import * as smartpromise from '@push.rocks/smartpromise';
|
||||
import * as smartrequest from '@push.rocks/smartrequest';
|
||||
import * as smartstring from '@push.rocks/smartstring';
|
||||
import * as smartstream from '@push.rocks/smartstream';
|
||||
import * as smartunique from '@push.rocks/smartunique';
|
||||
import * as smartversion from '@push.rocks/smartversion';
|
||||
|
||||
export {
|
||||
lik,
|
||||
smartarchive,
|
||||
smartbucket,
|
||||
smartfile,
|
||||
smartjson,
|
||||
smartlog,
|
||||
smartnetwork,
|
||||
smartpath,
|
||||
smartpromise,
|
||||
smartrequest,
|
||||
smartstring,
|
||||
smartstream,
|
||||
smartunique,
|
||||
smartversion,
|
||||
};
|
||||
|
||||
// @tsclass scope
|
||||
import * as tsclass from '@tsclass/tsclass';
|
||||
|
||||
export { tsclass };
|
||||
|
||||
// third party
|
||||
import * as rxjs from 'rxjs';
|
||||
|
||||
export { rxjs };
|
14 tsconfig.json Normal file
@@ -0,0 +1,14 @@
|
||||
{
|
||||
"compilerOptions": {
|
||||
"experimentalDecorators": true,
|
||||
"useDefineForClassFields": false,
|
||||
"target": "ES2022",
|
||||
"module": "NodeNext",
|
||||
"moduleResolution": "NodeNext",
|
||||
"esModuleInterop": true,
|
||||
"verbatimModuleSyntax": true
|
||||
},
|
||||
"exclude": [
|
||||
"dist_*/**/*.d.ts"
|
||||
]
|
||||
}
|
17 tslint.json
@@ -1,17 +0,0 @@
|
||||
{
|
||||
"extends": ["tslint:latest", "tslint-config-prettier"],
|
||||
"rules": {
|
||||
"semicolon": [true, "always"],
|
||||
"no-console": false,
|
||||
"ordered-imports": false,
|
||||
"object-literal-sort-keys": false,
|
||||
"member-ordering": {
|
||||
"options":{
|
||||
"order": [
|
||||
"static-method"
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"defaultSeverity": "warning"
|
||||
}
|