Compare commits
23 Commits
| Author | SHA1 | Date |
|---|---|---|
| | b8a26bf3bd | |
| | e6432b4ea9 | |
| | e9975ba7b8 | |
| | 396ce29d7a | |
| | 7c0935d585 | |
| | 52af76b7ed | |
| | 414d7dd727 | |
| | 4b1c908b89 | |
| | 6e313261e7 | |
| | 42df15a523 | |
| | 7ef2ebcf5b | |
| | 87f26b7b63 | |
| | ffdc61fb42 | |
| | 5b25704cf8 | |
| | 00e6033d8b | |
| | 453040983d | |
| | 456858bc36 | |
| | 606c82dafa | |
| | 9fc4afe4b8 | |
| | 90689c2645 | |
| | 4a1d649e5e | |
| | 66bd36dc4f | |
| | 349d711cc5 | |
```diff
@@ -6,8 +6,8 @@ on:
       - '**'
 
 env:
-  IMAGE: registry.gitlab.com/hosttoday/ht-docker-node:npmci
+  IMAGE: code.foss.global/host.today/ht-docker-node:npmci
-  NPMCI_COMPUTED_REPOURL: https://${{gitea.repository_owner}}:${{secrets.GITEA_TOKEN}}@gitea.lossless.digital/${{gitea.repository}}.git
+  NPMCI_COMPUTED_REPOURL: https://${{gitea.repository_owner}}:${{secrets.GITEA_TOKEN}}@/${{gitea.repository}}.git
   NPMCI_TOKEN_NPM: ${{secrets.NPMCI_TOKEN_NPM}}
   NPMCI_TOKEN_NPM2: ${{secrets.NPMCI_TOKEN_NPM2}}
   NPMCI_GIT_GITHUBTOKEN: ${{secrets.NPMCI_GIT_GITHUBTOKEN}}
@@ -26,7 +26,7 @@ jobs:
       - name: Install pnpm and npmci
        run: |
          pnpm install -g pnpm
-         pnpm install -g @shipzone/npmci
+         pnpm install -g @ship.zone/npmci
 
      - name: Run npm prepare
        run: npmci npm prepare
```
```diff
@@ -6,8 +6,8 @@ on:
       - '*'
 
 env:
-  IMAGE: registry.gitlab.com/hosttoday/ht-docker-node:npmci
+  IMAGE: code.foss.global/host.today/ht-docker-node:npmci
-  NPMCI_COMPUTED_REPOURL: https://${{gitea.repository_owner}}:${{secrets.GITEA_TOKEN}}@gitea.lossless.digital/${{gitea.repository}}.git
+  NPMCI_COMPUTED_REPOURL: https://${{gitea.repository_owner}}:${{secrets.GITEA_TOKEN}}@/${{gitea.repository}}.git
   NPMCI_TOKEN_NPM: ${{secrets.NPMCI_TOKEN_NPM}}
   NPMCI_TOKEN_NPM2: ${{secrets.NPMCI_TOKEN_NPM2}}
   NPMCI_GIT_GITHUBTOKEN: ${{secrets.NPMCI_GIT_GITHUBTOKEN}}
@@ -26,7 +26,7 @@ jobs:
       - name: Prepare
        run: |
          pnpm install -g pnpm
-         pnpm install -g @shipzone/npmci
+         pnpm install -g @ship.zone/npmci
          npmci npm prepare
 
      - name: Audit production dependencies
@@ -54,7 +54,7 @@ jobs:
      - name: Prepare
        run: |
          pnpm install -g pnpm
-         pnpm install -g @shipzone/npmci
+         pnpm install -g @ship.zone/npmci
          npmci npm prepare
 
      - name: Test stable
@@ -82,7 +82,7 @@ jobs:
      - name: Prepare
        run: |
          pnpm install -g pnpm
-         pnpm install -g @shipzone/npmci
+         pnpm install -g @ship.zone/npmci
          npmci npm prepare
 
      - name: Release
@@ -104,7 +104,7 @@ jobs:
      - name: Prepare
        run: |
          pnpm install -g pnpm
-         pnpm install -g @shipzone/npmci
+         pnpm install -g @ship.zone/npmci
          npmci npm prepare
 
      - name: Code quality
```
.gitignore (vendored): 7 lines changed

```diff
@@ -3,7 +3,6 @@
 # artifacts
 coverage/
 public/
-pages/
 
 # installs
 node_modules/
@@ -17,4 +16,8 @@ node_modules/
 dist/
 dist_*/
 
-# custom
+# AI
+.claude/
+.serena/
+
+#------# custom
```
changelog.md: 115 lines changed

```diff
@@ -1,10 +1,94 @@
 # Changelog
 
+## 2025-11-18 - 2.0.0 - BREAKING CHANGE(DockerHost)
+Rename DockerHost constructor option 'dockerSockPath' to 'socketPath' and update internal socket path handling
+
+- Breaking: constructor option renamed from 'dockerSockPath' to 'socketPath' — callers must update their code.
+- Constructor now reads the provided 'socketPath' option first, then falls back to DOCKER_HOST, CI, and finally the default unix socket.
+- README examples and documentation updated to use 'socketPath'.
+
+## 2025-11-17 - 1.3.6 - fix(streaming)
+Convert smartrequest v5 web ReadableStreams to Node.js streams and update deps for streaming compatibility
+
+- Upgrade @push.rocks/smartrequest to ^5.0.1 and bump @git.zone dev tooling (@git.zone/tsbuild, tsrun, tstest).
+- requestStreaming now uses response.stream() (web ReadableStream) and converts it to a Node.js Readable via plugins.smartstream.nodewebhelpers.convertWebReadableToNodeReadable for backward compatibility.
+- Updated consumers of streaming responses (DockerHost.getEventObservable, DockerImage.createFromTarStream, DockerImage.exportToTarStream) to work with the converted Node.js stream and preserve event/backpressure semantics (.on, .pause, .resume).
+- Added readme.hints.md documenting the smartrequest v5 migration, conversion approach, modified files, and test/build status (type errors resolved and Node.js tests passing).
+- Removed project metadata file (.serena/project.yml) from the repository.
+
+## 2025-08-19 - 1.3.5 - fix(core)
+Stabilize CI/workflows and runtime: update CI images/metadata, improve streaming requests and image handling, and fix tests & package metadata
+
+- Update CI workflows and images: switch workflow IMAGE to code.foss.global/host.today/ht-docker-node:npmci, fix NPMCI_COMPUTED_REPOURL placeholders, and replace @shipzone/npmci with @ship.zone/npmci in workflows
+- Update npmextra.json gitzone metadata (githost -> code.foss.global, gitscope -> apiclient.xyz, npmPackagename -> @apiclient.xyz/docker) and npmdocker.baseImage -> host.today/ht-docker-node:npmci
+- Adjust package.json repository/bugs/homepage to code.foss.global, add pnpm overrides entry and normalize package metadata
+- Improve DockerHost streaming and request handling: reduce requestStreaming timeout to 30s, enable autoDrain for streaming requests, improve response parsing for streaming vs JSON endpoints to avoid hangs
+- Enhance DockerImage and DockerImageStore stream handling and tar processing: more robust import/export parsing, safer stream-to-file writes, repackaging steps, and error handling
+- Unskip and update tests: re-enable DockerImageStore integration test, change stored image name to 'hello2', add formatting fixes and ensure cleanup stops the test DockerHost
+- Miscellaneous code and docs cleanup: numerous formatting fixes and trailing-comma normalization across README and TS sources, update commitinfo and logger newline fixes, and add local tool ignores (.claude/.serena) to .gitignore
+
+## 2025-08-19 - 1.3.4 - fix(test)
+
+Increase test timeout, enable DockerImageStore test, update test image name, bump smartrequest patch, and add local claude settings
+
+- Increase tstest timeout from 120s to 600s in package.json to accommodate longer-running integration tests.
+- Unskip the DockerImageStore integration test and change stored image name from 'hello' to 'hello2' in test/test.nonci.node.ts.
+- Bump dependency @push.rocks/smartrequest from ^4.3.0 to ^4.3.1.
+- Add .claude/settings.local.json to allow local agent permissions for running tests and related tooling.
+
+## 2025-08-19 - 1.3.3 - fix(classes.host)
+
+Adjust requestStreaming timeout and autoDrain; stabilize tests
+
+- Reduced requestStreaming timeout from 10 minutes to 30 seconds to avoid long-running hanging requests.
+- Enabled autoDrain for streaming requests to ensure response streams are properly drained and reduce resource issues.
+- Marked the DockerImageStore S3 integration test as skipped to avoid CI dependence on external S3 and added a cleanup test to stop the test DockerHost.
+- Added local tool settings file (.claude/settings.local.json) with local permissions (development-only).
+
+## 2025-08-18 - 1.3.2 - fix(package.json)
+
+Fix test script timeout typo, update dependency versions, and add typings & project configs
+
+- Fix test script: correct 'tineout' -> 'timeout' for npm test command and set timeout to 120s
+- Add 'typings': 'dist_ts/index.d.ts' to package.json
+- Bump dependencies to newer compatible versions (notable packages: @push.rocks/lik, @push.rocks/smartarchive, @push.rocks/smartbucket, @push.rocks/smartfile, @push.rocks/smartlog, @push.rocks/smartpromise, @push.rocks/smartstream, rxjs)
+- Add project/config files: .serena/project.yml and .claude/settings.local.json (editor/CI metadata)
+- Include generated cache/metadata files (typescript document symbols cache) — not source changes but tooling/cache artifacts
+
+## 2025-08-18 - 1.3.1 - fix(test)
+
+Update test setup and devDependencies; adjust test import and add package metadata
+
+- Update test script to run with additional flags: --verbose, --logfile and --tineout 120
+- Bump devDependencies: @git.zone/tsbuild -> ^2.6.7, @git.zone/tsrun -> ^1.3.3, @git.zone/tstest -> ^2.3.5, @push.rocks/qenv -> ^6.1.3
+- Change test import from @push.rocks/tapbundle to @git.zone/tstest/tapbundle
+- Add typings field (dist_ts/index.d.ts)
+- Add packageManager field for pnpm@10.14.0 with integrity hash
+
+## 2024-12-23 - 1.3.0 - feat(core)
+
+Initial release of Docker client with TypeScript support
+
+- Provides easy communication with Docker's remote API from Node.js
+- Includes implementations for managing Docker services, networks, secrets, containers, and images
+
+## 2024-12-23 - 1.2.8 - fix(core)
+
+Improved the image creation process from tar stream in DockerImage class.
+
+- Enhanced `DockerImage.createFromTarStream` method to handle streamed response and parse imported image details.
+- Fixed the dependency version for `@push.rocks/smartarchive` in package.json.
+
+## 2024-10-13 - 1.2.7 - fix(core)
+
+Prepare patch release with minor fixes and improvements
+
 ## 2024-10-13 - 1.2.6 - fix(core)
 
 Minor refactoring and code quality improvements.
 
 
 ## 2024-10-13 - 1.2.5 - fix(dependencies)
 
 Update dependencies for stability improvements
 
 - Updated @push.rocks/smartstream to version ^3.0.46
@@ -12,137 +96,160 @@ Update dependencies for stability improvements
 - Updated @types/node to version 22.7.5
 
 ## 2024-10-13 - 1.2.4 - fix(core)
 
 Refactored DockerImageStore constructor to remove DockerHost dependency
 
 - Adjusted DockerImageStore constructor to remove dependency on DockerHost
 - Updated ts/classes.host.ts to align with DockerImageStore's new constructor signature
 
 ## 2024-08-21 - 1.2.3 - fix(dependencies)
 
 Update dependencies to the latest versions and fix image export test
 
 - Updated several dependencies to their latest versions in package.json.
 - Enabled the previously skipped 'should export images' test.
 
 ## 2024-06-10 - 1.2.1-1.2.2 - Core/General
 
 General updates and fixes.
 
 - Fix core update
 
 ## 2024-06-10 - 1.2.0 - Core
 
 Core updates and bug fixes.
 
 - Fix core update
 
 ## 2024-06-08 - 1.2.0 - General/Core
 
 Major release with core enhancements.
 
 - Processing images with extraction, retagging, repackaging, and long-term storage
 
 ## 2024-06-06 - 1.1.4 - General/Imagestore
 
 Significant feature addition.
 
 - Add feature to process images with extraction, retagging, repackaging, and long-term storage
 
 ## 2024-05-08 - 1.0.112 - Images
 
 Add new functionality for image handling.
 
 - Can now import and export images
 - Start work on local 100% JS OCI image registry
 
 ## 2024-06-05 - 1.1.0-1.1.3 - Core
 
 Regular updates and fixes.
 
 - Fix core update
 
 ## 2024-02-02 - 1.0.105-1.0.110 - Core
 
 Routine core updates and fixes.
 
 - Fix core update
 
 ## 2022-10-17 - 1.0.103-1.0.104 - Core
 
 Routine core updates.
 
 - Fix core update
 
 ## 2020-10-01 - 1.0.99-1.0.102 - Core
 
 Routine core updates.
 
 - Fix core update
 
 ## 2019-09-22 - 1.0.73-1.0.78 - Core
 
 Routine updates and core fixes.
 
 - Fix core update
 
 ## 2019-09-13 - 1.0.60-1.0.72 - Core
 
 Routine updates and core fixes.
 
 - Fix core update
 
 ## 2019-08-16 - 1.0.43-1.0.59 - Core
 
 Routine updates and core fixes.
 
 - Fix core update
 
 ## 2019-08-15 - 1.0.37-1.0.42 - Core
 
 Routine updates and core fixes.
 
 - Fix core update
 
 ## 2019-08-14 - 1.0.31-1.0.36 - Core
 
 Routine updates and core fixes.
 
 - Fix core update
 
 ## 2019-01-10 - 1.0.27-1.0.30 - Core
 
 Routine updates and core fixes.
 
 - Fix core update
 
 ## 2018-07-16 - 1.0.23-1.0.24 - Core
 
 Routine updates and core fixes.
 
 - Fix core shift to new style
 
 ## 2017-07-16 - 1.0.20-1.0.22 - General
 
 Routine updates and fixes.
 
 - Update node_modules within npmdocker
 
 ## 2017-04-02 - 1.0.18-1.0.19 - General
 
 Routine updates and fixes.
 
 - Work with npmdocker and npmts 7.x.x
 - CI updates
 
 ## 2016-07-31 - 1.0.17 - General
 
 Enhancements and fixes.
 
 - Now waiting for response to be stored before ending streaming request
 - Cosmetic fix
 
 ## 2016-07-29 - 1.0.14-1.0.16 - General
 
 Multiple updates and features added.
 
 - Fix request for change observable and add npmdocker
 - Add request typings
 
 ## 2016-07-28 - 1.0.13 - Core
 
 Fixes and preparations.
 
 - Fixed request for newer docker
 - Prepare for npmdocker
 
 
 ## 2016-06-16 - 1.0.0-1.0.2 - General
 
 Initial sequence of releases, significant feature additions and CI setups.
 
 - Implement container start and stop
 - Implement list containers and related functions
 - Add tests with in docker environment
 
 ## 2016-04-12 - unknown - Initial Commit
 
 Initial project setup.
 
 - Initial commit
 
```
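The 2.0.0 entry above renames the `DockerHost` constructor option. A minimal before/after sketch, with the option names taken from the changelog and the socket path value purely illustrative:

```typescript
import { DockerHost } from '@apiclient.xyz/docker';

// Up to 1.x the option was called 'dockerSockPath' (removed in 2.0.0):
// const dockerHost = new DockerHost({ dockerSockPath: '/var/run/docker.sock' });

// From 2.0.0 the option is 'socketPath'; when omitted, the constructor falls
// back to DOCKER_HOST, then the CI default, then the default unix socket.
const dockerHost = new DockerHost({ socketPath: '/var/run/docker.sock' });
await dockerHost.start();
```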
npmextra.json

```diff
@@ -1,6 +1,6 @@
 {
   "npmdocker": {
-    "baseImage": "hosttoday/ht-docker-node:npmci",
+    "baseImage": "host.today/ht-docker-node:npmci",
     "command": "(ls -a && rm -r node_modules && yarn global add npmts && yarn install && npmts)",
     "dockerSock": true
   },
@@ -12,11 +12,11 @@
   "gitzone": {
     "projectType": "npm",
     "module": {
-      "githost": "gitlab.com",
+      "githost": "code.foss.global",
-      "gitscope": "mojoio",
+      "gitscope": "apiclient.xyz",
       "gitrepo": "docker",
       "description": "Provides easy communication with Docker remote API from Node.js, with TypeScript support.",
-      "npmPackagename": "@mojoio/docker",
+      "npmPackagename": "@apiclient.xyz/docker",
       "license": "MIT",
       "keywords": [
         "Docker",
@@ -34,4 +34,4 @@
   "tsdoc": {
     "legal": "\n## License and Legal Information\n\nThis repository contains open-source code that is licensed under the MIT License. A copy of the MIT License can be found in the [license](license) file within this repository. \n\n**Please note:** The MIT License does not grant permission to use the trade names, trademarks, service marks, or product names of the project, except as required for reasonable and customary use in describing the origin of the work and reproducing the content of the NOTICE file.\n\n### Trademarks\n\nThis project is owned and maintained by Task Venture Capital GmbH. The names and logos associated with Task Venture Capital GmbH and any related products or services are trademarks of Task Venture Capital GmbH and are not included within the scope of the MIT license granted herein. Use of these trademarks must comply with Task Venture Capital GmbH's Trademark Guidelines, and any usage must be approved in writing by Task Venture Capital GmbH.\n\n### Company Information\n\nTask Venture Capital GmbH \nRegistered at District court Bremen HRB 35230 HB, Germany\n\nFor any legal inquiries or if you require further information, please contact us via email at hello@task.vc.\n\nBy using this repository, you acknowledge that you have read this section, agree to comply with its terms, and understand that the licensing of the code does not imply endorsement by Task Venture Capital GmbH of any derivative works.\n"
   }
 }
```
package.json: 53 lines changed

```diff
@@ -1,19 +1,19 @@
 {
   "name": "@apiclient.xyz/docker",
-  "version": "1.2.6",
+  "version": "2.0.0",
   "description": "Provides easy communication with Docker remote API from Node.js, with TypeScript support.",
   "private": false,
   "main": "dist_ts/index.js",
   "typings": "dist_ts/index.d.ts",
   "type": "module",
   "scripts": {
-    "test": "(tstest test/ --web)",
+    "test": "(tstest test/ --verbose --logfile --timeout 600)",
     "build": "(tsbuild --web --allowimplicitany)",
     "buildDocs": "tsdoc"
   },
   "repository": {
     "type": "git",
-    "url": "git+https://gitlab.com/mojoio/docker.git"
+    "url": "https://code.foss.global/apiclient.xyz/docker.git"
   },
   "keywords": [
     "Docker",
@@ -29,33 +29,32 @@
   "author": "Lossless GmbH",
   "license": "MIT",
   "bugs": {
-    "url": "https://gitlab.com/mojoio/docker/issues"
+    "url": "https://code.foss.global/apiclient.xyz/docker/issues"
   },
-  "homepage": "https://gitlab.com/mojoio/docker#readme",
+  "homepage": "https://code.foss.global/apiclient.xyz/docker#readme",
   "dependencies": {
-    "@push.rocks/lik": "^6.0.15",
+    "@push.rocks/lik": "^6.2.2",
-    "@push.rocks/smartarchive": "^4.0.37",
+    "@push.rocks/smartarchive": "^4.2.2",
-    "@push.rocks/smartbucket": "^3.0.22",
+    "@push.rocks/smartbucket": "^3.3.10",
-    "@push.rocks/smartfile": "^11.0.21",
+    "@push.rocks/smartfile": "^11.2.7",
-    "@push.rocks/smartjson": "^5.0.20",
+    "@push.rocks/smartjson": "^5.2.0",
-    "@push.rocks/smartlog": "^3.0.7",
+    "@push.rocks/smartlog": "^3.1.10",
-    "@push.rocks/smartnetwork": "^3.0.0",
+    "@push.rocks/smartnetwork": "^4.4.0",
-    "@push.rocks/smartpath": "^5.0.18",
+    "@push.rocks/smartpath": "^6.0.0",
-    "@push.rocks/smartpromise": "^4.0.4",
+    "@push.rocks/smartpromise": "^4.2.3",
-    "@push.rocks/smartrequest": "^2.0.22",
+    "@push.rocks/smartrequest": "^5.0.1",
-    "@push.rocks/smartstream": "^3.0.46",
+    "@push.rocks/smartstream": "^3.2.5",
-    "@push.rocks/smartstring": "^4.0.15",
+    "@push.rocks/smartstring": "^4.1.0",
     "@push.rocks/smartunique": "^3.0.9",
     "@push.rocks/smartversion": "^3.0.5",
-    "@tsclass/tsclass": "^4.1.2",
+    "@tsclass/tsclass": "^9.3.0",
-    "rxjs": "^7.5.7"
+    "rxjs": "^7.8.2"
   },
   "devDependencies": {
-    "@git.zone/tsbuild": "^2.1.84",
+    "@git.zone/tsbuild": "^3.1.0",
-    "@git.zone/tsrun": "^1.2.49",
+    "@git.zone/tsrun": "^2.0.0",
-    "@git.zone/tstest": "^1.0.90",
+    "@git.zone/tstest": "^2.8.2",
-    "@push.rocks/qenv": "^6.0.5",
+    "@push.rocks/qenv": "^6.1.3",
-    "@push.rocks/tapbundle": "^5.3.0",
     "@types/node": "22.7.5"
   },
   "files": [
@@ -72,5 +71,9 @@
   ],
   "browserslist": [
     "last 1 chrome versions"
-  ]
+  ],
+  "packageManager": "pnpm@10.14.0+sha512.ad27a79641b49c3e481a16a805baa71817a04bbe06a38d17e60e2eaee83f6a146c6a688125f5792e48dd5ba30e7da52a5cda4c3992b9ccf333f9ce223af84748",
+  "pnpm": {
+    "overrides": {}
+  }
 }
```
pnpm-lock.yaml (generated): 8665 lines changed. File diff suppressed because it is too large.
readme.hints.md (new file)

````diff
@@ -0,0 +1,29 @@
+# Docker Module - Development Hints
+
+## smartrequest v5+ Migration (2025-11-17)
+
+### Breaking Change
+smartrequest v5.0.0+ returns web `ReadableStream` objects (Web Streams API) instead of Node.js streams.
+
+### Solution Implemented
+All streaming methods now convert web ReadableStreams to Node.js streams using:
+```typescript
+plugins.smartstream.nodewebhelpers.convertWebReadableToNodeReadable(webStream)
+```
+
+### Files Modified
+- `ts/classes.host.ts`:
+  - `requestStreaming()` - Converts web stream to Node.js stream before returning
+  - `getEventObservable()` - Works with converted Node.js stream
+
+- `ts/classes.image.ts`:
+  - `createFromTarStream()` - Uses converted Node.js stream for event handling
+  - `exportToTarStream()` - Uses converted Node.js stream for backpressure management
+
+### Testing
+- Build: All 11 type errors resolved
+- Tests: Node.js tests pass (DockerHost, DockerContainer, DockerImage, DockerImageStore)
+
+### Notes
+- The conversion maintains backward compatibility with existing code expecting Node.js stream methods (`.on()`, `.emit()`, `.pause()`, `.resume()`)
+- smartstream's `nodewebhelpers` module provides bidirectional conversion utilities between web and Node.js streams
````
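The hints above rely on smartstream's `nodewebhelpers` for the conversion. As a self-contained sketch of the same idea using only Node.js built-ins (an assumed equivalent, not the project's actual helper):

```typescript
import { Readable } from 'node:stream';
import type { ReadableStream } from 'node:stream/web';

// Convert a Web Streams API ReadableStream (as returned by smartrequest v5's
// response.stream()) into a Node.js Readable, so consumers can keep using
// .on('data'), .pause(), .resume() and pipe-based backpressure.
function toNodeReadable(webStream: ReadableStream<Uint8Array>): Readable {
  return Readable.fromWeb(webStream);
}
```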
readme.md: 727 lines changed
@@ -1,140 +1,695 @@
|
|||||||
# @apiclient.xyz/docker
|
# @apiclient.xyz/docker 🐳
|
||||||
|
|
||||||
easy communication with docker remote api from node, TypeScript ready
|
> **Powerful TypeScript client for Docker Remote API** - Build, manage, and orchestrate Docker containers, images, networks, and swarm services with type-safe elegance.
|
||||||
|
|
||||||
## Install
|
## 🚀 Features
|
||||||
|
|
||||||
To install @apiclient.xyz/docker, you can use npm (npm package manager). Run the following command in your terminal:
|
- 🎯 **Full TypeScript Support** - Complete type definitions for all Docker API entities
|
||||||
|
- 🔄 **Async/Await Ready** - Modern promise-based architecture for seamless async operations
|
||||||
|
- 📦 **Container Management** - Create, list, inspect, and manage containers effortlessly
|
||||||
|
- 🖼️ **Image Handling** - Pull from registries, build from tarballs, export, and manage tags
|
||||||
|
- 🌐 **Network Operations** - Create and manage Docker networks with full IPAM support
|
||||||
|
- 🔐 **Secrets Management** - Handle Docker secrets securely in swarm mode
|
||||||
|
- 🎭 **Service Orchestration** - Deploy and manage services in Docker Swarm
|
||||||
|
- 💾 **S3 Image Storage** - Built-in support for storing/retrieving images from S3-compatible storage
|
||||||
|
- 📊 **Event Streaming** - Real-time Docker event monitoring with RxJS observables
|
||||||
|
- 🔧 **Registry Authentication** - Seamless authentication with Docker registries including private registries
|
||||||
|
- 🐝 **Swarm Mode** - Full support for Docker Swarm initialization and management
|
||||||
|
|
||||||
|
## 📦 Installation
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
|
# Using pnpm (recommended)
|
||||||
|
pnpm add @apiclient.xyz/docker
|
||||||
|
|
||||||
|
# Using npm
|
||||||
npm install @apiclient.xyz/docker --save
|
npm install @apiclient.xyz/docker --save
|
||||||
|
|
||||||
|
# Using yarn
|
||||||
|
yarn add @apiclient.xyz/docker
|
||||||
```
|
```
|
||||||
|
|
||||||
This command installs the package and adds it as a dependency to your project's `package.json` file.
|
## 🎯 Quick Start
|
||||||
|
|
||||||
## Usage
|
|
||||||
|
|
||||||
The `@apiclient.xyz/docker` package provides a TypeScript-ready interface for interacting with Docker's Remote API directly from Node.js applications. It leverages TypeScript for strong type definitions, ensuring more reliable and maintainable code.
|
|
||||||
|
|
||||||
### Prerequisites
|
|
||||||
|
|
||||||
Before you begin, ensure:
|
|
||||||
|
|
||||||
- You have Docker installed and running on your machine or a remote server.
|
|
||||||
- You are familiar with TypeScript and have it set up in your development environment.
|
|
||||||
|
|
||||||
### Getting Started
|
|
||||||
|
|
||||||
First, import the required classes from the package:
|
|
||||||
|
|
||||||
```typescript
|
```typescript
|
||||||
import { DockerHost, DockerContainer, DockerService, DockerNetwork } from '@apiclient.xyz/docker';
|
import { DockerHost } from '@apiclient.xyz/docker';
|
||||||
|
|
||||||
|
// Connect to local Docker daemon (default: /var/run/docker.sock)
|
||||||
|
const docker = new DockerHost({});
|
||||||
|
await docker.start();
|
||||||
|
|
||||||
|
// Or connect to remote Docker host via TCP
|
||||||
|
const remoteDocker = new DockerHost({
|
||||||
|
socketPath: 'tcp://192.168.1.100:2375',
|
||||||
|
});
|
||||||
|
await remoteDocker.start();
|
||||||
|
|
||||||
|
// List all containers
|
||||||
|
const containers = await docker.getContainers();
|
||||||
|
console.log(`Found ${containers.length} containers`);
|
||||||
|
|
||||||
|
// Don't forget to clean up
|
||||||
|
await docker.stop();
|
||||||
```
|
```
|
||||||
|
|
||||||
### Instantiate DockerHost
|
## 🔌 Socket Path Configuration
|
||||||
|
|
||||||
Start by creating a `DockerHost` instance. This class is the entry point to communicate with the Docker Remote API.
|
The library determines which Docker socket to use in the following priority order:
|
||||||
|
|
||||||
|
1. **Constructor option** - `socketPath` parameter (highest priority)
|
||||||
|
2. **Environment variable** - `DOCKER_HOST` environment variable
|
||||||
|
3. **CI environment** - If `CI` env var is set, uses `http://docker:2375/`
|
||||||
|
4. **Default** - Falls back to `http://unix:/var/run/docker.sock:`
|
||||||
|
|
||||||
```typescript
|
```typescript
|
||||||
// Connect to local Docker instance
|
// Explicit socket path (highest priority)
|
||||||
const localDockerHost = new DockerHost();
|
const docker1 = new DockerHost({
|
||||||
|
socketPath: 'tcp://remote-host:2375',
|
||||||
|
});
|
||||||
|
|
||||||
// Or specify a custom path or URL to a Docker host
|
// Uses DOCKER_HOST environment variable if set
|
||||||
const remoteDockerHost = new DockerHost('tcp://<REMOTE_DOCKER_HOST>:2375');
|
const docker2 = new DockerHost({});
|
||||||
|
|
||||||
|
// Custom image store directory
|
||||||
|
const docker3 = new DockerHost({
|
||||||
|
imageStoreDir: '/custom/path/to/image-store',
|
||||||
|
});
|
||||||
```
|
```
|
||||||
|
|
||||||
### Working with Containers
|
## 📚 Complete API Guide
|
||||||
|
|
||||||
|
### 🐳 DockerHost - Your Gateway to Docker
|
||||||
|
|
||||||
|
The `DockerHost` class is your primary interface to interact with the Docker daemon.
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
import { DockerHost } from '@apiclient.xyz/docker';
|
||||||
|
|
||||||
|
// Initialize with options
|
||||||
|
const docker = new DockerHost({
|
||||||
|
socketPath: '/var/run/docker.sock', // Optional: custom socket path
|
||||||
|
imageStoreDir: './docker-images', // Optional: custom image store location
|
||||||
|
});
|
||||||
|
|
||||||
|
// Start the docker host (initializes image store)
|
||||||
|
await docker.start();
|
||||||
|
|
||||||
|
// ... perform operations ...
|
||||||
|
|
||||||
|
// Stop and clean up
|
||||||
|
await docker.stop();
|
||||||
|
```
|
||||||
|
|
||||||
|
### 📦 Container Management
|
||||||
|
|
||||||
#### List All Containers
|
#### List All Containers
|
||||||
|
|
||||||
```typescript
|
```typescript
|
||||||
async function listAllContainers() {
|
// Get all containers (running and stopped)
|
||||||
const containers = await localDockerHost.getContainers();
|
const containers = await docker.getContainers();
|
||||||
console.log(containers);
|
|
||||||
}
|
|
||||||
|
|
||||||
listAllContainers();
|
containers.forEach((container) => {
|
||||||
|
console.log(`Container: ${container.Names[0]}`);
|
||||||
|
console.log(` ID: ${container.Id}`);
|
||||||
|
console.log(` Status: ${container.Status}`);
|
||||||
|
console.log(` Image: ${container.Image}`);
|
||||||
|
console.log(` State: ${container.State}`);
|
||||||
|
});
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Create and Remove a Container
|
#### Get Container by ID
|
||||||
|
|
||||||
```typescript
|
```typescript
|
||||||
import { IContainerCreationDescriptor } from '@apiclient.xyz/docker';
|
import { DockerContainer } from '@apiclient.xyz/docker';
|
||||||
|
|
||||||
async function createAndRemoveContainer() {
|
const container = await DockerContainer.getContainerById(docker, 'abc123');
|
||||||
const containerDescriptor: IContainerCreationDescriptor = {
|
if (container) {
|
||||||
Hostname: 'test-container',
|
console.log(`Found: ${container.Names[0]}`);
|
||||||
Domainname: '',
|
console.log(`Running: ${container.State === 'running'}`);
|
||||||
// Additional settings here
|
|
||||||
};
|
|
||||||
|
|
||||||
// Create container
|
|
||||||
const container = await DockerContainer.create(localDockerHost, containerDescriptor);
|
|
||||||
console.log(`Container Created: ${container.Id}`);
|
|
||||||
|
|
||||||
// Remove container
|
|
||||||
await container.remove();
|
|
||||||
console.log(`Container Removed: ${container.Id}`);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
createAndRemoveContainer();
|
|
||||||
```
|
```
|
||||||
|
|
||||||
### Working with Docker Services
|
### 🖼️ Image Management
|
||||||
|
|
||||||
#### Create a Docker Service
|
#### Pull Images from Registry
|
||||||
|
|
||||||
```typescript
|
```typescript
|
||||||
import { IServiceCreationDescriptor } from '@apiclient.xyz/docker';
|
import { DockerImage } from '@apiclient.xyz/docker';
|
||||||
|
|
||||||
async function createDockerService() {
|
// Pull from Docker Hub
|
||||||
const serviceDescriptor: IServiceCreationDescriptor = {
|
const image = await DockerImage.createFromRegistry(docker, {
|
||||||
name: 'my-service',
|
creationObject: {
|
||||||
image: 'nginx:latest', // Docker Image
|
imageUrl: 'nginx',
|
||||||
// Additional settings
|
imageTag: 'alpine', // Optional, defaults to 'latest'
|
||||||
};
|
},
|
||||||
|
});
|
||||||
const service = await DockerService.createService(localDockerHost, serviceDescriptor);
|
|
||||||
console.log(`Service Created: ${service.Id}`);
|
|
||||||
}
|
|
||||||
|
|
||||||
createDockerService();
|
console.log(`Image pulled: ${image.RepoTags[0]}`);
|
||||||
|
console.log(`Size: ${(image.Size / 1024 / 1024).toFixed(2)} MB`);
|
||||||
|
|
||||||
|
// Pull from private registry
|
||||||
|
const privateImage = await DockerImage.createFromRegistry(docker, {
|
||||||
|
creationObject: {
|
||||||
|
imageUrl: 'registry.example.com/my-app',
|
||||||
|
imageTag: 'v2.0.0',
|
||||||
|
},
|
||||||
|
});
|
||||||
```
|
```
|
||||||
|
|
||||||
### Working with Docker Networks
|
#### Import Images from Tar Stream
|
||||||
|
|
||||||
#### Listing and Creating Networks
|
|
||||||
|
|
||||||
```typescript
|
```typescript
|
||||||
async function listAndCreateNetwork() {
|
import * as fs from 'fs';
|
||||||
// List all networks
|
import { DockerImage } from '@apiclient.xyz/docker';
|
||||||
const networks = await localDockerHost.getNetworks();
|
|
||||||
console.log(networks);
|
// Import from a tar file
|
||||||
|
const tarStream = fs.createReadStream('./my-image.tar');
|
||||||
// Create a new network
|
const importedImage = await DockerImage.createFromTarStream(docker, {
|
||||||
const network = await DockerNetwork.createNetwork(localDockerHost, {
|
tarStream,
|
||||||
Name: 'my-network'
|
creationObject: {
|
||||||
// Additional settings
|
imageUrl: 'my-app',
|
||||||
|
imageTag: 'v1.0.0',
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
console.log(`Imported: ${importedImage.RepoTags[0]}`);
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Export Images to Tar Stream
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
// Get image by name
|
||||||
|
const image = await DockerImage.getImageByName(docker, 'nginx:alpine');
|
||||||
|
|
||||||
|
// Export to tar stream
|
||||||
|
const exportStream = await image.exportToTarStream();
|
||||||
|
|
||||||
|
// Save to file
|
||||||
|
const writeStream = fs.createWriteStream('./nginx-export.tar');
|
||||||
|
exportStream.pipe(writeStream);
|
||||||
|
|
||||||
|
writeStream.on('finish', () => {
|
||||||
|
console.log('Image exported successfully');
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Tag Images
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
// Tag an existing image
|
||||||
|
await DockerImage.tagImageByIdOrName(docker, 'nginx:alpine', {
|
||||||
|
registry: 'myregistry.com',
|
||||||
|
imageName: 'web-server',
|
||||||
|
imageTag: 'v1.0.0',
|
||||||
|
});
|
||||||
|
// Result: myregistry.com/web-server:v1.0.0
|
||||||
|
```
|
||||||
|
|
||||||
|
#### List All Images
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
const images = await docker.getImages();
|
||||||
|
|
||||||
|
images.forEach((img) => {
|
||||||
|
console.log(`Image: ${img.RepoTags ? img.RepoTags.join(', ') : '<none>'}`);
|
||||||
|
console.log(` ID: ${img.Id}`);
|
||||||
|
console.log(` Size: ${(img.Size / 1024 / 1024).toFixed(2)} MB`);
|
||||||
|
console.log(` Created: ${new Date(img.Created * 1000).toISOString()}`);
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
### 🌐 Network Management
|
||||||
|
|
||||||
|
#### Create Custom Networks
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
import { DockerNetwork } from '@apiclient.xyz/docker';
|
||||||
|
|
||||||
|
// Create a bridge network
|
||||||
|
const network = await DockerNetwork.createNetwork(docker, {
|
||||||
|
Name: 'my-app-network',
|
||||||
|
Driver: 'bridge',
|
||||||
|
EnableIPv6: false,
|
||||||
|
IPAM: {
|
||||||
|
Driver: 'default',
|
||||||
|
Config: [
|
||||||
|
{
|
||||||
|
Subnet: '172.28.0.0/16',
|
||||||
|
Gateway: '172.28.0.1',
|
||||||
|
},
|
||||||
|
],
|
||||||
|
},
|
||||||
|
Labels: {
|
||||||
|
project: 'my-app',
|
||||||
|
environment: 'production',
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
console.log(`Network created: ${network.Name} (${network.Id})`);
|
||||||
|
```
|
||||||
|
|
||||||
|
#### List and Inspect Networks
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
// Get all networks
|
||||||
|
const networks = await docker.getNetworks();
|
||||||
|
|
||||||
|
networks.forEach((net) => {
|
||||||
|
console.log(`Network: ${net.Name} (${net.Driver})`);
|
||||||
|
console.log(` Scope: ${net.Scope}`);
|
||||||
|
console.log(` Internal: ${net.Internal}`);
|
||||||
|
});
|
||||||
|
|
||||||
|
// Get specific network by name
|
||||||
|
const appNetwork = await DockerNetwork.getNetworkByName(docker, 'my-app-network');
|
||||||
|
|
||||||
|
// Get containers connected to this network
|
||||||
|
const containers = await appNetwork.getContainersOnNetwork();
|
||||||
|
console.log(`Containers on network: ${containers.length}`);
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Remove a Network
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
const network = await DockerNetwork.getNetworkByName(docker, 'my-app-network');
|
||||||
|
await network.remove();
|
||||||
|
console.log('Network removed');
|
||||||
|
```
|
||||||
|
|
||||||
|
### 🎭 Service Management (Swarm Mode)
|
||||||
|
|
||||||
|
#### Activate Swarm Mode
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
// Initialize swarm mode first
|
||||||
|
await docker.activateSwarm('192.168.1.100'); // Optional: advertisement IP
|
||||||
|
console.log('Swarm mode activated');
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Deploy Services
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
import { DockerService, DockerImage, DockerNetwork, DockerSecret } from '@apiclient.xyz/docker';
|
||||||
|
|
||||||
|
// Create prerequisites
|
||||||
|
const network = await DockerNetwork.createNetwork(docker, {
|
||||||
|
Name: 'app-network',
|
||||||
|
Driver: 'overlay', // Use overlay for swarm
|
||||||
|
});
|
||||||
|
|
||||||
|
const image = await DockerImage.createFromRegistry(docker, {
|
||||||
|
creationObject: {
|
||||||
|
imageUrl: 'nginx',
|
||||||
|
imageTag: 'latest',
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
const secret = await DockerSecret.createSecret(docker, {
|
||||||
|
name: 'api-key',
|
||||||
|
version: '1.0.0',
|
||||||
|
contentArg: 'super-secret-key',
|
||||||
|
labels: { app: 'my-app' },
|
||||||
|
});
|
||||||
|
|
||||||
|
// Create a service
|
||||||
|
const service = await DockerService.createService(docker, {
|
||||||
|
name: 'web-api',
|
||||||
|
image: image,
|
||||||
|
labels: {
|
||||||
|
app: 'api',
|
||||||
|
version: '1.0.0',
|
||||||
|
},
|
||||||
|
networks: [network],
|
||||||
|
networkAlias: 'api',
|
||||||
|
secrets: [secret],
|
||||||
|
ports: ['80:3000'], // host:container
|
||||||
|
resources: {
|
||||||
|
memorySizeMB: 512,
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
console.log(`Service deployed: ${service.ID}`);
|
||||||
|
```
|
||||||
|
|
||||||
|
#### List and Manage Services
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
// List all services
|
||||||
|
const services = await docker.getServices();
|
||||||
|
|
||||||
|
services.forEach((service) => {
|
||||||
|
console.log(`Service: ${service.Spec.Name}`);
|
||||||
|
console.log(` Image: ${service.Spec.TaskTemplate.ContainerSpec.Image}`);
|
||||||
|
if (service.Spec.Mode.Replicated) {
|
||||||
|
console.log(` Replicas: ${service.Spec.Mode.Replicated.Replicas}`);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
// Get service by name
|
||||||
|
const myService = await DockerService.getServiceByName(docker, 'web-api');
|
||||||
|
|
||||||
|
// Check if service needs update
|
||||||
|
const needsUpdate = await myService.needsUpdate();
|
||||||
|
if (needsUpdate) {
|
||||||
|
console.log('⚠️ Service configuration has changed, update needed');
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove service
|
||||||
|
await myService.remove();
|
||||||
|
console.log('Service removed');
|
||||||
|
```
|
||||||
|
|
||||||
|
### 🔐 Secrets Management
|
||||||
|
|
||||||
|
Secrets are only available in Docker Swarm mode.
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
import { DockerSecret } from '@apiclient.xyz/docker';
|
||||||
|
|
||||||
|
// Create a secret
|
||||||
|
const secret = await DockerSecret.createSecret(docker, {
|
||||||
|
name: 'database-password',
|
||||||
|
version: '1.0.0',
|
||||||
|
contentArg: 'my-super-secret-password',
|
||||||
|
labels: {
|
||||||
|
app: 'my-app',
|
||||||
|
type: 'credential',
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
console.log(`Secret created: ${secret.ID}`);
|
||||||
|
|
||||||
|
// List all secrets
|
||||||
|
const secrets = await DockerSecret.getSecrets(docker);
|
||||||
|
secrets.forEach((s) => {
|
||||||
|
console.log(`Secret: ${s.Spec.Name}`);
|
||||||
|
console.log(` Labels:`, s.Spec.Labels);
|
||||||
|
});
|
||||||
|
|
||||||
|
// Get secret by name
|
||||||
|
const dbSecret = await DockerSecret.getSecretByName(docker, 'database-password');
|
||||||
|
|
||||||
|
// Update secret content
|
||||||
|
await dbSecret.update('new-password-value');
|
||||||
|
|
||||||
|
// Remove secret
|
||||||
|
await dbSecret.remove();
|
||||||
|
console.log('Secret removed');
|
||||||
|
```
|
||||||
|
|
||||||
|
### 💾 S3 Image Storage
|
||||||
|
|
||||||
|
Store and retrieve Docker images from S3-compatible storage:
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
// Configure S3 storage for the image store
|
||||||
|
await docker.addS3Storage({
|
||||||
|
endpoint: 's3.amazonaws.com',
|
||||||
|
accessKey: 'AKIAIOSFODNN7EXAMPLE',
|
||||||
|
accessSecret: 'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY',
|
||||||
|
bucketName: 'my-docker-images',
|
||||||
|
});
|
||||||
|
|
||||||
|
// Store an image to S3
|
||||||
|
const imageStream = fs.createReadStream('./my-app.tar');
|
||||||
|
await docker.imageStore.storeImage('my-app-v1', imageStream);
|
||||||
|
|
||||||
|
console.log('Image stored to S3');
|
||||||
|
```
|
||||||
|
|
||||||
|
### 📊 Event Monitoring
|
||||||
|
|
||||||
|
Monitor Docker events in real-time using RxJS observables:
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
// Subscribe to Docker events
|
||||||
|
const eventObservable = await docker.getEventObservable();
|
||||||
|
|
||||||
|
const subscription = eventObservable.subscribe({
|
||||||
|
next: (event) => {
|
||||||
|
console.log(`📡 Event: ${event.Type} - ${event.Action}`);
|
||||||
|
console.log(` Actor: ${event.Actor.ID}`);
|
||||||
|
console.log(` Time: ${new Date(event.time * 1000).toISOString()}`);
|
||||||
|
|
||||||
|
if (event.Type === 'container') {
|
||||||
|
console.log(` Container: ${event.Actor.Attributes.name}`);
|
||||||
|
}
|
||||||
|
},
|
||||||
|
error: (err) => console.error('❌ Event stream error:', err),
|
||||||
|
complete: () => console.log('Event stream completed'),
|
||||||
|
});
|
||||||
|
|
||||||
|
// Unsubscribe when done
|
||||||
|
// subscription.unsubscribe();
|
||||||
|
```
|
||||||
|
|
||||||
|
### 🔧 Registry Authentication
|
||||||
|
|
||||||
|
Authenticate with Docker registries to pull private images:
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
// Authenticate with a registry
|
||||||
|
await docker.auth({
|
||||||
|
username: 'your-username',
|
||||||
|
password: 'your-password',
|
||||||
|
serveraddress: 'https://index.docker.io/v1/', // Docker Hub
|
||||||
|
});
|
||||||
|
|
||||||
|
console.log('✅ Authenticated with registry');
|
||||||
|
|
||||||
|
// Or read credentials from Docker config file
|
||||||
|
const authToken = await docker.getAuthTokenFromDockerConfig('registry.example.com');
|
||||||
|
|
||||||
|
// Now you can pull private images
|
||||||
|
const privateImage = await DockerImage.createFromRegistry(docker, {
|
||||||
|
creationObject: {
|
||||||
|
imageUrl: 'registry.example.com/private/app',
|
||||||
|
imageTag: 'latest',
|
||||||
|
},
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
## 🏗️ Advanced Examples
|
||||||
|
|
||||||
|
### Complete Application Stack with Swarm
|
||||||
|
|
||||||
|
Deploy a complete multi-service application stack:
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
import { DockerHost, DockerNetwork, DockerSecret, DockerService, DockerImage } from '@apiclient.xyz/docker';
|
||||||
|
|
||||||
|
async function deployStack() {
|
||||||
|
const docker = new DockerHost({});
|
||||||
|
await docker.start();
|
||||||
|
|
||||||
|
// Initialize swarm
|
||||||
|
await docker.activateSwarm();
|
||||||
|
console.log('✅ Swarm initialized');
|
||||||
|
|
||||||
|
// Create overlay network for service communication
|
||||||
|
const network = await DockerNetwork.createNetwork(docker, {
|
||||||
|
Name: 'app-network',
|
||||||
|
Driver: 'overlay',
|
||||||
|
Attachable: true,
|
||||||
});
|
});
|
||||||
console.log(`Network Created: ${network.Id}`);
|
console.log('✅ Network created');
|
||||||
|
|
||||||
|
// Create secrets
|
||||||
|
const dbPassword = await DockerSecret.createSecret(docker, {
|
||||||
|
name: 'db-password',
|
||||||
|
version: '1.0.0',
|
||||||
|
contentArg: 'strong-database-password',
|
||||||
|
labels: { app: 'stack' },
|
||||||
|
});
|
||||||
|
console.log('✅ Secrets created');
|
||||||
|
|
||||||
|
// Pull images
|
||||||
|
const postgresImage = await DockerImage.createFromRegistry(docker, {
|
||||||
|
creationObject: {
|
||||||
|
imageUrl: 'postgres',
|
||||||
|
imageTag: '14-alpine',
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
const appImage = await DockerImage.createFromRegistry(docker, {
|
||||||
|
creationObject: {
|
||||||
|
imageUrl: 'my-app',
|
||||||
|
imageTag: 'latest',
|
||||||
|
},
|
||||||
|
});
|
||||||
|
console.log('✅ Images pulled');
|
||||||
|
|
||||||
|
// Deploy database service
|
||||||
|
const dbService = await DockerService.createService(docker, {
|
||||||
|
name: 'postgres-db',
|
||||||
|
image: postgresImage,
|
||||||
|
labels: { tier: 'database' },
|
||||||
|
networks: [network],
|
||||||
|
networkAlias: 'postgres',
|
||||||
|
secrets: [dbPassword],
|
||||||
|
ports: [],
|
||||||
|
resources: {
|
||||||
|
memorySizeMB: 1024,
|
||||||
|
},
|
||||||
|
});
|
||||||
|
console.log('✅ Database service deployed');
|
||||||
|
|
||||||
|
// Deploy application service
|
||||||
|
const appService = await DockerService.createService(docker, {
|
||||||
|
name: 'web-app',
|
||||||
|
image: appImage,
|
||||||
|
labels: { tier: 'application' },
|
||||||
|
networks: [network],
|
||||||
|
networkAlias: 'app',
|
||||||
|
secrets: [dbPassword],
|
||||||
|
ports: ['80:3000'],
|
||||||
|
resources: {
|
||||||
|
memorySizeMB: 512,
|
||||||
|
},
|
||||||
|
});
|
||||||
|
console.log('✅ Application service deployed');
|
||||||
|
|
||||||
|
console.log('🚀 Stack deployment complete!');
|
||||||
}
|
}
|
||||||
|
|
||||||
listAndCreateNetwork();
|
deployStack().catch(console.error);
|
||||||
```
|
```
|
||||||
|
|
||||||
### Advanced Usage
|
### Image Pipeline: Pull, Tag, Export
|
You can leverage the full potential of the Docker Remote API with `@apiclient.xyz/docker`. This includes managing images, volumes, swarms, and more. The package's design is consistent and intuitive, making it easy to extend your usage as needed.

Remember, the Docker Remote API offers extensive capabilities. Always refer to the [Docker API documentation](https://docs.docker.com/engine/api/latest/) for a comprehensive list of endpoints and actions you can perform.

### Conclusion

`@apiclient.xyz/docker` simplifies interaction with Docker's Remote API in TypeScript projects, providing strong typing and asynchronous operations. Whether you're managing containers, images, services or networks, it offers a comprehensive toolset to perform these tasks seamlessly.

```typescript
async function imagePipeline() {
  const docker = new DockerHost({});
  await docker.start();

  // Pull latest image
  const image = await DockerImage.createFromRegistry(docker, {
    creationObject: {
      imageUrl: 'node',
      imageTag: '18-alpine',
    },
  });
  console.log('✅ Image pulled');

  // Tag for private registry
  await DockerImage.tagImageByIdOrName(docker, 'node:18-alpine', {
    registry: 'registry.company.com',
    imageName: 'base/node',
    imageTag: 'v18-alpine',
  });
  console.log('✅ Image tagged');

  // Export to tar
  const exportStream = await image.exportToTarStream();
  const writeStream = fs.createWriteStream('./node-18-alpine.tar');

  exportStream.pipe(writeStream);

  await new Promise((resolve, reject) => {
    writeStream.on('finish', resolve);
    writeStream.on('error', reject);
  });
  console.log('✅ Image exported to tar');

  await docker.stop();
}
```

## 🔍 TypeScript Support

Full TypeScript definitions for all Docker API entities:

```typescript
import type {
  IDockerHostConstructorOptions,
  IImageCreationDescriptor,
  IServiceCreationDescriptor,
  ISecretCreationDescriptor,
  TLabels,
} from '@apiclient.xyz/docker';

// Full IntelliSense support
const options: IDockerHostConstructorOptions = {
  socketPath: '/var/run/docker.sock',
  imageStoreDir: '/tmp/docker-images',
};

const imageConfig: IImageCreationDescriptor = {
  imageUrl: 'nginx',
  imageTag: 'alpine',
};

const labels: TLabels = {
  app: 'my-app',
  environment: 'production',
};
```

## 🎯 Real-World Use Cases

### CI/CD Pipeline Integration

```typescript
// In your CI/CD pipeline
const docker = new DockerHost({
  socketPath: process.env.DOCKER_HOST || '/var/run/docker.sock',
});

await docker.start();

// Build and push process
const image = await DockerImage.createFromTarStream(docker, {
  tarStream: buildArtifactStream,
  creationObject: {
    imageUrl: 'my-app',
    imageTag: process.env.CI_COMMIT_SHA,
  },
});

await DockerImage.tagImageByIdOrName(docker, `my-app:${process.env.CI_COMMIT_SHA}`, {
  registry: 'registry.company.com',
  imageName: 'production/my-app',
  imageTag: 'latest',
});

// Push to registry (authentication required)
// Note: Pushing requires proper registry authentication
```

### Dynamic Service Scaling

```typescript
// Monitor and scale services based on load
const services = await docker.getServices();
const webService = services.find(s => s.Spec.Name === 'web-app');

if (webService && webService.Spec.Mode.Replicated) {
  const currentReplicas = webService.Spec.Mode.Replicated.Replicas;
  console.log(`Current replicas: ${currentReplicas}`);

  // Scale based on your metrics
  // (Scaling API would need to be implemented)
}
```

## 📖 API Documentation

- **Package Repository**: [https://code.foss.global/apiclient.xyz/docker](https://code.foss.global/apiclient.xyz/docker)
- **Docker Engine API Reference**: [https://docs.docker.com/engine/api/latest/](https://docs.docker.com/engine/api/latest/)
- **Issues & Bug Reports**: [https://code.foss.global/apiclient.xyz/docker/issues](https://code.foss.global/apiclient.xyz/docker/issues)

## 🔑 Key Concepts

- **DockerHost**: Main entry point for Docker API communication
- **Socket Path Priority**: Constructor option → `DOCKER_HOST` env → CI mode → default socket (see the sketch after this list)
- **Swarm Mode Required**: Services and secrets require Docker Swarm to be activated
- **Type Safety**: Full TypeScript support with comprehensive interfaces
- **Streaming Support**: Real-time event monitoring and tar stream operations
- **S3 Integration**: Built-in image storage/retrieval from S3-compatible storage
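The socket-path priority listed above can be pictured as one small helper. This is a sketch only, not the library's code: the actual resolution happens inside the `DockerHost` constructor (see the `classes.host.ts` hunks further down), and the values used for the CI and default branches here are hypothetical placeholders.

```typescript
// Sketch of the documented resolution order; the CI and default values are
// assumptions, not the library's real constants.
function resolveSocketPath(explicitPath?: string): string {
  // 1. explicit constructor option wins
  if (explicitPath) {
    return explicitPath;
  }
  // 2. DOCKER_HOST environment variable
  if (process.env.DOCKER_HOST) {
    return process.env.DOCKER_HOST;
  }
  // 3. CI mode (placeholder endpoint)
  if (process.env.CI) {
    return 'http://docker:2375/';
  }
  // 4. default local Docker socket (placeholder form)
  return 'http://unix:/var/run/docker.sock:';
}

// e.g. new DockerHost({ socketPath: resolveSocketPath() })
```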
## License and Legal Information

This repository contains open-source code that is licensed under the MIT License. A copy of the MIT License can be found in the [license](license) file within this repository.

**Please note:** The MIT License does not grant permission to use the trade names, trademarks, service marks, or product names of the project, except as required for reasonable and customary use in describing the origin of the work and reproducing the content of the NOTICE file.

@@ -144,7 +699,7 @@ This project is owned and maintained by Task Venture Capital GmbH. The names and

### Company Information

Task Venture Capital GmbH
Registered at District court Bremen HRB 35230 HB, Germany

For any legal inquiries or if you require further information, please contact us via email at hello@task.vc.
40 test-stream.js Normal file
@@ -0,0 +1,40 @@
const { SmartRequest } = require('@push.rocks/smartrequest');

async function test() {
  try {
    const response = await SmartRequest.create()
      .url('http://unix:/run/user/1000/docker.sock:/images/hello-world:latest/get')
      .header('Host', 'docker.sock')
      .get();

    console.log('Response status:', response.status);
    console.log('Response type:', typeof response);

    const stream = response.streamNode();
    console.log('Stream type:', typeof stream);
    console.log('Has on method:', typeof stream.on);

    if (stream) {
      let chunks = 0;
      stream.on('data', (chunk) => {
        chunks++;
        if (chunks <= 3) console.log('Got chunk', chunks, chunk.length);
      });
      stream.on('end', () => {
        console.log('Stream ended, total chunks:', chunks);
        process.exit(0);
      });
      stream.on('error', (err) => {
        console.error('Stream error:', err);
        process.exit(1);
      });
    } else {
      console.log('No stream available');
    }
  } catch (error) {
    console.error('Error:', error);
    process.exit(1);
  }
}

test();
46 test-stream.mjs Normal file
@@ -0,0 +1,46 @@
import { SmartRequest } from '@push.rocks/smartrequest';

async function test() {
  try {
    const response = await SmartRequest.create()
      .url('http://unix:/run/user/1000/docker.sock:/images/hello-world:latest/get')
      .header('Host', 'docker.sock')
      .get();

    console.log('Response status:', response.status);
    console.log('Response type:', typeof response);

    const stream = response.streamNode();
    console.log('Stream type:', typeof stream);
    console.log('Has on method:', typeof stream.on);

    if (stream) {
      let chunks = 0;
      stream.on('data', (chunk) => {
        chunks++;
        if (chunks <= 3) console.log('Got chunk', chunks, chunk.length);
      });
      stream.on('end', () => {
        console.log('Stream ended, total chunks:', chunks);
        process.exit(0);
      });
      stream.on('error', (err) => {
        console.error('Stream error:', err);
        process.exit(1);
      });

      // Set a timeout in case stream doesn't end
      setTimeout(() => {
        console.log('Timeout after 5 seconds');
        process.exit(1);
      }, 5000);
    } else {
      console.log('No stream available');
    }
  } catch (error) {
    console.error('Error:', error);
    process.exit(1);
  }
}

test();
@@ -1,4 +1,4 @@
import { expect, tap } from '@push.rocks/tapbundle';
import { expect, tap } from '@git.zone/tstest/tapbundle';
import { Qenv } from '@push.rocks/qenv';

const testQenv = new Qenv('./', './.nogit/');

@@ -41,7 +41,10 @@ tap.test('should create a network', async () => {
});

tap.test('should remove a network', async () => {
  const webgateway = await docker.DockerNetwork.getNetworkByName(testDockerHost, 'webgateway');
  const webgateway = await docker.DockerNetwork.getNetworkByName(
    testDockerHost,
    'webgateway',
  );
  await webgateway.remove();
});

@@ -78,7 +81,10 @@ tap.test('should create a secret', async () => {
});

tap.test('should remove a secret by name', async () => {
  const mySecret = await docker.DockerSecret.getSecretByName(testDockerHost, 'testSecret');
  const mySecret = await docker.DockerSecret.getSecretByName(
    testDockerHost,
    'testSecret',
  );
  await mySecret.remove();
});

@@ -102,11 +108,14 @@ tap.test('should create a service', async () => {
    labels: {},
    contentArg: '{"hi": "wow"}',
  });
  const testImage = await docker.DockerImage.createFromRegistry(testDockerHost, {
    creationObject: {
      imageUrl: 'code.foss.global/host.today/ht-docker-node:latest',
    }
  });
  const testImage = await docker.DockerImage.createFromRegistry(
    testDockerHost,
    {
      creationObject: {
        imageUrl: 'code.foss.global/host.today/ht-docker-node:latest',
      },
    },
  );
  const testService = await docker.DockerService.createService(testDockerHost, {
    image: testImage,
    labels: {},

@@ -124,13 +133,16 @@ tap.test('should create a service', async () => {

tap.test('should export images', async (toolsArg) => {
  const done = toolsArg.defer();
  const testImage = await docker.DockerImage.createFromRegistry(testDockerHost, {
    creationObject: {
      imageUrl: 'code.foss.global/host.today/ht-docker-node:latest',
    }
  });
  const testImage = await docker.DockerImage.createFromRegistry(
    testDockerHost,
    {
      creationObject: {
        imageUrl: 'code.foss.global/host.today/ht-docker-node:latest',
      },
    },
  );
  const fsWriteStream = plugins.smartfile.fsStream.createWriteStream(
    plugins.path.join(paths.nogitDir, 'testimage.tar')
    plugins.path.join(paths.nogitDir, 'testimage.tar'),
  );
  const exportStream = await testImage.exportToTarStream();
  exportStream.pipe(fsWriteStream).on('finish', () => {

@@ -139,17 +151,20 @@ tap.test('should export images', async (toolsArg) => {
  await done.promise;
});

tap.test('should import images', async (toolsArg) => {
tap.test('should import images', async () => {
  const done = toolsArg.defer();
  const fsReadStream = plugins.smartfile.fsStream.createReadStream(
    plugins.path.join(paths.nogitDir, 'testimage.tar')
    plugins.path.join(paths.nogitDir, 'testimage.tar'),
  );
  await docker.DockerImage.createFromTarStream(testDockerHost, {
    tarStream: fsReadStream,
    creationObject: {
      imageUrl: 'code.foss.global/host.today/ht-docker-node:latest',
    }
  })
  const importedImage = await docker.DockerImage.createFromTarStream(
    testDockerHost,
    {
      tarStream: fsReadStream,
      creationObject: {
        imageUrl: 'code.foss.global/host.today/ht-docker-node:latest',
      },
    },
  );
  expect(importedImage).toBeInstanceOf(docker.DockerImage);
});

tap.test('should expose a working DockerImageStore', async () => {

@@ -163,7 +178,16 @@ tap.test('should expose a working DockerImageStore', async () => {
  await testDockerHost.addS3Storage(s3Descriptor);

  //
  await testDockerHost.imageStore.storeImage('hello', plugins.smartfile.fsStream.createReadStream(plugins.path.join(paths.nogitDir, 'testimage.tar')));
})
  await testDockerHost.imageStore.storeImage(
    'hello2',
    plugins.smartfile.fsStream.createReadStream(
      plugins.path.join(paths.nogitDir, 'testimage.tar'),
    ),
  );
});

tap.test('cleanup', async () => {
  await testDockerHost.stop();
});

export default tap.start();
@@ -3,6 +3,6 @@
 */
export const commitinfo = {
  name: '@apiclient.xyz/docker',
  version: '1.2.6',
  version: '2.0.0',
  description: 'Provides easy communication with Docker remote API from Node.js, with TypeScript support.'
}
@@ -10,7 +10,9 @@ export class DockerContainer {
  /**
   * get all containers
   */
  public static async getContainers(dockerHostArg: DockerHost): Promise<DockerContainer[]> {
  public static async getContainers(
    dockerHostArg: DockerHost,
  ): Promise<DockerContainer[]> {
    const result: DockerContainer[] = [];
    const response = await dockerHostArg.request('GET', '/containers/json');

@@ -34,7 +36,7 @@ export class DockerContainer {
   */
  public static async create(
    dockerHost: DockerHost,
    containerCreationDescriptor: interfaces.IContainerCreationDescriptor
    containerCreationDescriptor: interfaces.IContainerCreationDescriptor,
  ) {
    // check for unique hostname
    const existingContainers = await DockerContainer.getContainers(dockerHost);

@@ -50,7 +52,10 @@ export class DockerContainer {
    if (response.statusCode < 300) {
      logger.log('info', 'Container created successfully');
    } else {
      logger.log('error', 'There has been a problem when creating the container');
      logger.log(
        'error',
        'There has been a problem when creating the container',
      );
    }
  }
@@ -4,7 +4,6 @@ import { DockerContainer } from './classes.container.js';
import { DockerNetwork } from './classes.network.js';
import { DockerService } from './classes.service.js';
import { logger } from './logger.js';
import path from 'path';
import { DockerImageStore } from './classes.imagestore.js';
import { DockerImage } from './classes.image.js';

@@ -15,7 +14,7 @@ export interface IAuthData {
}

export interface IDockerHostConstructorOptions {
  dockerSockPath?: string;
  socketPath?: string;
  imageStoreDir?: string;
}

@@ -37,13 +36,16 @@ export class DockerHost {
  constructor(optionsArg: IDockerHostConstructorOptions) {
    this.options = {
      ...{
        imageStoreDir: plugins.path.join(paths.nogitDir, 'temp-docker-image-store'),
        imageStoreDir: plugins.path.join(
          paths.nogitDir,
          'temp-docker-image-store',
        ),
      },
      ...optionsArg,
    }
    };
    let pathToUse: string;
    if (optionsArg.dockerSockPath) {
    if (optionsArg.socketPath) {
      pathToUse = optionsArg.dockerSockPath;
      pathToUse = optionsArg.socketPath;
    } else if (process.env.DOCKER_HOST) {
      pathToUse = process.env.DOCKER_HOST;
    } else if (process.env.CI) {

@@ -62,7 +64,7 @@ export class DockerHost {
    this.imageStore = new DockerImageStore({
      bucketDir: null,
      localDirPath: this.options.imageStoreDir,
    })
    });
  }

  public async start() {

@@ -84,17 +86,22 @@ export class DockerHost {
      throw new Error(response.body.Status);
    }
    console.log(response.body.Status);
    this.registryToken = plugins.smartstring.base64.encode(plugins.smartjson.stringify(authData));
    this.registryToken = plugins.smartstring.base64.encode(
      plugins.smartjson.stringify(authData),
    );
  }

  /**
   * gets the token from the .docker/config.json file for GitLab registry
   */
  public async getAuthTokenFromDockerConfig(registryUrlArg: string) {
    const dockerConfigPath = plugins.smartpath.get.home('~/.docker/config.json');
    const dockerConfigPath = plugins.smartpath.get.home(
      '~/.docker/config.json',
    );
    const configObject = plugins.smartfile.fs.toObjectSync(dockerConfigPath);
    const gitlabAuthBase64 = configObject.auths[registryUrlArg].auth;
    const gitlabAuth: string = plugins.smartstring.base64.decode(gitlabAuthBase64);
    const gitlabAuth: string =
      plugins.smartstring.base64.decode(gitlabAuthBase64);
    const gitlabAuthArray = gitlabAuth.split(':');
    await this.auth({
      username: gitlabAuthArray[0],

@@ -116,7 +123,9 @@ export class DockerHost {
  /**
   * create a network
   */
  public async createNetwork(optionsArg: Parameters<typeof DockerNetwork.createNetwork>[1]) {
  public async createNetwork(
    optionsArg: Parameters<typeof DockerNetwork.createNetwork>[1],
  ) {
    return await DockerNetwork.createNetwork(this, optionsArg);
  }

@@ -127,7 +136,6 @@ export class DockerHost {
    return await DockerNetwork.getNetworkByName(this, networkNameArg);
  }

  // ==============
  // CONTAINERS
  // ==============

@@ -174,8 +182,12 @@ export class DockerHost {
   */
  public async getEventObservable(): Promise<plugins.rxjs.Observable<any>> {
    const response = await this.requestStreaming('GET', '/events');

    // requestStreaming now returns Node.js stream, not web stream
    const nodeStream = response as plugins.smartstream.stream.Readable;

    return plugins.rxjs.Observable.create((observer) => {
      response.on('data', (data) => {
      nodeStream.on('data', (data) => {
        const eventString = data.toString();
        try {
          const eventObject = JSON.parse(eventString);

@@ -185,7 +197,7 @@ export class DockerHost {
        }
      });
      return () => {
        response.emit('end');
        nodeStream.emit('end');
      };
    });
  }

@@ -226,54 +238,163 @@ export class DockerHost {
   */
  public async request(methodArg: string, routeArg: string, dataArg = {}) {
    const requestUrl = `${this.socketPath}${routeArg}`;
    const response = await plugins.smartrequest.request(requestUrl, {
      method: methodArg,
      headers: {
        'Content-Type': 'application/json',
        'X-Registry-Auth': this.registryToken,
        Host: 'docker.sock',
      },
      requestBody: dataArg,
      keepAlive: false,
    });
    if (response.statusCode !== 200) {
      console.log(response.body);
    }
    return response;
    // Build the request using the fluent API
    const smartRequest = plugins.smartrequest.SmartRequest.create()
      .url(requestUrl)
      .header('Content-Type', 'application/json')
      .header('X-Registry-Auth', this.registryToken)
      .header('Host', 'docker.sock')
      .options({ keepAlive: false });

    // Add body for methods that support it
    if (dataArg && Object.keys(dataArg).length > 0) {
      smartRequest.json(dataArg);
    }

    // Execute the request based on method
    let response;
    switch (methodArg.toUpperCase()) {
      case 'GET':
        response = await smartRequest.get();
        break;
      case 'POST':
        response = await smartRequest.post();
        break;
      case 'PUT':
        response = await smartRequest.put();
        break;
      case 'DELETE':
        response = await smartRequest.delete();
        break;
      default:
        throw new Error(`Unsupported HTTP method: ${methodArg}`);
    }

    // Parse the response body based on content type
    let body;
    const contentType = response.headers['content-type'] || '';

    // Docker's streaming endpoints (like /images/create) return newline-delimited JSON
    // which can't be parsed as a single JSON object
    const isStreamingEndpoint =
      routeArg.includes('/images/create') ||
      routeArg.includes('/images/load') ||
      routeArg.includes('/build');

    if (contentType.includes('application/json') && !isStreamingEndpoint) {
      body = await response.json();
    } else {
      body = await response.text();
      // Try to parse as JSON if it looks like JSON and is not a streaming response
      if (
        !isStreamingEndpoint &&
        body &&
        (body.startsWith('{') || body.startsWith('['))
      ) {
        try {
          body = JSON.parse(body);
        } catch {
          // Keep as text if parsing fails
        }
      }
    }

    // Create a response object compatible with existing code
    const legacyResponse = {
      statusCode: response.status,
      body: body,
      headers: response.headers,
    };

    if (response.status !== 200) {
      console.log(body);
    }

    return legacyResponse;
  }

  public async requestStreaming(methodArg: string, routeArg: string, readStream?: plugins.smartstream.stream.Readable) {
  public async requestStreaming(
    methodArg: string,
    routeArg: string,
    readStream?: plugins.smartstream.stream.Readable,
  ) {
    const requestUrl = `${this.socketPath}${routeArg}`;
    const response = await plugins.smartrequest.request(
      requestUrl,
      {
        method: methodArg,
        headers: {
          'Content-Type': 'application/json',
          'X-Registry-Auth': this.registryToken,
          Host: 'docker.sock',
        },
        requestBody: null,
        keepAlive: false,
      },
      true,
      (readStream ? reqArg => {
        let counter = 0;
        const smartduplex = new plugins.smartstream.SmartDuplex({
          writeFunction: async (chunkArg) => {
            if (counter % 1000 === 0) {
              console.log(`posting chunk ${counter}`);
            }
            counter++;
            return chunkArg;
          }
        });
        readStream.pipe(smartduplex).pipe(reqArg);
      } : null),
    );
    console.log(response.statusCode);
    console.log(response.body);
    return response;
    // Build the request using the fluent API
    const smartRequest = plugins.smartrequest.SmartRequest.create()
      .url(requestUrl)
      .header('Content-Type', 'application/json')
      .header('X-Registry-Auth', this.registryToken)
      .header('Host', 'docker.sock')
      .timeout(30000)
      .options({ keepAlive: false, autoDrain: true }); // Disable auto-drain for streaming

    // If we have a readStream, use the new stream method with logging
    if (readStream) {
      let counter = 0;
      const smartduplex = new plugins.smartstream.SmartDuplex({
        writeFunction: async (chunkArg) => {
          if (counter % 1000 === 0) {
            console.log(`posting chunk ${counter}`);
          }
          counter++;
          return chunkArg;
        },
      });

      // Pipe through the logging duplex stream
      const loggedStream = readStream.pipe(smartduplex);

      // Use the new stream method to stream the data
      smartRequest.stream(loggedStream, 'application/octet-stream');
    }

    // Execute the request based on method
    let response: plugins.smartrequest.ICoreResponse;
    switch (methodArg.toUpperCase()) {
      case 'GET':
        response = await smartRequest.get();
        break;
      case 'POST':
        response = await smartRequest.post();
        break;
      case 'PUT':
        response = await smartRequest.put();
        break;
      case 'DELETE':
        response = await smartRequest.delete();
        break;
      default:
        throw new Error(`Unsupported HTTP method: ${methodArg}`);
    }

    console.log(response.status);

    // For streaming responses, get the web stream
    const webStream = response.stream();

    if (!webStream) {
      // If no stream is available, consume the body as text
      const body = await response.text();
      console.log(body);

      // Return a compatible response object
      return {
        statusCode: response.status,
        body: body,
        headers: response.headers,
      };
    }

    // Convert web ReadableStream to Node.js stream for backward compatibility
    const nodeStream = plugins.smartstream.nodewebhelpers.convertWebReadableToNodeReadable(webStream);

    // Add properties for compatibility
    (nodeStream as any).statusCode = response.status;
    (nodeStream as any).body = ''; // For compatibility

    return nodeStream;
  }

  /**
@@ -285,10 +406,14 @@ export class DockerHost {
    if (!optionsArg.bucketName) {
      throw new Error('bucketName is required');
    }
    const bucket = await this.smartBucket.getBucketByName(optionsArg.bucketName);
    const bucket = await this.smartBucket.getBucketByName(
      optionsArg.bucketName,
    );
    let wantedDirectory = await bucket.getBaseDirectory();
    if (optionsArg.directoryPath) {
      wantedDirectory = await wantedDirectory.getSubDirectoryByName(optionsArg.directoryPath);
      wantedDirectory = await wantedDirectory.getSubDirectoryByName(
        optionsArg.directoryPath,
      );
    }
    this.imageStore.options.bucketDir = wantedDirectory;
  }
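After this refactor, `request()` still hands callers a legacy-shaped `{ statusCode, body, headers }` object, so existing call sites keep working. A minimal usage sketch, assuming a reachable Docker socket and relying only on methods shown in this diff:

```typescript
import { DockerHost } from '@apiclient.xyz/docker';

// Sketch only: illustrates consuming the legacy response shape returned by request().
const docker = new DockerHost({});
await docker.start();

const response = await docker.request('GET', '/containers/json');
if (response.statusCode === 200) {
  // request() has already parsed the JSON body for non-streaming endpoints
  console.log(`running containers: ${response.body.length}`);
}

await docker.stop();
```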
@@ -17,7 +17,10 @@ export class DockerImage {
    return images;
  }

  public static async getImageByName(dockerHost: DockerHost, imageNameArg: string) {
  public static async getImageByName(
    dockerHost: DockerHost,
    imageNameArg: string,
  ) {
    const images = await this.getImages(dockerHost);
    const result = images.find((image) => {
      if (image.RepoTags) {

@@ -32,8 +35,8 @@ export class DockerImage {
  public static async createFromRegistry(
    dockerHostArg: DockerHost,
    optionsArg: {
      creationObject: interfaces.IImageCreationDescriptor
      creationObject: interfaces.IImageCreationDescriptor;
    }
    },
  ): Promise<DockerImage> {
    // lets create a sanatized imageUrlObject
    const imageUrlObject: {

@@ -50,7 +53,7 @@ export class DockerImage {
    const imageTag = imageUrlObject.imageUrl.split(':')[1];
    if (imageUrlObject.imageTag) {
      throw new Error(
        `imageUrl ${imageUrlObject.imageUrl} can't be tagged with ${imageUrlObject.imageTag} because it is already tagged with ${imageTag}`
        `imageUrl ${imageUrlObject.imageUrl} can't be tagged with ${imageUrlObject.imageTag} because it is already tagged with ${imageTag}`,
      );
    } else {
      imageUrlObject.imageUrl = imageUrl;

@@ -65,12 +68,18 @@ export class DockerImage {
    const response = await dockerHostArg.request(
      'POST',
      `/images/create?fromImage=${encodeURIComponent(
        imageUrlObject.imageUrl
        imageUrlObject.imageUrl,
      )}&tag=${encodeURIComponent(imageUrlObject.imageTag)}`
      )}&tag=${encodeURIComponent(imageUrlObject.imageTag)}`,
    );
    if (response.statusCode < 300) {
      logger.log('info', `Successfully pulled image ${imageUrlObject.imageUrl} from the registry`);
      const image = await DockerImage.getImageByName(dockerHostArg, imageUrlObject.imageOriginTag);
      logger.log(
        'info',
        `Successfully pulled image ${imageUrlObject.imageUrl} from the registry`,
      );
      const image = await DockerImage.getImageByName(
        dockerHostArg,
        imageUrlObject.imageOriginTag,
      );
      return image;
    } else {
      logger.log('error', `Failed at the attempt of creating a new image`);

@@ -78,26 +87,108 @@ export class DockerImage {
  }

  /**
   *
   * @param dockerHostArg
   * @param tarStreamArg
   */
  public static async createFromTarStream(dockerHostArg: DockerHost, optionsArg: {
    creationObject: interfaces.IImageCreationDescriptor,
    tarStream: plugins.smartstream.stream.Readable,
  }) {
    const response = await dockerHostArg.requestStreaming('POST', '/images/load', optionsArg.tarStream);
    return response;
  public static async createFromTarStream(
    dockerHostArg: DockerHost,
    optionsArg: {
      creationObject: interfaces.IImageCreationDescriptor;
      tarStream: plugins.smartstream.stream.Readable;
    },
  ): Promise<DockerImage> {
    // Start the request for importing an image
    const response = await dockerHostArg.requestStreaming(
      'POST',
      '/images/load',
      optionsArg.tarStream,
    );

    // requestStreaming now returns Node.js stream
    const nodeStream = response as plugins.smartstream.stream.Readable;

    /**
     * Docker typically returns lines like:
     * {"stream":"Loaded image: myrepo/myimage:latest"}
     *
     * So we will collect those lines and parse out the final image name.
     */
    let rawOutput = '';
    nodeStream.on('data', (chunk) => {
      rawOutput += chunk.toString();
    });

    // Wrap the end event in a Promise for easier async/await usage
    await new Promise<void>((resolve, reject) => {
      nodeStream.on('end', () => {
        resolve();
      });
      nodeStream.on('error', (err) => {
        reject(err);
      });
    });

    // Attempt to parse each line to find something like "Loaded image: ..."
    let loadedImageTag: string | undefined;
    const lines = rawOutput.trim().split('\n').filter(Boolean);

    for (const line of lines) {
      try {
        const jsonLine = JSON.parse(line);
        if (
          jsonLine.stream &&
          (jsonLine.stream.startsWith('Loaded image:') ||
            jsonLine.stream.startsWith('Loaded image ID:'))
        ) {
          // Examples:
          // "Loaded image: your-image:latest"
          // "Loaded image ID: sha256:...."
          loadedImageTag = jsonLine.stream
            .replace('Loaded image: ', '')
            .replace('Loaded image ID: ', '')
            .trim();
        }
      } catch {
        // not valid JSON, ignore
      }
    }

    if (!loadedImageTag) {
      throw new Error(
        `Could not parse the loaded image info from Docker response.\nResponse was:\n${rawOutput}`,
      );
    }

    // Now try to look up that image by the "loadedImageTag".
    // Depending on Docker's response, it might be something like:
    // "myrepo/myimage:latest" OR "sha256:someHash..."
    // If Docker gave you an ID (e.g. "sha256:..."), you may need a separate
    // DockerImage.getImageById method; or if you prefer, you can treat it as a name.
    const newlyImportedImage = await DockerImage.getImageByName(
      dockerHostArg,
      loadedImageTag,
    );

    if (!newlyImportedImage) {
      throw new Error(
        `Image load succeeded, but no local reference found for "${loadedImageTag}".`,
      );
    }

    logger.log('info', `Successfully imported image "${loadedImageTag}".`);

    return newlyImportedImage;
  }

  public static async tagImageByIdOrName(
    dockerHost: DockerHost,
    idOrNameArg: string,
    newTagArg: string
    newTagArg: string,
  ) {
    const response = await dockerHost.request(
      'POST',
      `/images/${encodeURIComponent(idOrNameArg)}/${encodeURIComponent(newTagArg)}`
      `/images/${encodeURIComponent(idOrNameArg)}/${encodeURIComponent(newTagArg)}`,
    );
  }

@@ -167,27 +258,41 @@ export class DockerImage {
   */
  public async exportToTarStream(): Promise<plugins.smartstream.stream.Readable> {
    logger.log('info', `Exporting image ${this.RepoTags[0]} to tar stream.`);
    const response = await this.dockerHost.requestStreaming('GET', `/images/${encodeURIComponent(this.RepoTags[0])}/get`);
    let counter = 0;
    const webduplexStream = new plugins.smartstream.SmartDuplex({
      writeFunction: async (chunk, tools) => {
        if (counter % 1000 === 0)
        console.log(`Got chunk: ${counter}`);
        counter++;
        return chunk;
      }
    });
    response.on('data', (chunk) => {
      if (!webduplexStream.write(chunk)) {
        response.pause();
        webduplexStream.once('drain', () => {
          response.resume();
        })
      };
    });
    response.on('end', () => {
      webduplexStream.end();
    })
    const response = await this.dockerHost.requestStreaming(
      'GET',
      `/images/${encodeURIComponent(this.RepoTags[0])}/get`,
    );

    // requestStreaming now returns Node.js stream
    const nodeStream = response as plugins.smartstream.stream.Readable;

    let counter = 0;
    const webduplexStream = new plugins.smartstream.SmartDuplex({
      writeFunction: async (chunk, tools) => {
        if (counter % 1000 === 0) console.log(`Got chunk: ${counter}`);
        counter++;
        return chunk;
      },
    });

    nodeStream.on('data', (chunk) => {
      if (!webduplexStream.write(chunk)) {
        nodeStream.pause();
        webduplexStream.once('drain', () => {
          nodeStream.resume();
        });
      }
    });

    nodeStream.on('end', () => {
      webduplexStream.end();
    });

    nodeStream.on('error', (error) => {
      logger.log('error', `Error during image export: ${error.message}`);
      webduplexStream.destroy(error);
    });

    return webduplexStream;
  }
}
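For reference, `/images/load` answers with newline-delimited JSON, which is why `createFromTarStream` now scans each line for a `Loaded image` marker instead of parsing the whole body at once. A standalone illustration of that parsing step, using an invented sample payload:

```typescript
// Sample NDJSON payload in the shape Docker tends to emit (made-up data, not captured output).
const rawOutput = [
  '{"stream":"Loading layer  5.3MB/5.3MB"}',
  '{"stream":"Loaded image: myrepo/myimage:latest"}',
].join('\n');

let loadedImageTag: string | undefined;
for (const line of rawOutput.trim().split('\n').filter(Boolean)) {
  try {
    const parsed = JSON.parse(line);
    if (typeof parsed.stream === 'string' && parsed.stream.startsWith('Loaded image')) {
      // strips either "Loaded image: " or "Loaded image ID: "
      loadedImageTag = parsed.stream.replace(/^Loaded image( ID)?: /, '').trim();
    }
  } catch {
    // ignore lines that are not valid JSON
  }
}

console.log(loadedImageTag); // "myrepo/myimage:latest"
```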
@@ -22,14 +22,25 @@ export class DockerImageStore {
  }

  // Method to store tar stream
  public async storeImage(imageName: string, tarStream: plugins.smartstream.stream.Readable): Promise<void> {
  public async storeImage(
    imageName: string,
    tarStream: plugins.smartstream.stream.Readable,
  ): Promise<void> {
    logger.log('info', `Storing image ${imageName}...`);
    const uniqueProcessingId = plugins.smartunique.shortId();

    const initialTarDownloadPath = plugins.path.join(this.options.localDirPath, `${uniqueProcessingId}.tar`);
    const extractionDir = plugins.path.join(this.options.localDirPath, uniqueProcessingId);
    const initialTarDownloadPath = plugins.path.join(
      this.options.localDirPath,
      `${uniqueProcessingId}.tar`,
    );
    const extractionDir = plugins.path.join(
      this.options.localDirPath,
      uniqueProcessingId,
    );
    // Create a write stream to store the tar file
    const writeStream = plugins.smartfile.fsStream.createWriteStream(initialTarDownloadPath);
    const writeStream = plugins.smartfile.fsStream.createWriteStream(
      initialTarDownloadPath,
    );

    // lets wait for the write stream to finish
    await new Promise((resolve, reject) => {

@@ -37,23 +48,43 @@ export class DockerImageStore {
      writeStream.on('finish', resolve);
      writeStream.on('error', reject);
    });
    logger.log('info', `Image ${imageName} stored locally for processing. Extracting...`);
    logger.log(
      'info',
      `Image ${imageName} stored locally for processing. Extracting...`,
    );

    // lets process the image
    const tarArchive = await plugins.smartarchive.SmartArchive.fromArchiveFile(initialTarDownloadPath);
    const tarArchive = await plugins.smartarchive.SmartArchive.fromArchiveFile(
      initialTarDownloadPath,
    );
    await tarArchive.exportToFs(extractionDir);
    logger.log('info', `Image ${imageName} extracted.`);
    await plugins.smartfile.fs.remove(initialTarDownloadPath);
    logger.log('info', `deleted original tar to save space.`);
    logger.log('info', `now repackaging for s3...`);
    const smartfileIndexJson = await plugins.smartfile.SmartFile.fromFilePath(plugins.path.join(extractionDir, 'index.json'));
    const smartfileManifestJson = await plugins.smartfile.SmartFile.fromFilePath(plugins.path.join(extractionDir, 'manifest.json'));
    const smartfileOciLayoutJson = await plugins.smartfile.SmartFile.fromFilePath(plugins.path.join(extractionDir, 'oci-layout'));
    const smartfileRepositoriesJson = await plugins.smartfile.SmartFile.fromFilePath(plugins.path.join(extractionDir, 'repositories'));
    const smartfileIndexJson = await plugins.smartfile.SmartFile.fromFilePath(
      plugins.path.join(extractionDir, 'index.json'),
    );
    const smartfileManifestJson =
      await plugins.smartfile.SmartFile.fromFilePath(
        plugins.path.join(extractionDir, 'manifest.json'),
      );
    const smartfileOciLayoutJson =
      await plugins.smartfile.SmartFile.fromFilePath(
        plugins.path.join(extractionDir, 'oci-layout'),
      );
    const smartfileRepositoriesJson =
      await plugins.smartfile.SmartFile.fromFilePath(
        plugins.path.join(extractionDir, 'repositories'),
      );
    const indexJson = JSON.parse(smartfileIndexJson.contents.toString());
    const manifestJson = JSON.parse(smartfileManifestJson.contents.toString());
    const ociLayoutJson = JSON.parse(smartfileOciLayoutJson.contents.toString());
    const repositoriesJson = JSON.parse(smartfileRepositoriesJson.contents.toString());
    const ociLayoutJson = JSON.parse(
      smartfileOciLayoutJson.contents.toString(),
    );
    const repositoriesJson = JSON.parse(
      smartfileRepositoriesJson.contents.toString(),
    );

    indexJson.manifests[0].annotations['io.containerd.image.name'] = imageName;
    manifestJson[0].RepoTags[0] = imageName;

@@ -62,10 +93,18 @@ export class DockerImageStore {
    repositoriesJson[imageName] = repoFirstValue;
    delete repositoriesJson[repoFirstKey];

    smartfileIndexJson.contents = Buffer.from(JSON.stringify(indexJson, null, 2));
    smartfileManifestJson.contents = Buffer.from(JSON.stringify(manifestJson, null, 2));
    smartfileOciLayoutJson.contents = Buffer.from(JSON.stringify(ociLayoutJson, null, 2));
    smartfileRepositoriesJson.contents = Buffer.from(JSON.stringify(repositoriesJson, null, 2));
    smartfileIndexJson.contents = Buffer.from(
      JSON.stringify(indexJson, null, 2),
    );
    smartfileManifestJson.contents = Buffer.from(
      JSON.stringify(manifestJson, null, 2),
    );
    smartfileOciLayoutJson.contents = Buffer.from(
      JSON.stringify(ociLayoutJson, null, 2),
    );
    smartfileRepositoriesJson.contents = Buffer.from(
      JSON.stringify(repositoriesJson, null, 2),
    );
    await Promise.all([
      smartfileIndexJson.write(),
      smartfileManifestJson.write(),

@@ -77,8 +116,12 @@ export class DockerImageStore {
    const tartools = new plugins.smartarchive.TarTools();
    const newTarPack = await tartools.packDirectory(extractionDir);
    const finalTarName = `${uniqueProcessingId}.processed.tar`;
    const finalTarPath = plugins.path.join(this.options.localDirPath, finalTarName);
    const finalWriteStream = plugins.smartfile.fsStream.createWriteStream(finalTarPath);
    const finalTarPath = plugins.path.join(
      this.options.localDirPath,
      finalTarName,
    );
    const finalWriteStream =
      plugins.smartfile.fsStream.createWriteStream(finalTarPath);
    await new Promise((resolve, reject) => {
      newTarPack.finalize();
      newTarPack.pipe(finalWriteStream);

@@ -87,7 +130,8 @@ export class DockerImageStore {
    });
    logger.log('ok', `Repackaged image ${imageName} for s3.`);
    await plugins.smartfile.fs.remove(extractionDir);
    const finalTarReadStream = plugins.smartfile.fsStream.createReadStream(finalTarPath);
    const finalTarReadStream =
      plugins.smartfile.fsStream.createReadStream(finalTarPath);
    await this.options.bucketDir.fastPutStream({
      stream: finalTarReadStream,
      path: `${imageName}.tar`,

@@ -102,8 +146,13 @@ export class DockerImageStore {
  public async stop() {}

  // Method to retrieve tar stream
  public async getImage(imageName: string): Promise<plugins.smartstream.stream.Readable> {
    const imagePath = plugins.path.join(this.options.localDirPath, `${imageName}.tar`);
  public async getImage(
    imageName: string,
  ): Promise<plugins.smartstream.stream.Readable> {
    const imagePath = plugins.path.join(
      this.options.localDirPath,
      `${imageName}.tar`,
    );

    if (!(await plugins.smartfile.fs.fileExists(imagePath))) {
      throw new Error(`Image ${imageName} does not exist.`);
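The repackaging in `storeImage()` ultimately comes down to renaming the image inside the extracted OCI layout before the tar is rebuilt and pushed to S3. A simplified sketch of that in-place retag, assuming the four metadata files have already been parsed into plain objects as above (the helper name is hypothetical):

```typescript
// Sketch of the retag step; mirrors the assignments made in storeImage().
function retagOciLayout(
  imageName: string,
  indexJson: any,
  manifestJson: any,
  repositoriesJson: Record<string, any>,
): void {
  indexJson.manifests[0].annotations['io.containerd.image.name'] = imageName;
  manifestJson[0].RepoTags[0] = imageName;

  // move the first repository entry over to the new name
  const [repoFirstKey] = Object.keys(repositoriesJson);
  repositoriesJson[imageName] = repositoriesJson[repoFirstKey];
  if (repoFirstKey !== imageName) {
    delete repositoriesJson[repoFirstKey];
  }
}
```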
@@ -6,7 +6,9 @@ import { DockerService } from './classes.service.js';
import { logger } from './logger.js';

export class DockerNetwork {
  public static async getNetworks(dockerHost: DockerHost): Promise<DockerNetwork[]> {
  public static async getNetworks(
    dockerHost: DockerHost,
  ): Promise<DockerNetwork[]> {
    const dockerNetworks: DockerNetwork[] = [];
    const response = await dockerHost.request('GET', '/networks');
    for (const networkObject of response.body) {

@@ -17,14 +19,19 @@ export class DockerNetwork {
    return dockerNetworks;
  }

  public static async getNetworkByName(dockerHost: DockerHost, dockerNetworkNameArg: string) {
  public static async getNetworkByName(
    dockerHost: DockerHost,
    dockerNetworkNameArg: string,
  ) {
    const networks = await DockerNetwork.getNetworks(dockerHost);
    return networks.find((dockerNetwork) => dockerNetwork.Name === dockerNetworkNameArg);
    return networks.find(
      (dockerNetwork) => dockerNetwork.Name === dockerNetworkNameArg,
    );
  }

  public static async createNetwork(
    dockerHost: DockerHost,
    networkCreationDescriptor: interfaces.INetworkCreationDescriptor
    networkCreationDescriptor: interfaces.INetworkCreationDescriptor,
  ): Promise<DockerNetwork> {
    const response = await dockerHost.request('POST', '/networks/create', {
      Name: networkCreationDescriptor.Name,

@@ -47,9 +54,15 @@ export class DockerNetwork {
    });
    if (response.statusCode < 300) {
      logger.log('info', 'Created network successfully');
      return await DockerNetwork.getNetworkByName(dockerHost, networkCreationDescriptor.Name);
      return await DockerNetwork.getNetworkByName(
        dockerHost,
        networkCreationDescriptor.Name,
      );
    } else {
      logger.log('error', 'There has been an error creating the wanted network');
      logger.log(
        'error',
        'There has been an error creating the wanted network',
      );
      return null;
    }
  }

@@ -75,7 +88,7 @@ export class DockerNetwork {
      Subnet: string;
      IPRange: string;
      Gateway: string;
    }
    },
  ];
  };

@@ -87,7 +100,10 @@ export class DockerNetwork {
   * removes the network
   */
  public async remove() {
    const response = await this.dockerHost.request('DELETE', `/networks/${this.Id}`);
    const response = await this.dockerHost.request(
      'DELETE',
      `/networks/${this.Id}`,
    );
  }

  public async getContainersOnNetwork(): Promise<

@@ -100,7 +116,10 @@ export class DockerNetwork {
    }>
  > {
    const returnArray = [];
    const response = await this.dockerHost.request('GET', `/networks/${this.Id}`);
    const response = await this.dockerHost.request(
      'GET',
      `/networks/${this.Id}`,
    );
    for (const key of Object.keys(response.body.Containers)) {
      returnArray.push(response.body.Containers[key]);
    }
@@ -22,14 +22,17 @@ export class DockerSecret {
    return secrets.find((secret) => secret.ID === idArg);
  }

  public static async getSecretByName(dockerHostArg: DockerHost, nameArg: string) {
  public static async getSecretByName(
    dockerHostArg: DockerHost,
    nameArg: string,
  ) {
    const secrets = await this.getSecrets(dockerHostArg);
    return secrets.find((secret) => secret.Spec.Name === nameArg);
  }

  public static async createSecret(
    dockerHostArg: DockerHost,
    secretDescriptor: interfaces.ISecretCreationDescriptor
    secretDescriptor: interfaces.ISecretCreationDescriptor,
  ) {
    const labels: interfaces.TLabels = {
      ...secretDescriptor.labels,

@@ -45,7 +48,7 @@ export class DockerSecret {
    Object.assign(newSecretInstance, response.body);
    Object.assign(
      newSecretInstance,
      await DockerSecret.getSecretByID(dockerHostArg, newSecretInstance.ID)
      await DockerSecret.getSecretByID(dockerHostArg, newSecretInstance.ID),
    );
    return newSecretInstance;
  }

@@ -77,7 +80,7 @@ export class DockerSecret {
        Name: this.Spec.Name,
        Labels: this.Spec.Labels,
        Data: plugins.smartstring.base64.encode(contentArg),
      }
      },
    );
  }
@@ -21,7 +21,7 @@ export class DockerService {
|
|||||||
|
|
||||||
public static async getServiceByName(
|
public static async getServiceByName(
|
||||||
dockerHost: DockerHost,
|
dockerHost: DockerHost,
|
||||||
networkName: string
|
networkName: string,
|
||||||
): Promise<DockerService> {
|
): Promise<DockerService> {
|
||||||
const allServices = await DockerService.getServices(dockerHost);
|
const allServices = await DockerService.getServices(dockerHost);
|
||||||
const wantedService = allServices.find((service) => {
|
const wantedService = allServices.find((service) => {
|
||||||
@@ -35,10 +35,13 @@ export class DockerService {
|
|||||||
*/
|
*/
|
||||||
public static async createService(
|
public static async createService(
|
||||||
dockerHost: DockerHost,
|
dockerHost: DockerHost,
|
||||||
serviceCreationDescriptor: interfaces.IServiceCreationDescriptor
|
serviceCreationDescriptor: interfaces.IServiceCreationDescriptor,
|
||||||
): Promise<DockerService> {
|
): Promise<DockerService> {
|
||||||
// lets get the image
|
// lets get the image
|
||||||
logger.log('info', `now creating service ${serviceCreationDescriptor.name}`);
|
logger.log(
|
||||||
|
'info',
|
||||||
|
`now creating service ${serviceCreationDescriptor.name}`,
|
||||||
|
);
|
||||||
|
|
||||||
// await serviceCreationDescriptor.image.pullLatestImageFromRegistry();
|
// await serviceCreationDescriptor.image.pullLatestImageFromRegistry();
|
||||||
const serviceVersion = await serviceCreationDescriptor.image.getVersion();
|
const serviceVersion = await serviceCreationDescriptor.image.getVersion();
|
||||||
@@ -71,8 +74,12 @@ export class DockerService
       });
     }

-    if (serviceCreationDescriptor.resources && serviceCreationDescriptor.resources.volumeMounts) {
-      for (const volumeMount of serviceCreationDescriptor.resources.volumeMounts) {
+    if (
+      serviceCreationDescriptor.resources &&
+      serviceCreationDescriptor.resources.volumeMounts
+    ) {
+      for (const volumeMount of serviceCreationDescriptor.resources
+        .volumeMounts) {
         mounts.push({
           Target: volumeMount.containerFsPath,
           Source: volumeMount.hostFsPath,
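The entries pushed into `mounts` above follow the Docker Engine API's Mount object. A rough sketch of that shape, where only `Target` and `Source` come from the diff and the remaining fields are assumptions for illustration:

```ts
// Sketch of a single Mounts entry as sent with the service spec.
// Target/Source mirror containerFsPath/hostFsPath from the diff;
// Type and ReadOnly are shown as illustrative, optional extras.
interface MountSketch {
  Target: string; // path inside the container
  Source: string; // path on the host
  Type?: 'bind' | 'volume' | 'tmpfs';
  ReadOnly?: boolean;
}

const exampleMounts: MountSketch[] = [
  { Target: '/data', Source: '/srv/app-data', Type: 'bind' },
];
```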
@@ -89,6 +96,11 @@ export class DockerService
     }> = [];

     for (const network of serviceCreationDescriptor.networks) {
+      // Skip null networks (can happen if network creation fails)
+      if (!network) {
+        logger.log('warn', 'Skipping null network in service creation');
+        continue;
+      }
       networkArray.push({
         Target: network.Name,
         Aliases: [serviceCreationDescriptor.networkAlias],
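The guard added above skips falsy networks one by one inside the loop. An equivalent formulation, sketched with illustrative names rather than this module's API, filters them out before building the attachment list:

```ts
// Sketch only: drop null networks before mapping them to attachments.
interface NetworkLike {
  Name: string;
}

function buildNetworkArray(
  networks: Array<NetworkLike | null>,
  networkAlias: string,
) {
  return networks
    .filter((network): network is NetworkLike => network !== null)
    .map((network) => ({
      Target: network.Name,
      Aliases: [networkAlias],
    }));
}

// buildNetworkArray([{ Name: 'webgateway' }, null], 'myservice')
// -> [{ Target: 'webgateway', Aliases: ['myservice'] }]
```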
@@ -125,7 +137,8 @@ export class DockerService
     // lets configure limits

     const memoryLimitMB =
-      serviceCreationDescriptor.resources && serviceCreationDescriptor.resources.memorySizeMB
+      serviceCreationDescriptor.resources &&
+      serviceCreationDescriptor.resources.memorySizeMB
         ? serviceCreationDescriptor.resources.memorySizeMB
         : 1000;

@@ -134,7 +147,8 @@ export class DockerService
     };

     if (serviceCreationDescriptor.resources) {
-      limits.MemoryBytes = serviceCreationDescriptor.resources.memorySizeMB * 1000000;
+      limits.MemoryBytes =
+        serviceCreationDescriptor.resources.memorySizeMB * 1000000;
     }

     const response = await dockerHost.request('POST', '/services/create', {
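The factor of 1000000 means `memorySizeMB` is interpreted as decimal megabytes (10^6 bytes), not mebibytes, so the 1000 MB fallback becomes 1,000,000,000 bytes. A tiny worked sketch of that conversion (helper name is illustrative):

```ts
// Sketch: decimal-MB to bytes, as used for Limits.MemoryBytes above.
// Note: MB here is 10^6 bytes, not MiB (2^20 bytes).
const mbToBytes = (megabytes: number): number => megabytes * 1_000_000;

mbToBytes(1000); // 1_000_000_000 bytes - the default when no resources are given
mbToBytes(512);  // 512_000_000 bytes
```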
@@ -177,7 +191,7 @@ export class DockerService

     const createdService = await DockerService.getServiceByName(
       dockerHost,
-      serviceCreationDescriptor.name
+      serviceCreationDescriptor.name,
     );
     return createdService;
   }
@@ -223,7 +237,10 @@ export class DockerService
   }

   public async reReadFromDockerEngine() {
-    const dockerData = await this.dockerHostRef.request('GET', `/services/${this.ID}`);
+    const dockerData = await this.dockerHostRef.request(
+      'GET',
+      `/services/${this.ID}`,
+    );
     // TODO: Better assign: Object.assign(this, dockerData);
   }
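The TODO in this hunk points at copying the fresh engine response onto the instance, mirroring the `Object.assign(newSecretInstance, response.body)` pattern used for secrets earlier in this diff. A standalone sketch under that assumption (`requestFn` is a stand-in for the host's request method, not this class's actual API):

```ts
// Sketch only: re-hydrate a local service object from GET /services/{id}.
async function refreshService<T extends { ID: string }>(
  service: T,
  requestFn: (method: string, path: string) => Promise<{ body: object }>,
): Promise<T> {
  const dockerData = await requestFn('GET', `/services/${service.ID}`);
  // Copy the fresh service description onto the existing instance.
  return Object.assign(service, dockerData.body);
}
```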
@@ -231,14 +248,21 @@ export class DockerService
     // TODO: implement digest based update recognition

     await this.reReadFromDockerEngine();
-    const dockerImage = await DockerImage.createFromRegistry(this.dockerHostRef, {
-      creationObject: {
-        imageUrl: this.Spec.TaskTemplate.ContainerSpec.Image,
-      }
-    });
+    const dockerImage = await DockerImage.createFromRegistry(
+      this.dockerHostRef,
+      {
+        creationObject: {
+          imageUrl: this.Spec.TaskTemplate.ContainerSpec.Image,
+        },
+      },
+    );

-    const imageVersion = new plugins.smartversion.SmartVersion(dockerImage.Labels.version);
-    const serviceVersion = new plugins.smartversion.SmartVersion(this.Spec.Labels.version);
+    const imageVersion = new plugins.smartversion.SmartVersion(
+      dockerImage.Labels.version,
+    );
+    const serviceVersion = new plugins.smartversion.SmartVersion(
+      this.Spec.Labels.version,
+    );
     if (imageVersion.greaterThan(serviceVersion)) {
       console.log(`service ${this.Spec.Name} needs to be updated`);
       return true;
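The update check above builds two SmartVersion instances from the image label and the service label and compares them with `greaterThan`. A self-contained sketch of the same "image newer than service" decision without the smartversion wrapper (helper names are illustrative):

```ts
// Sketch only: compare two semver-style version strings, major.minor.patch.
type SemverTuple = [major: number, minor: number, patch: number];

const parseVersion = (version: string): SemverTuple => {
  const [major = 0, minor = 0, patch = 0] = version
    .split('.')
    .map((part) => parseInt(part, 10));
  return [major, minor, patch];
};

const imageIsNewer = (imageVersion: string, serviceVersion: string): boolean => {
  const a = parseVersion(imageVersion);
  const b = parseVersion(serviceVersion);
  for (let i = 0; i < 3; i++) {
    if (a[i] !== b[i]) return a[i] > b[i];
  }
  return false;
};

imageIsNewer('1.3.0', '1.2.9'); // true -> the service needs an update
```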
@@ -2,4 +2,4 @@ import * as plugins from './plugins.js';
 import { commitinfo } from './00_commitinfo_data.js';

 export const logger = plugins.smartlog.Smartlog.createForCommitinfo(commitinfo);
 logger.enableConsole();
@@ -2,7 +2,7 @@ import * as plugins from './plugins.js';

 export const packageDir = plugins.path.resolve(
   plugins.smartpath.get.dirnameFromImportMetaUrl(import.meta.url),
-  '../'
+  '../',
 );

 export const nogitDir = plugins.path.resolve(packageDir, '.nogit/');
@@ -1,5 +1,5 @@
 // node native path
-import * as path from 'path';
+import * as path from 'node:path';

 export { path };
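The `node:` prefix makes it explicit that the specifier resolves to a Node.js built-in rather than an npm package of the same name; current Node.js versions accept both forms for `path`. For example:

```ts
// Both specifiers resolve to the same built-in module in current Node.js;
// the node: prefix simply rules out accidental resolution to an npm package.
import * as path from 'node:path';

console.log(path.join('dist_ts', 'index.js')); // "dist_ts/index.js" on POSIX
```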
@@ -6,9 +6,9 @@
     "module": "NodeNext",
     "moduleResolution": "NodeNext",
     "esModuleInterop": true,
-    "verbatimModuleSyntax": true
+    "verbatimModuleSyntax": true,
+    "baseUrl": ".",
+    "paths": {}
   },
-  "exclude": [
-    "dist_*/**/*.d.ts"
-  ]
+  "exclude": ["dist_*/**/*.d.ts"]
 }
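Putting the hunk back together, the affected tail of tsconfig.json now reads roughly as follows; compiler options above line 6 are omitted, and the enclosing "compilerOptions" object is inferred from the closing brace in the hunk:

```json
{
  "compilerOptions": {
    "module": "NodeNext",
    "moduleResolution": "NodeNext",
    "esModuleInterop": true,
    "verbatimModuleSyntax": true,
    "baseUrl": ".",
    "paths": {}
  },
  "exclude": ["dist_*/**/*.d.ts"]
}
```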