Compare commits


7 Commits

25 changed files with 9350 additions and 1539 deletions


@@ -6,8 +6,8 @@ on:
       - '**'
 env:
-  IMAGE: registry.gitlab.com/hosttoday/ht-docker-node:npmci
+  IMAGE: code.foss.global/host.today/ht-docker-node:npmci
-  NPMCI_COMPUTED_REPOURL: https://${{gitea.repository_owner}}:${{secrets.GITEA_TOKEN}}@gitea.lossless.digital/${{gitea.repository}}.git
+  NPMCI_COMPUTED_REPOURL: https://${{gitea.repository_owner}}:${{secrets.GITEA_TOKEN}}@/${{gitea.repository}}.git
   NPMCI_TOKEN_NPM: ${{secrets.NPMCI_TOKEN_NPM}}
   NPMCI_TOKEN_NPM2: ${{secrets.NPMCI_TOKEN_NPM2}}
   NPMCI_GIT_GITHUBTOKEN: ${{secrets.NPMCI_GIT_GITHUBTOKEN}}
@@ -26,7 +26,7 @@ jobs:
       - name: Install pnpm and npmci
         run: |
           pnpm install -g pnpm
-          pnpm install -g @shipzone/npmci
+          pnpm install -g @ship.zone/npmci
       - name: Run npm prepare
         run: npmci npm prepare


@@ -6,8 +6,8 @@ on:
       - '*'
 env:
-  IMAGE: registry.gitlab.com/hosttoday/ht-docker-node:npmci
+  IMAGE: code.foss.global/host.today/ht-docker-node:npmci
-  NPMCI_COMPUTED_REPOURL: https://${{gitea.repository_owner}}:${{secrets.GITEA_TOKEN}}@gitea.lossless.digital/${{gitea.repository}}.git
+  NPMCI_COMPUTED_REPOURL: https://${{gitea.repository_owner}}:${{secrets.GITEA_TOKEN}}@/${{gitea.repository}}.git
   NPMCI_TOKEN_NPM: ${{secrets.NPMCI_TOKEN_NPM}}
   NPMCI_TOKEN_NPM2: ${{secrets.NPMCI_TOKEN_NPM2}}
   NPMCI_GIT_GITHUBTOKEN: ${{secrets.NPMCI_GIT_GITHUBTOKEN}}
@@ -26,7 +26,7 @@ jobs:
       - name: Prepare
        run: |
          pnpm install -g pnpm
-         pnpm install -g @shipzone/npmci
+         pnpm install -g @ship.zone/npmci
          npmci npm prepare
       - name: Audit production dependencies
@@ -54,7 +54,7 @@ jobs:
       - name: Prepare
        run: |
          pnpm install -g pnpm
-         pnpm install -g @shipzone/npmci
+         pnpm install -g @ship.zone/npmci
          npmci npm prepare
       - name: Test stable
@@ -82,7 +82,7 @@ jobs:
       - name: Prepare
        run: |
          pnpm install -g pnpm
-         pnpm install -g @shipzone/npmci
+         pnpm install -g @ship.zone/npmci
          npmci npm prepare
       - name: Release
@@ -104,7 +104,7 @@ jobs:
       - name: Prepare
        run: |
          pnpm install -g pnpm
-         pnpm install -g @shipzone/npmci
+         pnpm install -g @ship.zone/npmci
          npmci npm prepare
       - name: Code quality

.gitignore (vendored): 7 lines changed

@@ -3,7 +3,6 @@
 # artifacts
 coverage/
 public/
-pages/
 
 # installs
 node_modules/
@@ -17,4 +16,8 @@ node_modules/
 dist/
 dist_*/
 
-# custom
+# AI
+.claude/
+.serena/
+#------
+# custom

.serena/project.yml (deleted file)

@@ -1,68 +0,0 @@
# language of the project (csharp, python, rust, java, typescript, go, cpp, or ruby)
# * For C, use cpp
# * For JavaScript, use typescript
# Special requirements:
# * csharp: Requires the presence of a .sln file in the project folder.
language: typescript
# whether to use the project's gitignore file to ignore files
# Added on 2025-04-07
ignore_all_files_in_gitignore: true
# list of additional paths to ignore
# same syntax as gitignore, so you can use * and **
# Was previously called `ignored_dirs`, please update your config if you are using that.
# Added (renamed) on 2025-04-07
ignored_paths: []
# whether the project is in read-only mode
# If set to true, all editing tools will be disabled and attempts to use them will result in an error
# Added on 2025-04-18
read_only: false
# list of tool names to exclude. We recommend not excluding any tools, see the readme for more details.
# Below is the complete list of tools for convenience.
# To make sure you have the latest list of tools, and to view their descriptions,
# execute `uv run scripts/print_tool_overview.py`.
#
# * `activate_project`: Activates a project by name.
# * `check_onboarding_performed`: Checks whether project onboarding was already performed.
# * `create_text_file`: Creates/overwrites a file in the project directory.
# * `delete_lines`: Deletes a range of lines within a file.
# * `delete_memory`: Deletes a memory from Serena's project-specific memory store.
# * `execute_shell_command`: Executes a shell command.
# * `find_referencing_code_snippets`: Finds code snippets in which the symbol at the given location is referenced.
# * `find_referencing_symbols`: Finds symbols that reference the symbol at the given location (optionally filtered by type).
# * `find_symbol`: Performs a global (or local) search for symbols with/containing a given name/substring (optionally filtered by type).
# * `get_current_config`: Prints the current configuration of the agent, including the active and available projects, tools, contexts, and modes.
# * `get_symbols_overview`: Gets an overview of the top-level symbols defined in a given file.
# * `initial_instructions`: Gets the initial instructions for the current project.
# Should only be used in settings where the system prompt cannot be set,
# e.g. in clients you have no control over, like Claude Desktop.
# * `insert_after_symbol`: Inserts content after the end of the definition of a given symbol.
# * `insert_at_line`: Inserts content at a given line in a file.
# * `insert_before_symbol`: Inserts content before the beginning of the definition of a given symbol.
# * `list_dir`: Lists files and directories in the given directory (optionally with recursion).
# * `list_memories`: Lists memories in Serena's project-specific memory store.
# * `onboarding`: Performs onboarding (identifying the project structure and essential tasks, e.g. for testing or building).
# * `prepare_for_new_conversation`: Provides instructions for preparing for a new conversation (in order to continue with the necessary context).
# * `read_file`: Reads a file within the project directory.
# * `read_memory`: Reads the memory with the given name from Serena's project-specific memory store.
# * `remove_project`: Removes a project from the Serena configuration.
# * `replace_lines`: Replaces a range of lines within a file with new content.
# * `replace_symbol_body`: Replaces the full definition of a symbol.
# * `restart_language_server`: Restarts the language server, may be necessary when edits not through Serena happen.
# * `search_for_pattern`: Performs a search for a pattern in the project.
# * `summarize_changes`: Provides instructions for summarizing the changes made to the codebase.
# * `switch_modes`: Activates modes by providing a list of their names
# * `think_about_collected_information`: Thinking tool for pondering the completeness of collected information.
# * `think_about_task_adherence`: Thinking tool for determining whether the agent is still on track with the current task.
# * `think_about_whether_you_are_done`: Thinking tool for determining whether the task is truly completed.
# * `write_memory`: Writes a named memory (for future reference) to Serena's project-specific memory store.
excluded_tools: []
# initial prompt for the project. It will always be given to the LLM upon activating the project
# (contrary to the memories, which are loaded on demand).
initial_prompt: ""
project_name: "docker"


@@ -1,6 +1,34 @@
 # Changelog
+## 2025-11-18 - 2.0.0 - BREAKING CHANGE(DockerHost)
+Rename DockerHost constructor option 'dockerSockPath' to 'socketPath' and update internal socket path handling
+- Breaking: constructor option renamed from 'dockerSockPath' to 'socketPath' — callers must update their code.
+- Constructor now reads the provided 'socketPath' option first, then falls back to DOCKER_HOST, CI, and finally the default unix socket.
+- README examples and documentation updated to use 'socketPath'.
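A minimal migration sketch for the rename described in the entry above; only the option name and fallback order are taken from the changelog, the rest is illustrative:

```typescript
import { DockerHost } from '@apiclient.xyz/docker';

// v1.x (before): the socket was passed as `dockerSockPath`
// const docker = new DockerHost({ dockerSockPath: '/var/run/docker.sock' });

// v2.x (after): the option is now called `socketPath`
const docker = new DockerHost({ socketPath: '/var/run/docker.sock' });

// Omitting the option falls back to DOCKER_HOST, then CI, then the default unix socket.
const dockerFromEnv = new DockerHost({});
```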
+## 2025-11-17 - 1.3.6 - fix(streaming)
+Convert smartrequest v5 web ReadableStreams to Node.js streams and update deps for streaming compatibility
+- Upgrade @push.rocks/smartrequest to ^5.0.1 and bump @git.zone dev tooling (@git.zone/tsbuild, tsrun, tstest).
+- requestStreaming now uses response.stream() (web ReadableStream) and converts it to a Node.js Readable via plugins.smartstream.nodewebhelpers.convertWebReadableToNodeReadable for backward compatibility.
+- Updated consumers of streaming responses (DockerHost.getEventObservable, DockerImage.createFromTarStream, DockerImage.exportToTarStream) to work with the converted Node.js stream and preserve event/backpressure semantics (.on, .pause, .resume).
+- Added readme.hints.md documenting the smartrequest v5 migration, conversion approach, modified files, and test/build status (type errors resolved and Node.js tests passing).
+- Removed project metadata file (.serena/project.yml) from the repository.
+## 2025-08-19 - 1.3.5 - fix(core)
+Stabilize CI/workflows and runtime: update CI images/metadata, improve streaming requests and image handling, and fix tests & package metadata
+- Update CI workflows and images: switch workflow IMAGE to code.foss.global/host.today/ht-docker-node:npmci, fix NPMCI_COMPUTED_REPOURL placeholders, and replace @shipzone/npmci with @ship.zone/npmci in workflows
+- Update npmextra.json gitzone metadata (githost -> code.foss.global, gitscope -> apiclient.xyz, npmPackagename -> @apiclient.xyz/docker) and npmdocker.baseImage -> host.today/ht-docker-node:npmci
+- Adjust package.json repository/bugs/homepage to code.foss.global, add pnpm overrides entry and normalize package metadata
+- Improve DockerHost streaming and request handling: reduce requestStreaming timeout to 30s, enable autoDrain for streaming requests, improve response parsing for streaming vs JSON endpoints to avoid hangs
+- Enhance DockerImage and DockerImageStore stream handling and tar processing: more robust import/export parsing, safer stream-to-file writes, repackaging steps, and error handling
+- Unskip and update tests: re-enable DockerImageStore integration test, change stored image name to 'hello2', add formatting fixes and ensure cleanup stops the test DockerHost
+- Miscellaneous code and docs cleanup: numerous formatting fixes and trailing-comma normalization across README and TS sources, update commitinfo and logger newline fixes, and add local tool ignores (.claude/.serena) to .gitignore
 ## 2025-08-19 - 1.3.4 - fix(test)
 Increase test timeout, enable DockerImageStore test, update test image name, bump smartrequest patch, and add local claude settings
 - Increase tstest timeout from 120s to 600s in package.json to accommodate longer-running integration tests.
@@ -9,6 +37,7 @@ Increase test timeout, enable DockerImageStore test, update test image name, bum
 - Add .claude/settings.local.json to allow local agent permissions for running tests and related tooling.
 ## 2025-08-19 - 1.3.3 - fix(classes.host)
 Adjust requestStreaming timeout and autoDrain; stabilize tests
 - Reduced requestStreaming timeout from 10 minutes to 30 seconds to avoid long-running hanging requests.
@@ -17,6 +46,7 @@ Adjust requestStreaming timeout and autoDrain; stabilize tests
 - Added local tool settings file (.claude/settings.local.json) with local permissions (development-only).
 ## 2025-08-18 - 1.3.2 - fix(package.json)
 Fix test script timeout typo, update dependency versions, and add typings & project configs
 - Fix test script: correct 'tineout' -> 'timeout' for npm test command and set timeout to 120s
@@ -26,6 +56,7 @@ Fix test script timeout typo, update dependency versions, and add typings & proj
 - Include generated cache/metadata files (typescript document symbols cache) — not source changes but tooling/cache artifacts
 ## 2025-08-18 - 1.3.1 - fix(test)
 Update test setup and devDependencies; adjust test import and add package metadata
 - Update test script to run with additional flags: --verbose, --logfile and --tineout 120
@@ -35,26 +66,29 @@ Update test setup and devDependencies; adjust test import and add package metada
 - Add packageManager field for pnpm@10.14.0 with integrity hash
 ## 2024-12-23 - 1.3.0 - feat(core)
 Initial release of Docker client with TypeScript support
 - Provides easy communication with Docker's remote API from Node.js
 - Includes implementations for managing Docker services, networks, secrets, containers, and images
 ## 2024-12-23 - 1.2.8 - fix(core)
 Improved the image creation process from tar stream in DockerImage class.
 - Enhanced `DockerImage.createFromTarStream` method to handle streamed response and parse imported image details.
 - Fixed the dependency version for `@push.rocks/smartarchive` in package.json.
 ## 2024-10-13 - 1.2.7 - fix(core)
 Prepare patch release with minor fixes and improvements
 ## 2024-10-13 - 1.2.6 - fix(core)
 Minor refactoring and code quality improvements.
 ## 2024-10-13 - 1.2.5 - fix(dependencies)
 Update dependencies for stability improvements
 - Updated @push.rocks/smartstream to version ^3.0.46
@@ -62,129 +96,152 @@ Update dependencies for stability improvements
 - Updated @types/node to version 22.7.5
 ## 2024-10-13 - 1.2.4 - fix(core)
 Refactored DockerImageStore constructor to remove DockerHost dependency
 - Adjusted DockerImageStore constructor to remove dependency on DockerHost
 - Updated ts/classes.host.ts to align with DockerImageStore's new constructor signature
 ## 2024-08-21 - 1.2.3 - fix(dependencies)
 Update dependencies to the latest versions and fix image export test
 - Updated several dependencies to their latest versions in package.json.
 - Enabled the previously skipped 'should export images' test.
 ## 2024-06-10 - 1.2.1-1.2.2 - Core/General
 General updates and fixes.
 - Fix core update
 ## 2024-06-10 - 1.2.0 - Core
 Core updates and bug fixes.
 - Fix core update
 ## 2024-06-08 - 1.2.0 - General/Core
 Major release with core enhancements.
 - Processing images with extraction, retagging, repackaging, and long-term storage
 ## 2024-06-06 - 1.1.4 - General/Imagestore
 Significant feature addition.
 - Add feature to process images with extraction, retagging, repackaging, and long-term storage
 ## 2024-05-08 - 1.0.112 - Images
 Add new functionality for image handling.
 - Can now import and export images
 - Start work on local 100% JS OCI image registry
 ## 2024-06-05 - 1.1.0-1.1.3 - Core
 Regular updates and fixes.
 - Fix core update
 ## 2024-02-02 - 1.0.105-1.0.110 - Core
 Routine core updates and fixes.
 - Fix core update
 ## 2022-10-17 - 1.0.103-1.0.104 - Core
 Routine core updates.
 - Fix core update
 ## 2020-10-01 - 1.0.99-1.0.102 - Core
 Routine core updates.
 - Fix core update
 ## 2019-09-22 - 1.0.73-1.0.78 - Core
 Routine updates and core fixes.
 - Fix core update
 ## 2019-09-13 - 1.0.60-1.0.72 - Core
 Routine updates and core fixes.
 - Fix core update
 ## 2019-08-16 - 1.0.43-1.0.59 - Core
 Routine updates and core fixes.
 - Fix core update
 ## 2019-08-15 - 1.0.37-1.0.42 - Core
 Routine updates and core fixes.
 - Fix core update
 ## 2019-08-14 - 1.0.31-1.0.36 - Core
 Routine updates and core fixes.
 - Fix core update
 ## 2019-01-10 - 1.0.27-1.0.30 - Core
 Routine updates and core fixes.
 - Fix core update
 ## 2018-07-16 - 1.0.23-1.0.24 - Core
 Routine updates and core fixes.
 - Fix core shift to new style
 ## 2017-07-16 - 1.0.20-1.0.22 - General
 Routine updates and fixes.
 - Update node_modules within npmdocker
 ## 2017-04-02 - 1.0.18-1.0.19 - General
 Routine updates and fixes.
 - Work with npmdocker and npmts 7.x.x
 - CI updates
 ## 2016-07-31 - 1.0.17 - General
 Enhancements and fixes.
 - Now waiting for response to be stored before ending streaming request
 - Cosmetic fix
 ## 2016-07-29 - 1.0.14-1.0.16 - General
 Multiple updates and features added.
 - Fix request for change observable and add npmdocker
 - Add request typings
 ## 2016-07-28 - 1.0.13 - Core
 Fixes and preparations.
 - Fixed request for newer docker
 - Prepare for npmdocker
 ## 2016-06-16 - 1.0.0-1.0.2 - General
 Initial sequence of releases, significant feature additions and CI setups.
 - Implement container start and stop
@@ -192,7 +249,7 @@ Initial sequence of releases, significant feature additions and CI setups.
 - Add tests with in docker environment
 ## 2016-04-12 - unknown - Initial Commit
 Initial project setup.
 - Initial commit

deno.lock (generated, new file): 7241 lines added

File diff suppressed because it is too large.

npmextra.json

@@ -1,6 +1,6 @@
 {
   "npmdocker": {
-    "baseImage": "hosttoday/ht-docker-node:npmci",
+    "baseImage": "host.today/ht-docker-node:npmci",
     "command": "(ls -a && rm -r node_modules && yarn global add npmts && yarn install && npmts)",
     "dockerSock": true
   },
@@ -12,11 +12,11 @@
   "gitzone": {
     "projectType": "npm",
     "module": {
-      "githost": "gitlab.com",
+      "githost": "code.foss.global",
-      "gitscope": "mojoio",
+      "gitscope": "apiclient.xyz",
       "gitrepo": "docker",
       "description": "Provides easy communication with Docker remote API from Node.js, with TypeScript support.",
-      "npmPackagename": "@mojoio/docker",
+      "npmPackagename": "@apiclient.xyz/docker",
       "license": "MIT",
       "keywords": [
         "Docker",

package.json

@@ -1,6 +1,6 @@
 {
   "name": "@apiclient.xyz/docker",
-  "version": "1.3.4",
+  "version": "2.0.0",
   "description": "Provides easy communication with Docker remote API from Node.js, with TypeScript support.",
   "private": false,
   "main": "dist_ts/index.js",
@@ -13,7 +13,7 @@
   },
   "repository": {
     "type": "git",
-    "url": "git+https://gitlab.com/mojoio/docker.git"
+    "url": "https://code.foss.global/apiclient.xyz/docker.git"
   },
   "keywords": [
     "Docker",
@@ -29,31 +29,31 @@
   "author": "Lossless GmbH",
   "license": "MIT",
   "bugs": {
-    "url": "https://gitlab.com/mojoio/docker/issues"
+    "url": "https://code.foss.global/apiclient.xyz/docker/issues"
   },
-  "homepage": "https://gitlab.com/mojoio/docker#readme",
+  "homepage": "https://code.foss.global/apiclient.xyz/docker#readme",
   "dependencies": {
     "@push.rocks/lik": "^6.2.2",
     "@push.rocks/smartarchive": "^4.2.2",
     "@push.rocks/smartbucket": "^3.3.10",
     "@push.rocks/smartfile": "^11.2.7",
-    "@push.rocks/smartjson": "^5.0.20",
+    "@push.rocks/smartjson": "^5.2.0",
-    "@push.rocks/smartlog": "^3.1.8",
+    "@push.rocks/smartlog": "^3.1.10",
-    "@push.rocks/smartnetwork": "^4.1.2",
+    "@push.rocks/smartnetwork": "^4.4.0",
     "@push.rocks/smartpath": "^6.0.0",
     "@push.rocks/smartpromise": "^4.2.3",
-    "@push.rocks/smartrequest": "^4.3.1",
+    "@push.rocks/smartrequest": "^5.0.1",
     "@push.rocks/smartstream": "^3.2.5",
-    "@push.rocks/smartstring": "^4.0.15",
+    "@push.rocks/smartstring": "^4.1.0",
     "@push.rocks/smartunique": "^3.0.9",
     "@push.rocks/smartversion": "^3.0.5",
-    "@tsclass/tsclass": "^9.2.0",
+    "@tsclass/tsclass": "^9.3.0",
     "rxjs": "^7.8.2"
   },
   "devDependencies": {
-    "@git.zone/tsbuild": "^2.6.7",
+    "@git.zone/tsbuild": "^3.1.0",
-    "@git.zone/tsrun": "^1.3.3",
+    "@git.zone/tsrun": "^2.0.0",
-    "@git.zone/tstest": "^2.3.5",
+    "@git.zone/tstest": "^2.8.2",
     "@push.rocks/qenv": "^6.1.3",
     "@types/node": "22.7.5"
   },
@@ -72,5 +72,8 @@
   "browserslist": [
     "last 1 chrome versions"
   ],
-  "packageManager": "pnpm@10.14.0+sha512.ad27a79641b49c3e481a16a805baa71817a04bbe06a38d17e60e2eaee83f6a146c6a688125f5792e48dd5ba30e7da52a5cda4c3992b9ccf333f9ce223af84748"
+  "packageManager": "pnpm@10.14.0+sha512.ad27a79641b49c3e481a16a805baa71817a04bbe06a38d17e60e2eaee83f6a146c6a688125f5792e48dd5ba30e7da52a5cda4c3992b9ccf333f9ce223af84748",
+  "pnpm": {
+    "overrides": {}
+  }
 }

pnpm-lock.yaml (generated): 2274 lines changed

File diff suppressed because it is too large.

readme.hints.md (new file)

@@ -0,0 +1,29 @@
# Docker Module - Development Hints
## smartrequest v5+ Migration (2025-11-17)
### Breaking Change
smartrequest v5.0.0+ returns web `ReadableStream` objects (Web Streams API) instead of Node.js streams.
### Solution Implemented
All streaming methods now convert web ReadableStreams to Node.js streams using:
```typescript
plugins.smartstream.nodewebhelpers.convertWebReadableToNodeReadable(webStream)
```
### Files Modified
- `ts/classes.host.ts`:
- `requestStreaming()` - Converts web stream to Node.js stream before returning
- `getEventObservable()` - Works with converted Node.js stream
- `ts/classes.image.ts`:
- `createFromTarStream()` - Uses converted Node.js stream for event handling
- `exportToTarStream()` - Uses converted Node.js stream for backpressure management
### Testing
- Build: All 11 type errors resolved
- Tests: Node.js tests pass (DockerHost, DockerContainer, DockerImage, DockerImageStore)
### Notes
- The conversion maintains backward compatibility with existing code expecting Node.js stream methods (`.on()`, `.emit()`, `.pause()`, `.resume()`)
- smartstream's `nodewebhelpers` module provides bidirectional conversion utilities between web and Node.js streams
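A small illustrative sketch of the conversion described in these hints. Only `response.stream()` returning a web ReadableStream and the `nodewebhelpers.convertWebReadableToNodeReadable` helper are taken from the notes above; the response interface is a hypothetical stand-in:

```typescript
import * as smartstream from '@push.rocks/smartstream';
import type { Readable } from 'node:stream';

// Hypothetical shape: a smartrequest v5 response exposes .stream(),
// which yields a web ReadableStream (per the migration note above).
interface IStreamingResponse {
  stream(): ReadableStream<Uint8Array>;
}

// Convert the web stream to a Node.js Readable so existing consumers that
// rely on .on(), .pause() and .resume() keep working unchanged.
function toNodeReadable(response: IStreamingResponse): Readable {
  const webStream = response.stream();
  return smartstream.nodewebhelpers.convertWebReadableToNodeReadable(webStream);
}
```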

readme.md: 614 lines changed

@@ -1,29 +1,30 @@
 # @apiclient.xyz/docker 🐳
 
-> **Powerful TypeScript client for Docker Remote API** - Build, manage, and orchestrate Docker containers, images, networks, and more with type-safe elegance.
+> **Powerful TypeScript client for Docker Remote API** - Build, manage, and orchestrate Docker containers, images, networks, and swarm services with type-safe elegance.
 
 ## 🚀 Features
 
-- 🎯 **Full TypeScript Support** - Complete type definitions for Docker API entities
+- 🎯 **Full TypeScript Support** - Complete type definitions for all Docker API entities
 - 🔄 **Async/Await Ready** - Modern promise-based architecture for seamless async operations
-- 📦 **Container Management** - Create, list, inspect, and remove containers effortlessly
+- 📦 **Container Management** - Create, list, inspect, and manage containers effortlessly
 - 🖼️ **Image Handling** - Pull from registries, build from tarballs, export, and manage tags
 - 🌐 **Network Operations** - Create and manage Docker networks with full IPAM support
 - 🔐 **Secrets Management** - Handle Docker secrets securely in swarm mode
 - 🎭 **Service Orchestration** - Deploy and manage services in Docker Swarm
-- 💾 **S3 Image Storage** - Built-in support for storing/retrieving images from S3
+- 💾 **S3 Image Storage** - Built-in support for storing/retrieving images from S3-compatible storage
 - 📊 **Event Streaming** - Real-time Docker event monitoring with RxJS observables
-- 🔧 **Registry Authentication** - Seamless authentication with Docker registries
+- 🔧 **Registry Authentication** - Seamless authentication with Docker registries including private registries
+- 🐝 **Swarm Mode** - Full support for Docker Swarm initialization and management
 
 ## 📦 Installation
 
 ```bash
-# Using npm
-npm install @apiclient.xyz/docker --save
-
 # Using pnpm (recommended)
 pnpm add @apiclient.xyz/docker
 
+# Using npm
+npm install @apiclient.xyz/docker --save
+
 # Using yarn
 yarn add @apiclient.xyz/docker
 ```
@@ -33,12 +34,45 @@ yarn add @apiclient.xyz/docker
 ```typescript
 import { DockerHost } from '@apiclient.xyz/docker';
 
-// Connect to local Docker daemon
+// Connect to local Docker daemon (default: /var/run/docker.sock)
-const docker = new DockerHost();
+const docker = new DockerHost({});
+await docker.start();
 
-// Or connect to remote Docker host
+// Or connect to remote Docker host via TCP
 const remoteDocker = new DockerHost({
-  socketPath: 'tcp://remote-docker-host:2375'
+  socketPath: 'tcp://192.168.1.100:2375',
+});
+await remoteDocker.start();
+
+// List all containers
+const containers = await docker.getContainers();
+console.log(`Found ${containers.length} containers`);
+
+// Don't forget to clean up
+await docker.stop();
+```
+
+## 🔌 Socket Path Configuration
+
+The library determines which Docker socket to use in the following priority order:
+
+1. **Constructor option** - `socketPath` parameter (highest priority)
+2. **Environment variable** - `DOCKER_HOST` environment variable
+3. **CI environment** - If `CI` env var is set, uses `http://docker:2375/`
+4. **Default** - Falls back to `http://unix:/var/run/docker.sock:`
+
+```typescript
+// Explicit socket path (highest priority)
+const docker1 = new DockerHost({
+  socketPath: 'tcp://remote-host:2375',
+});
+
+// Uses DOCKER_HOST environment variable if set
+const docker2 = new DockerHost({});
+
+// Custom image store directory
+const docker3 = new DockerHost({
+  imageStoreDir: '/custom/path/to/image-store',
 });
 ```
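The priority order documented above can be pictured as a small resolution helper. This is an illustrative sketch of the documented fallback chain, not the library's actual internal function:

```typescript
// Sketch of the documented socket resolution order (illustrative only).
function resolveSocketPath(optionSocketPath?: string): string {
  if (optionSocketPath) return optionSocketPath;               // 1. constructor option
  if (process.env.DOCKER_HOST) return process.env.DOCKER_HOST; // 2. DOCKER_HOST env var
  if (process.env.CI) return 'http://docker:2375/';            // 3. CI environment
  return 'http://unix:/var/run/docker.sock:';                  // 4. default unix socket
}
```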
@@ -51,19 +85,18 @@ The `DockerHost` class is your primary interface to interact with the Docker dae
```typescript ```typescript
import { DockerHost } from '@apiclient.xyz/docker'; import { DockerHost } from '@apiclient.xyz/docker';
// Initialize with default local socket // Initialize with options
const docker = new DockerHost(); const docker = new DockerHost({
socketPath: '/var/run/docker.sock', // Optional: custom socket path
// Custom initialization options imageStoreDir: './docker-images', // Optional: custom image store location
const customDocker = new DockerHost({
socketPath: '/var/run/docker.sock', // Unix socket path
// or
socketPath: 'tcp://192.168.1.100:2375' // TCP connection
}); });
// Start and stop (for lifecycle management) // Start the docker host (initializes image store)
await docker.start(); await docker.start();
// ... do your work
// ... perform operations ...
// Stop and clean up
await docker.stop(); await docker.stop();
``` ```
@@ -72,11 +105,10 @@ await docker.stop();
#### List All Containers #### List All Containers
```typescript ```typescript
// Get all containers (including stopped ones) // Get all containers (running and stopped)
const allContainers = await docker.getContainers(); const containers = await docker.getContainers();
// Each container includes detailed information containers.forEach((container) => {
allContainers.forEach(container => {
console.log(`Container: ${container.Names[0]}`); console.log(`Container: ${container.Names[0]}`);
console.log(` ID: ${container.Id}`); console.log(` ID: ${container.Id}`);
console.log(` Status: ${container.Status}`); console.log(` Status: ${container.Status}`);
@@ -85,48 +117,15 @@ allContainers.forEach(container => {
}); });
``` ```
#### Create and Manage Containers #### Get Container by ID
```typescript ```typescript
import { DockerContainer } from '@apiclient.xyz/docker'; import { DockerContainer } from '@apiclient.xyz/docker';
// Create a container with detailed configuration const container = await DockerContainer.getContainerById(docker, 'abc123');
const container = await DockerContainer.create(docker, {
Image: 'nginx:latest',
name: 'my-nginx-server',
HostConfig: {
PortBindings: {
'80/tcp': [{ HostPort: '8080' }]
},
RestartPolicy: {
Name: 'unless-stopped'
},
Memory: 512 * 1024 * 1024, // 512MB memory limit
},
Env: [
'NODE_ENV=production',
'LOG_LEVEL=info'
],
Labels: {
'app': 'web-server',
'environment': 'production'
}
});
console.log(`Container created: ${container.Id}`);
// Container operations (these would need to be implemented)
// await container.start();
// await container.stop();
// await container.remove();
```
#### Get Container by ID
```typescript
const container = await DockerContainer.getContainerById(docker, 'container-id-here');
if (container) { if (container) {
console.log(`Found container: ${container.Names[0]}`); console.log(`Found: ${container.Names[0]}`);
console.log(`Running: ${container.State === 'running'}`);
} }
``` ```
@@ -137,54 +136,86 @@ if (container) {
```typescript ```typescript
import { DockerImage } from '@apiclient.xyz/docker'; import { DockerImage } from '@apiclient.xyz/docker';
// Pull an image from Docker Hub // Pull from Docker Hub
const image = await DockerImage.createFromRegistry(docker, { const image = await DockerImage.createFromRegistry(docker, {
imageName: 'node', creationObject: {
imageTag: '18-alpine', imageUrl: 'nginx',
// Optional: provide registry authentication imageTag: 'alpine', // Optional, defaults to 'latest'
authToken: 'your-registry-auth-token' },
}); });
console.log(`Image pulled: ${image.RepoTags[0]}`); console.log(`Image pulled: ${image.RepoTags[0]}`);
console.log(`Size: ${(image.Size / 1024 / 1024).toFixed(2)} MB`); console.log(`Size: ${(image.Size / 1024 / 1024).toFixed(2)} MB`);
```
#### Import Images from Tar // Pull from private registry
const privateImage = await DockerImage.createFromRegistry(docker, {
```typescript creationObject: {
import * as fs from 'fs'; imageUrl: 'registry.example.com/my-app',
imageTag: 'v2.0.0',
// Import from a tar stream },
const tarStream = fs.createReadStream('./my-image.tar');
const importedImage = await DockerImage.createFromTarStream(docker, {
tarStream,
imageUrl: 'file://./my-image.tar',
imageTag: 'my-app:v1.0.0'
}); });
``` ```
#### Export Images to Tar #### Import Images from Tar Stream
```typescript ```typescript
// Export an image to a tar stream import * as fs from 'fs';
const image = await DockerImage.getImageByName(docker, 'nginx:latest'); import { DockerImage } from '@apiclient.xyz/docker';
// Import from a tar file
const tarStream = fs.createReadStream('./my-image.tar');
const importedImage = await DockerImage.createFromTarStream(docker, {
tarStream,
creationObject: {
imageUrl: 'my-app',
imageTag: 'v1.0.0',
},
});
console.log(`Imported: ${importedImage.RepoTags[0]}`);
```
#### Export Images to Tar Stream
```typescript
// Get image by name
const image = await DockerImage.getImageByName(docker, 'nginx:alpine');
// Export to tar stream
const exportStream = await image.exportToTarStream(); const exportStream = await image.exportToTarStream();
// Save to file // Save to file
const writeStream = fs.createWriteStream('./nginx-export.tar'); const writeStream = fs.createWriteStream('./nginx-export.tar');
exportStream.pipe(writeStream); exportStream.pipe(writeStream);
writeStream.on('finish', () => {
console.log('Image exported successfully');
});
``` ```
#### Tag Images #### Tag Images
```typescript ```typescript
// Tag an existing image // Tag an existing image
await DockerImage.tagImageByIdOrName(docker, 'node:18-alpine', { await DockerImage.tagImageByIdOrName(docker, 'nginx:alpine', {
registry: 'myregistry.com', registry: 'myregistry.com',
imageName: 'my-node-app', imageName: 'web-server',
imageTag: 'v2.0.0' imageTag: 'v1.0.0',
});
// Result: myregistry.com/web-server:v1.0.0
```
#### List All Images
```typescript
const images = await docker.getImages();
images.forEach((img) => {
console.log(`Image: ${img.RepoTags ? img.RepoTags.join(', ') : '<none>'}`);
console.log(` ID: ${img.Id}`);
console.log(` Size: ${(img.Size / 1024 / 1024).toFixed(2)} MB`);
console.log(` Created: ${new Date(img.Created * 1000).toISOString()}`);
}); });
// Result: myregistry.com/my-node-app:v2.0.0
``` ```
### 🌐 Network Management ### 🌐 Network Management
@@ -201,18 +232,20 @@ const network = await DockerNetwork.createNetwork(docker, {
EnableIPv6: false, EnableIPv6: false,
IPAM: { IPAM: {
Driver: 'default', Driver: 'default',
Config: [{ Config: [
{
Subnet: '172.28.0.0/16', Subnet: '172.28.0.0/16',
Gateway: '172.28.0.1' Gateway: '172.28.0.1',
}] },
],
}, },
Labels: { Labels: {
'project': 'my-app', project: 'my-app',
'environment': 'production' environment: 'production',
} },
}); });
console.log(`Network created: ${network.Id}`); console.log(`Network created: ${network.Name} (${network.Id})`);
``` ```
#### List and Inspect Networks #### List and Inspect Networks
@@ -220,68 +253,96 @@ console.log(`Network created: ${network.Id}`);
```typescript ```typescript
// Get all networks // Get all networks
const networks = await docker.getNetworks(); const networks = await docker.getNetworks();
networks.forEach(net => {
networks.forEach((net) => {
console.log(`Network: ${net.Name} (${net.Driver})`); console.log(`Network: ${net.Name} (${net.Driver})`);
console.log(` Scope: ${net.Scope}`); console.log(` Scope: ${net.Scope}`);
console.log(` Internal: ${net.Internal}`); console.log(` Internal: ${net.Internal}`);
}); });
// Get specific network // Get specific network by name
const appNetwork = await DockerNetwork.getNetworkByName(docker, 'my-app-network'); const appNetwork = await DockerNetwork.getNetworkByName(docker, 'my-app-network');
// Get containers on network // Get containers connected to this network
const containers = await appNetwork.getContainersOnNetwork(); const containers = await appNetwork.getContainersOnNetwork();
console.log(`Containers on network: ${containers.length}`); console.log(`Containers on network: ${containers.length}`);
``` ```
#### Remove a Network
```typescript
const network = await DockerNetwork.getNetworkByName(docker, 'my-app-network');
await network.remove();
console.log('Network removed');
```
### 🎭 Service Management (Swarm Mode) ### 🎭 Service Management (Swarm Mode)
#### Activate Swarm Mode
```typescript
// Initialize swarm mode first
await docker.activateSwarm('192.168.1.100'); // Optional: advertisement IP
console.log('Swarm mode activated');
```
#### Deploy Services #### Deploy Services
```typescript ```typescript
import { DockerService } from '@apiclient.xyz/docker'; import { DockerService, DockerImage, DockerNetwork, DockerSecret } from '@apiclient.xyz/docker';
// Create a replicated service // Create prerequisites
const network = await DockerNetwork.createNetwork(docker, {
Name: 'app-network',
Driver: 'overlay', // Use overlay for swarm
});
const image = await DockerImage.createFromRegistry(docker, {
creationObject: {
imageUrl: 'nginx',
imageTag: 'latest',
},
});
const secret = await DockerSecret.createSecret(docker, {
name: 'api-key',
version: '1.0.0',
contentArg: 'super-secret-key',
labels: { app: 'my-app' },
});
// Create a service
const service = await DockerService.createService(docker, { const service = await DockerService.createService(docker, {
name: 'web-api', name: 'web-api',
image: 'my-api:latest', image: image,
replicas: 3,
ports: [{
Protocol: 'tcp',
PublishedPort: 80,
TargetPort: 3000
}],
networks: ['my-app-network'],
labels: { labels: {
'app': 'api', app: 'api',
'version': '2.0.0' version: '1.0.0',
}, },
networks: [network],
networkAlias: 'api',
secrets: [secret],
ports: ['80:3000'], // host:container
resources: { resources: {
limits: { memorySizeMB: 512,
Memory: 256 * 1024 * 1024, // 256MB
CPUs: 0.5
}
}, },
secrets: ['api-key', 'db-password'],
mounts: [{
Target: '/data',
Source: 'app-data',
Type: 'volume'
}]
}); });
console.log(`Service deployed: ${service.ID}`); console.log(`Service deployed: ${service.ID}`);
``` ```
#### Manage Services #### List and Manage Services
```typescript ```typescript
// List all services // List all services
const services = await docker.getServices(); const services = await docker.getServices();
services.forEach(service => {
services.forEach((service) => {
console.log(`Service: ${service.Spec.Name}`); console.log(`Service: ${service.Spec.Name}`);
console.log(` Replicas: ${service.Spec.Mode.Replicated.Replicas}`);
console.log(` Image: ${service.Spec.TaskTemplate.ContainerSpec.Image}`); console.log(` Image: ${service.Spec.TaskTemplate.ContainerSpec.Image}`);
if (service.Spec.Mode.Replicated) {
console.log(` Replicas: ${service.Spec.Mode.Replicated.Replicas}`);
}
}); });
// Get service by name // Get service by name
@@ -290,46 +351,50 @@ const myService = await DockerService.getServiceByName(docker, 'web-api');
// Check if service needs update // Check if service needs update
const needsUpdate = await myService.needsUpdate(); const needsUpdate = await myService.needsUpdate();
if (needsUpdate) { if (needsUpdate) {
console.log('Service configuration has changed, update needed'); console.log('⚠️ Service configuration has changed, update needed');
} }
// Remove service // Remove service
await myService.remove(); await myService.remove();
console.log('Service removed');
``` ```
### 🔐 Secrets Management ### 🔐 Secrets Management
Secrets are only available in Docker Swarm mode.
```typescript ```typescript
import { DockerSecret } from '@apiclient.xyz/docker'; import { DockerSecret } from '@apiclient.xyz/docker';
// Create a secret // Create a secret
const secret = await DockerSecret.createSecret(docker, { const secret = await DockerSecret.createSecret(docker, {
name: 'api-key', name: 'database-password',
data: Buffer.from('super-secret-key-123').toString('base64'), version: '1.0.0',
contentArg: 'my-super-secret-password',
labels: { labels: {
'app': 'my-app', app: 'my-app',
'type': 'api-key' type: 'credential',
} },
}); });
console.log(`Secret created: ${secret.ID}`); console.log(`Secret created: ${secret.ID}`);
// List secrets // List all secrets
const secrets = await DockerSecret.getSecrets(docker); const secrets = await DockerSecret.getSecrets(docker);
secrets.forEach(secret => { secrets.forEach((s) => {
console.log(`Secret: ${secret.Spec.Name}`); console.log(`Secret: ${s.Spec.Name}`);
console.log(` Labels:`, s.Spec.Labels);
}); });
// Get secret by name // Get secret by name
const apiKeySecret = await DockerSecret.getSecretByName(docker, 'api-key'); const dbSecret = await DockerSecret.getSecretByName(docker, 'database-password');
// Update secret // Update secret content
await apiKeySecret.update({ await dbSecret.update('new-password-value');
data: Buffer.from('new-secret-key-456').toString('base64')
});
// Remove secret // Remove secret
await apiKeySecret.remove(); await dbSecret.remove();
console.log('Secret removed');
``` ```
### 💾 S3 Image Storage ### 💾 S3 Image Storage
@@ -337,20 +402,19 @@ await apiKeySecret.remove();
Store and retrieve Docker images from S3-compatible storage: Store and retrieve Docker images from S3-compatible storage:
```typescript ```typescript
// Configure S3 storage // Configure S3 storage for the image store
await docker.addS3Storage({ await docker.addS3Storage({
endpoint: 's3.amazonaws.com', endpoint: 's3.amazonaws.com',
accessKeyId: 'your-access-key', accessKey: 'AKIAIOSFODNN7EXAMPLE',
secretAccessKey: 'your-secret-key', accessSecret: 'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY',
bucket: 'docker-images' bucketName: 'my-docker-images',
}); });
// Store an image to S3 // Store an image to S3
const imageStore = docker.imageStore; const imageStream = fs.createReadStream('./my-app.tar');
await imageStore.storeImage('my-app:v1.0.0'); await docker.imageStore.storeImage('my-app-v1', imageStream);
// Retrieve an image from S3 console.log('Image stored to S3');
const retrievedImage = await imageStore.getImage('my-app:v1.0.0');
``` ```
### 📊 Event Monitoring ### 📊 Event Monitoring
@@ -359,137 +423,269 @@ Monitor Docker events in real-time using RxJS observables:
```typescript ```typescript
// Subscribe to Docker events // Subscribe to Docker events
const eventStream = docker.getEventObservable(); const eventObservable = await docker.getEventObservable();
const subscription = eventStream.subscribe({ const subscription = eventObservable.subscribe({
next: (event) => { next: (event) => {
console.log(`Event: ${event.Type} - ${event.Action}`); console.log(`📡 Event: ${event.Type} - ${event.Action}`);
console.log(` Actor: ${event.Actor.ID}`); console.log(` Actor: ${event.Actor.ID}`);
console.log(` Time: ${new Date(event.time * 1000).toISOString()}`); console.log(` Time: ${new Date(event.time * 1000).toISOString()}`);
if (event.Type === 'container') {
console.log(` Container: ${event.Actor.Attributes.name}`);
}
}, },
error: (err) => console.error('Event stream error:', err), error: (err) => console.error('Event stream error:', err),
complete: () => console.log('Event stream completed') complete: () => console.log('Event stream completed'),
}); });
// Unsubscribe when done // Unsubscribe when done
subscription.unsubscribe(); // subscription.unsubscribe();
``` ```
### 🔧 Registry Authentication ### 🔧 Registry Authentication
Authenticate with Docker registries for private images: Authenticate with Docker registries to pull private images:
```typescript ```typescript
// Authenticate with Docker Hub // Authenticate with a registry
await docker.auth({ await docker.auth({
username: 'your-username', username: 'your-username',
password: 'your-password', password: 'your-password',
serveraddress: 'https://index.docker.io/v1/' serveraddress: 'https://index.docker.io/v1/', // Docker Hub
}); });
// Or use existing Docker config console.log('✅ Authenticated with registry');
const authToken = await docker.getAuthTokenFromDockerConfig('myregistry.com');
// Use auth token when pulling images // Or read credentials from Docker config file
const authToken = await docker.getAuthTokenFromDockerConfig('registry.example.com');
// Now you can pull private images
const privateImage = await DockerImage.createFromRegistry(docker, { const privateImage = await DockerImage.createFromRegistry(docker, {
imageName: 'myregistry.com/private/image', creationObject: {
imageUrl: 'registry.example.com/private/app',
imageTag: 'latest', imageTag: 'latest',
authToken },
});
```
### 🔄 Swarm Mode
Initialize and manage Docker Swarm:
```typescript
// Initialize swarm mode
await docker.activateSwarm({
ListenAddr: '0.0.0.0:2377',
AdvertiseAddr: '192.168.1.100:2377',
ForceNewCluster: false
});
// Now you can create services, secrets, and use swarm features
const service = await DockerService.createService(docker, {
name: 'my-swarm-service',
image: 'nginx:latest',
replicas: 5
// ... more service config
}); });
``` ```
## 🏗️ Advanced Examples ## 🏗️ Advanced Examples
### Complete Application Stack ### Complete Application Stack with Swarm
Deploy a complete multi-service application stack:
```typescript ```typescript
async function deployStack() { import { DockerHost, DockerNetwork, DockerSecret, DockerService, DockerImage } from '@apiclient.xyz/docker';
const docker = new DockerHost();
// Create network async function deployStack() {
const docker = new DockerHost({});
await docker.start();
// Initialize swarm
await docker.activateSwarm();
console.log('✅ Swarm initialized');
// Create overlay network for service communication
const network = await DockerNetwork.createNetwork(docker, { const network = await DockerNetwork.createNetwork(docker, {
Name: 'app-network', Name: 'app-network',
Driver: 'overlay' // for swarm mode Driver: 'overlay',
Attachable: true,
}); });
console.log('✅ Network created');
// Create secrets // Create secrets
const dbPassword = await DockerSecret.createSecret(docker, { const dbPassword = await DockerSecret.createSecret(docker, {
name: 'db-password', name: 'db-password',
data: Buffer.from('strong-password').toString('base64') version: '1.0.0',
contentArg: 'strong-database-password',
labels: { app: 'stack' },
}); });
console.log('✅ Secrets created');
// Pull images
const postgresImage = await DockerImage.createFromRegistry(docker, {
creationObject: {
imageUrl: 'postgres',
imageTag: '14-alpine',
},
});
const appImage = await DockerImage.createFromRegistry(docker, {
creationObject: {
imageUrl: 'my-app',
imageTag: 'latest',
},
});
console.log('✅ Images pulled');
// Deploy database service // Deploy database service
const dbService = await DockerService.createService(docker, { const dbService = await DockerService.createService(docker, {
name: 'postgres', name: 'postgres-db',
image: 'postgres:14', image: postgresImage,
networks: ['app-network'], labels: { tier: 'database' },
secrets: ['db-password'], networks: [network],
env: ['POSTGRES_PASSWORD_FILE=/run/secrets/db-password'] networkAlias: 'postgres',
secrets: [dbPassword],
ports: [],
resources: {
memorySizeMB: 1024,
},
}); });
console.log('✅ Database service deployed');
// Deploy application service // Deploy application service
const appService = await DockerService.createService(docker, { const appService = await DockerService.createService(docker, {
name: 'web-app', name: 'web-app',
image: 'my-app:latest', image: appImage,
replicas: 3, labels: { tier: 'application' },
networks: ['app-network'], networks: [network],
ports: [{ Protocol: 'tcp', PublishedPort: 80, TargetPort: 3000 }] networkAlias: 'app',
secrets: [dbPassword],
ports: ['80:3000'],
resources: {
memorySizeMB: 512,
},
}); });
console.log('✅ Application service deployed');
console.log('Stack deployed successfully!'); console.log('🚀 Stack deployment complete!');
}
deployStack().catch(console.error);
```
### Image Pipeline: Pull, Tag, Export
```typescript
async function imagePipeline() {
const docker = new DockerHost({});
await docker.start();
// Pull latest image
const image = await DockerImage.createFromRegistry(docker, {
creationObject: {
imageUrl: 'node',
imageTag: '18-alpine',
},
});
console.log('✅ Image pulled');
// Tag for private registry
await DockerImage.tagImageByIdOrName(docker, 'node:18-alpine', {
registry: 'registry.company.com',
imageName: 'base/node',
imageTag: 'v18-alpine',
});
console.log('✅ Image tagged');
// Export to tar
const exportStream = await image.exportToTarStream();
const writeStream = fs.createWriteStream('./node-18-alpine.tar');
exportStream.pipe(writeStream);
await new Promise((resolve, reject) => {
writeStream.on('finish', resolve);
writeStream.on('error', reject);
});
console.log('✅ Image exported to tar');
await docker.stop();
} }
``` ```
## 🔍 TypeScript Support ## 🔍 TypeScript Support
This package provides comprehensive TypeScript definitions for all Docker API entities: Full TypeScript definitions for all Docker API entities:
```typescript ```typescript
import type { import type {
IContainerCreationDescriptor, IDockerHostConstructorOptions,
IServiceCreationDescriptor,
INetworkCreationDescriptor,
IImageCreationDescriptor, IImageCreationDescriptor,
ISecretCreationDescriptor IServiceCreationDescriptor,
ISecretCreationDescriptor,
TLabels,
} from '@apiclient.xyz/docker'; } from '@apiclient.xyz/docker';
// Full IntelliSense support for all configuration options // Full IntelliSense support
const containerConfig: IContainerCreationDescriptor = { const options: IDockerHostConstructorOptions = {
Image: 'node:18', socketPath: '/var/run/docker.sock',
// Your IDE will provide full autocomplete here imageStoreDir: '/tmp/docker-images',
};
const imageConfig: IImageCreationDescriptor = {
imageUrl: 'nginx',
imageTag: 'alpine',
};
const labels: TLabels = {
app: 'my-app',
environment: 'production',
}; };
``` ```
## 🤝 Contributing ## 🎯 Real-World Use Cases
We welcome contributions! Please feel free to submit issues and pull requests. ### CI/CD Pipeline Integration
```typescript
// In your CI/CD pipeline
const docker = new DockerHost({
socketPath: process.env.DOCKER_HOST || '/var/run/docker.sock',
});
await docker.start();
// Build and push process
const image = await DockerImage.createFromTarStream(docker, {
tarStream: buildArtifactStream,
creationObject: {
imageUrl: 'my-app',
imageTag: process.env.CI_COMMIT_SHA,
},
});
await DockerImage.tagImageByIdOrName(docker, `my-app:${process.env.CI_COMMIT_SHA}`, {
registry: 'registry.company.com',
imageName: 'production/my-app',
imageTag: 'latest',
});
// Push to registry (authentication required)
// Note: Pushing requires proper registry authentication
```
### Dynamic Service Scaling
```typescript
// Monitor and scale services based on load
const services = await docker.getServices();
const webService = services.find(s => s.Spec.Name === 'web-app');
if (webService && webService.Spec.Mode.Replicated) {
const currentReplicas = webService.Spec.Mode.Replicated.Replicas;
console.log(`Current replicas: ${currentReplicas}`);
// Scale based on your metrics
// (Scaling API would need to be implemented)
}
```
## 📖 API Documentation ## 📖 API Documentation
For complete API documentation, visit [https://apiclient.xyz/docker](https://apiclient.xyz/docker) - **Package Repository**: [https://code.foss.global/apiclient.xyz/docker](https://code.foss.global/apiclient.xyz/docker)
- **Docker Engine API Reference**: [https://docs.docker.com/engine/api/latest/](https://docs.docker.com/engine/api/latest/)
- **Issues & Bug Reports**: [https://code.foss.global/apiclient.xyz/docker/issues](https://code.foss.global/apiclient.xyz/docker/issues)
For Docker Remote API reference, see [Docker Engine API Documentation](https://docs.docker.com/engine/api/latest/) ## 🔑 Key Concepts
- **DockerHost**: Main entry point for Docker API communication
- **Socket Path Priority**: Constructor option → `DOCKER_HOST` env → CI mode → default socket
- **Swarm Mode Required**: Services and secrets require Docker Swarm to be activated
- **Type Safety**: Full TypeScript support with comprehensive interfaces
- **Streaming Support**: Real-time event monitoring and tar stream operations
- **S3 Integration**: Built-in image storage/retrieval from S3-compatible storage
## License and Legal Information ## License and Legal Information


@@ -41,7 +41,10 @@ tap.test('should create a network', async () => {
}); });
tap.test('should remove a network', async () => { tap.test('should remove a network', async () => {
const webgateway = await docker.DockerNetwork.getNetworkByName(testDockerHost, 'webgateway'); const webgateway = await docker.DockerNetwork.getNetworkByName(
testDockerHost,
'webgateway',
);
await webgateway.remove(); await webgateway.remove();
}); });
@@ -78,7 +81,10 @@ tap.test('should create a secret', async () => {
}); });
tap.test('should remove a secret by name', async () => { tap.test('should remove a secret by name', async () => {
const mySecret = await docker.DockerSecret.getSecretByName(testDockerHost, 'testSecret'); const mySecret = await docker.DockerSecret.getSecretByName(
testDockerHost,
'testSecret',
);
await mySecret.remove(); await mySecret.remove();
}); });
@@ -102,11 +108,14 @@ tap.test('should create a service', async () => {
labels: {}, labels: {},
contentArg: '{"hi": "wow"}', contentArg: '{"hi": "wow"}',
}); });
const testImage = await docker.DockerImage.createFromRegistry(testDockerHost, { const testImage = await docker.DockerImage.createFromRegistry(
testDockerHost,
{
creationObject: { creationObject: {
imageUrl: 'code.foss.global/host.today/ht-docker-node:latest', imageUrl: 'code.foss.global/host.today/ht-docker-node:latest',
} },
}); },
);
const testService = await docker.DockerService.createService(testDockerHost, { const testService = await docker.DockerService.createService(testDockerHost, {
image: testImage, image: testImage,
labels: {}, labels: {},
@@ -124,13 +133,16 @@ tap.test('should create a service', async () => {
tap.test('should export images', async (toolsArg) => { tap.test('should export images', async (toolsArg) => {
const done = toolsArg.defer(); const done = toolsArg.defer();
const testImage = await docker.DockerImage.createFromRegistry(testDockerHost, { const testImage = await docker.DockerImage.createFromRegistry(
testDockerHost,
{
creationObject: { creationObject: {
imageUrl: 'code.foss.global/host.today/ht-docker-node:latest', imageUrl: 'code.foss.global/host.today/ht-docker-node:latest',
} },
}); },
);
const fsWriteStream = plugins.smartfile.fsStream.createWriteStream( const fsWriteStream = plugins.smartfile.fsStream.createWriteStream(
plugins.path.join(paths.nogitDir, 'testimage.tar') plugins.path.join(paths.nogitDir, 'testimage.tar'),
); );
const exportStream = await testImage.exportToTarStream(); const exportStream = await testImage.exportToTarStream();
exportStream.pipe(fsWriteStream).on('finish', () => { exportStream.pipe(fsWriteStream).on('finish', () => {
@@ -141,14 +153,17 @@ tap.test('should export images', async (toolsArg) => {
tap.test('should import images', async () => { tap.test('should import images', async () => {
const fsReadStream = plugins.smartfile.fsStream.createReadStream( const fsReadStream = plugins.smartfile.fsStream.createReadStream(
plugins.path.join(paths.nogitDir, 'testimage.tar') plugins.path.join(paths.nogitDir, 'testimage.tar'),
); );
const importedImage = await docker.DockerImage.createFromTarStream(testDockerHost, { const importedImage = await docker.DockerImage.createFromTarStream(
testDockerHost,
{
tarStream: fsReadStream, tarStream: fsReadStream,
creationObject: { creationObject: {
imageUrl: 'code.foss.global/host.today/ht-docker-node:latest', imageUrl: 'code.foss.global/host.today/ht-docker-node:latest',
} },
}); },
);
expect(importedImage).toBeInstanceOf(docker.DockerImage); expect(importedImage).toBeInstanceOf(docker.DockerImage);
}); });
@@ -163,11 +178,16 @@ tap.test('should expose a working DockerImageStore', async () => {
await testDockerHost.addS3Storage(s3Descriptor); await testDockerHost.addS3Storage(s3Descriptor);
// //
await testDockerHost.imageStore.storeImage('hello2', plugins.smartfile.fsStream.createReadStream(plugins.path.join(paths.nogitDir, 'testimage.tar'))); await testDockerHost.imageStore.storeImage(
}) 'hello2',
plugins.smartfile.fsStream.createReadStream(
plugins.path.join(paths.nogitDir, 'testimage.tar'),
),
);
});
tap.test('cleanup', async () => { tap.test('cleanup', async () => {
await testDockerHost.stop(); await testDockerHost.stop();
}) });
export default tap.start(); export default tap.start();

View File

@@ -3,6 +3,6 @@
*/ */
export const commitinfo = { export const commitinfo = {
name: '@apiclient.xyz/docker', name: '@apiclient.xyz/docker',
version: '1.3.4', version: '2.0.0',
description: 'Provides easy communication with Docker remote API from Node.js, with TypeScript support.' description: 'Provides easy communication with Docker remote API from Node.js, with TypeScript support.'
} }

View File

@@ -10,7 +10,9 @@ export class DockerContainer {
/** /**
* get all containers * get all containers
*/ */
public static async getContainers(dockerHostArg: DockerHost): Promise<DockerContainer[]> { public static async getContainers(
dockerHostArg: DockerHost,
): Promise<DockerContainer[]> {
const result: DockerContainer[] = []; const result: DockerContainer[] = [];
const response = await dockerHostArg.request('GET', '/containers/json'); const response = await dockerHostArg.request('GET', '/containers/json');
@@ -34,7 +36,7 @@ export class DockerContainer {
*/ */
public static async create( public static async create(
dockerHost: DockerHost, dockerHost: DockerHost,
containerCreationDescriptor: interfaces.IContainerCreationDescriptor containerCreationDescriptor: interfaces.IContainerCreationDescriptor,
) { ) {
// check for unique hostname // check for unique hostname
const existingContainers = await DockerContainer.getContainers(dockerHost); const existingContainers = await DockerContainer.getContainers(dockerHost);
@@ -50,7 +52,10 @@ export class DockerContainer {
if (response.statusCode < 300) { if (response.statusCode < 300) {
logger.log('info', 'Container created successfully'); logger.log('info', 'Container created successfully');
} else { } else {
logger.log('error', 'There has been a problem when creating the container'); logger.log(
'error',
'There has been a problem when creating the container',
);
} }
} }

View File

@@ -4,7 +4,6 @@ import { DockerContainer } from './classes.container.js';
import { DockerNetwork } from './classes.network.js'; import { DockerNetwork } from './classes.network.js';
import { DockerService } from './classes.service.js'; import { DockerService } from './classes.service.js';
import { logger } from './logger.js'; import { logger } from './logger.js';
import path from 'path';
import { DockerImageStore } from './classes.imagestore.js'; import { DockerImageStore } from './classes.imagestore.js';
import { DockerImage } from './classes.image.js'; import { DockerImage } from './classes.image.js';
@@ -15,7 +14,7 @@ export interface IAuthData {
} }
export interface IDockerHostConstructorOptions { export interface IDockerHostConstructorOptions {
dockerSockPath?: string; socketPath?: string;
imageStoreDir?: string; imageStoreDir?: string;
} }
@@ -37,13 +36,16 @@ export class DockerHost {
constructor(optionsArg: IDockerHostConstructorOptions) { constructor(optionsArg: IDockerHostConstructorOptions) {
this.options = { this.options = {
...{ ...{
imageStoreDir: plugins.path.join(paths.nogitDir, 'temp-docker-image-store'), imageStoreDir: plugins.path.join(
paths.nogitDir,
'temp-docker-image-store',
),
}, },
...optionsArg, ...optionsArg,
} };
let pathToUse: string; let pathToUse: string;
if (optionsArg.dockerSockPath) { if (optionsArg.socketPath) {
pathToUse = optionsArg.dockerSockPath; pathToUse = optionsArg.socketPath;
} else if (process.env.DOCKER_HOST) { } else if (process.env.DOCKER_HOST) {
pathToUse = process.env.DOCKER_HOST; pathToUse = process.env.DOCKER_HOST;
} else if (process.env.CI) { } else if (process.env.CI) {
@@ -62,7 +64,7 @@ export class DockerHost {
this.imageStore = new DockerImageStore({ this.imageStore = new DockerImageStore({
bucketDir: null, bucketDir: null,
localDirPath: this.options.imageStoreDir, localDirPath: this.options.imageStoreDir,
}) });
} }
public async start() { public async start() {
@@ -84,17 +86,22 @@ export class DockerHost {
throw new Error(response.body.Status); throw new Error(response.body.Status);
} }
console.log(response.body.Status); console.log(response.body.Status);
this.registryToken = plugins.smartstring.base64.encode(plugins.smartjson.stringify(authData)); this.registryToken = plugins.smartstring.base64.encode(
plugins.smartjson.stringify(authData),
);
} }
/** /**
* gets the token from the .docker/config.json file for GitLab registry * gets the token from the .docker/config.json file for GitLab registry
*/ */
public async getAuthTokenFromDockerConfig(registryUrlArg: string) { public async getAuthTokenFromDockerConfig(registryUrlArg: string) {
const dockerConfigPath = plugins.smartpath.get.home('~/.docker/config.json'); const dockerConfigPath = plugins.smartpath.get.home(
'~/.docker/config.json',
);
const configObject = plugins.smartfile.fs.toObjectSync(dockerConfigPath); const configObject = plugins.smartfile.fs.toObjectSync(dockerConfigPath);
const gitlabAuthBase64 = configObject.auths[registryUrlArg].auth; const gitlabAuthBase64 = configObject.auths[registryUrlArg].auth;
const gitlabAuth: string = plugins.smartstring.base64.decode(gitlabAuthBase64); const gitlabAuth: string =
plugins.smartstring.base64.decode(gitlabAuthBase64);
const gitlabAuthArray = gitlabAuth.split(':'); const gitlabAuthArray = gitlabAuth.split(':');
await this.auth({ await this.auth({
username: gitlabAuthArray[0], username: gitlabAuthArray[0],
@@ -116,7 +123,9 @@ export class DockerHost {
/** /**
* create a network * create a network
*/ */
public async createNetwork(optionsArg: Parameters<typeof DockerNetwork.createNetwork>[1]) { public async createNetwork(
optionsArg: Parameters<typeof DockerNetwork.createNetwork>[1],
) {
return await DockerNetwork.createNetwork(this, optionsArg); return await DockerNetwork.createNetwork(this, optionsArg);
} }
@@ -127,7 +136,6 @@ export class DockerHost {
return await DockerNetwork.getNetworkByName(this, networkNameArg); return await DockerNetwork.getNetworkByName(this, networkNameArg);
} }
// ============== // ==============
// CONTAINERS // CONTAINERS
// ============== // ==============
@@ -174,8 +182,12 @@ export class DockerHost {
*/ */
public async getEventObservable(): Promise<plugins.rxjs.Observable<any>> { public async getEventObservable(): Promise<plugins.rxjs.Observable<any>> {
const response = await this.requestStreaming('GET', '/events'); const response = await this.requestStreaming('GET', '/events');
// requestStreaming now returns Node.js stream, not web stream
const nodeStream = response as plugins.smartstream.stream.Readable;
return plugins.rxjs.Observable.create((observer) => { return plugins.rxjs.Observable.create((observer) => {
response.on('data', (data) => { nodeStream.on('data', (data) => {
const eventString = data.toString(); const eventString = data.toString();
try { try {
const eventObject = JSON.parse(eventString); const eventObject = JSON.parse(eventString);
@@ -185,7 +197,7 @@ export class DockerHost {
} }
}); });
return () => { return () => {
response.emit('end'); nodeStream.emit('end');
}; };
}); });
} }
@@ -265,7 +277,8 @@ export class DockerHost {
// Docker's streaming endpoints (like /images/create) return newline-delimited JSON // Docker's streaming endpoints (like /images/create) return newline-delimited JSON
// which can't be parsed as a single JSON object // which can't be parsed as a single JSON object
const isStreamingEndpoint = routeArg.includes('/images/create') || const isStreamingEndpoint =
routeArg.includes('/images/create') ||
routeArg.includes('/images/load') || routeArg.includes('/images/load') ||
routeArg.includes('/build'); routeArg.includes('/build');
@@ -274,7 +287,11 @@ export class DockerHost {
} else { } else {
body = await response.text(); body = await response.text();
// Try to parse as JSON if it looks like JSON and is not a streaming response // Try to parse as JSON if it looks like JSON and is not a streaming response
if (!isStreamingEndpoint && body && (body.startsWith('{') || body.startsWith('['))) { if (
!isStreamingEndpoint &&
body &&
(body.startsWith('{') || body.startsWith('['))
) {
try { try {
body = JSON.parse(body); body = JSON.parse(body);
} catch { } catch {
@@ -287,7 +304,7 @@ export class DockerHost {
const legacyResponse = { const legacyResponse = {
statusCode: response.status, statusCode: response.status,
body: body, body: body,
headers: response.headers headers: response.headers,
}; };
if (response.status !== 200) { if (response.status !== 200) {
@@ -297,7 +314,11 @@ export class DockerHost {
return legacyResponse; return legacyResponse;
} }
public async requestStreaming(methodArg: string, routeArg: string, readStream?: plugins.smartstream.stream.Readable) { public async requestStreaming(
methodArg: string,
routeArg: string,
readStream?: plugins.smartstream.stream.Readable,
) {
const requestUrl = `${this.socketPath}${routeArg}`; const requestUrl = `${this.socketPath}${routeArg}`;
// Build the request using the fluent API // Build the request using the fluent API
@@ -319,7 +340,7 @@ export class DockerHost {
} }
counter++; counter++;
return chunkArg; return chunkArg;
} },
}); });
// Pipe through the logging duplex stream // Pipe through the logging duplex stream
@@ -330,7 +351,7 @@ export class DockerHost {
} }
// Execute the request based on method // Execute the request based on method
let response; let response: plugins.smartrequest.ICoreResponse;
switch (methodArg.toUpperCase()) { switch (methodArg.toUpperCase()) {
case 'GET': case 'GET':
response = await smartRequest.get(); response = await smartRequest.get();
@@ -350,10 +371,10 @@ export class DockerHost {
console.log(response.status); console.log(response.status);
// For streaming responses, get the Node.js stream // For streaming responses, get the web stream
const nodeStream = response.streamNode(); const webStream = response.stream();
if (!nodeStream) { if (!webStream) {
// If no stream is available, consume the body as text // If no stream is available, consume the body as text
const body = await response.text(); const body = await response.text();
console.log(body); console.log(body);
@@ -362,11 +383,14 @@ export class DockerHost {
return { return {
statusCode: response.status, statusCode: response.status,
body: body, body: body,
headers: response.headers headers: response.headers,
}; };
} }
// For streaming responses, return the stream with added properties // Convert web ReadableStream to Node.js stream for backward compatibility
const nodeStream = plugins.smartstream.nodewebhelpers.convertWebReadableToNodeReadable(webStream);
// Add properties for compatibility
(nodeStream as any).statusCode = response.status; (nodeStream as any).statusCode = response.status;
(nodeStream as any).body = ''; // For compatibility (nodeStream as any).body = ''; // For compatibility
@@ -382,10 +406,14 @@ export class DockerHost {
if (!optionsArg.bucketName) { if (!optionsArg.bucketName) {
throw new Error('bucketName is required'); throw new Error('bucketName is required');
} }
const bucket = await this.smartBucket.getBucketByName(optionsArg.bucketName); const bucket = await this.smartBucket.getBucketByName(
optionsArg.bucketName,
);
let wantedDirectory = await bucket.getBaseDirectory(); let wantedDirectory = await bucket.getBaseDirectory();
if (optionsArg.directoryPath) { if (optionsArg.directoryPath) {
wantedDirectory = await wantedDirectory.getSubDirectoryByName(optionsArg.directoryPath); wantedDirectory = await wantedDirectory.getSubDirectoryByName(
optionsArg.directoryPath,
);
} }
this.imageStore.options.bucketDir = wantedDirectory; this.imageStore.options.bucketDir = wantedDirectory;
} }

View File

@@ -17,7 +17,10 @@ export class DockerImage {
return images; return images;
} }
public static async getImageByName(dockerHost: DockerHost, imageNameArg: string) { public static async getImageByName(
dockerHost: DockerHost,
imageNameArg: string,
) {
const images = await this.getImages(dockerHost); const images = await this.getImages(dockerHost);
const result = images.find((image) => { const result = images.find((image) => {
if (image.RepoTags) { if (image.RepoTags) {
@@ -32,8 +35,8 @@ export class DockerImage {
public static async createFromRegistry( public static async createFromRegistry(
dockerHostArg: DockerHost, dockerHostArg: DockerHost,
optionsArg: { optionsArg: {
creationObject: interfaces.IImageCreationDescriptor creationObject: interfaces.IImageCreationDescriptor;
} },
): Promise<DockerImage> { ): Promise<DockerImage> {
// lets create a sanatized imageUrlObject // lets create a sanatized imageUrlObject
const imageUrlObject: { const imageUrlObject: {
@@ -50,7 +53,7 @@ export class DockerImage {
const imageTag = imageUrlObject.imageUrl.split(':')[1]; const imageTag = imageUrlObject.imageUrl.split(':')[1];
if (imageUrlObject.imageTag) { if (imageUrlObject.imageTag) {
throw new Error( throw new Error(
`imageUrl ${imageUrlObject.imageUrl} can't be tagged with ${imageUrlObject.imageTag} because it is already tagged with ${imageTag}` `imageUrl ${imageUrlObject.imageUrl} can't be tagged with ${imageUrlObject.imageTag} because it is already tagged with ${imageTag}`,
); );
} else { } else {
imageUrlObject.imageUrl = imageUrl; imageUrlObject.imageUrl = imageUrl;
@@ -65,12 +68,18 @@ export class DockerImage {
const response = await dockerHostArg.request( const response = await dockerHostArg.request(
'POST', 'POST',
`/images/create?fromImage=${encodeURIComponent( `/images/create?fromImage=${encodeURIComponent(
imageUrlObject.imageUrl imageUrlObject.imageUrl,
)}&tag=${encodeURIComponent(imageUrlObject.imageTag)}` )}&tag=${encodeURIComponent(imageUrlObject.imageTag)}`,
); );
if (response.statusCode < 300) { if (response.statusCode < 300) {
logger.log('info', `Successfully pulled image ${imageUrlObject.imageUrl} from the registry`); logger.log(
const image = await DockerImage.getImageByName(dockerHostArg, imageUrlObject.imageOriginTag); 'info',
`Successfully pulled image ${imageUrlObject.imageUrl} from the registry`,
);
const image = await DockerImage.getImageByName(
dockerHostArg,
imageUrlObject.imageOriginTag,
);
return image; return image;
} else { } else {
logger.log('error', `Failed at the attempt of creating a new image`); logger.log('error', `Failed at the attempt of creating a new image`);
@@ -87,15 +96,18 @@ export class DockerImage {
optionsArg: { optionsArg: {
creationObject: interfaces.IImageCreationDescriptor; creationObject: interfaces.IImageCreationDescriptor;
tarStream: plugins.smartstream.stream.Readable; tarStream: plugins.smartstream.stream.Readable;
} },
): Promise<DockerImage> { ): Promise<DockerImage> {
// Start the request for importing an image // Start the request for importing an image
const response = await dockerHostArg.requestStreaming( const response = await dockerHostArg.requestStreaming(
'POST', 'POST',
'/images/load', '/images/load',
optionsArg.tarStream optionsArg.tarStream,
); );
// requestStreaming now returns Node.js stream
const nodeStream = response as plugins.smartstream.stream.Readable;
/** /**
* Docker typically returns lines like: * Docker typically returns lines like:
* {"stream":"Loaded image: myrepo/myimage:latest"} * {"stream":"Loaded image: myrepo/myimage:latest"}
@@ -103,16 +115,16 @@ export class DockerImage {
* So we will collect those lines and parse out the final image name. * So we will collect those lines and parse out the final image name.
*/ */
let rawOutput = ''; let rawOutput = '';
response.on('data', (chunk) => { nodeStream.on('data', (chunk) => {
rawOutput += chunk.toString(); rawOutput += chunk.toString();
}); });
// Wrap the end event in a Promise for easier async/await usage // Wrap the end event in a Promise for easier async/await usage
await new Promise<void>((resolve, reject) => { await new Promise<void>((resolve, reject) => {
response.on('end', () => { nodeStream.on('end', () => {
resolve(); resolve();
}); });
response.on('error', (err) => { nodeStream.on('error', (err) => {
reject(err); reject(err);
}); });
}); });
@@ -144,7 +156,7 @@ export class DockerImage {
if (!loadedImageTag) { if (!loadedImageTag) {
throw new Error( throw new Error(
`Could not parse the loaded image info from Docker response.\nResponse was:\n${rawOutput}` `Could not parse the loaded image info from Docker response.\nResponse was:\n${rawOutput}`,
); );
} }
@@ -153,34 +165,31 @@ export class DockerImage {
// "myrepo/myimage:latest" OR "sha256:someHash..." // "myrepo/myimage:latest" OR "sha256:someHash..."
// If Docker gave you an ID (e.g. "sha256:..."), you may need a separate // If Docker gave you an ID (e.g. "sha256:..."), you may need a separate
// DockerImage.getImageById method; or if you prefer, you can treat it as a name. // DockerImage.getImageById method; or if you prefer, you can treat it as a name.
const newlyImportedImage = await DockerImage.getImageByName(dockerHostArg, loadedImageTag); const newlyImportedImage = await DockerImage.getImageByName(
dockerHostArg,
loadedImageTag,
);
if (!newlyImportedImage) { if (!newlyImportedImage) {
throw new Error( throw new Error(
`Image load succeeded, but no local reference found for "${loadedImageTag}".` `Image load succeeded, but no local reference found for "${loadedImageTag}".`,
); );
} }
logger.log( logger.log('info', `Successfully imported image "${loadedImageTag}".`);
'info',
`Successfully imported image "${loadedImageTag}".`
);
return newlyImportedImage; return newlyImportedImage;
} }
public static async tagImageByIdOrName( public static async tagImageByIdOrName(
dockerHost: DockerHost, dockerHost: DockerHost,
idOrNameArg: string, idOrNameArg: string,
newTagArg: string newTagArg: string,
) { ) {
const response = await dockerHost.request( const response = await dockerHost.request(
'POST', 'POST',
`/images/${encodeURIComponent(idOrNameArg)}/${encodeURIComponent(newTagArg)}` `/images/${encodeURIComponent(idOrNameArg)}/${encodeURIComponent(newTagArg)}`,
); );
} }
public static async buildImage(dockerHostArg: DockerHost, dockerImageTag) { public static async buildImage(dockerHostArg: DockerHost, dockerImageTag) {
@@ -249,37 +258,37 @@ export class DockerImage {
*/ */
public async exportToTarStream(): Promise<plugins.smartstream.stream.Readable> { public async exportToTarStream(): Promise<plugins.smartstream.stream.Readable> {
logger.log('info', `Exporting image ${this.RepoTags[0]} to tar stream.`); logger.log('info', `Exporting image ${this.RepoTags[0]} to tar stream.`);
const response = await this.dockerHost.requestStreaming('GET', `/images/${encodeURIComponent(this.RepoTags[0])}/get`); const response = await this.dockerHost.requestStreaming(
'GET',
`/images/${encodeURIComponent(this.RepoTags[0])}/get`,
);
// Check if response is a Node.js stream // requestStreaming now returns Node.js stream
if (!response || typeof response.on !== 'function') { const nodeStream = response as plugins.smartstream.stream.Readable;
throw new Error('Failed to get streaming response for image export');
}
let counter = 0; let counter = 0;
const webduplexStream = new plugins.smartstream.SmartDuplex({ const webduplexStream = new plugins.smartstream.SmartDuplex({
writeFunction: async (chunk, tools) => { writeFunction: async (chunk, tools) => {
if (counter % 1000 === 0) if (counter % 1000 === 0) console.log(`Got chunk: ${counter}`);
console.log(`Got chunk: ${counter}`);
counter++; counter++;
return chunk; return chunk;
} },
}); });
response.on('data', (chunk) => { nodeStream.on('data', (chunk) => {
if (!webduplexStream.write(chunk)) { if (!webduplexStream.write(chunk)) {
response.pause(); nodeStream.pause();
webduplexStream.once('drain', () => { webduplexStream.once('drain', () => {
response.resume(); nodeStream.resume();
}); });
} }
}); });
response.on('end', () => { nodeStream.on('end', () => {
webduplexStream.end(); webduplexStream.end();
}); });
response.on('error', (error) => { nodeStream.on('error', (error) => {
logger.log('error', `Error during image export: ${error.message}`); logger.log('error', `Error during image export: ${error.message}`);
webduplexStream.destroy(error); webduplexStream.destroy(error);
}); });

View File

@@ -22,14 +22,25 @@ export class DockerImageStore {
} }
// Method to store tar stream // Method to store tar stream
public async storeImage(imageName: string, tarStream: plugins.smartstream.stream.Readable): Promise<void> { public async storeImage(
imageName: string,
tarStream: plugins.smartstream.stream.Readable,
): Promise<void> {
logger.log('info', `Storing image ${imageName}...`); logger.log('info', `Storing image ${imageName}...`);
const uniqueProcessingId = plugins.smartunique.shortId(); const uniqueProcessingId = plugins.smartunique.shortId();
const initialTarDownloadPath = plugins.path.join(this.options.localDirPath, `${uniqueProcessingId}.tar`); const initialTarDownloadPath = plugins.path.join(
const extractionDir = plugins.path.join(this.options.localDirPath, uniqueProcessingId); this.options.localDirPath,
`${uniqueProcessingId}.tar`,
);
const extractionDir = plugins.path.join(
this.options.localDirPath,
uniqueProcessingId,
);
// Create a write stream to store the tar file // Create a write stream to store the tar file
const writeStream = plugins.smartfile.fsStream.createWriteStream(initialTarDownloadPath); const writeStream = plugins.smartfile.fsStream.createWriteStream(
initialTarDownloadPath,
);
// lets wait for the write stream to finish // lets wait for the write stream to finish
await new Promise((resolve, reject) => { await new Promise((resolve, reject) => {
@@ -37,23 +48,43 @@ export class DockerImageStore {
writeStream.on('finish', resolve); writeStream.on('finish', resolve);
writeStream.on('error', reject); writeStream.on('error', reject);
}); });
logger.log('info', `Image ${imageName} stored locally for processing. Extracting...`); logger.log(
'info',
`Image ${imageName} stored locally for processing. Extracting...`,
);
// lets process the image // lets process the image
const tarArchive = await plugins.smartarchive.SmartArchive.fromArchiveFile(initialTarDownloadPath); const tarArchive = await plugins.smartarchive.SmartArchive.fromArchiveFile(
initialTarDownloadPath,
);
await tarArchive.exportToFs(extractionDir); await tarArchive.exportToFs(extractionDir);
logger.log('info', `Image ${imageName} extracted.`); logger.log('info', `Image ${imageName} extracted.`);
await plugins.smartfile.fs.remove(initialTarDownloadPath); await plugins.smartfile.fs.remove(initialTarDownloadPath);
logger.log('info', `deleted original tar to save space.`); logger.log('info', `deleted original tar to save space.`);
logger.log('info', `now repackaging for s3...`); logger.log('info', `now repackaging for s3...`);
const smartfileIndexJson = await plugins.smartfile.SmartFile.fromFilePath(plugins.path.join(extractionDir, 'index.json')); const smartfileIndexJson = await plugins.smartfile.SmartFile.fromFilePath(
const smartfileManifestJson = await plugins.smartfile.SmartFile.fromFilePath(plugins.path.join(extractionDir, 'manifest.json')); plugins.path.join(extractionDir, 'index.json'),
const smartfileOciLayoutJson = await plugins.smartfile.SmartFile.fromFilePath(plugins.path.join(extractionDir, 'oci-layout')); );
const smartfileRepositoriesJson = await plugins.smartfile.SmartFile.fromFilePath(plugins.path.join(extractionDir, 'repositories')); const smartfileManifestJson =
await plugins.smartfile.SmartFile.fromFilePath(
plugins.path.join(extractionDir, 'manifest.json'),
);
const smartfileOciLayoutJson =
await plugins.smartfile.SmartFile.fromFilePath(
plugins.path.join(extractionDir, 'oci-layout'),
);
const smartfileRepositoriesJson =
await plugins.smartfile.SmartFile.fromFilePath(
plugins.path.join(extractionDir, 'repositories'),
);
const indexJson = JSON.parse(smartfileIndexJson.contents.toString()); const indexJson = JSON.parse(smartfileIndexJson.contents.toString());
const manifestJson = JSON.parse(smartfileManifestJson.contents.toString()); const manifestJson = JSON.parse(smartfileManifestJson.contents.toString());
const ociLayoutJson = JSON.parse(smartfileOciLayoutJson.contents.toString()); const ociLayoutJson = JSON.parse(
const repositoriesJson = JSON.parse(smartfileRepositoriesJson.contents.toString()); smartfileOciLayoutJson.contents.toString(),
);
const repositoriesJson = JSON.parse(
smartfileRepositoriesJson.contents.toString(),
);
indexJson.manifests[0].annotations['io.containerd.image.name'] = imageName; indexJson.manifests[0].annotations['io.containerd.image.name'] = imageName;
manifestJson[0].RepoTags[0] = imageName; manifestJson[0].RepoTags[0] = imageName;
@@ -62,10 +93,18 @@ export class DockerImageStore {
repositoriesJson[imageName] = repoFirstValue; repositoriesJson[imageName] = repoFirstValue;
delete repositoriesJson[repoFirstKey]; delete repositoriesJson[repoFirstKey];
smartfileIndexJson.contents = Buffer.from(JSON.stringify(indexJson, null, 2)); smartfileIndexJson.contents = Buffer.from(
smartfileManifestJson.contents = Buffer.from(JSON.stringify(manifestJson, null, 2)); JSON.stringify(indexJson, null, 2),
smartfileOciLayoutJson.contents = Buffer.from(JSON.stringify(ociLayoutJson, null, 2)); );
smartfileRepositoriesJson.contents = Buffer.from(JSON.stringify(repositoriesJson, null, 2)); smartfileManifestJson.contents = Buffer.from(
JSON.stringify(manifestJson, null, 2),
);
smartfileOciLayoutJson.contents = Buffer.from(
JSON.stringify(ociLayoutJson, null, 2),
);
smartfileRepositoriesJson.contents = Buffer.from(
JSON.stringify(repositoriesJson, null, 2),
);
await Promise.all([ await Promise.all([
smartfileIndexJson.write(), smartfileIndexJson.write(),
smartfileManifestJson.write(), smartfileManifestJson.write(),
@@ -77,8 +116,12 @@ export class DockerImageStore {
const tartools = new plugins.smartarchive.TarTools(); const tartools = new plugins.smartarchive.TarTools();
const newTarPack = await tartools.packDirectory(extractionDir); const newTarPack = await tartools.packDirectory(extractionDir);
const finalTarName = `${uniqueProcessingId}.processed.tar`; const finalTarName = `${uniqueProcessingId}.processed.tar`;
const finalTarPath = plugins.path.join(this.options.localDirPath, finalTarName); const finalTarPath = plugins.path.join(
const finalWriteStream = plugins.smartfile.fsStream.createWriteStream(finalTarPath); this.options.localDirPath,
finalTarName,
);
const finalWriteStream =
plugins.smartfile.fsStream.createWriteStream(finalTarPath);
await new Promise((resolve, reject) => { await new Promise((resolve, reject) => {
newTarPack.finalize(); newTarPack.finalize();
newTarPack.pipe(finalWriteStream); newTarPack.pipe(finalWriteStream);
@@ -87,7 +130,8 @@ export class DockerImageStore {
}); });
logger.log('ok', `Repackaged image ${imageName} for s3.`); logger.log('ok', `Repackaged image ${imageName} for s3.`);
await plugins.smartfile.fs.remove(extractionDir); await plugins.smartfile.fs.remove(extractionDir);
const finalTarReadStream = plugins.smartfile.fsStream.createReadStream(finalTarPath); const finalTarReadStream =
plugins.smartfile.fsStream.createReadStream(finalTarPath);
await this.options.bucketDir.fastPutStream({ await this.options.bucketDir.fastPutStream({
stream: finalTarReadStream, stream: finalTarReadStream,
path: `${imageName}.tar`, path: `${imageName}.tar`,
@@ -102,8 +146,13 @@ export class DockerImageStore {
public async stop() {} public async stop() {}
// Method to retrieve tar stream // Method to retrieve tar stream
public async getImage(imageName: string): Promise<plugins.smartstream.stream.Readable> { public async getImage(
const imagePath = plugins.path.join(this.options.localDirPath, `${imageName}.tar`); imageName: string,
): Promise<plugins.smartstream.stream.Readable> {
const imagePath = plugins.path.join(
this.options.localDirPath,
`${imageName}.tar`,
);
if (!(await plugins.smartfile.fs.fileExists(imagePath))) { if (!(await plugins.smartfile.fs.fileExists(imagePath))) {
throw new Error(`Image ${imageName} does not exist.`); throw new Error(`Image ${imageName} does not exist.`);

View File

@@ -6,7 +6,9 @@ import { DockerService } from './classes.service.js';
import { logger } from './logger.js'; import { logger } from './logger.js';
export class DockerNetwork { export class DockerNetwork {
public static async getNetworks(dockerHost: DockerHost): Promise<DockerNetwork[]> { public static async getNetworks(
dockerHost: DockerHost,
): Promise<DockerNetwork[]> {
const dockerNetworks: DockerNetwork[] = []; const dockerNetworks: DockerNetwork[] = [];
const response = await dockerHost.request('GET', '/networks'); const response = await dockerHost.request('GET', '/networks');
for (const networkObject of response.body) { for (const networkObject of response.body) {
@@ -17,14 +19,19 @@ export class DockerNetwork {
return dockerNetworks; return dockerNetworks;
} }
public static async getNetworkByName(dockerHost: DockerHost, dockerNetworkNameArg: string) { public static async getNetworkByName(
dockerHost: DockerHost,
dockerNetworkNameArg: string,
) {
const networks = await DockerNetwork.getNetworks(dockerHost); const networks = await DockerNetwork.getNetworks(dockerHost);
return networks.find((dockerNetwork) => dockerNetwork.Name === dockerNetworkNameArg); return networks.find(
(dockerNetwork) => dockerNetwork.Name === dockerNetworkNameArg,
);
} }
public static async createNetwork( public static async createNetwork(
dockerHost: DockerHost, dockerHost: DockerHost,
networkCreationDescriptor: interfaces.INetworkCreationDescriptor networkCreationDescriptor: interfaces.INetworkCreationDescriptor,
): Promise<DockerNetwork> { ): Promise<DockerNetwork> {
const response = await dockerHost.request('POST', '/networks/create', { const response = await dockerHost.request('POST', '/networks/create', {
Name: networkCreationDescriptor.Name, Name: networkCreationDescriptor.Name,
@@ -47,9 +54,15 @@ export class DockerNetwork {
}); });
if (response.statusCode < 300) { if (response.statusCode < 300) {
logger.log('info', 'Created network successfully'); logger.log('info', 'Created network successfully');
return await DockerNetwork.getNetworkByName(dockerHost, networkCreationDescriptor.Name); return await DockerNetwork.getNetworkByName(
dockerHost,
networkCreationDescriptor.Name,
);
} else { } else {
logger.log('error', 'There has been an error creating the wanted network'); logger.log(
'error',
'There has been an error creating the wanted network',
);
return null; return null;
} }
} }
@@ -75,7 +88,7 @@ export class DockerNetwork {
Subnet: string; Subnet: string;
IPRange: string; IPRange: string;
Gateway: string; Gateway: string;
} },
]; ];
}; };
@@ -87,7 +100,10 @@ export class DockerNetwork {
* removes the network * removes the network
*/ */
public async remove() { public async remove() {
const response = await this.dockerHost.request('DELETE', `/networks/${this.Id}`); const response = await this.dockerHost.request(
'DELETE',
`/networks/${this.Id}`,
);
} }
public async getContainersOnNetwork(): Promise< public async getContainersOnNetwork(): Promise<
@@ -100,7 +116,10 @@ export class DockerNetwork {
}> }>
> { > {
const returnArray = []; const returnArray = [];
const response = await this.dockerHost.request('GET', `/networks/${this.Id}`); const response = await this.dockerHost.request(
'GET',
`/networks/${this.Id}`,
);
for (const key of Object.keys(response.body.Containers)) { for (const key of Object.keys(response.body.Containers)) {
returnArray.push(response.body.Containers[key]); returnArray.push(response.body.Containers[key]);
} }

View File

@@ -22,14 +22,17 @@ export class DockerSecret {
return secrets.find((secret) => secret.ID === idArg); return secrets.find((secret) => secret.ID === idArg);
} }
public static async getSecretByName(dockerHostArg: DockerHost, nameArg: string) { public static async getSecretByName(
dockerHostArg: DockerHost,
nameArg: string,
) {
const secrets = await this.getSecrets(dockerHostArg); const secrets = await this.getSecrets(dockerHostArg);
return secrets.find((secret) => secret.Spec.Name === nameArg); return secrets.find((secret) => secret.Spec.Name === nameArg);
} }
public static async createSecret( public static async createSecret(
dockerHostArg: DockerHost, dockerHostArg: DockerHost,
secretDescriptor: interfaces.ISecretCreationDescriptor secretDescriptor: interfaces.ISecretCreationDescriptor,
) { ) {
const labels: interfaces.TLabels = { const labels: interfaces.TLabels = {
...secretDescriptor.labels, ...secretDescriptor.labels,
@@ -45,7 +48,7 @@ export class DockerSecret {
Object.assign(newSecretInstance, response.body); Object.assign(newSecretInstance, response.body);
Object.assign( Object.assign(
newSecretInstance, newSecretInstance,
await DockerSecret.getSecretByID(dockerHostArg, newSecretInstance.ID) await DockerSecret.getSecretByID(dockerHostArg, newSecretInstance.ID),
); );
return newSecretInstance; return newSecretInstance;
} }
@@ -77,7 +80,7 @@ export class DockerSecret {
Name: this.Spec.Name, Name: this.Spec.Name,
Labels: this.Spec.Labels, Labels: this.Spec.Labels,
Data: plugins.smartstring.base64.encode(contentArg), Data: plugins.smartstring.base64.encode(contentArg),
} },
); );
} }

View File

@@ -21,7 +21,7 @@ export class DockerService {
public static async getServiceByName( public static async getServiceByName(
dockerHost: DockerHost, dockerHost: DockerHost,
networkName: string networkName: string,
): Promise<DockerService> { ): Promise<DockerService> {
const allServices = await DockerService.getServices(dockerHost); const allServices = await DockerService.getServices(dockerHost);
const wantedService = allServices.find((service) => { const wantedService = allServices.find((service) => {
@@ -35,10 +35,13 @@ export class DockerService {
*/ */
public static async createService( public static async createService(
dockerHost: DockerHost, dockerHost: DockerHost,
serviceCreationDescriptor: interfaces.IServiceCreationDescriptor serviceCreationDescriptor: interfaces.IServiceCreationDescriptor,
): Promise<DockerService> { ): Promise<DockerService> {
// lets get the image // lets get the image
logger.log('info', `now creating service ${serviceCreationDescriptor.name}`); logger.log(
'info',
`now creating service ${serviceCreationDescriptor.name}`,
);
// await serviceCreationDescriptor.image.pullLatestImageFromRegistry(); // await serviceCreationDescriptor.image.pullLatestImageFromRegistry();
const serviceVersion = await serviceCreationDescriptor.image.getVersion(); const serviceVersion = await serviceCreationDescriptor.image.getVersion();
@@ -71,8 +74,12 @@ export class DockerService {
}); });
} }
if (serviceCreationDescriptor.resources && serviceCreationDescriptor.resources.volumeMounts) { if (
for (const volumeMount of serviceCreationDescriptor.resources.volumeMounts) { serviceCreationDescriptor.resources &&
serviceCreationDescriptor.resources.volumeMounts
) {
for (const volumeMount of serviceCreationDescriptor.resources
.volumeMounts) {
mounts.push({ mounts.push({
Target: volumeMount.containerFsPath, Target: volumeMount.containerFsPath,
Source: volumeMount.hostFsPath, Source: volumeMount.hostFsPath,
@@ -130,7 +137,8 @@ export class DockerService {
// lets configure limits // lets configure limits
const memoryLimitMB = const memoryLimitMB =
serviceCreationDescriptor.resources && serviceCreationDescriptor.resources.memorySizeMB serviceCreationDescriptor.resources &&
serviceCreationDescriptor.resources.memorySizeMB
? serviceCreationDescriptor.resources.memorySizeMB ? serviceCreationDescriptor.resources.memorySizeMB
: 1000; : 1000;
@@ -139,7 +147,8 @@ export class DockerService {
}; };
if (serviceCreationDescriptor.resources) { if (serviceCreationDescriptor.resources) {
limits.MemoryBytes = serviceCreationDescriptor.resources.memorySizeMB * 1000000; limits.MemoryBytes =
serviceCreationDescriptor.resources.memorySizeMB * 1000000;
} }
const response = await dockerHost.request('POST', '/services/create', { const response = await dockerHost.request('POST', '/services/create', {
@@ -182,7 +191,7 @@ export class DockerService {
const createdService = await DockerService.getServiceByName( const createdService = await DockerService.getServiceByName(
dockerHost, dockerHost,
serviceCreationDescriptor.name serviceCreationDescriptor.name,
); );
return createdService; return createdService;
} }
@@ -228,7 +237,10 @@ export class DockerService {
} }
public async reReadFromDockerEngine() { public async reReadFromDockerEngine() {
const dockerData = await this.dockerHostRef.request('GET', `/services/${this.ID}`); const dockerData = await this.dockerHostRef.request(
'GET',
`/services/${this.ID}`,
);
// TODO: Better assign: Object.assign(this, dockerData); // TODO: Better assign: Object.assign(this, dockerData);
} }
@@ -236,14 +248,21 @@ export class DockerService {
// TODO: implement digest based update recognition // TODO: implement digest based update recognition
await this.reReadFromDockerEngine(); await this.reReadFromDockerEngine();
const dockerImage = await DockerImage.createFromRegistry(this.dockerHostRef, { const dockerImage = await DockerImage.createFromRegistry(
this.dockerHostRef,
{
creationObject: { creationObject: {
imageUrl: this.Spec.TaskTemplate.ContainerSpec.Image, imageUrl: this.Spec.TaskTemplate.ContainerSpec.Image,
} },
}); },
);
const imageVersion = new plugins.smartversion.SmartVersion(dockerImage.Labels.version); const imageVersion = new plugins.smartversion.SmartVersion(
const serviceVersion = new plugins.smartversion.SmartVersion(this.Spec.Labels.version); dockerImage.Labels.version,
);
const serviceVersion = new plugins.smartversion.SmartVersion(
this.Spec.Labels.version,
);
if (imageVersion.greaterThan(serviceVersion)) { if (imageVersion.greaterThan(serviceVersion)) {
console.log(`service ${this.Spec.Name} needs to be updated`); console.log(`service ${this.Spec.Name} needs to be updated`);
return true; return true;

View File

@@ -2,7 +2,7 @@ import * as plugins from './plugins.js';
export const packageDir = plugins.path.resolve( export const packageDir = plugins.path.resolve(
plugins.smartpath.get.dirnameFromImportMetaUrl(import.meta.url), plugins.smartpath.get.dirnameFromImportMetaUrl(import.meta.url),
'../' '../',
); );
export const nogitDir = plugins.path.resolve(packageDir, '.nogit/'); export const nogitDir = plugins.path.resolve(packageDir, '.nogit/');

View File

@@ -1,5 +1,5 @@
// node native path // node native path
import * as path from 'path'; import * as path from 'node:path';
export { path }; export { path };

View File

@@ -6,9 +6,9 @@
"module": "NodeNext", "module": "NodeNext",
"moduleResolution": "NodeNext", "moduleResolution": "NodeNext",
"esModuleInterop": true, "esModuleInterop": true,
"verbatimModuleSyntax": true "verbatimModuleSyntax": true,
"baseUrl": ".",
"paths": {}
}, },
"exclude": [ "exclude": ["dist_*/**/*.d.ts"]
"dist_*/**/*.d.ts"
]
} }