Compare commits
47 Commits
| SHA1 |
|---|
| 889b017d4f |
| 35e8eff092 |
| 2ecd4e9d7c |
| 08dbad47bc |
| 15e5dedae4 |
| 5834721da8 |
| 2f31e14cbe |
| 5691e5fb78 |
| 8d043d20a8 |
| 6fe70e0a1d |
| cc9c20882e |
| 08af9fec14 |
| b8a26bf3bd |
| e6432b4ea9 |
| e9975ba7b8 |
| 396ce29d7a |
| 7c0935d585 |
| 52af76b7ed |
| 414d7dd727 |
| 4b1c908b89 |
| 6e313261e7 |
| 42df15a523 |
| 7ef2ebcf5b |
| 87f26b7b63 |
| ffdc61fb42 |
| 5b25704cf8 |
| 00e6033d8b |
| 453040983d |
| 456858bc36 |
| 606c82dafa |
| 9fc4afe4b8 |
| 90689c2645 |
| 4a1d649e5e |
| 66bd36dc4f |
| 349d711cc5 |
| c74a4bcd5b |
| ff835c4160 |
| 05eceeb056 |
| de55beda08 |
| 9aa2b0c7be |
| a283bbfba0 |
| 8a4e300581 |
| 6b0d96b745 |
| a08c11838f |
| 7c5225125c |
| bc4778f7db |
| 2e7e8ae5cf |
@@ -6,8 +6,8 @@ on:
       - '**'
 
 env:
-  IMAGE: registry.gitlab.com/hosttoday/ht-docker-node:npmci
+  IMAGE: code.foss.global/host.today/ht-docker-node:npmci
-  NPMCI_COMPUTED_REPOURL: https://${{gitea.repository_owner}}:${{secrets.GITEA_TOKEN}}@gitea.lossless.digital/${{gitea.repository}}.git
+  NPMCI_COMPUTED_REPOURL: https://${{gitea.repository_owner}}:${{secrets.GITEA_TOKEN}}@/${{gitea.repository}}.git
   NPMCI_TOKEN_NPM: ${{secrets.NPMCI_TOKEN_NPM}}
   NPMCI_TOKEN_NPM2: ${{secrets.NPMCI_TOKEN_NPM2}}
   NPMCI_GIT_GITHUBTOKEN: ${{secrets.NPMCI_GIT_GITHUBTOKEN}}
@@ -26,7 +26,7 @@ jobs:
       - name: Install pnpm and npmci
         run: |
           pnpm install -g pnpm
-          pnpm install -g @shipzone/npmci
+          pnpm install -g @ship.zone/npmci
 
       - name: Run npm prepare
         run: npmci npm prepare
@@ -6,8 +6,8 @@ on:
       - '*'
 
 env:
-  IMAGE: registry.gitlab.com/hosttoday/ht-docker-node:npmci
+  IMAGE: code.foss.global/host.today/ht-docker-node:npmci
-  NPMCI_COMPUTED_REPOURL: https://${{gitea.repository_owner}}:${{secrets.GITEA_TOKEN}}@gitea.lossless.digital/${{gitea.repository}}.git
+  NPMCI_COMPUTED_REPOURL: https://${{gitea.repository_owner}}:${{secrets.GITEA_TOKEN}}@/${{gitea.repository}}.git
   NPMCI_TOKEN_NPM: ${{secrets.NPMCI_TOKEN_NPM}}
   NPMCI_TOKEN_NPM2: ${{secrets.NPMCI_TOKEN_NPM2}}
   NPMCI_GIT_GITHUBTOKEN: ${{secrets.NPMCI_GIT_GITHUBTOKEN}}
@@ -26,7 +26,7 @@ jobs:
       - name: Prepare
         run: |
           pnpm install -g pnpm
-          pnpm install -g @shipzone/npmci
+          pnpm install -g @ship.zone/npmci
           npmci npm prepare
 
       - name: Audit production dependencies
@@ -54,7 +54,7 @@ jobs:
       - name: Prepare
         run: |
           pnpm install -g pnpm
-          pnpm install -g @shipzone/npmci
+          pnpm install -g @ship.zone/npmci
           npmci npm prepare
 
       - name: Test stable
@@ -82,7 +82,7 @@ jobs:
       - name: Prepare
         run: |
           pnpm install -g pnpm
-          pnpm install -g @shipzone/npmci
+          pnpm install -g @ship.zone/npmci
           npmci npm prepare
 
       - name: Release
@@ -104,7 +104,7 @@ jobs:
       - name: Prepare
         run: |
           pnpm install -g pnpm
-          pnpm install -g @shipzone/npmci
+          pnpm install -g @ship.zone/npmci
           npmci npm prepare
 
       - name: Code quality
7  .gitignore  (vendored)
@@ -3,7 +3,6 @@
 # artifacts
 coverage/
 public/
-pages/
 
 # installs
 node_modules/
@@ -17,4 +16,8 @@ node_modules/
 dist/
 dist_*/
 
-# custom
+# AI
+.claude/
+.serena/
+
+#------# custom
347  changelog.md  (Normal file)
@@ -0,0 +1,347 @@
# Changelog

## 2025-11-24 - 5.0.2 - fix(DockerContainer)
Fix getContainerById to return undefined for non-existent containers

- Prevented creation of an invalid DockerContainer from Docker API error responses when a container does not exist.
- Changed DockerContainer._fromId to use the list+find pattern and return Promise<DockerContainer | undefined>.
- Updated DockerHost.getContainerById to return Promise<DockerContainer | undefined> for type safety and consistent behavior.
- Added tests to verify undefined is returned for non-existent container IDs and that valid IDs return DockerContainer instances.
- Bumped package version to 5.0.1 and updated changelog and readme hints to document the fix.

## 2025-11-24 - 5.0.0 - BREAKING CHANGE(DockerHost)
Rename array-returning get* methods to list* on DockerHost and related resource classes; update docs, tests and changelog

- Renamed public DockerHost methods: getContainers → listContainers, getNetworks → listNetworks, getServices → listServices, getImages → listImages, getSecrets → listSecrets.
- Renamed DockerNetwork.getContainersOnNetwork → DockerNetwork.listContainersOnNetwork and updated usages (e.g. getContainersOnNetworkForService).
- Updated internal/static method docs/comments to recommend dockerHost.list*() usage and adjusted implementations accordingly.
- Updated README, readme.hints.md, tests (test.nonci.node+deno.ts) and changelog to reflect the new list* method names.
- Bumped package version to 4.0.0.
- Migration note: replace calls to get*() with list*() for methods that return multiple items (arrays). Single-item getters such as getContainerById or getNetworkByName remain unchanged.

## 2025-11-24 - 5.0.1 - fix(DockerContainer)
Fix getContainerById() to return undefined instead of invalid container object when container doesn't exist

**Bug Fixed:**
- `getContainerById()` was creating a DockerContainer object from error responses when a container didn't exist
- The error object `{ message: "No such container: ..." }` was being passed to the constructor
- Calling `.logs()` on this invalid container returned "[object Object]" instead of logs

**Solution:**
- Changed `DockerContainer._fromId()` to use the list+filter pattern (consistent with all other resource getters)
- Now returns `undefined` when container is not found (matches DockerImage, DockerNetwork, DockerService, DockerSecret behavior)
- Updated return type to `Promise<DockerContainer | undefined>` for type safety
- Added tests to verify undefined is returned for non-existent containers

**Migration:**
No breaking changes - users should already be checking for undefined/null based on TypeScript types and documentation.

## 2025-11-24 - 4.0.0 - BREAKING CHANGE: Rename list methods for consistency

**Breaking Changes:**
- Renamed all "get*" methods that return arrays to "list*" methods for better clarity:
  - `getContainers()` → `listContainers()`
  - `getNetworks()` → `listNetworks()`
  - `getServices()` → `listServices()`
  - `getImages()` → `listImages()`
  - `getSecrets()` → `listSecrets()`
  - `getContainersOnNetwork()` → `listContainersOnNetwork()` (on DockerNetwork class)

**Migration Guide:**
Update all method calls from `get*()` to `list*()` where the method returns an array of resources. Single-item getters like `getContainerById()`, `getNetworkByName()`, etc. remain unchanged.

**Rationale:**
The `list*` naming convention more clearly indicates that these methods return multiple items (arrays), while `get*` methods are reserved for retrieving single items by ID or name. This follows standard API design patterns and improves code readability.

## 2025-11-24 - 3.0.2 - fix(readme)
Update README to document 3.0.0+ changes: architecture refactor, streaming improvements, health check and circular dependency fixes

- Documented major refactor to a Clean OOP / Facade pattern with DockerHost as the single entry point
- Added/clarified real-time container streaming APIs: streamLogs(), attach(), exec()
- Clarified support for flexible descriptors (accept both string references and class instances)
- Documented complete container lifecycle API (start, stop, remove, logs, inspect, stats)
- Documented new ping() health check method to verify Docker daemon availability
- Noted fix for circular dependency issues in Node.js by using type-only imports
- Mentioned improved TypeScript definitions and expanded examples, migration guides, and real-world use cases

## 2025-11-24 - 3.0.1 - fix(classes.base)
Use type-only import for DockerHost in classes.base to avoid runtime side-effects

- Changed the import in ts/classes.base.ts to a type-only import: import type { DockerHost } from './classes.host.js';
- Prevents a runtime import of classes.host when only the type is needed, reducing risk of circular dependencies and unintended side-effects during module initialization.
- No behavior changes to the public API — TypeScript-only change; intended to improve bundling and runtime stability.

## 2025-11-24 - 3.0.0 - BREAKING CHANGE(DockerHost)
Refactor public API to DockerHost facade; introduce DockerResource base; make resource static methods internal; support flexible descriptors and stream compatibility

- Refactored architecture: DockerHost is now the single public entry point (Facade) for all operations; direct static calls like DockerImage.createFromRegistry(...) are now internal and replaced by DockerHost.createImageFromRegistry(...) and similar factory methods.
- Introduced DockerResource abstract base class used by all resource classes (DockerContainer, DockerImage, DockerNetwork, DockerSecret, DockerService) with a required refresh() method and standardized dockerHost property.
- Static methods on resource classes were renamed / scoped as internal (prefixed with _): _list, _fromName/_fromId, _create, _createFromRegistry, _createFromTarStream, _build, etc. Consumers should call DockerHost methods instead.
- Creation descriptor interfaces (container, service, etc.) now accept either string identifiers or resource instances (e.g. image: string | DockerImage, networks: (string | DockerNetwork)[], secrets: (string | DockerSecret)[]). DockerHost resolves instances internally.
- DockerImageStore imageStore has been made private on DockerHost; new public methods DockerHost.storeImage(name, stream) and DockerHost.retrieveImage(name) provide access to the image store.
- Streaming compatibility: updated requestStreaming to convert web ReadableStreams (smartrequest v5+) to Node.js streams via smartstream.nodewebhelpers, preserving backward compatibility for existing streaming APIs (container logs, attach, exec, image import/export, events).
- Container enhancements: added full lifecycle and streaming/interactive APIs on DockerContainer: refresh(), inspect(), start(), stop(), remove(), logs(), stats(), streamLogs(), attach(), exec().
- Service creation updated: resolves image/network/secret descriptors (strings or instances); adds labels.version from image; improved resource handling and port/secret/network resolution.
- Network and Secret classes updated to extend DockerResource and to expose refresh(), remove() and lookup methods via DockerHost (createNetwork/listNetworks/getNetworkByName, createSecret/listSecrets/getSecretByName/getSecretById).
- Tests and docs updated: migration guide and examples added (readme.hints.md, README); test timeout reduced from 600s to 300s in package.json.
- BREAKING: Public API changes require consumers to migrate away from direct resource static calls and direct imageStore access to the new DockerHost-based factory methods and storeImage/retrieveImage APIs.

## 2025-11-18 - 2.1.0 - feat(DockerHost)
Add DockerHost.ping() to check Docker daemon availability and document health-check usage

- Add DockerHost.ping() method that issues a GET to /_ping and throws an error if the response status is not 200
- Update README: show ping() in Quick Start, add health check examples (isDockerHealthy, waitForDocker) and mention Health Checks in Key Concepts

## 2025-11-18 - 2.0.0 - BREAKING CHANGE(DockerHost)
Rename DockerHost constructor option 'dockerSockPath' to 'socketPath' and update internal socket path handling

- Breaking: constructor option renamed from 'dockerSockPath' to 'socketPath' — callers must update their code.
- Constructor now reads the provided 'socketPath' option first, then falls back to DOCKER_HOST, CI, and finally the default unix socket.
- README examples and documentation updated to use 'socketPath'.

## 2025-11-17 - 1.3.6 - fix(streaming)
Convert smartrequest v5 web ReadableStreams to Node.js streams and update deps for streaming compatibility

- Upgrade @push.rocks/smartrequest to ^5.0.1 and bump @git.zone dev tooling (@git.zone/tsbuild, tsrun, tstest).
- requestStreaming now uses response.stream() (web ReadableStream) and converts it to a Node.js Readable via plugins.smartstream.nodewebhelpers.convertWebReadableToNodeReadable for backward compatibility.
- Updated consumers of streaming responses (DockerHost.getEventObservable, DockerImage.createFromTarStream, DockerImage.exportToTarStream) to work with the converted Node.js stream and preserve event/backpressure semantics (.on, .pause, .resume).
- Added readme.hints.md documenting the smartrequest v5 migration, conversion approach, modified files, and test/build status (type errors resolved and Node.js tests passing).
- Removed project metadata file (.serena/project.yml) from the repository.

## 2025-08-19 - 1.3.5 - fix(core)
Stabilize CI/workflows and runtime: update CI images/metadata, improve streaming requests and image handling, and fix tests & package metadata

- Update CI workflows and images: switch workflow IMAGE to code.foss.global/host.today/ht-docker-node:npmci, fix NPMCI_COMPUTED_REPOURL placeholders, and replace @shipzone/npmci with @ship.zone/npmci in workflows
- Update npmextra.json gitzone metadata (githost -> code.foss.global, gitscope -> apiclient.xyz, npmPackagename -> @apiclient.xyz/docker) and npmdocker.baseImage -> host.today/ht-docker-node:npmci
- Adjust package.json repository/bugs/homepage to code.foss.global, add pnpm overrides entry and normalize package metadata
- Improve DockerHost streaming and request handling: reduce requestStreaming timeout to 30s, enable autoDrain for streaming requests, improve response parsing for streaming vs JSON endpoints to avoid hangs
- Enhance DockerImage and DockerImageStore stream handling and tar processing: more robust import/export parsing, safer stream-to-file writes, repackaging steps, and error handling
- Unskip and update tests: re-enable DockerImageStore integration test, change stored image name to 'hello2', add formatting fixes and ensure cleanup stops the test DockerHost
- Miscellaneous code and docs cleanup: numerous formatting fixes and trailing-comma normalization across README and TS sources, update commitinfo and logger newline fixes, and add local tool ignores (.claude/.serena) to .gitignore

## 2025-08-19 - 1.3.4 - fix(test)
Increase test timeout, enable DockerImageStore test, update test image name, bump smartrequest patch, and add local claude settings

- Increase tstest timeout from 120s to 600s in package.json to accommodate longer-running integration tests.
- Unskip the DockerImageStore integration test and change stored image name from 'hello' to 'hello2' in test/test.nonci.node.ts.
- Bump dependency @push.rocks/smartrequest from ^4.3.0 to ^4.3.1.
- Add .claude/settings.local.json to allow local agent permissions for running tests and related tooling.

## 2025-08-19 - 1.3.3 - fix(classes.host)
Adjust requestStreaming timeout and autoDrain; stabilize tests

- Reduced requestStreaming timeout from 10 minutes to 30 seconds to avoid long-running hanging requests.
- Enabled autoDrain for streaming requests to ensure response streams are properly drained and reduce resource issues.
- Marked the DockerImageStore S3 integration test as skipped to avoid CI dependence on external S3 and added a cleanup test to stop the test DockerHost.
- Added local tool settings file (.claude/settings.local.json) with local permissions (development-only).

## 2025-08-18 - 1.3.2 - fix(package.json)
Fix test script timeout typo, update dependency versions, and add typings & project configs

- Fix test script: correct 'tineout' -> 'timeout' for npm test command and set timeout to 120s
- Add 'typings': 'dist_ts/index.d.ts' to package.json
- Bump dependencies to newer compatible versions (notable packages: @push.rocks/lik, @push.rocks/smartarchive, @push.rocks/smartbucket, @push.rocks/smartfile, @push.rocks/smartlog, @push.rocks/smartpromise, @push.rocks/smartstream, rxjs)
- Add project/config files: .serena/project.yml and .claude/settings.local.json (editor/CI metadata)
- Include generated cache/metadata files (typescript document symbols cache) — not source changes but tooling/cache artifacts

## 2025-08-18 - 1.3.1 - fix(test)
Update test setup and devDependencies; adjust test import and add package metadata

- Update test script to run with additional flags: --verbose, --logfile and --tineout 120
- Bump devDependencies: @git.zone/tsbuild -> ^2.6.7, @git.zone/tsrun -> ^1.3.3, @git.zone/tstest -> ^2.3.5, @push.rocks/qenv -> ^6.1.3
- Change test import from @push.rocks/tapbundle to @git.zone/tstest/tapbundle
- Add typings field (dist_ts/index.d.ts)
- Add packageManager field for pnpm@10.14.0 with integrity hash

## 2024-12-23 - 1.3.0 - feat(core)
Initial release of Docker client with TypeScript support

- Provides easy communication with Docker's remote API from Node.js
- Includes implementations for managing Docker services, networks, secrets, containers, and images

## 2024-12-23 - 1.2.8 - fix(core)
Improved the image creation process from tar stream in DockerImage class.

- Enhanced `DockerImage.createFromTarStream` method to handle streamed response and parse imported image details.
- Fixed the dependency version for `@push.rocks/smartarchive` in package.json.

## 2024-10-13 - 1.2.7 - fix(core)
Prepare patch release with minor fixes and improvements

## 2024-10-13 - 1.2.6 - fix(core)
Minor refactoring and code quality improvements.

## 2024-10-13 - 1.2.5 - fix(dependencies)
Update dependencies for stability improvements

- Updated @push.rocks/smartstream to version ^3.0.46
- Updated @push.rocks/tapbundle to version ^5.3.0
- Updated @types/node to version 22.7.5

## 2024-10-13 - 1.2.4 - fix(core)
Refactored DockerImageStore constructor to remove DockerHost dependency

- Adjusted DockerImageStore constructor to remove dependency on DockerHost
- Updated ts/classes.host.ts to align with DockerImageStore's new constructor signature

## 2024-08-21 - 1.2.3 - fix(dependencies)
Update dependencies to the latest versions and fix image export test

- Updated several dependencies to their latest versions in package.json.
- Enabled the previously skipped 'should export images' test.

## 2024-06-10 - 1.2.1-1.2.2 - Core/General
General updates and fixes.

- Fix core update

## 2024-06-10 - 1.2.0 - Core
Core updates and bug fixes.

- Fix core update

## 2024-06-08 - 1.2.0 - General/Core
Major release with core enhancements.

- Processing images with extraction, retagging, repackaging, and long-term storage

## 2024-06-06 - 1.1.4 - General/Imagestore
Significant feature addition.

- Add feature to process images with extraction, retagging, repackaging, and long-term storage

## 2024-05-08 - 1.0.112 - Images
Add new functionality for image handling.

- Can now import and export images
- Start work on local 100% JS OCI image registry

## 2024-06-05 - 1.1.0-1.1.3 - Core
Regular updates and fixes.

- Fix core update

## 2024-02-02 - 1.0.105-1.0.110 - Core
Routine core updates and fixes.

- Fix core update

## 2022-10-17 - 1.0.103-1.0.104 - Core
Routine core updates.

- Fix core update

## 2020-10-01 - 1.0.99-1.0.102 - Core
Routine core updates.

- Fix core update

## 2019-09-22 - 1.0.73-1.0.78 - Core
Routine updates and core fixes.

- Fix core update

## 2019-09-13 - 1.0.60-1.0.72 - Core
Routine updates and core fixes.

- Fix core update

## 2019-08-16 - 1.0.43-1.0.59 - Core
Routine updates and core fixes.

- Fix core update

## 2019-08-15 - 1.0.37-1.0.42 - Core
Routine updates and core fixes.

- Fix core update

## 2019-08-14 - 1.0.31-1.0.36 - Core
Routine updates and core fixes.

- Fix core update

## 2019-01-10 - 1.0.27-1.0.30 - Core
Routine updates and core fixes.

- Fix core update

## 2018-07-16 - 1.0.23-1.0.24 - Core
Routine updates and core fixes.

- Fix core shift to new style

## 2017-07-16 - 1.0.20-1.0.22 - General
Routine updates and fixes.

- Update node_modules within npmdocker

## 2017-04-02 - 1.0.18-1.0.19 - General
Routine updates and fixes.

- Work with npmdocker and npmts 7.x.x
- CI updates

## 2016-07-31 - 1.0.17 - General
Enhancements and fixes.

- Now waiting for response to be stored before ending streaming request
- Cosmetic fix

## 2016-07-29 - 1.0.14-1.0.16 - General
Multiple updates and features added.

- Fix request for change observable and add npmdocker
- Add request typings

## 2016-07-28 - 1.0.13 - Core
Fixes and preparations.

- Fixed request for newer docker
- Prepare for npmdocker

## 2016-06-16 - 1.0.0-1.0.2 - General
Initial sequence of releases, significant feature additions and CI setups.

- Implement container start and stop
- Implement list containers and related functions
- Add tests with in docker environment

## 2016-04-12 - unknown - Initial Commit
Initial project setup.

- Initial commit
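For readers skimming the changelog above, a minimal migration sketch in TypeScript. It only uses methods documented in the changelog and exercised in the new test file further down (listContainers, getContainerById returning undefined, start/stop); the import path and the wrapping main() function are illustrative assumptions.

```typescript
import * as docker from '@apiclient.xyz/docker';

async function main() {
  // 2.0.0 renamed the constructor option dockerSockPath -> socketPath; defaults are used here
  const host = new docker.DockerHost({});
  await host.start();

  // 4.0.0/5.0.0: array-returning get*() methods are now list*()
  const containers = await host.listContainers(); // was getContainers()
  console.log(`found ${containers.length} containers`);

  // 5.0.1/5.0.2: getContainerById() returns undefined for unknown IDs instead of an invalid object
  const maybeContainer = await host.getContainerById('does-not-exist');
  if (!maybeContainer) {
    console.log('container not found');
  }

  await host.stop();
}

main().catch(console.error);
```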
@@ -1,6 +1,6 @@
 {
   "npmdocker": {
-    "baseImage": "hosttoday/ht-docker-node:npmci",
+    "baseImage": "host.today/ht-docker-node:npmci",
     "command": "(ls -a && rm -r node_modules && yarn global add npmts && yarn install && npmts)",
     "dockerSock": true
   },
@@ -12,11 +12,11 @@
   "gitzone": {
     "projectType": "npm",
     "module": {
-      "githost": "gitlab.com",
+      "githost": "code.foss.global",
-      "gitscope": "mojoio",
+      "gitscope": "apiclient.xyz",
       "gitrepo": "docker",
       "description": "Provides easy communication with Docker remote API from Node.js, with TypeScript support.",
-      "npmPackagename": "@mojoio/docker",
+      "npmPackagename": "@apiclient.xyz/docker",
       "license": "MIT",
       "keywords": [
         "Docker",
54  package.json
@@ -1,19 +1,19 @@
 {
   "name": "@apiclient.xyz/docker",
-  "version": "1.2.0",
+  "version": "5.0.2",
   "description": "Provides easy communication with Docker remote API from Node.js, with TypeScript support.",
   "private": false,
   "main": "dist_ts/index.js",
   "typings": "dist_ts/index.d.ts",
   "type": "module",
   "scripts": {
-    "test": "(tstest test/ --web)",
+    "test": "(tstest test/ --verbose --logfile --timeout 300)",
     "build": "(tsbuild --web --allowimplicitany)",
     "buildDocs": "tsdoc"
   },
   "repository": {
     "type": "git",
-    "url": "git+https://gitlab.com/mojoio/docker.git"
+    "url": "https://code.foss.global/apiclient.xyz/docker.git"
   },
   "keywords": [
     "Docker",
@@ -29,33 +29,33 @@
   "author": "Lossless GmbH",
   "license": "MIT",
   "bugs": {
-    "url": "https://gitlab.com/mojoio/docker/issues"
+    "url": "https://code.foss.global/apiclient.xyz/docker/issues"
   },
-  "homepage": "https://gitlab.com/mojoio/docker#readme",
+  "homepage": "https://code.foss.global/apiclient.xyz/docker#readme",
   "dependencies": {
-    "@push.rocks/lik": "^6.0.15",
+    "@push.rocks/lik": "^6.2.2",
-    "@push.rocks/smartarchive": "^4.0.36",
+    "@push.rocks/smartarchive": "^4.2.2",
-    "@push.rocks/smartbucket": "^3.0.10",
+    "@push.rocks/smartbucket": "^3.3.10",
-    "@push.rocks/smartfile": "^11.0.20",
+    "@push.rocks/smartfile": "^11.2.7",
-    "@push.rocks/smartjson": "^5.0.20",
+    "@push.rocks/smartjson": "^5.2.0",
-    "@push.rocks/smartlog": "^3.0.7",
+    "@push.rocks/smartlog": "^3.1.10",
-    "@push.rocks/smartnetwork": "^3.0.0",
+    "@push.rocks/smartnetwork": "^4.4.0",
-    "@push.rocks/smartpath": "^5.0.18",
+    "@push.rocks/smartpath": "^6.0.0",
-    "@push.rocks/smartpromise": "^4.0.3",
+    "@push.rocks/smartpromise": "^4.2.3",
-    "@push.rocks/smartrequest": "^2.0.22",
+    "@push.rocks/smartrequest": "^5.0.1",
-    "@push.rocks/smartstream": "^3.0.44",
+    "@push.rocks/smartstream": "^3.2.5",
-    "@push.rocks/smartstring": "^4.0.15",
+    "@push.rocks/smartstring": "^4.1.0",
     "@push.rocks/smartunique": "^3.0.9",
     "@push.rocks/smartversion": "^3.0.5",
-    "@tsclass/tsclass": "^4.0.54",
+    "@tsclass/tsclass": "^9.3.0",
-    "rxjs": "^7.5.7"
+    "rxjs": "^7.8.2"
   },
   "devDependencies": {
-    "@git.zone/tsbuild": "^2.1.80",
+    "@git.zone/tsbuild": "^3.1.0",
-    "@git.zone/tsrun": "^1.2.12",
+    "@git.zone/tsrun": "^2.0.0",
-    "@git.zone/tstest": "^1.0.90",
+    "@git.zone/tstest": "^2.8.2",
-    "@push.rocks/tapbundle": "^5.0.23",
+    "@push.rocks/qenv": "^6.1.3",
-    "@types/node": "20.14.2"
+    "@types/node": "22.7.5"
   },
   "files": [
     "ts/**/*",
@@ -71,5 +71,9 @@
   ],
   "browserslist": [
     "last 1 chrome versions"
-  ]
+  ],
+  "packageManager": "pnpm@10.14.0+sha512.ad27a79641b49c3e481a16a805baa71817a04bbe06a38d17e60e2eaee83f6a146c6a688125f5792e48dd5ba30e7da52a5cda4c3992b9ccf333f9ce223af84748",
+  "pnpm": {
+    "overrides": {}
+  }
 }
8292  pnpm-lock.yaml  (generated)
File diff suppressed because it is too large
6  qenv.yml  (Normal file)
@@ -0,0 +1,6 @@
required:
  - S3_ENDPOINT
  - S3_ACCESSKEY
  - S3_ACCESSSECRET
  - S3_BUCKET
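The new qenv.yml only declares which secrets the test suite expects. A short hedged sketch of how they are consumed, with the Qenv constructor arguments and getEnvVarOnDemand calls copied from test/test.nonci.node+deno.ts further down; the ./.nogit/ fallback directory is that test's convention, and the loadS3Config wrapper is illustrative:

```typescript
import { Qenv } from '@push.rocks/qenv';

async function loadS3Config() {
  // qenv.yml sits in the project root; local secret values are read from ./.nogit/
  const qenv = new Qenv('./', './.nogit/');
  return {
    endpoint: await qenv.getEnvVarOnDemand('S3_ENDPOINT'),
    accessKey: await qenv.getEnvVarOnDemand('S3_ACCESSKEY'),
    accessSecret: await qenv.getEnvVarOnDemand('S3_ACCESSSECRET'),
    bucketName: await qenv.getEnvVarOnDemand('S3_BUCKET'),
  };
}
```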
194  readme.hints.md  (Normal file)
@@ -0,0 +1,194 @@
# Docker Module - Development Hints

## getContainerById() Bug Fix (2025-11-24 - v5.0.1)

### Problem
The `getContainerById()` method had a critical bug where it would create a DockerContainer object from Docker API error responses when a container didn't exist.

**Symptoms:**
- Calling `docker.getContainerById('invalid-id')` returned a DockerContainer object with `{ message: "No such container: invalid-id" }`
- Calling `.logs()` on this invalid container returned "[object Object]" instead of logs or throwing an error
- No way to detect the error state without checking for a `.message` property

**Root Cause:**
The `DockerContainer._fromId()` method made a direct API call to `/containers/{id}/json` and blindly passed `response.body` to the constructor, even when the API returned a 404 error response.

### Solution
Changed `DockerContainer._fromId()` to use the **list+filter pattern**, matching the behavior of all other resource getter methods (DockerImage, DockerNetwork, DockerService, DockerSecret):

```typescript
// Before (buggy):
public static async _fromId(dockerHostArg: DockerHost, containerId: string): Promise<DockerContainer> {
  const response = await dockerHostArg.request('GET', `/containers/${containerId}/json`);
  return new DockerContainer(dockerHostArg, response.body); // Creates invalid object from error!
}

// After (fixed):
public static async _fromId(dockerHostArg: DockerHost, containerId: string): Promise<DockerContainer | undefined> {
  const containers = await this._list(dockerHostArg);
  return containers.find((container) => container.Id === containerId); // Returns undefined if not found
}
```

**Benefits:**
- 100% consistent with all other resource classes
- Type-safe return signature: `Promise<DockerContainer | undefined>`
- Cannot create invalid objects - `.find()` naturally returns undefined
- Users can now properly check for non-existent containers

**Usage:**
```typescript
const container = await docker.getContainerById('abc123');
if (container) {
  const logs = await container.logs();
  console.log(logs);
} else {
  console.log('Container not found');
}
```

## OOP Refactoring - Clean Architecture (2025-11-24)

### Architecture Changes
The module has been restructured to follow a clean OOP Facade pattern:
- **DockerHost** is now the single entry point for all Docker operations
- All resource classes extend abstract `DockerResource` base class
- Static methods are prefixed with `_` to indicate internal use
- Public API is exclusively through DockerHost methods

### Key Changes

**1. Factory Pattern**
- All resource creation/retrieval goes through DockerHost:
```typescript
// Old (deprecated):
const container = await DockerContainer.getContainers(dockerHost);
const network = await DockerNetwork.createNetwork(dockerHost, descriptor);

// New (clean API):
const containers = await dockerHost.listContainers();
const network = await dockerHost.createNetwork(descriptor);
```

**2. Container Management Methods Added**
The DockerContainer class now has full CRUD and streaming operations:

**Lifecycle:**
- `container.start()` - Start container
- `container.stop(options?)` - Stop container
- `container.remove(options?)` - Remove container
- `container.refresh()` - Reload state

**Information:**
- `container.inspect()` - Get detailed info
- `container.logs(options)` - Get logs as string (one-shot)
- `container.stats(options)` - Get stats

**Streaming & Interactive:**
- `container.streamLogs(options)` - Stream logs continuously (follow mode)
- `container.attach(options)` - Attach to main process (PID 1) with bidirectional stream
- `container.exec(command, options)` - Execute commands in container interactively

**Example - Stream Logs:**
```typescript
const container = await dockerHost.getContainerById('abc123');
const logStream = await container.streamLogs({ timestamps: true });

logStream.on('data', (chunk) => {
  console.log(chunk.toString());
});
```

**Example - Attach to Container:**
```typescript
const { stream, close } = await container.attach({
  stdin: true,
  stdout: true,
  stderr: true
});

// Pipe to/from process
process.stdin.pipe(stream);
stream.pipe(process.stdout);

// Later: detach
await close();
```

**Example - Execute Command:**
```typescript
const { stream, close } = await container.exec('ls -la /app', {
  tty: true
});

stream.on('data', (chunk) => {
  console.log(chunk.toString());
});

stream.on('end', async () => {
  await close();
});
```

**3. DockerResource Base Class**
All resource classes now extend `DockerResource`:
- Consistent `dockerHost` property (not `dockerHostRef`)
- Required `refresh()` method
- Standardized constructor pattern

**4. ImageStore Encapsulation**
- `dockerHost.imageStore` is now private
- Use `dockerHost.storeImage(name, stream)` instead
- Use `dockerHost.retrieveImage(name)` instead

**5. Creation Descriptors Support Both Primitives and Instances**
Interfaces now accept both strings and class instances:
```typescript
// Both work:
await dockerHost.createService({
  image: 'nginx:latest',       // String
  networks: ['my-network'],    // String array
  secrets: ['my-secret']       // String array
});

await dockerHost.createService({
  image: imageInstance,        // DockerImage instance
  networks: [networkInstance], // DockerNetwork array
  secrets: [secretInstance]    // DockerSecret array
});
```

### Migration Guide
Replace all static method calls with dockerHost methods:
- `DockerContainer.getContainers(host)` → `dockerHost.listContainers()`
- `DockerImage.createFromRegistry(host, opts)` → `dockerHost.createImageFromRegistry(opts)`
- `DockerService.createService(host, desc)` → `dockerHost.createService(desc)`
- `dockerHost.imageStore.storeImage(...)` → `dockerHost.storeImage(...)`

## smartrequest v5+ Migration (2025-11-17)

### Breaking Change
smartrequest v5.0.0+ returns web `ReadableStream` objects (Web Streams API) instead of Node.js streams.

### Solution Implemented
All streaming methods now convert web ReadableStreams to Node.js streams using:
```typescript
plugins.smartstream.nodewebhelpers.convertWebReadableToNodeReadable(webStream)
```

### Files Modified
- `ts/classes.host.ts`:
  - `requestStreaming()` - Converts web stream to Node.js stream before returning
  - `getEventObservable()` - Works with converted Node.js stream

- `ts/classes.image.ts`:
  - `createFromTarStream()` - Uses converted Node.js stream for event handling
  - `exportToTarStream()` - Uses converted Node.js stream for backpressure management

### Testing
- Build: All 11 type errors resolved
- Tests: Node.js tests pass (DockerHost, DockerContainer, DockerImage, DockerImageStore)

### Notes
- The conversion maintains backward compatibility with existing code expecting Node.js stream methods (`.on()`, `.emit()`, `.pause()`, `.resume()`)
- smartstream's `nodewebhelpers` module provides bidirectional conversion utilities between web and Node.js streams
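The conversion helper named in readme.hints.md above can also be exercised in isolation. A minimal sketch, assuming @push.rocks/smartstream exposes the nodewebhelpers namespace exactly as the hints reference it; the wrapper function name is illustrative:

```typescript
import * as smartstream from '@push.rocks/smartstream';
import type { Readable } from 'node:stream';

// Wrap a web ReadableStream (as returned by smartrequest v5 response.stream())
// into a Node.js Readable so existing .on()/.pause()/.resume() callers keep working.
function toNodeReadable(webStream: ReadableStream<Uint8Array>): Readable {
  return smartstream.nodewebhelpers.convertWebReadableToNodeReadable(webStream);
}
```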
40  test-stream.js  (Normal file)
@@ -0,0 +1,40 @@
const { SmartRequest } = require('@push.rocks/smartrequest');

async function test() {
  try {
    const response = await SmartRequest.create()
      .url('http://unix:/run/user/1000/docker.sock:/images/hello-world:latest/get')
      .header('Host', 'docker.sock')
      .get();

    console.log('Response status:', response.status);
    console.log('Response type:', typeof response);

    const stream = response.streamNode();
    console.log('Stream type:', typeof stream);
    console.log('Has on method:', typeof stream.on);

    if (stream) {
      let chunks = 0;
      stream.on('data', (chunk) => {
        chunks++;
        if (chunks <= 3) console.log('Got chunk', chunks, chunk.length);
      });
      stream.on('end', () => {
        console.log('Stream ended, total chunks:', chunks);
        process.exit(0);
      });
      stream.on('error', (err) => {
        console.error('Stream error:', err);
        process.exit(1);
      });
    } else {
      console.log('No stream available');
    }
  } catch (error) {
    console.error('Error:', error);
    process.exit(1);
  }
}

test();
46  test-stream.mjs  (Normal file)
@@ -0,0 +1,46 @@
import { SmartRequest } from '@push.rocks/smartrequest';

async function test() {
  try {
    const response = await SmartRequest.create()
      .url('http://unix:/run/user/1000/docker.sock:/images/hello-world:latest/get')
      .header('Host', 'docker.sock')
      .get();

    console.log('Response status:', response.status);
    console.log('Response type:', typeof response);

    const stream = response.streamNode();
    console.log('Stream type:', typeof stream);
    console.log('Has on method:', typeof stream.on);

    if (stream) {
      let chunks = 0;
      stream.on('data', (chunk) => {
        chunks++;
        if (chunks <= 3) console.log('Got chunk', chunks, chunk.length);
      });
      stream.on('end', () => {
        console.log('Stream ended, total chunks:', chunks);
        process.exit(0);
      });
      stream.on('error', (err) => {
        console.error('Stream error:', err);
        process.exit(1);
      });

      // Set a timeout in case stream doesn't end
      setTimeout(() => {
        console.log('Timeout after 5 seconds');
        process.exit(1);
      }, 5000);
    } else {
      console.log('No stream available');
    }
  } catch (error) {
    console.error('Error:', error);
    process.exit(1);
  }
}

test();
325  test/test.nonci.node+deno.ts  (Normal file)
@@ -0,0 +1,325 @@
import { expect, tap } from '@git.zone/tstest/tapbundle';
import { Qenv } from '@push.rocks/qenv';

const testQenv = new Qenv('./', './.nogit/');

import * as plugins from '../ts/plugins.js';
import * as paths from '../ts/paths.js';

import * as docker from '../ts/index.js';

let testDockerHost: docker.DockerHost;

tap.test('should create a new Dockersock instance', async () => {
  testDockerHost = new docker.DockerHost({});
  await testDockerHost.start();
  return expect(testDockerHost).toBeInstanceOf(docker.DockerHost);
});

tap.test('should create a docker swarm', async () => {
  await testDockerHost.activateSwarm();
});

// Containers
tap.test('should list containers', async () => {
  const containers = await testDockerHost.listContainers();
  console.log(containers);
});

// Networks
tap.test('should list networks', async () => {
  const networks = await testDockerHost.listNetworks();
  console.log(networks);
});

tap.test('should create a network', async () => {
  const newNetwork = await testDockerHost.createNetwork({
    Name: 'webgateway',
  });
  expect(newNetwork).toBeInstanceOf(docker.DockerNetwork);
  expect(newNetwork.Name).toEqual('webgateway');
});

tap.test('should remove a network', async () => {
  const webgateway = await testDockerHost.getNetworkByName('webgateway');
  await webgateway.remove();
});

// Images
tap.test('should pull an image from imagetag', async () => {
  const image = await testDockerHost.createImageFromRegistry({
    imageUrl: 'hosttoday/ht-docker-node',
    imageTag: 'alpine',
  });
  expect(image).toBeInstanceOf(docker.DockerImage);
  console.log(image);
});

tap.test('should return a change Observable', async (tools) => {
  const testObservable = await testDockerHost.getEventObservable();
  const subscription = testObservable.subscribe((changeObject) => {
    console.log(changeObject);
  });
  await tools.delayFor(2000);
  subscription.unsubscribe();
});

// SECRETS
tap.test('should create a secret', async () => {
  const mySecret = await testDockerHost.createSecret({
    name: 'testSecret',
    version: '1.0.3',
    contentArg: `{ "hi": "wow"}`,
    labels: {},
  });
  console.log(mySecret);
});

tap.test('should remove a secret by name', async () => {
  const mySecret = await testDockerHost.getSecretByName('testSecret');
  await mySecret.remove();
});

// SERVICES
tap.test('should activate swarm mode', async () => {
  await testDockerHost.activateSwarm();
});

tap.test('should list all services', async (tools) => {
  const services = await testDockerHost.listServices();
  console.log(services);
});

tap.test('should create a service', async () => {
  const testNetwork = await testDockerHost.createNetwork({
    Name: 'testNetwork',
  });
  const testSecret = await testDockerHost.createSecret({
    name: 'testSecret',
    version: '0.0.1',
    labels: {},
    contentArg: '{"hi": "wow"}',
  });
  const testImage = await testDockerHost.createImageFromRegistry({
    imageUrl: 'code.foss.global/host.today/ht-docker-node:latest',
  });
  const testService = await testDockerHost.createService({
    image: testImage,
    labels: {},
    name: 'testService',
    networks: [testNetwork],
    networkAlias: 'testService',
    secrets: [testSecret],
    ports: ['3000:80'],
  });

  await testService.remove();
  await testNetwork.remove();
  await testSecret.remove();
});

tap.test('should export images', async (toolsArg) => {
  const done = toolsArg.defer();
  const testImage = await testDockerHost.createImageFromRegistry({
    imageUrl: 'code.foss.global/host.today/ht-docker-node:latest',
  });
  const fsWriteStream = plugins.smartfile.fsStream.createWriteStream(
    plugins.path.join(paths.nogitDir, 'testimage.tar'),
  );
  const exportStream = await testImage.exportToTarStream();
  exportStream.pipe(fsWriteStream).on('finish', () => {
    done.resolve();
  });
  await done.promise;
});

tap.test('should import images', async () => {
  const fsReadStream = plugins.smartfile.fsStream.createReadStream(
    plugins.path.join(paths.nogitDir, 'testimage.tar'),
  );
  const importedImage = await testDockerHost.createImageFromTarStream(
    fsReadStream,
    {
      imageUrl: 'code.foss.global/host.today/ht-docker-node:latest',
    },
  );
  expect(importedImage).toBeInstanceOf(docker.DockerImage);
});

tap.test('should expose a working DockerImageStore', async () => {
  // lets first add am s3 target
  const s3Descriptor = {
    endpoint: await testQenv.getEnvVarOnDemand('S3_ENDPOINT'),
    accessKey: await testQenv.getEnvVarOnDemand('S3_ACCESSKEY'),
    accessSecret: await testQenv.getEnvVarOnDemand('S3_ACCESSSECRET'),
    bucketName: await testQenv.getEnvVarOnDemand('S3_BUCKET'),
  };
  await testDockerHost.addS3Storage(s3Descriptor);

  // Use the new public API instead of direct imageStore access
  await testDockerHost.storeImage(
    'hello2',
    plugins.smartfile.fsStream.createReadStream(
      plugins.path.join(paths.nogitDir, 'testimage.tar'),
    ),
  );
});

// CONTAINER GETTERS
tap.test('should return undefined for non-existent container', async () => {
  const container = await testDockerHost.getContainerById('invalid-container-id-12345');
  expect(container).toEqual(undefined);
});

tap.test('should return container for valid container ID', async () => {
  const containers = await testDockerHost.listContainers();
  if (containers.length > 0) {
    const validId = containers[0].Id;
    const container = await testDockerHost.getContainerById(validId);
    expect(container).toBeInstanceOf(docker.DockerContainer);
    expect(container?.Id).toEqual(validId);
  }
});

// CONTAINER STREAMING FEATURES
let testContainer: docker.DockerContainer;

tap.test('should get an existing container for streaming tests', async () => {
  const containers = await testDockerHost.listContainers();

  // Use the first running container we find
  testContainer = containers.find((c) => c.State === 'running');

  if (!testContainer) {
    throw new Error('No running containers found for streaming tests');
  }

  expect(testContainer).toBeInstanceOf(docker.DockerContainer);
  console.log('Using existing container for tests:', testContainer.Names[0], testContainer.Id);
});

tap.test('should stream container logs', async (tools) => {
  const done = tools.defer();
  const logStream = await testContainer.streamLogs({
    stdout: true,
    stderr: true,
    timestamps: true,
  });

  let receivedData = false;

  logStream.on('data', (chunk) => {
    console.log('Received log chunk:', chunk.toString().slice(0, 100));
    receivedData = true;
  });

  logStream.on('error', (error) => {
    console.error('Stream error:', error);
    done.resolve();
  });

  // Wait for 2 seconds to collect logs, then close
  await tools.delayFor(2000);
  logStream.destroy();
  done.resolve();

  await done.promise;
  console.log('Log streaming test completed. Received data:', receivedData);
});

tap.test('should get container logs (one-shot)', async () => {
  const logs = await testContainer.logs({
    stdout: true,
    stderr: true,
    tail: 10,
  });

  expect(typeof logs).toEqual('string');
  console.log('Container logs (last 10 lines):', logs.slice(0, 200));
});

tap.test('should execute command in container', async (tools) => {
  const done = tools.defer();
  const { stream, close } = await testContainer.exec('echo "Hello from exec"', {
    tty: false,
    attachStdout: true,
    attachStderr: true,
  });

  let output = '';

  stream.on('data', (chunk) => {
    output += chunk.toString();
    console.log('Exec output:', chunk.toString());
  });

  stream.on('end', async () => {
    await close();
    console.log('Exec completed. Full output:', output);
    done.resolve();
  });

  stream.on('error', async (error) => {
    console.error('Exec error:', error);
    await close();
    done.resolve();
  });

  await done.promise;
  expect(output.length).toBeGreaterThan(0);
});

tap.test('should attach to container', async (tools) => {
  const done = tools.defer();
  const { stream, close } = await testContainer.attach({
    stream: true,
    stdout: true,
    stderr: true,
    stdin: false,
  });

  let receivedData = false;

  stream.on('data', (chunk) => {
    console.log('Attach received:', chunk.toString().slice(0, 100));
    receivedData = true;
  });

  stream.on('error', async (error) => {
    console.error('Attach error:', error);
    await close();
    done.resolve();
  });

  // Monitor for 2 seconds then detach
  await tools.delayFor(2000);
  await close();
  done.resolve();

  await done.promise;
  console.log('Attach test completed. Received data:', receivedData);
});

tap.test('should get container stats', async () => {
  const stats = await testContainer.stats({ stream: false });
  expect(stats).toBeInstanceOf(Object);
  console.log('Container stats keys:', Object.keys(stats));
});

tap.test('should inspect container', async () => {
  const inspection = await testContainer.inspect();
  expect(inspection).toBeInstanceOf(Object);
  expect(inspection.Id).toEqual(testContainer.Id);
  console.log('Container state:', inspection.State?.Status);
});

tap.test('should complete container tests', async () => {
  // Using existing container, no cleanup needed
  console.log('Container streaming tests completed');
});

tap.test('cleanup', async () => {
  await testDockerHost.stop();
});

export default tap.start();
@@ -1,156 +0,0 @@
import { expect, tap } from '@push.rocks/tapbundle';

import * as plugins from '../ts/plugins.js';
import * as paths from '../ts/paths.js';

import * as docker from '../ts/index.js';

let testDockerHost: docker.DockerHost;

tap.test('should create a new Dockersock instance', async () => {
  testDockerHost = new docker.DockerHost({});
  await testDockerHost.start();
  return expect(testDockerHost).toBeInstanceOf(docker.DockerHost);
});

tap.test('should create a docker swarm', async () => {
  await testDockerHost.activateSwarm();
});

// Containers
tap.test('should list containers', async () => {
  const containers = await testDockerHost.getContainers();
  console.log(containers);
});

// Networks
tap.test('should list networks', async () => {
  const networks = await testDockerHost.getNetworks();
  console.log(networks);
});

tap.test('should create a network', async () => {
  const newNetwork = await docker.DockerNetwork.createNetwork(testDockerHost, {
    Name: 'webgateway',
  });
  expect(newNetwork).toBeInstanceOf(docker.DockerNetwork);
  expect(newNetwork.Name).toEqual('webgateway');
});

tap.test('should remove a network', async () => {
  const webgateway = await docker.DockerNetwork.getNetworkByName(testDockerHost, 'webgateway');
  await webgateway.remove();
});

// Images
tap.test('should pull an image from imagetag', async () => {
  const image = await docker.DockerImage.createFromRegistry(testDockerHost, {
    creationObject: {
      imageUrl: 'hosttoday/ht-docker-node',
      imageTag: 'alpine',
    },
  });
  expect(image).toBeInstanceOf(docker.DockerImage);
  console.log(image);
});

tap.test('should return a change Observable', async (tools) => {
  const testObservable = await testDockerHost.getEventObservable();
  const subscription = testObservable.subscribe((changeObject) => {
    console.log(changeObject);
  });
  await tools.delayFor(2000);
  subscription.unsubscribe();
});

// SECRETS
tap.test('should create a secret', async () => {
  const mySecret = await docker.DockerSecret.createSecret(testDockerHost, {
    name: 'testSecret',
    version: '1.0.3',
    contentArg: `{ "hi": "wow"}`,
    labels: {},
  });
  console.log(mySecret);
});

tap.test('should remove a secret by name', async () => {
  const mySecret = await docker.DockerSecret.getSecretByName(testDockerHost, 'testSecret');
  await mySecret.remove();
});

// SERVICES
tap.test('should activate swarm mode', async () => {
  await testDockerHost.activateSwarm();
});

tap.test('should list all services', async (tools) => {
  const services = await testDockerHost.getServices();
  console.log(services);
});

tap.test('should create a service', async () => {
  const testNetwork = await docker.DockerNetwork.createNetwork(testDockerHost, {
    Name: 'testNetwork',
  });
  const testSecret = await docker.DockerSecret.createSecret(testDockerHost, {
    name: 'testSecret',
    version: '0.0.1',
    labels: {},
    contentArg: '{"hi": "wow"}',
  });
  const testImage = await docker.DockerImage.createFromRegistry(testDockerHost, {
    creationObject: {
      imageUrl: 'code.foss.global/host.today/ht-docker-node:latest',
    }
  });
  const testService = await docker.DockerService.createService(testDockerHost, {
    image: testImage,
    labels: {},
    name: 'testService',
    networks: [testNetwork],
    networkAlias: 'testService',
    secrets: [testSecret],
    ports: ['3000:80'],
  });

  await testService.remove();
  await testNetwork.remove();
  await testSecret.remove();
});

tap.skip.test('should export images', async (toolsArg) => {
  const done = toolsArg.defer();
  const testImage = await docker.DockerImage.createFromRegistry(testDockerHost, {
    creationObject: {
      imageUrl: 'code.foss.global/host.today/ht-docker-node:latest',
    }
  });
  const fsWriteStream = plugins.smartfile.fsStream.createWriteStream(
    plugins.path.join(paths.nogitDir, 'testimage.tar')
  );
  const exportStream = await testImage.exportToTarStream();
  exportStream.pipe(fsWriteStream).on('finish', () => {
    done.resolve();
  });
  await done.promise;
});

tap.skip.test('should import images', async (toolsArg) => {
  const done = toolsArg.defer();
  const fsReadStream = plugins.smartfile.fsStream.createReadStream(
    plugins.path.join(paths.nogitDir, 'testimage.tar')
  );
  await docker.DockerImage.createFromTarStream(testDockerHost, {
    tarStream: fsReadStream,
    creationObject: {
      imageUrl: 'code.foss.global/host.today/ht-docker-node:latest',
    }
  })
});

tap.test('should expose a working DockerImageStore', async () => {
  await testDockerHost.imageStore.storeImage('hello', plugins.smartfile.fsStream.createReadStream(plugins.path.join(paths.nogitDir, 'testimage.tar')));
})

export default tap.start();

@@ -1,8 +1,8 @@
 /**
- * autocreated commitinfo by @pushrocks/commitinfo
+ * autocreated commitinfo by @push.rocks/commitinfo
  */
 export const commitinfo = {
   name: '@apiclient.xyz/docker',
-  version: '1.2.0',
+  version: '5.0.2',
   description: 'Provides easy communication with Docker remote API from Node.js, with TypeScript support.'
 }
ts/classes.base.ts (new file, 27 lines)
@@ -0,0 +1,27 @@
import type { DockerHost } from './classes.host.js';

/**
 * Abstract base class for all Docker resources.
 * Provides standardized patterns for resource management and lifecycle.
 */
export abstract class DockerResource {
  /**
   * Reference to the DockerHost that manages this resource.
   * All API operations go through this host instance.
   */
  protected readonly dockerHost: DockerHost;

  /**
   * Creates a new Docker resource instance.
   * @param dockerHost The DockerHost instance that manages this resource
   */
  constructor(dockerHost: DockerHost) {
    this.dockerHost = dockerHost;
  }

  /**
   * Refreshes this resource's state from the Docker daemon.
   * Implementations should fetch current data and update instance properties.
   */
  abstract refresh(): Promise<void>;
}
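
For orientation, a minimal sketch of how a concrete resource class is expected to build on this base; the volume class, its endpoint, and its field names are illustrative assumptions, not code from this changeset.

import { DockerResource } from './classes.base.js';
import type { DockerHost } from './classes.host.js';

// Hypothetical resource, used only to illustrate the DockerResource pattern.
class DockerVolumeSketch extends DockerResource {
  public Name: string;

  constructor(dockerHostArg: DockerHost, volumeObjectArg: any) {
    super(dockerHostArg); // the base class stores the host on this.dockerHost
    Object.assign(this, volumeObjectArg);
  }

  // Each subclass fulfils the abstract contract by re-fetching its own state.
  public async refresh(): Promise<void> {
    const response = await this.dockerHost.request('GET', `/volumes/${this.Name}`);
    Object.assign(this, response.body);
  }
}
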
@@ -2,19 +2,23 @@ import * as plugins from './plugins.js';
 import * as interfaces from './interfaces/index.js';
 
 import { DockerHost } from './classes.host.js';
+import { DockerResource } from './classes.base.js';
 import { logger } from './logger.js';
 
-export class DockerContainer {
-  // STATIC
+export class DockerContainer extends DockerResource {
+  // STATIC (Internal - prefixed with _ to indicate internal use)
 
   /**
-   * get all containers
+   * Internal: Get all containers
+   * Public API: Use dockerHost.listContainers() instead
    */
-  public static async getContainers(dockerHostArg: DockerHost): Promise<DockerContainer[]> {
+  public static async _list(
+    dockerHostArg: DockerHost,
+  ): Promise<DockerContainer[]> {
     const result: DockerContainer[] = [];
     const response = await dockerHostArg.request('GET', '/containers/json');
 
-    // TODO: Think about getting the config by inpsecting the container
+    // TODO: Think about getting the config by inspecting the container
     for (const containerResult of response.body) {
       result.push(new DockerContainer(dockerHostArg, containerResult));
     }
@@ -22,43 +26,50 @@ export class DockerContainer {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* gets an container by Id
|
* Internal: Get a container by ID
|
||||||
* @param containerId
|
* Public API: Use dockerHost.getContainerById(id) instead
|
||||||
|
* Returns undefined if container does not exist
|
||||||
*/
|
*/
|
||||||
public static async getContainerById(containerId: string) {
|
public static async _fromId(
|
||||||
// TODO: implement get container by id
|
dockerHostArg: DockerHost,
|
||||||
|
containerId: string,
|
||||||
|
): Promise<DockerContainer | undefined> {
|
||||||
|
const containers = await this._list(dockerHostArg);
|
||||||
|
return containers.find((container) => container.Id === containerId);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* create a container
|
* Internal: Create a container
|
||||||
|
* Public API: Use dockerHost.createContainer(descriptor) instead
|
||||||
*/
|
*/
|
||||||
public static async create(
|
public static async _create(
|
||||||
dockerHost: DockerHost,
|
dockerHost: DockerHost,
|
||||||
containerCreationDescriptor: interfaces.IContainerCreationDescriptor
|
containerCreationDescriptor: interfaces.IContainerCreationDescriptor,
|
||||||
) {
|
): Promise<DockerContainer> {
|
||||||
// check for unique hostname
|
// Check for unique hostname
|
||||||
const existingContainers = await DockerContainer.getContainers(dockerHost);
|
const existingContainers = await DockerContainer._list(dockerHost);
|
||||||
const sameHostNameContainer = existingContainers.find((container) => {
|
const sameHostNameContainer = existingContainers.find((container) => {
|
||||||
// TODO implement HostName Detection;
|
// TODO implement HostName Detection;
|
||||||
return false;
|
return false;
|
||||||
});
|
});
|
||||||
|
|
||||||
const response = await dockerHost.request('POST', '/containers/create', {
|
const response = await dockerHost.request('POST', '/containers/create', {
|
||||||
Hostname: containerCreationDescriptor.Hostname,
|
Hostname: containerCreationDescriptor.Hostname,
|
||||||
Domainname: containerCreationDescriptor.Domainname,
|
Domainname: containerCreationDescriptor.Domainname,
|
||||||
User: 'root',
|
User: 'root',
|
||||||
});
|
});
|
||||||
|
|
||||||
if (response.statusCode < 300) {
|
if (response.statusCode < 300) {
|
||||||
logger.log('info', 'Container created successfully');
|
logger.log('info', 'Container created successfully');
|
||||||
|
// Return the created container instance
|
||||||
|
return await DockerContainer._fromId(dockerHost, response.body.Id);
|
||||||
} else {
|
} else {
|
||||||
logger.log('error', 'There has been a problem when creating the container');
|
logger.log('error', 'There has been a problem when creating the container');
|
||||||
|
throw new Error(`Failed to create container: ${response.statusCode}`);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// INSTANCE
|
// INSTANCE PROPERTIES
|
||||||
// references
|
|
||||||
public dockerHost: DockerHost;
|
|
||||||
|
|
||||||
// properties
|
|
||||||
public Id: string;
|
public Id: string;
|
||||||
public Names: string[];
|
public Names: string[];
|
||||||
public Image: string;
|
public Image: string;
|
||||||
@@ -90,10 +101,294 @@ export class DockerContainer {
|
|||||||
};
|
};
|
||||||
};
|
};
|
||||||
public Mounts: any;
|
public Mounts: any;
|
||||||
|
|
||||||
constructor(dockerHostArg: DockerHost, dockerContainerObjectArg: any) {
|
constructor(dockerHostArg: DockerHost, dockerContainerObjectArg: any) {
|
||||||
this.dockerHost = dockerHostArg;
|
super(dockerHostArg);
|
||||||
Object.keys(dockerContainerObjectArg).forEach((keyArg) => {
|
Object.keys(dockerContainerObjectArg).forEach((keyArg) => {
|
||||||
this[keyArg] = dockerContainerObjectArg[keyArg];
|
this[keyArg] = dockerContainerObjectArg[keyArg];
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// INSTANCE METHODS
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Refreshes this container's state from the Docker daemon
|
||||||
|
*/
|
||||||
|
public async refresh(): Promise<void> {
|
||||||
|
const updated = await DockerContainer._fromId(this.dockerHost, this.Id);
|
||||||
|
Object.assign(this, updated);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Inspects the container and returns detailed information
|
||||||
|
*/
|
||||||
|
public async inspect(): Promise<any> {
|
||||||
|
const response = await this.dockerHost.request('GET', `/containers/${this.Id}/json`);
|
||||||
|
// Update instance with fresh data
|
||||||
|
Object.assign(this, response.body);
|
||||||
|
return response.body;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Starts the container
|
||||||
|
*/
|
||||||
|
public async start(): Promise<void> {
|
||||||
|
const response = await this.dockerHost.request('POST', `/containers/${this.Id}/start`);
|
||||||
|
if (response.statusCode >= 300) {
|
||||||
|
throw new Error(`Failed to start container: ${response.statusCode}`);
|
||||||
|
}
|
||||||
|
await this.refresh();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Stops the container
|
||||||
|
* @param options Options for stopping (e.g., timeout in seconds)
|
||||||
|
*/
|
||||||
|
public async stop(options?: { t?: number }): Promise<void> {
|
||||||
|
const queryParams = options?.t ? `?t=${options.t}` : '';
|
||||||
|
const response = await this.dockerHost.request('POST', `/containers/${this.Id}/stop${queryParams}`);
|
||||||
|
if (response.statusCode >= 300) {
|
||||||
|
throw new Error(`Failed to stop container: ${response.statusCode}`);
|
||||||
|
}
|
||||||
|
await this.refresh();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Removes the container
|
||||||
|
* @param options Options for removal (force, remove volumes, remove link)
|
||||||
|
*/
|
||||||
|
public async remove(options?: { force?: boolean; v?: boolean; link?: boolean }): Promise<void> {
|
||||||
|
const queryParams = new URLSearchParams();
|
||||||
|
if (options?.force) queryParams.append('force', '1');
|
||||||
|
if (options?.v) queryParams.append('v', '1');
|
||||||
|
if (options?.link) queryParams.append('link', '1');
|
||||||
|
|
||||||
|
const queryString = queryParams.toString();
|
||||||
|
const response = await this.dockerHost.request(
|
||||||
|
'DELETE',
|
||||||
|
`/containers/${this.Id}${queryString ? '?' + queryString : ''}`,
|
||||||
|
);
|
||||||
|
|
||||||
|
if (response.statusCode >= 300) {
|
||||||
|
throw new Error(`Failed to remove container: ${response.statusCode}`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Gets container logs
|
||||||
|
* @param options Log options (stdout, stderr, timestamps, tail, since, follow)
|
||||||
|
*/
|
||||||
|
public async logs(options?: {
|
||||||
|
stdout?: boolean;
|
||||||
|
stderr?: boolean;
|
||||||
|
timestamps?: boolean;
|
||||||
|
tail?: number | 'all';
|
||||||
|
since?: number;
|
||||||
|
follow?: boolean;
|
||||||
|
}): Promise<string> {
|
||||||
|
const queryParams = new URLSearchParams();
|
||||||
|
queryParams.append('stdout', options?.stdout !== false ? '1' : '0');
|
||||||
|
queryParams.append('stderr', options?.stderr !== false ? '1' : '0');
|
||||||
|
if (options?.timestamps) queryParams.append('timestamps', '1');
|
||||||
|
if (options?.tail) queryParams.append('tail', options.tail.toString());
|
||||||
|
if (options?.since) queryParams.append('since', options.since.toString());
|
||||||
|
if (options?.follow) queryParams.append('follow', '1');
|
||||||
|
|
||||||
|
const response = await this.dockerHost.request('GET', `/containers/${this.Id}/logs?${queryParams.toString()}`);
|
||||||
|
|
||||||
|
// Docker returns logs with a special format (8 bytes header + payload)
|
||||||
|
// For simplicity, we'll return the raw body as string
|
||||||
|
return response.body.toString();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Gets container stats
|
||||||
|
* @param options Stats options (stream for continuous stats)
|
||||||
|
*/
|
||||||
|
public async stats(options?: { stream?: boolean }): Promise<any> {
|
||||||
|
const queryParams = new URLSearchParams();
|
||||||
|
queryParams.append('stream', options?.stream ? '1' : '0');
|
||||||
|
|
||||||
|
const response = await this.dockerHost.request('GET', `/containers/${this.Id}/stats?${queryParams.toString()}`);
|
||||||
|
return response.body;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Streams container logs continuously (follow mode)
|
||||||
|
* Returns a readable stream that emits log data as it's produced
|
||||||
|
* @param options Log streaming options
|
||||||
|
*/
|
||||||
|
public async streamLogs(options?: {
|
||||||
|
stdout?: boolean;
|
||||||
|
stderr?: boolean;
|
||||||
|
timestamps?: boolean;
|
||||||
|
tail?: number | 'all';
|
||||||
|
since?: number;
|
||||||
|
}): Promise<plugins.smartstream.stream.Readable> {
|
||||||
|
const queryParams = new URLSearchParams();
|
||||||
|
queryParams.append('stdout', options?.stdout !== false ? '1' : '0');
|
||||||
|
queryParams.append('stderr', options?.stderr !== false ? '1' : '0');
|
||||||
|
queryParams.append('follow', '1'); // Always follow for streaming
|
||||||
|
if (options?.timestamps) queryParams.append('timestamps', '1');
|
||||||
|
if (options?.tail) queryParams.append('tail', options.tail.toString());
|
||||||
|
if (options?.since) queryParams.append('since', options.since.toString());
|
||||||
|
|
||||||
|
const response = await this.dockerHost.requestStreaming(
|
||||||
|
'GET',
|
||||||
|
`/containers/${this.Id}/logs?${queryParams.toString()}`
|
||||||
|
);
|
||||||
|
|
||||||
|
// requestStreaming returns Node.js stream
|
||||||
|
return response as plugins.smartstream.stream.Readable;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Attaches to the container's main process (PID 1)
|
||||||
|
* Returns a duplex stream for bidirectional communication
|
||||||
|
* @param options Attach options
|
||||||
|
*/
|
||||||
|
public async attach(options?: {
|
||||||
|
stream?: boolean;
|
||||||
|
stdin?: boolean;
|
||||||
|
stdout?: boolean;
|
||||||
|
stderr?: boolean;
|
||||||
|
logs?: boolean;
|
||||||
|
}): Promise<{
|
||||||
|
stream: plugins.smartstream.stream.Duplex;
|
||||||
|
close: () => Promise<void>;
|
||||||
|
}> {
|
||||||
|
const queryParams = new URLSearchParams();
|
||||||
|
queryParams.append('stream', options?.stream !== false ? '1' : '0');
|
||||||
|
queryParams.append('stdin', options?.stdin ? '1' : '0');
|
||||||
|
queryParams.append('stdout', options?.stdout !== false ? '1' : '0');
|
||||||
|
queryParams.append('stderr', options?.stderr !== false ? '1' : '0');
|
||||||
|
if (options?.logs) queryParams.append('logs', '1');
|
||||||
|
|
||||||
|
const response = await this.dockerHost.requestStreaming(
|
||||||
|
'POST',
|
||||||
|
`/containers/${this.Id}/attach?${queryParams.toString()}`
|
||||||
|
);
|
||||||
|
|
||||||
|
// Create a duplex stream for bidirectional communication
|
||||||
|
const nodeStream = response as plugins.smartstream.stream.Readable;
|
||||||
|
|
||||||
|
// Convert to duplex by wrapping in SmartDuplex
|
||||||
|
const duplexStream = new plugins.smartstream.SmartDuplex({
|
||||||
|
writeFunction: async (chunk) => {
|
||||||
|
// Write data is sent to the container's stdin
|
||||||
|
return chunk;
|
||||||
|
},
|
||||||
|
readableObjectMode: false,
|
||||||
|
writableObjectMode: false,
|
||||||
|
});
|
||||||
|
|
||||||
|
// Pipe container output to our duplex readable side
|
||||||
|
nodeStream.on('data', (chunk) => {
|
||||||
|
duplexStream.push(chunk);
|
||||||
|
});
|
||||||
|
|
||||||
|
nodeStream.on('end', () => {
|
||||||
|
duplexStream.push(null); // Signal end of stream
|
||||||
|
});
|
||||||
|
|
||||||
|
nodeStream.on('error', (error) => {
|
||||||
|
duplexStream.destroy(error);
|
||||||
|
});
|
||||||
|
|
||||||
|
// Helper function to close the attachment
|
||||||
|
const close = async () => {
|
||||||
|
duplexStream.end();
|
||||||
|
if (nodeStream.destroy) {
|
||||||
|
nodeStream.destroy();
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
return {
|
||||||
|
stream: duplexStream,
|
||||||
|
close,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Executes a command in the container
|
||||||
|
* Returns a duplex stream for command interaction
|
||||||
|
* @param command Command to execute (string or array of strings)
|
||||||
|
* @param options Exec options
|
||||||
|
*/
|
||||||
|
public async exec(
|
||||||
|
command: string | string[],
|
||||||
|
options?: {
|
||||||
|
tty?: boolean;
|
||||||
|
attachStdin?: boolean;
|
||||||
|
attachStdout?: boolean;
|
||||||
|
attachStderr?: boolean;
|
||||||
|
env?: string[];
|
||||||
|
workingDir?: string;
|
||||||
|
user?: string;
|
||||||
|
}
|
||||||
|
): Promise<{
|
||||||
|
stream: plugins.smartstream.stream.Duplex;
|
||||||
|
close: () => Promise<void>;
|
||||||
|
}> {
|
||||||
|
// Step 1: Create exec instance
|
||||||
|
const createResponse = await this.dockerHost.request('POST', `/containers/${this.Id}/exec`, {
|
||||||
|
Cmd: typeof command === 'string' ? ['/bin/sh', '-c', command] : command,
|
||||||
|
AttachStdin: options?.attachStdin !== false,
|
||||||
|
AttachStdout: options?.attachStdout !== false,
|
||||||
|
AttachStderr: options?.attachStderr !== false,
|
||||||
|
Tty: options?.tty || false,
|
||||||
|
Env: options?.env || [],
|
||||||
|
WorkingDir: options?.workingDir,
|
||||||
|
User: options?.user,
|
||||||
|
});
|
||||||
|
|
||||||
|
const execId = createResponse.body.Id;
|
||||||
|
|
||||||
|
// Step 2: Start exec instance with streaming response
|
||||||
|
const startResponse = await this.dockerHost.requestStreaming(
|
||||||
|
'POST',
|
||||||
|
`/exec/${execId}/start`,
|
||||||
|
undefined, // no stream input
|
||||||
|
{
|
||||||
|
Detach: false,
|
||||||
|
Tty: options?.tty || false,
|
||||||
|
}
|
||||||
|
);
|
||||||
|
|
||||||
|
const nodeStream = startResponse as plugins.smartstream.stream.Readable;
|
||||||
|
|
||||||
|
// Create duplex stream for bidirectional communication
|
||||||
|
const duplexStream = new plugins.smartstream.SmartDuplex({
|
||||||
|
writeFunction: async (chunk) => {
|
||||||
|
return chunk;
|
||||||
|
},
|
||||||
|
readableObjectMode: false,
|
||||||
|
writableObjectMode: false,
|
||||||
|
});
|
||||||
|
|
||||||
|
// Pipe exec output to duplex readable side
|
||||||
|
nodeStream.on('data', (chunk) => {
|
||||||
|
duplexStream.push(chunk);
|
||||||
|
});
|
||||||
|
|
||||||
|
nodeStream.on('end', () => {
|
||||||
|
duplexStream.push(null);
|
||||||
|
});
|
||||||
|
|
||||||
|
nodeStream.on('error', (error) => {
|
||||||
|
duplexStream.destroy(error);
|
||||||
|
});
|
||||||
|
|
||||||
|
const close = async () => {
|
||||||
|
duplexStream.end();
|
||||||
|
if (nodeStream.destroy) {
|
||||||
|
nodeStream.destroy();
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
return {
|
||||||
|
stream: duplexStream,
|
||||||
|
close,
|
||||||
|
};
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
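
Taken together, the new instance methods on DockerContainer are meant to be used roughly as follows; the sketch assumes an async context, an existing dockerHost instance, and a container id, all of which are placeholders rather than values from this diff.

const container = await dockerHost.getContainerById('someContainerId');
if (container) {
  // Run a one-off command and collect its output.
  const { stream, close } = await container.exec('uname -a', { tty: false });
  stream.on('data', (chunk) => console.log('exec:', chunk.toString()));
  stream.on('end', async () => {
    await close();
  });

  // Follow the container's logs as they are produced.
  const logStream = await container.streamLogs({ tail: 50 });
  logStream.on('data', (chunk) => process.stdout.write(chunk.toString()));
}
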
@@ -1,10 +1,11 @@
 import * as plugins from './plugins.js';
 import * as paths from './paths.js';
+import * as interfaces from './interfaces/index.js';
 import { DockerContainer } from './classes.container.js';
 import { DockerNetwork } from './classes.network.js';
 import { DockerService } from './classes.service.js';
+import { DockerSecret } from './classes.secret.js';
 import { logger } from './logger.js';
-import path from 'path';
 import { DockerImageStore } from './classes.imagestore.js';
 import { DockerImage } from './classes.image.js';
 
@@ -15,7 +16,7 @@ export interface IAuthData {
 }
 
 export interface IDockerHostConstructorOptions {
-  dockerSockPath?: string;
+  socketPath?: string;
   imageStoreDir?: string;
 }
 
@@ -27,7 +28,8 @@ export class DockerHost {
|
|||||||
*/
|
*/
|
||||||
public socketPath: string;
|
public socketPath: string;
|
||||||
private registryToken: string = '';
|
private registryToken: string = '';
|
||||||
public imageStore: DockerImageStore;
|
private imageStore: DockerImageStore; // Now private - use storeImage/retrieveImage instead
|
||||||
|
public smartBucket: plugins.smartbucket.SmartBucket;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* the constructor to instantiate a new docker sock instance
|
* the constructor to instantiate a new docker sock instance
|
||||||
@@ -36,13 +38,16 @@ export class DockerHost {
|
|||||||
constructor(optionsArg: IDockerHostConstructorOptions) {
|
constructor(optionsArg: IDockerHostConstructorOptions) {
|
||||||
this.options = {
|
this.options = {
|
||||||
...{
|
...{
|
||||||
imageStoreDir: plugins.path.join(paths.nogitDir, 'temp-docker-image-store'),
|
imageStoreDir: plugins.path.join(
|
||||||
|
paths.nogitDir,
|
||||||
|
'temp-docker-image-store',
|
||||||
|
),
|
||||||
},
|
},
|
||||||
...optionsArg,
|
...optionsArg,
|
||||||
}
|
};
|
||||||
let pathToUse: string;
|
let pathToUse: string;
|
||||||
if (optionsArg.dockerSockPath) {
|
if (optionsArg.socketPath) {
|
||||||
pathToUse = optionsArg.dockerSockPath;
|
pathToUse = optionsArg.socketPath;
|
||||||
} else if (process.env.DOCKER_HOST) {
|
} else if (process.env.DOCKER_HOST) {
|
||||||
pathToUse = process.env.DOCKER_HOST;
|
pathToUse = process.env.DOCKER_HOST;
|
||||||
} else if (process.env.CI) {
|
} else if (process.env.CI) {
|
||||||
@@ -58,10 +63,10 @@ export class DockerHost {
|
|||||||
}
|
}
|
||||||
console.log(`using docker sock at ${pathToUse}`);
|
console.log(`using docker sock at ${pathToUse}`);
|
||||||
this.socketPath = pathToUse;
|
this.socketPath = pathToUse;
|
||||||
this.imageStore = new DockerImageStore(this, {
|
this.imageStore = new DockerImageStore({
|
||||||
bucketDir: null,
|
bucketDir: null,
|
||||||
localDirPath: this.options.imageStoreDir,
|
localDirPath: this.options.imageStoreDir,
|
||||||
})
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
public async start() {
|
public async start() {
|
||||||
@@ -71,6 +76,18 @@ export class DockerHost {
|
|||||||
await this.imageStore.stop();
|
await this.imageStore.stop();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Ping the Docker daemon to check if it's running and accessible
|
||||||
|
* @returns Promise that resolves if Docker is available, rejects otherwise
|
||||||
|
* @throws Error if Docker ping fails
|
||||||
|
*/
|
||||||
|
public async ping(): Promise<void> {
|
||||||
|
const response = await this.request('GET', '/_ping');
|
||||||
|
if (response.statusCode !== 200) {
|
||||||
|
throw new Error(`Docker ping failed with status ${response.statusCode}`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* authenticate against a registry
|
* authenticate against a registry
|
||||||
* @param userArg
|
* @param userArg
|
||||||
@@ -83,17 +100,22 @@ export class DockerHost {
|
|||||||
throw new Error(response.body.Status);
|
throw new Error(response.body.Status);
|
||||||
}
|
}
|
||||||
console.log(response.body.Status);
|
console.log(response.body.Status);
|
||||||
this.registryToken = plugins.smartstring.base64.encode(plugins.smartjson.stringify(authData));
|
this.registryToken = plugins.smartstring.base64.encode(
|
||||||
|
plugins.smartjson.stringify(authData),
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* gets the token from the .docker/config.json file for GitLab registry
|
* gets the token from the .docker/config.json file for GitLab registry
|
||||||
*/
|
*/
|
||||||
public async getAuthTokenFromDockerConfig(registryUrlArg: string) {
|
public async getAuthTokenFromDockerConfig(registryUrlArg: string) {
|
||||||
const dockerConfigPath = plugins.smartpath.get.home('~/.docker/config.json');
|
const dockerConfigPath = plugins.smartpath.get.home(
|
||||||
|
'~/.docker/config.json',
|
||||||
|
);
|
||||||
const configObject = plugins.smartfile.fs.toObjectSync(dockerConfigPath);
|
const configObject = plugins.smartfile.fs.toObjectSync(dockerConfigPath);
|
||||||
const gitlabAuthBase64 = configObject.auths[registryUrlArg].auth;
|
const gitlabAuthBase64 = configObject.auths[registryUrlArg].auth;
|
||||||
const gitlabAuth: string = plugins.smartstring.base64.decode(gitlabAuthBase64);
|
const gitlabAuth: string =
|
||||||
|
plugins.smartstring.base64.decode(gitlabAuthBase64);
|
||||||
const gitlabAuthArray = gitlabAuth.split(':');
|
const gitlabAuthArray = gitlabAuth.split(':');
|
||||||
await this.auth({
|
await this.auth({
|
||||||
username: gitlabAuthArray[0],
|
username: gitlabAuthArray[0],
|
||||||
@@ -103,69 +125,191 @@ export class DockerHost {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// ==============
|
// ==============
|
||||||
// NETWORKS
|
// NETWORKS - Public Factory API
|
||||||
// ==============
|
// ==============
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* gets all networks
|
* Lists all networks
|
||||||
*/
|
*/
|
||||||
public async getNetworks() {
|
public async listNetworks() {
|
||||||
return await DockerNetwork.getNetworks(this);
|
return await DockerNetwork._list(this);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* create a network
|
* Gets a network by name
|
||||||
*/
|
|
||||||
public async createNetwork(optionsArg: Parameters<typeof DockerNetwork.createNetwork>[1]) {
|
|
||||||
return await DockerNetwork.createNetwork(this, optionsArg);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* get a network by name
|
|
||||||
*/
|
*/
|
||||||
public async getNetworkByName(networkNameArg: string) {
|
public async getNetworkByName(networkNameArg: string) {
|
||||||
return await DockerNetwork.getNetworkByName(this, networkNameArg);
|
return await DockerNetwork._fromName(this, networkNameArg);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
// ==============
|
|
||||||
// CONTAINERS
|
|
||||||
// ==============
|
|
||||||
/**
|
/**
|
||||||
* gets all containers
|
* Creates a network
|
||||||
*/
|
*/
|
||||||
public async getContainers() {
|
public async createNetwork(
|
||||||
const containerArray = await DockerContainer.getContainers(this);
|
descriptor: interfaces.INetworkCreationDescriptor,
|
||||||
return containerArray;
|
) {
|
||||||
|
return await DockerNetwork._create(this, descriptor);
|
||||||
}
|
}
|
||||||
|
|
||||||
// ==============
|
// ==============
|
||||||
// SERVICES
|
// CONTAINERS - Public Factory API
|
||||||
// ==============
|
// ==============
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* gets all services
|
* Lists all containers
|
||||||
*/
|
*/
|
||||||
public async getServices() {
|
public async listContainers() {
|
||||||
const serviceArray = await DockerService.getServices(this);
|
return await DockerContainer._list(this);
|
||||||
return serviceArray;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// ==============
|
|
||||||
// IMAGES
|
|
||||||
// ==============
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* get all images
|
* Gets a container by ID
|
||||||
|
* Returns undefined if container does not exist
|
||||||
*/
|
*/
|
||||||
public async getImages() {
|
public async getContainerById(containerId: string): Promise<DockerContainer | undefined> {
|
||||||
return await DockerImage.getImages(this);
|
return await DockerContainer._fromId(this, containerId);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* get an image by name
|
* Creates a container
|
||||||
|
*/
|
||||||
|
public async createContainer(
|
||||||
|
descriptor: interfaces.IContainerCreationDescriptor,
|
||||||
|
) {
|
||||||
|
return await DockerContainer._create(this, descriptor);
|
||||||
|
}
|
||||||
|
|
||||||
|
// ==============
|
||||||
|
// SERVICES - Public Factory API
|
||||||
|
// ==============
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Lists all services
|
||||||
|
*/
|
||||||
|
public async listServices() {
|
||||||
|
return await DockerService._list(this);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Gets a service by name
|
||||||
|
*/
|
||||||
|
public async getServiceByName(serviceName: string) {
|
||||||
|
return await DockerService._fromName(this, serviceName);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a service
|
||||||
|
*/
|
||||||
|
public async createService(
|
||||||
|
descriptor: interfaces.IServiceCreationDescriptor,
|
||||||
|
) {
|
||||||
|
return await DockerService._create(this, descriptor);
|
||||||
|
}
|
||||||
|
|
||||||
|
// ==============
|
||||||
|
// IMAGES - Public Factory API
|
||||||
|
// ==============
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Lists all images
|
||||||
|
*/
|
||||||
|
public async listImages() {
|
||||||
|
return await DockerImage._list(this);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Gets an image by name
|
||||||
*/
|
*/
|
||||||
public async getImageByName(imageNameArg: string) {
|
public async getImageByName(imageNameArg: string) {
|
||||||
return await DockerImage.getImageByName(this, imageNameArg);
|
return await DockerImage._fromName(this, imageNameArg);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates an image from a registry
|
||||||
|
*/
|
||||||
|
public async createImageFromRegistry(
|
||||||
|
descriptor: interfaces.IImageCreationDescriptor,
|
||||||
|
) {
|
||||||
|
return await DockerImage._createFromRegistry(this, {
|
||||||
|
creationObject: descriptor,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates an image from a tar stream
|
||||||
|
*/
|
||||||
|
public async createImageFromTarStream(
|
||||||
|
tarStream: plugins.smartstream.stream.Readable,
|
||||||
|
descriptor: interfaces.IImageCreationDescriptor,
|
||||||
|
) {
|
||||||
|
return await DockerImage._createFromTarStream(this, {
|
||||||
|
creationObject: descriptor,
|
||||||
|
tarStream: tarStream,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Builds an image from a Dockerfile
|
||||||
|
*/
|
||||||
|
public async buildImage(imageTag: string) {
|
||||||
|
return await DockerImage._build(this, imageTag);
|
||||||
|
}
|
||||||
|
|
||||||
|
// ==============
|
||||||
|
// SECRETS - Public Factory API
|
||||||
|
// ==============
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Lists all secrets
|
||||||
|
*/
|
||||||
|
public async listSecrets() {
|
||||||
|
return await DockerSecret._list(this);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Gets a secret by name
|
||||||
|
*/
|
||||||
|
public async getSecretByName(secretName: string) {
|
||||||
|
return await DockerSecret._fromName(this, secretName);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Gets a secret by ID
|
||||||
|
*/
|
||||||
|
public async getSecretById(secretId: string) {
|
||||||
|
return await DockerSecret._fromId(this, secretId);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a secret
|
||||||
|
*/
|
||||||
|
public async createSecret(
|
||||||
|
descriptor: interfaces.ISecretCreationDescriptor,
|
||||||
|
) {
|
||||||
|
return await DockerSecret._create(this, descriptor);
|
||||||
|
}
|
||||||
|
|
||||||
|
// ==============
|
||||||
|
// IMAGE STORE - Public API
|
||||||
|
// ==============
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Stores an image in the local image store
|
||||||
|
*/
|
||||||
|
public async storeImage(
|
||||||
|
imageName: string,
|
||||||
|
tarStream: plugins.smartstream.stream.Readable,
|
||||||
|
): Promise<void> {
|
||||||
|
return await this.imageStore.storeImage(imageName, tarStream);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Retrieves an image from the local image store
|
||||||
|
*/
|
||||||
|
public async retrieveImage(
|
||||||
|
imageName: string,
|
||||||
|
): Promise<plugins.smartstream.stream.Readable> {
|
||||||
|
return await this.imageStore.getImage(imageName);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -173,8 +317,12 @@ export class DockerHost {
|
|||||||
*/
|
*/
|
||||||
public async getEventObservable(): Promise<plugins.rxjs.Observable<any>> {
|
public async getEventObservable(): Promise<plugins.rxjs.Observable<any>> {
|
||||||
const response = await this.requestStreaming('GET', '/events');
|
const response = await this.requestStreaming('GET', '/events');
|
||||||
|
|
||||||
|
// requestStreaming now returns Node.js stream, not web stream
|
||||||
|
const nodeStream = response as plugins.smartstream.stream.Readable;
|
||||||
|
|
||||||
return plugins.rxjs.Observable.create((observer) => {
|
return plugins.rxjs.Observable.create((observer) => {
|
||||||
response.on('data', (data) => {
|
nodeStream.on('data', (data) => {
|
||||||
const eventString = data.toString();
|
const eventString = data.toString();
|
||||||
try {
|
try {
|
||||||
const eventObject = JSON.parse(eventString);
|
const eventObject = JSON.parse(eventString);
|
||||||
@@ -184,7 +332,7 @@ export class DockerHost {
|
|||||||
}
|
}
|
||||||
});
|
});
|
||||||
return () => {
|
return () => {
|
||||||
response.emit('end');
|
nodeStream.emit('end');
|
||||||
};
|
};
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
@@ -225,53 +373,189 @@ export class DockerHost {
|
|||||||
*/
|
*/
|
||||||
public async request(methodArg: string, routeArg: string, dataArg = {}) {
|
public async request(methodArg: string, routeArg: string, dataArg = {}) {
|
||||||
const requestUrl = `${this.socketPath}${routeArg}`;
|
const requestUrl = `${this.socketPath}${routeArg}`;
|
||||||
const response = await plugins.smartrequest.request(requestUrl, {
|
|
||||||
method: methodArg,
|
// Build the request using the fluent API
|
||||||
headers: {
|
const smartRequest = plugins.smartrequest.SmartRequest.create()
|
||||||
'Content-Type': 'application/json',
|
.url(requestUrl)
|
||||||
'X-Registry-Auth': this.registryToken,
|
.header('Content-Type', 'application/json')
|
||||||
Host: 'docker.sock',
|
.header('X-Registry-Auth', this.registryToken)
|
||||||
},
|
.header('Host', 'docker.sock')
|
||||||
requestBody: dataArg,
|
.options({ keepAlive: false });
|
||||||
keepAlive: false,
|
|
||||||
});
|
// Add body for methods that support it
|
||||||
if (response.statusCode !== 200) {
|
if (dataArg && Object.keys(dataArg).length > 0) {
|
||||||
console.log(response.body);
|
smartRequest.json(dataArg);
|
||||||
}
|
}
|
||||||
return response;
|
|
||||||
|
// Execute the request based on method
|
||||||
|
let response;
|
||||||
|
switch (methodArg.toUpperCase()) {
|
||||||
|
case 'GET':
|
||||||
|
response = await smartRequest.get();
|
||||||
|
break;
|
||||||
|
case 'POST':
|
||||||
|
response = await smartRequest.post();
|
||||||
|
break;
|
||||||
|
case 'PUT':
|
||||||
|
response = await smartRequest.put();
|
||||||
|
break;
|
||||||
|
case 'DELETE':
|
||||||
|
response = await smartRequest.delete();
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
throw new Error(`Unsupported HTTP method: ${methodArg}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse the response body based on content type
|
||||||
|
let body;
|
||||||
|
const contentType = response.headers['content-type'] || '';
|
||||||
|
|
||||||
|
// Docker's streaming endpoints (like /images/create) return newline-delimited JSON
|
||||||
|
// which can't be parsed as a single JSON object
|
||||||
|
const isStreamingEndpoint =
|
||||||
|
routeArg.includes('/images/create') ||
|
||||||
|
routeArg.includes('/images/load') ||
|
||||||
|
routeArg.includes('/build');
|
||||||
|
|
||||||
|
if (contentType.includes('application/json') && !isStreamingEndpoint) {
|
||||||
|
body = await response.json();
|
||||||
|
} else {
|
||||||
|
body = await response.text();
|
||||||
|
// Try to parse as JSON if it looks like JSON and is not a streaming response
|
||||||
|
if (
|
||||||
|
!isStreamingEndpoint &&
|
||||||
|
body &&
|
||||||
|
(body.startsWith('{') || body.startsWith('['))
|
||||||
|
) {
|
||||||
|
try {
|
||||||
|
body = JSON.parse(body);
|
||||||
|
} catch {
|
||||||
|
// Keep as text if parsing fails
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create a response object compatible with existing code
|
||||||
|
const legacyResponse = {
|
||||||
|
statusCode: response.status,
|
||||||
|
body: body,
|
||||||
|
headers: response.headers,
|
||||||
|
};
|
||||||
|
|
||||||
|
if (response.status !== 200) {
|
||||||
|
console.log(body);
|
||||||
|
}
|
||||||
|
|
||||||
|
return legacyResponse;
|
||||||
}
|
}
|
||||||
|
|
||||||
public async requestStreaming(methodArg: string, routeArg: string, readStream?: plugins.smartstream.stream.Readable) {
|
public async requestStreaming(
|
||||||
|
methodArg: string,
|
||||||
|
routeArg: string,
|
||||||
|
readStream?: plugins.smartstream.stream.Readable,
|
||||||
|
jsonData?: any,
|
||||||
|
) {
|
||||||
const requestUrl = `${this.socketPath}${routeArg}`;
|
const requestUrl = `${this.socketPath}${routeArg}`;
|
||||||
const response = await plugins.smartrequest.request(
|
|
||||||
requestUrl,
|
// Build the request using the fluent API
|
||||||
{
|
const smartRequest = plugins.smartrequest.SmartRequest.create()
|
||||||
method: methodArg,
|
.url(requestUrl)
|
||||||
headers: {
|
.header('Content-Type', 'application/json')
|
||||||
'Content-Type': 'application/json',
|
.header('X-Registry-Auth', this.registryToken)
|
||||||
'X-Registry-Auth': this.registryToken,
|
.header('Host', 'docker.sock')
|
||||||
Host: 'docker.sock',
|
.timeout(30000)
|
||||||
},
|
.options({ keepAlive: false, autoDrain: true }); // Disable auto-drain for streaming
|
||||||
requestBody: null,
|
|
||||||
keepAlive: false,
|
// If we have JSON data, add it to the request
|
||||||
},
|
if (jsonData && Object.keys(jsonData).length > 0) {
|
||||||
true,
|
smartRequest.json(jsonData);
|
||||||
(readStream ? reqArg => {
|
}
|
||||||
let counter = 0;
|
|
||||||
const smartduplex = new plugins.smartstream.SmartDuplex({
|
// If we have a readStream, use the new stream method with logging
|
||||||
writeFunction: async (chunkArg) => {
|
if (readStream) {
|
||||||
if (counter % 1000 === 0) {
|
let counter = 0;
|
||||||
console.log(`posting chunk ${counter}`);
|
const smartduplex = new plugins.smartstream.SmartDuplex({
|
||||||
}
|
writeFunction: async (chunkArg) => {
|
||||||
counter++;
|
if (counter % 1000 === 0) {
|
||||||
return chunkArg;
|
console.log(`posting chunk ${counter}`);
|
||||||
}
|
}
|
||||||
});
|
counter++;
|
||||||
readStream.pipe(smartduplex).pipe(reqArg);
|
return chunkArg;
|
||||||
} : null),
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
// Pipe through the logging duplex stream
|
||||||
|
const loggedStream = readStream.pipe(smartduplex);
|
||||||
|
|
||||||
|
// Use the new stream method to stream the data
|
||||||
|
smartRequest.stream(loggedStream, 'application/octet-stream');
|
||||||
|
}
|
||||||
|
|
||||||
|
// Execute the request based on method
|
||||||
|
let response: plugins.smartrequest.ICoreResponse;
|
||||||
|
switch (methodArg.toUpperCase()) {
|
||||||
|
case 'GET':
|
||||||
|
response = await smartRequest.get();
|
||||||
|
break;
|
||||||
|
case 'POST':
|
||||||
|
response = await smartRequest.post();
|
||||||
|
break;
|
||||||
|
case 'PUT':
|
||||||
|
response = await smartRequest.put();
|
||||||
|
break;
|
||||||
|
case 'DELETE':
|
||||||
|
response = await smartRequest.delete();
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
throw new Error(`Unsupported HTTP method: ${methodArg}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
console.log(response.status);
|
||||||
|
|
||||||
|
// For streaming responses, get the web stream
|
||||||
|
const webStream = response.stream();
|
||||||
|
|
||||||
|
if (!webStream) {
|
||||||
|
// If no stream is available, consume the body as text
|
||||||
|
const body = await response.text();
|
||||||
|
console.log(body);
|
||||||
|
|
||||||
|
// Return a compatible response object
|
||||||
|
return {
|
||||||
|
statusCode: response.status,
|
||||||
|
body: body,
|
||||||
|
headers: response.headers,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert web ReadableStream to Node.js stream for backward compatibility
|
||||||
|
const nodeStream = plugins.smartstream.nodewebhelpers.convertWebReadableToNodeReadable(webStream);
|
||||||
|
|
||||||
|
// Add properties for compatibility
|
||||||
|
(nodeStream as any).statusCode = response.status;
|
||||||
|
(nodeStream as any).body = ''; // For compatibility
|
||||||
|
|
||||||
|
return nodeStream;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* add s3 storage
|
||||||
|
* @param optionsArg
|
||||||
|
*/
|
||||||
|
public async addS3Storage(optionsArg: plugins.tsclass.storage.IS3Descriptor) {
|
||||||
|
this.smartBucket = new plugins.smartbucket.SmartBucket(optionsArg);
|
||||||
|
if (!optionsArg.bucketName) {
|
||||||
|
throw new Error('bucketName is required');
|
||||||
|
}
|
||||||
|
const bucket = await this.smartBucket.getBucketByName(
|
||||||
|
optionsArg.bucketName,
|
||||||
);
|
);
|
||||||
console.log(response.statusCode);
|
let wantedDirectory = await bucket.getBaseDirectory();
|
||||||
console.log(response.body);
|
if (optionsArg.directoryPath) {
|
||||||
return response;
|
wantedDirectory = await wantedDirectory.getSubDirectoryByName(
|
||||||
|
optionsArg.directoryPath,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
this.imageStore.options.bucketDir = wantedDirectory;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
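
As a quick orientation to the renamed DockerHost factory methods above, a hedged end-to-end sketch; the descriptor shape follows the creationObject fields used elsewhere in this diff (imageUrl, imageTag), the image URL and tag are examples only, and error handling is omitted.

const dockerHost = new DockerHost({});
await dockerHost.start();
await dockerHost.ping(); // throws if the daemon is not reachable

const containers = await dockerHost.listContainers();
console.log(`found ${containers.length} containers`);

const image = await dockerHost.createImageFromRegistry({
  imageUrl: 'code.foss.global/host.today/ht-docker-node',
  imageTag: 'alpine',
});
console.log(image.RepoTags);

await dockerHost.stop();
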
@@ -1,14 +1,20 @@
|
|||||||
import * as plugins from './plugins.js';
|
import * as plugins from './plugins.js';
|
||||||
import * as interfaces from './interfaces/index.js';
|
import * as interfaces from './interfaces/index.js';
|
||||||
import { DockerHost } from './classes.host.js';
|
import { DockerHost } from './classes.host.js';
|
||||||
|
import { DockerResource } from './classes.base.js';
|
||||||
import { logger } from './logger.js';
|
import { logger } from './logger.js';
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* represents a docker image on the remote docker host
|
* represents a docker image on the remote docker host
|
||||||
*/
|
*/
|
||||||
export class DockerImage {
|
export class DockerImage extends DockerResource {
|
||||||
// STATIC
|
// STATIC (Internal - prefixed with _ to indicate internal use)
|
||||||
public static async getImages(dockerHost: DockerHost) {
|
|
||||||
|
/**
|
||||||
|
* Internal: Get all images
|
||||||
|
* Public API: Use dockerHost.listImages() instead
|
||||||
|
*/
|
||||||
|
public static async _list(dockerHost: DockerHost) {
|
||||||
const images: DockerImage[] = [];
|
const images: DockerImage[] = [];
|
||||||
const response = await dockerHost.request('GET', '/images/json');
|
const response = await dockerHost.request('GET', '/images/json');
|
||||||
for (const imageObject of response.body) {
|
for (const imageObject of response.body) {
|
||||||
@@ -17,8 +23,15 @@ export class DockerImage {
|
|||||||
return images;
|
return images;
|
||||||
}
|
}
|
||||||
|
|
||||||
public static async getImageByName(dockerHost: DockerHost, imageNameArg: string) {
|
/**
|
||||||
const images = await this.getImages(dockerHost);
|
* Internal: Get image by name
|
||||||
|
* Public API: Use dockerHost.getImageByName(name) instead
|
||||||
|
*/
|
||||||
|
public static async _fromName(
|
||||||
|
dockerHost: DockerHost,
|
||||||
|
imageNameArg: string,
|
||||||
|
) {
|
||||||
|
const images = await this._list(dockerHost);
|
||||||
const result = images.find((image) => {
|
const result = images.find((image) => {
|
||||||
if (image.RepoTags) {
|
if (image.RepoTags) {
|
||||||
return image.RepoTags.includes(imageNameArg);
|
return image.RepoTags.includes(imageNameArg);
|
||||||
@@ -29,11 +42,15 @@ export class DockerImage {
|
|||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
|
|
||||||
public static async createFromRegistry(
|
/**
|
||||||
|
* Internal: Create image from registry
|
||||||
|
* Public API: Use dockerHost.createImageFromRegistry(descriptor) instead
|
||||||
|
*/
|
||||||
|
public static async _createFromRegistry(
|
||||||
dockerHostArg: DockerHost,
|
dockerHostArg: DockerHost,
|
||||||
optionsArg: {
|
optionsArg: {
|
||||||
creationObject: interfaces.IImageCreationDescriptor
|
creationObject: interfaces.IImageCreationDescriptor;
|
||||||
}
|
},
|
||||||
): Promise<DockerImage> {
|
): Promise<DockerImage> {
|
||||||
// lets create a sanatized imageUrlObject
|
// lets create a sanatized imageUrlObject
|
||||||
const imageUrlObject: {
|
const imageUrlObject: {
|
||||||
@@ -50,7 +67,7 @@ export class DockerImage {
|
|||||||
const imageTag = imageUrlObject.imageUrl.split(':')[1];
|
const imageTag = imageUrlObject.imageUrl.split(':')[1];
|
||||||
if (imageUrlObject.imageTag) {
|
if (imageUrlObject.imageTag) {
|
||||||
throw new Error(
|
throw new Error(
|
||||||
`imageUrl ${imageUrlObject.imageUrl} can't be tagged with ${imageUrlObject.imageTag} because it is already tagged with ${imageTag}`
|
`imageUrl ${imageUrlObject.imageUrl} can't be tagged with ${imageUrlObject.imageTag} because it is already tagged with ${imageTag}`,
|
||||||
);
|
);
|
||||||
} else {
|
} else {
|
||||||
imageUrlObject.imageUrl = imageUrl;
|
imageUrlObject.imageUrl = imageUrl;
|
||||||
@@ -65,12 +82,18 @@ export class DockerImage {
|
|||||||
const response = await dockerHostArg.request(
|
const response = await dockerHostArg.request(
|
||||||
'POST',
|
'POST',
|
||||||
`/images/create?fromImage=${encodeURIComponent(
|
`/images/create?fromImage=${encodeURIComponent(
|
||||||
imageUrlObject.imageUrl
|
imageUrlObject.imageUrl,
|
||||||
)}&tag=${encodeURIComponent(imageUrlObject.imageTag)}`
|
)}&tag=${encodeURIComponent(imageUrlObject.imageTag)}`,
|
||||||
);
|
);
|
||||||
if (response.statusCode < 300) {
|
if (response.statusCode < 300) {
|
||||||
logger.log('info', `Successfully pulled image ${imageUrlObject.imageUrl} from the registry`);
|
logger.log(
|
||||||
const image = await DockerImage.getImageByName(dockerHostArg, imageUrlObject.imageOriginTag);
|
'info',
|
||||||
|
`Successfully pulled image ${imageUrlObject.imageUrl} from the registry`,
|
||||||
|
);
|
||||||
|
const image = await DockerImage._fromName(
|
||||||
|
dockerHostArg,
|
||||||
|
imageUrlObject.imageOriginTag,
|
||||||
|
);
|
||||||
return image;
|
return image;
|
||||||
} else {
|
} else {
|
||||||
logger.log('error', `Failed at the attempt of creating a new image`);
|
logger.log('error', `Failed at the attempt of creating a new image`);
|
||||||
@@ -78,38 +101,119 @@ export class DockerImage {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
*
|
* Internal: Create image from tar stream
|
||||||
* @param dockerHostArg
|
* Public API: Use dockerHost.createImageFromTarStream(stream, descriptor) instead
|
||||||
* @param tarStreamArg
|
|
||||||
*/
|
*/
|
||||||
public static async createFromTarStream(dockerHostArg: DockerHost, optionsArg: {
|
public static async _createFromTarStream(
|
||||||
creationObject: interfaces.IImageCreationDescriptor,
|
dockerHostArg: DockerHost,
|
||||||
tarStream: plugins.smartstream.stream.Readable,
|
optionsArg: {
|
||||||
}) {
|
creationObject: interfaces.IImageCreationDescriptor;
|
||||||
const response = await dockerHostArg.requestStreaming('POST', '/images/load', optionsArg.tarStream);
|
tarStream: plugins.smartstream.stream.Readable;
|
||||||
return response;
|
},
|
||||||
|
): Promise<DockerImage> {
|
||||||
|
// Start the request for importing an image
|
||||||
|
const response = await dockerHostArg.requestStreaming(
|
||||||
|
'POST',
|
||||||
|
'/images/load',
|
||||||
|
optionsArg.tarStream,
|
||||||
|
);
|
||||||
|
|
||||||
|
// requestStreaming now returns Node.js stream
|
||||||
|
const nodeStream = response as plugins.smartstream.stream.Readable;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Docker typically returns lines like:
|
||||||
|
* {"stream":"Loaded image: myrepo/myimage:latest"}
|
||||||
|
*
|
||||||
|
* So we will collect those lines and parse out the final image name.
|
||||||
|
*/
|
||||||
|
let rawOutput = '';
|
||||||
|
nodeStream.on('data', (chunk) => {
|
||||||
|
rawOutput += chunk.toString();
|
||||||
|
});
|
||||||
|
|
||||||
|
// Wrap the end event in a Promise for easier async/await usage
|
||||||
|
await new Promise<void>((resolve, reject) => {
|
||||||
|
nodeStream.on('end', () => {
|
||||||
|
resolve();
|
||||||
|
});
|
||||||
|
nodeStream.on('error', (err) => {
|
||||||
|
reject(err);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
// Attempt to parse each line to find something like "Loaded image: ..."
|
||||||
|
let loadedImageTag: string | undefined;
|
||||||
|
const lines = rawOutput.trim().split('\n').filter(Boolean);
|
||||||
|
|
||||||
|
for (const line of lines) {
|
||||||
|
try {
|
||||||
|
const jsonLine = JSON.parse(line);
|
||||||
|
if (
|
||||||
|
jsonLine.stream &&
|
||||||
|
(jsonLine.stream.startsWith('Loaded image:') ||
|
||||||
|
jsonLine.stream.startsWith('Loaded image ID:'))
|
||||||
|
) {
|
||||||
|
// Examples:
|
||||||
|
// "Loaded image: your-image:latest"
|
||||||
|
// "Loaded image ID: sha256:...."
|
||||||
|
loadedImageTag = jsonLine.stream
|
||||||
|
.replace('Loaded image: ', '')
|
||||||
|
.replace('Loaded image ID: ', '')
|
||||||
|
.trim();
|
||||||
|
}
|
||||||
|
} catch {
|
||||||
|
// not valid JSON, ignore
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!loadedImageTag) {
|
||||||
|
throw new Error(
|
||||||
|
`Could not parse the loaded image info from Docker response.\nResponse was:\n${rawOutput}`,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Now try to look up that image by the "loadedImageTag".
|
||||||
|
// Depending on Docker's response, it might be something like:
|
||||||
|
// "myrepo/myimage:latest" OR "sha256:someHash..."
|
||||||
|
// If Docker gave you an ID (e.g. "sha256:..."), you may need a separate
|
||||||
|
// DockerImage.getImageById method; or if you prefer, you can treat it as a name.
|
||||||
|
const newlyImportedImage = await DockerImage._fromName(
|
||||||
|
dockerHostArg,
|
||||||
|
loadedImageTag,
|
||||||
|
);
|
||||||
|
|
||||||
|
if (!newlyImportedImage) {
|
||||||
|
throw new Error(
|
||||||
|
`Image load succeeded, but no local reference found for "${loadedImageTag}".`,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.log('info', `Successfully imported image "${loadedImageTag}".`);
|
||||||
|
|
||||||
|
return newlyImportedImage;
|
||||||
}
|
}
|
||||||
|
|
||||||
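
To make the parsing described above concrete, this stand-alone sketch extracts the tag from the kind of newline-delimited JSON that /images/load returns; the sample payload is invented for illustration.

const rawOutput = [
  '{"status":"Loading layer","progressDetail":{}}',
  '{"stream":"Loaded image: myrepo/myimage:latest"}',
].join('\n');

let loadedImageTag: string | undefined;
for (const line of rawOutput.trim().split('\n').filter(Boolean)) {
  try {
    const jsonLine = JSON.parse(line);
    if (typeof jsonLine.stream === 'string' && jsonLine.stream.startsWith('Loaded image:')) {
      loadedImageTag = jsonLine.stream.replace('Loaded image: ', '').trim();
    }
  } catch {
    // not valid JSON, ignore
  }
}
console.log(loadedImageTag); // "myrepo/myimage:latest"
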
public static async tagImageByIdOrName(
|
public static async tagImageByIdOrName(
|
||||||
dockerHost: DockerHost,
|
dockerHost: DockerHost,
|
||||||
idOrNameArg: string,
|
idOrNameArg: string,
|
||||||
newTagArg: string
|
newTagArg: string,
|
||||||
) {
|
) {
|
||||||
const response = await dockerHost.request(
|
const response = await dockerHost.request(
|
||||||
'POST',
|
'POST',
|
||||||
`/images/${encodeURIComponent(idOrNameArg)}/${encodeURIComponent(newTagArg)}`
|
`/images/${encodeURIComponent(idOrNameArg)}/${encodeURIComponent(newTagArg)}`,
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
public static async buildImage(dockerHostArg: DockerHost, dockerImageTag) {
|
/**
|
||||||
|
* Internal: Build image from Dockerfile
|
||||||
|
* Public API: Use dockerHost.buildImage(tag) instead
|
||||||
|
*/
|
||||||
|
public static async _build(dockerHostArg: DockerHost, dockerImageTag) {
|
||||||
// TODO: implement building an image
|
// TODO: implement building an image
|
||||||
}
|
}
|
||||||
|
|
||||||
// INSTANCE
|
// INSTANCE PROPERTIES
|
||||||
// references
|
|
||||||
public dockerHost: DockerHost;
|
|
||||||
|
|
||||||
// properties
|
|
||||||
/**
|
/**
|
||||||
* the tags for an image
|
* the tags for an image
|
||||||
*/
|
*/
|
||||||
@@ -124,13 +228,28 @@ export class DockerImage {
   public Size: number;
   public VirtualSize: number;
 
-  constructor(dockerHostArg, dockerImageObjectArg: any) {
-    this.dockerHost = dockerHostArg;
+  constructor(dockerHostArg: DockerHost, dockerImageObjectArg: any) {
+    super(dockerHostArg);
     Object.keys(dockerImageObjectArg).forEach((keyArg) => {
       this[keyArg] = dockerImageObjectArg[keyArg];
     });
   }
 
+  // INSTANCE METHODS
+
+  /**
+   * Refreshes this image's state from the Docker daemon
+   */
+  public async refresh(): Promise<void> {
+    if (!this.RepoTags || this.RepoTags.length === 0) {
+      throw new Error('Cannot refresh image without RepoTags');
+    }
+    const updated = await DockerImage._fromName(this.dockerHost, this.RepoTags[0]);
+    if (updated) {
+      Object.assign(this, updated);
+    }
+  }
+
   /**
    * tag an image
    * @param newTag
@@ -143,7 +262,7 @@ export class DockerImage {
    * pulls the latest version from the registry
    */
   public async pullLatestImageFromRegistry(): Promise<boolean> {
-    const updatedImage = await DockerImage.createFromRegistry(this.dockerHost, {
+    const updatedImage = await DockerImage._createFromRegistry(this.dockerHost, {
       creationObject: {
         imageUrl: this.RepoTags[0],
       },
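
A hedged usage sketch of the refresh() flow introduced above, assuming `dockerHost` is an already-connected DockerHost instance and that the './index.js' import path matches the package's export surface:

import { DockerHost, DockerImage } from './index.js';

async function refreshLocalImage(dockerHost: DockerHost, tag: string) {
  const image = await DockerImage._fromName(dockerHost, tag);
  if (image) {
    await image.pullLatestImageFromRegistry(); // fetches via _createFromRegistry internally
    await image.refresh(); // re-reads RepoTags, Size, etc. from the daemon and Object.assigns them
  }
  return image;
}
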
@@ -153,6 +272,25 @@ export class DockerImage {
     return true;
   }
 
+  /**
+   * Removes this image from the Docker daemon
+   */
+  public async remove(options?: { force?: boolean; noprune?: boolean }): Promise<void> {
+    const queryParams = new URLSearchParams();
+    if (options?.force) queryParams.append('force', '1');
+    if (options?.noprune) queryParams.append('noprune', '1');
+
+    const queryString = queryParams.toString();
+    const response = await this.dockerHost.request(
+      'DELETE',
+      `/images/${encodeURIComponent(this.Id)}${queryString ? '?' + queryString : ''}`,
+    );
+
+    if (response.statusCode >= 300) {
+      throw new Error(`Failed to remove image: ${response.statusCode}`);
+    }
+  }
+
   // get stuff
   public async getVersion() {
     if (this.Labels && this.Labels.version) {
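
A short usage sketch of the new remove() options, assuming `image` was obtained as in the sketch above; force maps to ?force=1 and noprune to ?noprune=1 on the DELETE request built by the method:

import { DockerImage } from './index.js'; // export path assumed

// Removes the image even if it is referenced by multiple tags, while keeping
// untagged parent layers (DELETE /images/<id>?force=1&noprune=1).
async function forceRemove(image: DockerImage): Promise<void> {
  await image.remove({ force: true, noprune: true });
}
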
@@ -166,28 +304,42 @@ export class DockerImage {
    * exports an image to a tar ball
    */
   public async exportToTarStream(): Promise<plugins.smartstream.stream.Readable> {
-    console.log(`Exporting image ${this.RepoTags[0]} to tar stream.`);
-    const response = await this.dockerHost.requestStreaming('GET', `/images/${encodeURIComponent(this.RepoTags[0])}/get`);
+    logger.log('info', `Exporting image ${this.RepoTags[0]} to tar stream.`);
+    const response = await this.dockerHost.requestStreaming(
+      'GET',
+      `/images/${encodeURIComponent(this.RepoTags[0])}/get`,
+    );
+
+    // requestStreaming now returns Node.js stream
+    const nodeStream = response as plugins.smartstream.stream.Readable;
+
     let counter = 0;
     const webduplexStream = new plugins.smartstream.SmartDuplex({
       writeFunction: async (chunk, tools) => {
-        if (counter % 1000 === 0)
-          console.log(`Got chunk: ${counter}`);
+        if (counter % 1000 === 0) console.log(`Got chunk: ${counter}`);
         counter++;
         return chunk;
-      }
-    });
-    response.on('data', (chunk) => {
-      if (!webduplexStream.write(chunk)) {
-        response.pause();
-        webduplexStream.once('drain', () => {
-          response.resume();
-        })
-      };
-    });
-    response.on('end', () => {
-      webduplexStream.end();
-    })
+      },
+    });
+
+    nodeStream.on('data', (chunk) => {
+      if (!webduplexStream.write(chunk)) {
+        nodeStream.pause();
+        webduplexStream.once('drain', () => {
+          nodeStream.resume();
+        });
+      }
+    });
+
+    nodeStream.on('end', () => {
+      webduplexStream.end();
+    });
+
+    nodeStream.on('error', (error) => {
+      logger.log('error', `Error during image export: ${error.message}`);
+      webduplexStream.destroy(error);
+    });
+
     return webduplexStream;
   }
 }
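
A minimal consumer sketch for exportToTarStream(), assuming `image` is a DockerImage with at least one RepoTag; it relies only on standard Node.js stream utilities:

import { createWriteStream } from 'node:fs';
import { pipeline } from 'node:stream/promises';
import { DockerImage } from './index.js'; // export path assumed

// Streams the image tarball to disk; pipeline() applies the same backpressure
// that the pause/resume handling above implements manually.
async function exportImageToFile(image: DockerImage, targetPath: string): Promise<void> {
  const tarStream = await image.exportToTarStream();
  await pipeline(tarStream, createWriteStream(targetPath));
}
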
@@ -17,19 +17,30 @@ export interface IDockerImageStoreConstructorOptions {
 export class DockerImageStore {
   public options: IDockerImageStoreConstructorOptions;
 
-  constructor(dockerHost: DockerHost, optionsArg: IDockerImageStoreConstructorOptions) {
+  constructor(optionsArg: IDockerImageStoreConstructorOptions) {
     this.options = optionsArg;
   }
 
   // Method to store tar stream
-  public async storeImage(imageName: string, tarStream: plugins.smartstream.stream.Readable): Promise<void> {
+  public async storeImage(
+    imageName: string,
+    tarStream: plugins.smartstream.stream.Readable,
+  ): Promise<void> {
     logger.log('info', `Storing image ${imageName}...`);
     const uniqueProcessingId = plugins.smartunique.shortId();
 
-    const initialTarDownloadPath = plugins.path.join(this.options.localDirPath, `${uniqueProcessingId}.tar`);
-    const extractionDir = plugins.path.join(this.options.localDirPath, uniqueProcessingId);
+    const initialTarDownloadPath = plugins.path.join(
+      this.options.localDirPath,
+      `${uniqueProcessingId}.tar`,
+    );
+    const extractionDir = plugins.path.join(
+      this.options.localDirPath,
+      uniqueProcessingId,
+    );
     // Create a write stream to store the tar file
-    const writeStream = plugins.smartfile.fsStream.createWriteStream(initialTarDownloadPath);
+    const writeStream = plugins.smartfile.fsStream.createWriteStream(
+      initialTarDownloadPath,
+    );
 
     // lets wait for the write stream to finish
     await new Promise((resolve, reject) => {
@@ -37,23 +48,43 @@ export class DockerImageStore {
       writeStream.on('finish', resolve);
       writeStream.on('error', reject);
     });
-    logger.log('info', `Image ${imageName} stored locally for processing. Extracting...`);
+    logger.log(
+      'info',
+      `Image ${imageName} stored locally for processing. Extracting...`,
+    );
 
     // lets process the image
-    const tarArchive = await plugins.smartarchive.SmartArchive.fromArchiveFile(initialTarDownloadPath);
+    const tarArchive = await plugins.smartarchive.SmartArchive.fromArchiveFile(
+      initialTarDownloadPath,
+    );
     await tarArchive.exportToFs(extractionDir);
     logger.log('info', `Image ${imageName} extracted.`);
     await plugins.smartfile.fs.remove(initialTarDownloadPath);
     logger.log('info', `deleted original tar to save space.`);
     logger.log('info', `now repackaging for s3...`);
-    const smartfileIndexJson = await plugins.smartfile.SmartFile.fromFilePath(plugins.path.join(extractionDir, 'index.json'));
-    const smartfileManifestJson = await plugins.smartfile.SmartFile.fromFilePath(plugins.path.join(extractionDir, 'manifest.json'));
-    const smartfileOciLayoutJson = await plugins.smartfile.SmartFile.fromFilePath(plugins.path.join(extractionDir, 'oci-layout'));
-    const smartfileRepositoriesJson = await plugins.smartfile.SmartFile.fromFilePath(plugins.path.join(extractionDir, 'repositories'));
+    const smartfileIndexJson = await plugins.smartfile.SmartFile.fromFilePath(
+      plugins.path.join(extractionDir, 'index.json'),
+    );
+    const smartfileManifestJson =
+      await plugins.smartfile.SmartFile.fromFilePath(
+        plugins.path.join(extractionDir, 'manifest.json'),
+      );
+    const smartfileOciLayoutJson =
+      await plugins.smartfile.SmartFile.fromFilePath(
+        plugins.path.join(extractionDir, 'oci-layout'),
+      );
+    const smartfileRepositoriesJson =
+      await plugins.smartfile.SmartFile.fromFilePath(
+        plugins.path.join(extractionDir, 'repositories'),
+      );
     const indexJson = JSON.parse(smartfileIndexJson.contents.toString());
     const manifestJson = JSON.parse(smartfileManifestJson.contents.toString());
-    const ociLayoutJson = JSON.parse(smartfileOciLayoutJson.contents.toString());
-    const repositoriesJson = JSON.parse(smartfileRepositoriesJson.contents.toString());
+    const ociLayoutJson = JSON.parse(
+      smartfileOciLayoutJson.contents.toString(),
+    );
+    const repositoriesJson = JSON.parse(
+      smartfileRepositoriesJson.contents.toString(),
+    );
 
     indexJson.manifests[0].annotations['io.containerd.image.name'] = imageName;
     manifestJson[0].RepoTags[0] = imageName;
@@ -62,10 +93,18 @@ export class DockerImageStore {
     repositoriesJson[imageName] = repoFirstValue;
     delete repositoriesJson[repoFirstKey];
 
-    smartfileIndexJson.contents = Buffer.from(JSON.stringify(indexJson, null, 2));
-    smartfileManifestJson.contents = Buffer.from(JSON.stringify(manifestJson, null, 2));
-    smartfileOciLayoutJson.contents = Buffer.from(JSON.stringify(ociLayoutJson, null, 2));
-    smartfileRepositoriesJson.contents = Buffer.from(JSON.stringify(repositoriesJson, null, 2));
+    smartfileIndexJson.contents = Buffer.from(
+      JSON.stringify(indexJson, null, 2),
+    );
+    smartfileManifestJson.contents = Buffer.from(
+      JSON.stringify(manifestJson, null, 2),
+    );
+    smartfileOciLayoutJson.contents = Buffer.from(
+      JSON.stringify(ociLayoutJson, null, 2),
+    );
+    smartfileRepositoriesJson.contents = Buffer.from(
+      JSON.stringify(repositoriesJson, null, 2),
+    );
     await Promise.all([
       smartfileIndexJson.write(),
       smartfileManifestJson.write(),
@@ -77,8 +116,12 @@ export class DockerImageStore {
     const tartools = new plugins.smartarchive.TarTools();
     const newTarPack = await tartools.packDirectory(extractionDir);
     const finalTarName = `${uniqueProcessingId}.processed.tar`;
-    const finalTarPath = plugins.path.join(this.options.localDirPath, finalTarName);
-    const finalWriteStream = plugins.smartfile.fsStream.createWriteStream(finalTarPath);
+    const finalTarPath = plugins.path.join(
+      this.options.localDirPath,
+      finalTarName,
+    );
+    const finalWriteStream =
+      plugins.smartfile.fsStream.createWriteStream(finalTarPath);
     await new Promise((resolve, reject) => {
       newTarPack.finalize();
       newTarPack.pipe(finalWriteStream);
@@ -87,6 +130,13 @@ export class DockerImageStore {
     });
     logger.log('ok', `Repackaged image ${imageName} for s3.`);
     await plugins.smartfile.fs.remove(extractionDir);
+    const finalTarReadStream =
+      plugins.smartfile.fsStream.createReadStream(finalTarPath);
+    await this.options.bucketDir.fastPutStream({
+      stream: finalTarReadStream,
+      path: `${imageName}.tar`,
+    });
+    await plugins.smartfile.fs.remove(finalTarPath);
   }
 
   public async start() {
@@ -96,8 +146,13 @@ export class DockerImageStore {
   public async stop() {}
 
   // Method to retrieve tar stream
-  public async getImage(imageName: string): Promise<plugins.smartstream.stream.Readable> {
-    const imagePath = plugins.path.join(this.options.localDirPath, `${imageName}.tar`);
+  public async getImage(
+    imageName: string,
+  ): Promise<plugins.smartstream.stream.Readable> {
+    const imagePath = plugins.path.join(
+      this.options.localDirPath,
+      `${imageName}.tar`,
+    );
 
     if (!(await plugins.smartfile.fs.fileExists(imagePath))) {
       throw new Error(`Image ${imageName} does not exist.`);
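
A hedged end-to-end sketch of the DockerImageStore flow above. Only localDirPath and bucketDir are visible in this changeset, so the options object may be incomplete, and the import path is assumed:

import { DockerImageStore } from './index.js'; // export path assumed

// Stores a tar stream under a stable image name: the tar is extracted, its
// index.json / manifest.json / oci-layout / repositories entries are rewritten
// to the given name, the directory is re-packed, and the result is uploaded
// via bucketDir.fastPutStream() as "<imageName>.tar".
async function archiveImage(
  bucketDir: any, // smartbucket directory handle expected by the options (type assumed)
  tarStream: any, // Node.js readable stream of a docker-save tarball
): Promise<void> {
  const store = new DockerImageStore({
    localDirPath: '.nogit/imagestore', // scratch space for the intermediate tar + extraction
    bucketDir,
  });
  await store.storeImage('myorg/myimage:v1.2.3', tarStream);
}
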
@@ -2,11 +2,20 @@ import * as plugins from './plugins.js';
 import * as interfaces from './interfaces/index.js';
 
 import { DockerHost } from './classes.host.js';
+import { DockerResource } from './classes.base.js';
 import { DockerService } from './classes.service.js';
 import { logger } from './logger.js';
 
-export class DockerNetwork {
-  public static async getNetworks(dockerHost: DockerHost): Promise<DockerNetwork[]> {
+export class DockerNetwork extends DockerResource {
+  // STATIC (Internal - prefixed with _ to indicate internal use)
+
+  /**
+   * Internal: Get all networks
+   * Public API: Use dockerHost.getNetworks() instead
+   */
+  public static async _list(
+    dockerHost: DockerHost,
+  ): Promise<DockerNetwork[]> {
     const dockerNetworks: DockerNetwork[] = [];
     const response = await dockerHost.request('GET', '/networks');
     for (const networkObject of response.body) {
@@ -17,14 +26,27 @@ export class DockerNetwork {
     return dockerNetworks;
   }
 
-  public static async getNetworkByName(dockerHost: DockerHost, dockerNetworkNameArg: string) {
-    const networks = await DockerNetwork.getNetworks(dockerHost);
-    return networks.find((dockerNetwork) => dockerNetwork.Name === dockerNetworkNameArg);
+  /**
+   * Internal: Get network by name
+   * Public API: Use dockerHost.getNetworkByName(name) instead
+   */
+  public static async _fromName(
+    dockerHost: DockerHost,
+    dockerNetworkNameArg: string,
+  ) {
+    const networks = await DockerNetwork._list(dockerHost);
+    return networks.find(
+      (dockerNetwork) => dockerNetwork.Name === dockerNetworkNameArg,
+    );
   }
 
-  public static async createNetwork(
+  /**
+   * Internal: Create a network
+   * Public API: Use dockerHost.createNetwork(descriptor) instead
+   */
+  public static async _create(
     dockerHost: DockerHost,
-    networkCreationDescriptor: interfaces.INetworkCreationDescriptor
+    networkCreationDescriptor: interfaces.INetworkCreationDescriptor,
   ): Promise<DockerNetwork> {
     const response = await dockerHost.request('POST', '/networks/create', {
       Name: networkCreationDescriptor.Name,
@@ -47,18 +69,20 @@ export class DockerNetwork {
     });
     if (response.statusCode < 300) {
       logger.log('info', 'Created network successfully');
-      return await DockerNetwork.getNetworkByName(dockerHost, networkCreationDescriptor.Name);
+      return await DockerNetwork._fromName(
+        dockerHost,
+        networkCreationDescriptor.Name,
+      );
     } else {
-      logger.log('error', 'There has been an error creating the wanted network');
+      logger.log(
+        'error',
+        'There has been an error creating the wanted network',
+      );
       return null;
     }
   }
 
-  // INSTANCE
-  // references
-  public dockerHost: DockerHost;
-
-  // properties
+  // INSTANCE PROPERTIES
   public Name: string;
   public Id: string;
   public Created: string;
@@ -75,22 +99,37 @@ export class DockerNetwork {
         Subnet: string;
         IPRange: string;
         Gateway: string;
-      }
+      },
     ];
   };
 
   constructor(dockerHostArg: DockerHost) {
-    this.dockerHost = dockerHostArg;
+    super(dockerHostArg);
+  }
+
+  // INSTANCE METHODS
+
+  /**
+   * Refreshes this network's state from the Docker daemon
+   */
+  public async refresh(): Promise<void> {
+    const updated = await DockerNetwork._fromName(this.dockerHost, this.Name);
+    if (updated) {
+      Object.assign(this, updated);
+    }
   }
 
   /**
-   * removes the network
+   * Removes the network
    */
   public async remove() {
-    const response = await this.dockerHost.request('DELETE', `/networks/${this.Id}`);
+    const response = await this.dockerHost.request(
+      'DELETE',
+      `/networks/${this.Id}`,
+    );
   }
 
-  public async getContainersOnNetwork(): Promise<
+  public async listContainersOnNetwork(): Promise<
     Array<{
       Name: string;
       EndpointID: string;
@@ -100,7 +139,10 @@ export class DockerNetwork {
     }>
   > {
     const returnArray = [];
-    const response = await this.dockerHost.request('GET', `/networks/${this.Id}`);
+    const response = await this.dockerHost.request(
+      'GET',
+      `/networks/${this.Id}`,
+    );
     for (const key of Object.keys(response.body.Containers)) {
       returnArray.push(response.body.Containers[key]);
     }
@@ -109,7 +151,7 @@ export class DockerNetwork {
   }
 
   public async getContainersOnNetworkForService(serviceArg: DockerService) {
-    const containersOnNetwork = await this.getContainersOnNetwork();
+    const containersOnNetwork = await this.listContainersOnNetwork();
     const containersOfService = containersOnNetwork.filter((container) => {
       return container.Name.startsWith(serviceArg.Spec.Name);
     });
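
A hedged usage sketch for the network API above; only the Name field of INetworkCreationDescriptor is visible in this hunk, so the descriptor cast is a stand-in:

import { DockerHost, DockerNetwork } from './index.js'; // export path assumed

// Looks a network up by name and creates it when missing, then reports how
// many containers are currently attached.
async function ensureNetwork(dockerHost: DockerHost, name: string) {
  const existing = await DockerNetwork._fromName(dockerHost, name);
  const network =
    existing ?? (await DockerNetwork._create(dockerHost, { Name: name } as any));
  if (network) {
    const containers = await network.listContainersOnNetwork();
    console.log(`${name}: ${containers.length} containers attached`);
  }
  return network;
}
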
@@ -1,12 +1,18 @@
 import * as plugins from './plugins.js';
 import { DockerHost } from './classes.host.js';
+import { DockerResource } from './classes.base.js';
 
 // interfaces
 import * as interfaces from './interfaces/index.js';
 
-export class DockerSecret {
-  // STATIC
-  public static async getSecrets(dockerHostArg: DockerHost) {
+export class DockerSecret extends DockerResource {
+  // STATIC (Internal - prefixed with _ to indicate internal use)
+
+  /**
+   * Internal: Get all secrets
+   * Public API: Use dockerHost.listSecrets() instead
+   */
+  public static async _list(dockerHostArg: DockerHost) {
     const response = await dockerHostArg.request('GET', '/secrets');
     const secrets: DockerSecret[] = [];
     for (const secret of response.body) {
@@ -17,19 +23,34 @@ export class DockerSecret {
     return secrets;
   }
 
-  public static async getSecretByID(dockerHostArg: DockerHost, idArg: string) {
-    const secrets = await this.getSecrets(dockerHostArg);
+  /**
+   * Internal: Get secret by ID
+   * Public API: Use dockerHost.getSecretById(id) instead
+   */
+  public static async _fromId(dockerHostArg: DockerHost, idArg: string) {
+    const secrets = await this._list(dockerHostArg);
     return secrets.find((secret) => secret.ID === idArg);
   }
 
-  public static async getSecretByName(dockerHostArg: DockerHost, nameArg: string) {
-    const secrets = await this.getSecrets(dockerHostArg);
+  /**
+   * Internal: Get secret by name
+   * Public API: Use dockerHost.getSecretByName(name) instead
+   */
+  public static async _fromName(
+    dockerHostArg: DockerHost,
+    nameArg: string,
+  ) {
+    const secrets = await this._list(dockerHostArg);
     return secrets.find((secret) => secret.Spec.Name === nameArg);
   }
 
-  public static async createSecret(
+  /**
+   * Internal: Create a secret
+   * Public API: Use dockerHost.createSecret(descriptor) instead
+   */
+  public static async _create(
     dockerHostArg: DockerHost,
-    secretDescriptor: interfaces.ISecretCreationDescriptor
+    secretDescriptor: interfaces.ISecretCreationDescriptor,
   ) {
     const labels: interfaces.TLabels = {
       ...secretDescriptor.labels,
@@ -45,12 +66,12 @@ export class DockerSecret {
     Object.assign(newSecretInstance, response.body);
     Object.assign(
       newSecretInstance,
-      await DockerSecret.getSecretByID(dockerHostArg, newSecretInstance.ID)
+      await DockerSecret._fromId(dockerHostArg, newSecretInstance.ID),
     );
     return newSecretInstance;
   }
 
-  // INSTANCE
+  // INSTANCE PROPERTIES
   public ID: string;
   public Spec: {
     Name: string;
@@ -60,13 +81,24 @@ export class DockerSecret {
     Index: string;
   };
 
-  public dockerHost: DockerHost;
   constructor(dockerHostArg: DockerHost) {
-    this.dockerHost = dockerHostArg;
+    super(dockerHostArg);
+  }
+
+  // INSTANCE METHODS
+
+  /**
+   * Refreshes this secret's state from the Docker daemon
+   */
+  public async refresh(): Promise<void> {
+    const updated = await DockerSecret._fromId(this.dockerHost, this.ID);
+    if (updated) {
+      Object.assign(this, updated);
+    }
   }
 
   /**
-   * updates a secret
+   * Updates a secret
    */
   public async update(contentArg: string) {
     const route = `/secrets/${this.ID}/update?=version=${this.Version.Index}`;
@@ -77,15 +109,20 @@ export class DockerSecret {
         Name: this.Spec.Name,
         Labels: this.Spec.Labels,
         Data: plugins.smartstring.base64.encode(contentArg),
-      }
+      },
     );
   }
 
+  /**
+   * Removes this secret from the Docker daemon
+   */
   public async remove() {
    await this.dockerHost.request('DELETE', `/secrets/${this.ID}`);
   }
 
-  // get things
+  /**
+   * Gets the version label of this secret
+   */
   public async getVersion() {
     return this.Spec.Labels.version;
   }
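
A hedged sketch of the secret lifecycle above, assuming `dockerHost` is a connected DockerHost instance; the import path is assumed:

import { DockerHost, DockerSecret } from './index.js';

// Rotates a secret's content: update() base64-encodes the payload and posts it
// against the current Version.Index, refresh() then re-reads ID/Spec/Version.
async function rotateSecret(dockerHost: DockerHost, name: string, newContent: string) {
  const secret = await DockerSecret._fromName(dockerHost, name);
  if (!secret) {
    throw new Error(`Secret ${name} not found`);
  }
  await secret.update(newContent);
  await secret.refresh();
  return secret.getVersion(); // reads the "version" label from Spec.Labels
}
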
@@ -2,13 +2,19 @@ import * as plugins from './plugins.js';
 import * as interfaces from './interfaces/index.js';
 
 import { DockerHost } from './classes.host.js';
+import { DockerResource } from './classes.base.js';
 import { DockerImage } from './classes.image.js';
 import { DockerSecret } from './classes.secret.js';
 import { logger } from './logger.js';
 
-export class DockerService {
-  // STATIC
-  public static async getServices(dockerHost: DockerHost) {
+export class DockerService extends DockerResource {
+  // STATIC (Internal - prefixed with _ to indicate internal use)
+
+  /**
+   * Internal: Get all services
+   * Public API: Use dockerHost.listServices() instead
+   */
+  public static async _list(dockerHost: DockerHost) {
     const services: DockerService[] = [];
     const response = await dockerHost.request('GET', '/services');
     for (const serviceObject of response.body) {
@@ -19,11 +25,15 @@ export class DockerService {
     return services;
   }
 
-  public static async getServiceByName(
+  /**
+   * Internal: Get service by name
+   * Public API: Use dockerHost.getServiceByName(name) instead
+   */
+  public static async _fromName(
     dockerHost: DockerHost,
-    networkName: string
+    networkName: string,
   ): Promise<DockerService> {
-    const allServices = await DockerService.getServices(dockerHost);
+    const allServices = await DockerService._list(dockerHost);
     const wantedService = allServices.find((service) => {
       return service.Spec.Name === networkName;
     });
@@ -31,17 +41,30 @@ export class DockerService {
   }
 
   /**
-   * creates a service
+   * Internal: Create a service
+   * Public API: Use dockerHost.createService(descriptor) instead
    */
-  public static async createService(
+  public static async _create(
     dockerHost: DockerHost,
-    serviceCreationDescriptor: interfaces.IServiceCreationDescriptor
+    serviceCreationDescriptor: interfaces.IServiceCreationDescriptor,
   ): Promise<DockerService> {
-    // lets get the image
-    logger.log('info', `now creating service ${serviceCreationDescriptor.name}`);
+    logger.log(
+      'info',
+      `now creating service ${serviceCreationDescriptor.name}`,
+    );
 
-    // await serviceCreationDescriptor.image.pullLatestImageFromRegistry();
-    const serviceVersion = await serviceCreationDescriptor.image.getVersion();
+    // Resolve image (support both string and DockerImage instance)
+    let imageInstance: DockerImage;
+    if (typeof serviceCreationDescriptor.image === 'string') {
+      imageInstance = await DockerImage._fromName(dockerHost, serviceCreationDescriptor.image);
+      if (!imageInstance) {
+        throw new Error(`Image not found: ${serviceCreationDescriptor.image}`);
+      }
+    } else {
+      imageInstance = serviceCreationDescriptor.image;
+    }
+
+    const serviceVersion = await imageInstance.getVersion();
+
     const labels: interfaces.TLabels = {
       ...serviceCreationDescriptor.labels,
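
The image resolution above is the same resolve-string-or-instance pattern that the networks and secrets below use; a compact generic restatement (illustrative only, the diff inlines this logic per resource type):

async function resolveRef<T extends object>(
  ref: string | T,
  lookup: (name: string) => Promise<T | undefined>,
  kind: string,
): Promise<T> {
  if (typeof ref !== 'string') return ref;
  const resolved = await lookup(ref);
  if (!resolved) throw new Error(`${kind} not found: ${ref}`);
  return resolved;
}

// e.g. const imageInstance = await resolveRef(
//   serviceCreationDescriptor.image,
//   (n) => DockerImage._fromName(dockerHost, n),
//   'Image',
// );
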
@@ -71,8 +94,12 @@ export class DockerService {
       });
     }
 
-    if (serviceCreationDescriptor.resources && serviceCreationDescriptor.resources.volumeMounts) {
-      for (const volumeMount of serviceCreationDescriptor.resources.volumeMounts) {
+    if (
+      serviceCreationDescriptor.resources &&
+      serviceCreationDescriptor.resources.volumeMounts
+    ) {
+      for (const volumeMount of serviceCreationDescriptor.resources
+        .volumeMounts) {
         mounts.push({
           Target: volumeMount.containerFsPath,
           Source: volumeMount.hostFsPath,
@@ -83,14 +110,23 @@ export class DockerService {
       }
     }
 
+    // Resolve networks (support both string[] and DockerNetwork[])
     const networkArray: Array<{
       Target: string;
       Aliases: string[];
     }> = [];
 
     for (const network of serviceCreationDescriptor.networks) {
+      // Skip null networks (can happen if network creation fails)
+      if (!network) {
+        logger.log('warn', 'Skipping null network in service creation');
+        continue;
+      }
+
+      // Resolve network name
+      const networkName = typeof network === 'string' ? network : network.Name;
       networkArray.push({
-        Target: network.Name,
+        Target: networkName,
         Aliases: [serviceCreationDescriptor.networkAlias],
       });
     }
@@ -107,9 +143,20 @@ export class DockerService {
       });
     }
 
-    // lets configure secrets
+    // Resolve secrets (support both string[] and DockerSecret[])
    const secretArray: any[] = [];
     for (const secret of serviceCreationDescriptor.secrets) {
+      // Resolve secret instance
+      let secretInstance: DockerSecret;
+      if (typeof secret === 'string') {
+        secretInstance = await DockerSecret._fromName(dockerHost, secret);
+        if (!secretInstance) {
+          throw new Error(`Secret not found: ${secret}`);
+        }
+      } else {
+        secretInstance = secret;
+      }
+
       secretArray.push({
         File: {
           Name: 'secret.json', // TODO: make sure that works with multiple secrets
@@ -117,15 +164,16 @@ export class DockerService {
           GID: '33',
           Mode: 384,
         },
-        SecretID: secret.ID,
-        SecretName: secret.Spec.Name,
+        SecretID: secretInstance.ID,
+        SecretName: secretInstance.Spec.Name,
       });
     }
 
     // lets configure limits
 
     const memoryLimitMB =
-      serviceCreationDescriptor.resources && serviceCreationDescriptor.resources.memorySizeMB
+      serviceCreationDescriptor.resources &&
+      serviceCreationDescriptor.resources.memorySizeMB
         ? serviceCreationDescriptor.resources.memorySizeMB
         : 1000;
 
@@ -134,14 +182,15 @@ export class DockerService {
     };
 
     if (serviceCreationDescriptor.resources) {
-      limits.MemoryBytes = serviceCreationDescriptor.resources.memorySizeMB * 1000000;
+      limits.MemoryBytes =
+        serviceCreationDescriptor.resources.memorySizeMB * 1000000;
     }
 
     const response = await dockerHost.request('POST', '/services/create', {
       Name: serviceCreationDescriptor.name,
       TaskTemplate: {
         ContainerSpec: {
-          Image: serviceCreationDescriptor.image.RepoTags[0],
+          Image: imageInstance.RepoTags[0],
           Labels: labels,
           Secrets: secretArray,
           Mounts: mounts,
@@ -175,15 +224,15 @@ export class DockerService {
       },
     });
 
-    const createdService = await DockerService.getServiceByName(
+    const createdService = await DockerService._fromName(
       dockerHost,
-      serviceCreationDescriptor.name
+      serviceCreationDescriptor.name,
     );
     return createdService;
   }
 
-  // INSTANCE
-  public dockerHostRef: DockerHost;
+  // INSTANCE PROPERTIES
+  // Note: dockerHost (not dockerHostRef) for consistency with base class
   public ID: string;
   public Version: { Index: number };
@@ -215,30 +264,62 @@ export class DockerService {
   public Endpoint: { Spec: {}; VirtualIPs: [any[]] };
 
   constructor(dockerHostArg: DockerHost) {
-    this.dockerHostRef = dockerHostArg;
+    super(dockerHostArg);
   }
 
+  // INSTANCE METHODS
+
+  /**
+   * Refreshes this service's state from the Docker daemon
+   */
+  public async refresh(): Promise<void> {
+    const updated = await DockerService._fromName(this.dockerHost, this.Spec.Name);
+    if (updated) {
+      Object.assign(this, updated);
+    }
+  }
+
+  /**
+   * Removes this service from the Docker daemon
+   */
   public async remove() {
-    await this.dockerHostRef.request('DELETE', `/services/${this.ID}`);
+    await this.dockerHost.request('DELETE', `/services/${this.ID}`);
   }
 
+  /**
+   * Re-reads service data from Docker engine
+   * @deprecated Use refresh() instead
+   */
   public async reReadFromDockerEngine() {
-    const dockerData = await this.dockerHostRef.request('GET', `/services/${this.ID}`);
+    const dockerData = await this.dockerHost.request(
+      'GET',
+      `/services/${this.ID}`,
+    );
     // TODO: Better assign: Object.assign(this, dockerData);
   }
 
+  /**
+   * Checks if this service needs an update based on image version
+   */
   public async needsUpdate(): Promise<boolean> {
     // TODO: implement digest based update recognition
 
     await this.reReadFromDockerEngine();
-    const dockerImage = await DockerImage.createFromRegistry(this.dockerHostRef, {
-      creationObject: {
-        imageUrl: this.Spec.TaskTemplate.ContainerSpec.Image,
-      }
-    });
+    const dockerImage = await DockerImage._createFromRegistry(
+      this.dockerHost,
+      {
+        creationObject: {
+          imageUrl: this.Spec.TaskTemplate.ContainerSpec.Image,
+        },
+      },
+    );
 
-    const imageVersion = new plugins.smartversion.SmartVersion(dockerImage.Labels.version);
-    const serviceVersion = new plugins.smartversion.SmartVersion(this.Spec.Labels.version);
+    const imageVersion = new plugins.smartversion.SmartVersion(
+      dockerImage.Labels.version,
+    );
+    const serviceVersion = new plugins.smartversion.SmartVersion(
+      this.Spec.Labels.version,
+    );
     if (imageVersion.greaterThan(serviceVersion)) {
       console.log(`service ${this.Spec.Name} needs to be updated`);
       return true;
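
A hedged sketch of the label comparison that needsUpdate() performs, assuming the smartversion semantics shown above (greaterThan on semver-style "version" labels):

import * as plugins from './plugins.js';

// Returns true when the registry image carries a newer "version" label than the
// label the running service was created with.
function imageIsNewer(imageVersionLabel: string, serviceVersionLabel: string): boolean {
  const imageVersion = new plugins.smartversion.SmartVersion(imageVersionLabel);
  const serviceVersion = new plugins.smartversion.SmartVersion(serviceVersionLabel);
  return imageVersion.greaterThan(serviceVersion);
}

// imageIsNewer('1.3.0', '1.2.9') -> true, so the service would be redeployed
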
@@ -1,3 +1,4 @@
+export * from './classes.base.js';
 export * from './classes.host.js';
 export * from './classes.container.js';
 export * from './classes.image.js';

@@ -1,7 +1,12 @@
 import { DockerNetwork } from '../classes.network.js';
 
+/**
+ * Container creation descriptor supporting both string references and class instances.
+ * Strings will be resolved to resources internally.
+ */
 export interface IContainerCreationDescriptor {
   Hostname: string;
   Domainname: string;
-  networks?: DockerNetwork[];
+  /** Network names (strings) or DockerNetwork instances */
+  networks?: (string | DockerNetwork)[];
 }
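
A minimal example of the widened container descriptor, using plain network names (values are hypothetical, import path assumed):

import type { IContainerCreationDescriptor } from './interfaces/index.js';

const containerDescriptor: IContainerCreationDescriptor = {
  Hostname: 'web-1',
  Domainname: 'internal.example',
  networks: ['frontend', 'backend'], // strings are resolved to DockerNetwork instances internally
};
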
@@ -5,13 +5,20 @@ import { DockerNetwork } from '../classes.network.js';
 import { DockerSecret } from '../classes.secret.js';
 import { DockerImage } from '../classes.image.js';
 
+/**
+ * Service creation descriptor supporting both string references and class instances.
+ * Strings will be resolved to resources internally.
+ */
 export interface IServiceCreationDescriptor {
   name: string;
-  image: DockerImage;
+  /** Image tag (string) or DockerImage instance */
+  image: string | DockerImage;
   labels: interfaces.TLabels;
-  networks: DockerNetwork[];
+  /** Network names (strings) or DockerNetwork instances */
+  networks: (string | DockerNetwork)[];
   networkAlias: string;
-  secrets: DockerSecret[];
+  /** Secret names (strings) or DockerSecret instances */
+  secrets: (string | DockerSecret)[];
   ports: string[];
   accessHostDockerSock?: boolean;
   resources?: {
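
A hedged example of the widened service descriptor using string references throughout; only the fields visible in this hunk are filled in, the values are hypothetical, and dockerHost.createService() is the public wrapper named by the doc comments in this changeset (its exact signature is assumed):

import type { IServiceCreationDescriptor } from './interfaces/index.js';

const serviceDescriptor: IServiceCreationDescriptor = {
  name: 'my-api',
  image: 'registry.example.com/my-api:2.1.0', // resolved via DockerImage._fromName()
  labels: { version: '2.1.0' },
  networks: ['backend'],                      // resolved by name, null entries are skipped
  networkAlias: 'my-api',
  secrets: ['my-api-config'],                 // resolved via DockerSecret._fromName()
  ports: ['8080:3000'],                       // format assumed
};

// const service = await dockerHost.createService(serviceDescriptor);
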
@@ -2,7 +2,7 @@ import * as plugins from './plugins.js';
 
 export const packageDir = plugins.path.resolve(
   plugins.smartpath.get.dirnameFromImportMetaUrl(import.meta.url),
-  '../'
+  '../',
 );
 
 export const nogitDir = plugins.path.resolve(packageDir, '.nogit/');

@@ -1,5 +1,5 @@
 // node native path
-import * as path from 'path';
+import * as path from 'node:path';
 
 export { path };

@@ -6,9 +6,9 @@
     "module": "NodeNext",
     "moduleResolution": "NodeNext",
     "esModuleInterop": true,
-    "verbatimModuleSyntax": true
+    "verbatimModuleSyntax": true,
+    "baseUrl": ".",
+    "paths": {}
   },
-  "exclude": [
-    "dist_*/**/*.d.ts"
-  ]
+  "exclude": ["dist_*/**/*.d.ts"]
 }