Compare commits

12 commits:

- 4b1c908b89
- 6e313261e7
- 42df15a523
- 7ef2ebcf5b
- 87f26b7b63
- ffdc61fb42
- 5b25704cf8
- 00e6033d8b
- 453040983d
- 456858bc36
- 606c82dafa
- 9fc4afe4b8
BIN  .serena/cache/typescript/document_symbols_cache_v23-06-25.pkl  (vendored, Normal file)
Binary file not shown.
68  .serena/project.yml  Normal file

@@ -0,0 +1,68 @@
# language of the project (csharp, python, rust, java, typescript, go, cpp, or ruby)
# * For C, use cpp
# * For JavaScript, use typescript
# Special requirements:
# * csharp: Requires the presence of a .sln file in the project folder.
language: typescript

# whether to use the project's gitignore file to ignore files
# Added on 2025-04-07
ignore_all_files_in_gitignore: true
# list of additional paths to ignore
# same syntax as gitignore, so you can use * and **
# Was previously called `ignored_dirs`, please update your config if you are using that.
# Added (renamed) on 2025-04-07
ignored_paths: []

# whether the project is in read-only mode
# If set to true, all editing tools will be disabled and attempts to use them will result in an error
# Added on 2025-04-18
read_only: false

# list of tool names to exclude. We recommend not excluding any tools, see the readme for more details.
# Below is the complete list of tools for convenience.
# To make sure you have the latest list of tools, and to view their descriptions,
# execute `uv run scripts/print_tool_overview.py`.
#
# * `activate_project`: Activates a project by name.
# * `check_onboarding_performed`: Checks whether project onboarding was already performed.
# * `create_text_file`: Creates/overwrites a file in the project directory.
# * `delete_lines`: Deletes a range of lines within a file.
# * `delete_memory`: Deletes a memory from Serena's project-specific memory store.
# * `execute_shell_command`: Executes a shell command.
# * `find_referencing_code_snippets`: Finds code snippets in which the symbol at the given location is referenced.
# * `find_referencing_symbols`: Finds symbols that reference the symbol at the given location (optionally filtered by type).
# * `find_symbol`: Performs a global (or local) search for symbols with/containing a given name/substring (optionally filtered by type).
# * `get_current_config`: Prints the current configuration of the agent, including the active and available projects, tools, contexts, and modes.
# * `get_symbols_overview`: Gets an overview of the top-level symbols defined in a given file.
# * `initial_instructions`: Gets the initial instructions for the current project.
#   Should only be used in settings where the system prompt cannot be set,
#   e.g. in clients you have no control over, like Claude Desktop.
# * `insert_after_symbol`: Inserts content after the end of the definition of a given symbol.
# * `insert_at_line`: Inserts content at a given line in a file.
# * `insert_before_symbol`: Inserts content before the beginning of the definition of a given symbol.
# * `list_dir`: Lists files and directories in the given directory (optionally with recursion).
# * `list_memories`: Lists memories in Serena's project-specific memory store.
# * `onboarding`: Performs onboarding (identifying the project structure and essential tasks, e.g. for testing or building).
# * `prepare_for_new_conversation`: Provides instructions for preparing for a new conversation (in order to continue with the necessary context).
# * `read_file`: Reads a file within the project directory.
# * `read_memory`: Reads the memory with the given name from Serena's project-specific memory store.
# * `remove_project`: Removes a project from the Serena configuration.
# * `replace_lines`: Replaces a range of lines within a file with new content.
# * `replace_symbol_body`: Replaces the full definition of a symbol.
# * `restart_language_server`: Restarts the language server, may be necessary when edits not through Serena happen.
# * `search_for_pattern`: Performs a search for a pattern in the project.
# * `summarize_changes`: Provides instructions for summarizing the changes made to the codebase.
# * `switch_modes`: Activates modes by providing a list of their names
# * `think_about_collected_information`: Thinking tool for pondering the completeness of collected information.
# * `think_about_task_adherence`: Thinking tool for determining whether the agent is still on track with the current task.
# * `think_about_whether_you_are_done`: Thinking tool for determining whether the task is truly completed.
# * `write_memory`: Writes a named memory (for future reference) to Serena's project-specific memory store.
excluded_tools: []

# initial prompt for the project. It will always be given to the LLM upon activating the project
# (contrary to the memories, which are loaded on demand).
initial_prompt: ""

project_name: "docker"
40  changelog.md

@@ -1,5 +1,45 @@
# Changelog

## 2025-08-19 - 1.3.4 - fix(test)
Increase test timeout, enable DockerImageStore test, update test image name, bump smartrequest patch, and add local claude settings

- Increase tstest timeout from 120s to 600s in package.json to accommodate longer-running integration tests.
- Unskip the DockerImageStore integration test and change stored image name from 'hello' to 'hello2' in test/test.nonci.node.ts.
- Bump dependency @push.rocks/smartrequest from ^4.3.0 to ^4.3.1.
- Add .claude/settings.local.json to allow local agent permissions for running tests and related tooling.

## 2025-08-19 - 1.3.3 - fix(classes.host)
Adjust requestStreaming timeout and autoDrain; stabilize tests

- Reduced requestStreaming timeout from 10 minutes to 30 seconds to avoid long-running hanging requests.
- Enabled autoDrain for streaming requests to ensure response streams are properly drained and reduce resource issues.
- Marked the DockerImageStore S3 integration test as skipped to avoid CI dependence on external S3 and added a cleanup test to stop the test DockerHost.
- Added local tool settings file (.claude/settings.local.json) with local permissions (development-only).

## 2025-08-18 - 1.3.2 - fix(package.json)
Fix test script timeout typo, update dependency versions, and add typings & project configs

- Fix test script: correct 'tineout' -> 'timeout' for npm test command and set timeout to 120s
- Add 'typings': 'dist_ts/index.d.ts' to package.json
- Bump dependencies to newer compatible versions (notable packages: @push.rocks/lik, @push.rocks/smartarchive, @push.rocks/smartbucket, @push.rocks/smartfile, @push.rocks/smartlog, @push.rocks/smartpromise, @push.rocks/smartstream, rxjs)
- Add project/config files: .serena/project.yml and .claude/settings.local.json (editor/CI metadata)
- Include generated cache/metadata files (typescript document symbols cache), which are tooling/cache artifacts rather than source changes

## 2025-08-18 - 1.3.1 - fix(test)
Update test setup and devDependencies; adjust test import and add package metadata

- Update test script to run with additional flags: --verbose, --logfile and --tineout 120
- Bump devDependencies: @git.zone/tsbuild -> ^2.6.7, @git.zone/tsrun -> ^1.3.3, @git.zone/tstest -> ^2.3.5, @push.rocks/qenv -> ^6.1.3
- Change test import from @push.rocks/tapbundle to @git.zone/tstest/tapbundle
- Add typings field (dist_ts/index.d.ts)
- Add packageManager field for pnpm@10.14.0 with integrity hash

## 2024-12-23 - 1.3.0 - feat(core)
Initial release of Docker client with TypeScript support

- Provides easy communication with Docker's remote API from Node.js
- Includes implementations for managing Docker services, networks, secrets, containers, and images

## 2024-12-23 - 1.2.8 - fix(core)

Improved the image creation process from tar stream in DockerImage class.
40  package.json

@@ -1,13 +1,13 @@
 {
   "name": "@apiclient.xyz/docker",
-  "version": "1.2.8",
+  "version": "1.3.4",
   "description": "Provides easy communication with Docker remote API from Node.js, with TypeScript support.",
   "private": false,
   "main": "dist_ts/index.js",
   "typings": "dist_ts/index.d.ts",
   "type": "module",
   "scripts": {
-    "test": "(tstest test/ --web)",
+    "test": "(tstest test/ --verbose --logfile --timeout 600)",
     "build": "(tsbuild --web --allowimplicitany)",
     "buildDocs": "tsdoc"
   },
@@ -33,29 +33,28 @@
   },
   "homepage": "https://gitlab.com/mojoio/docker#readme",
   "dependencies": {
-    "@push.rocks/lik": "^6.0.15",
+    "@push.rocks/lik": "^6.2.2",
-    "@push.rocks/smartarchive": "^4.0.39",
+    "@push.rocks/smartarchive": "^4.2.2",
-    "@push.rocks/smartbucket": "^3.0.22",
+    "@push.rocks/smartbucket": "^3.3.10",
-    "@push.rocks/smartfile": "^11.0.21",
+    "@push.rocks/smartfile": "^11.2.7",
     "@push.rocks/smartjson": "^5.0.20",
-    "@push.rocks/smartlog": "^3.0.7",
+    "@push.rocks/smartlog": "^3.1.8",
-    "@push.rocks/smartnetwork": "^3.0.0",
+    "@push.rocks/smartnetwork": "^4.1.2",
-    "@push.rocks/smartpath": "^5.0.18",
+    "@push.rocks/smartpath": "^6.0.0",
-    "@push.rocks/smartpromise": "^4.0.4",
+    "@push.rocks/smartpromise": "^4.2.3",
-    "@push.rocks/smartrequest": "^2.0.22",
+    "@push.rocks/smartrequest": "^4.3.1",
-    "@push.rocks/smartstream": "^3.0.46",
+    "@push.rocks/smartstream": "^3.2.5",
     "@push.rocks/smartstring": "^4.0.15",
     "@push.rocks/smartunique": "^3.0.9",
     "@push.rocks/smartversion": "^3.0.5",
-    "@tsclass/tsclass": "^4.1.2",
+    "@tsclass/tsclass": "^9.2.0",
-    "rxjs": "^7.5.7"
+    "rxjs": "^7.8.2"
   },
   "devDependencies": {
-    "@git.zone/tsbuild": "^2.1.84",
+    "@git.zone/tsbuild": "^2.6.7",
-    "@git.zone/tsrun": "^1.2.49",
+    "@git.zone/tsrun": "^1.3.3",
-    "@git.zone/tstest": "^1.0.90",
+    "@git.zone/tstest": "^2.3.5",
-    "@push.rocks/qenv": "^6.0.5",
+    "@push.rocks/qenv": "^6.1.3",
-    "@push.rocks/tapbundle": "^5.3.0",
     "@types/node": "22.7.5"
   },
   "files": [
@@ -72,5 +71,6 @@
   ],
   "browserslist": [
     "last 1 chrome versions"
-  ]
+  ],
+  "packageManager": "pnpm@10.14.0+sha512.ad27a79641b49c3e481a16a805baa71817a04bbe06a38d17e60e2eaee83f6a146c6a688125f5792e48dd5ba30e7da52a5cda4c3992b9ccf333f9ce223af84748"
 }
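The most consequential bump above is @push.rocks/smartrequest from ^2.0.22 to ^4.3.1: v4 replaces the plain `request()` helper with a fluent builder, which is what drives the `DockerHost.request` rewrite further down in this compare. A minimal hedged sketch of that calling style, mirroring the usage visible later in classes.host.ts and test-stream.mjs (the endpoint is illustrative):

```typescript
import { SmartRequest } from '@push.rocks/smartrequest';

// Fluent builder style of smartrequest v4, as used elsewhere in this diff.
const response = await SmartRequest.create()
  .url('http://unix:/var/run/docker.sock:/_ping') // illustrative Docker endpoint
  .header('Host', 'docker.sock')
  .get();

console.log(response.status);       // numeric status (was response.statusCode in v2)
console.log(await response.text()); // body accessors are now async
```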
8003  pnpm-lock.yaml  (generated)
File diff suppressed because it is too large.
539  readme.md

@@ -1,136 +1,495 @@
# @apiclient.xyz/docker 🐳

> **Powerful TypeScript client for Docker Remote API** - Build, manage, and orchestrate Docker containers, images, networks, and more with type-safe elegance.

## 🚀 Features

- 🎯 **Full TypeScript Support** - Complete type definitions for Docker API entities
- 🔄 **Async/Await Ready** - Modern promise-based architecture for seamless async operations
- 📦 **Container Management** - Create, list, inspect, and remove containers effortlessly
- 🖼️ **Image Handling** - Pull from registries, build from tarballs, export, and manage tags
- 🌐 **Network Operations** - Create and manage Docker networks with full IPAM support
- 🔐 **Secrets Management** - Handle Docker secrets securely in swarm mode
- 🎭 **Service Orchestration** - Deploy and manage services in Docker Swarm
- 💾 **S3 Image Storage** - Built-in support for storing/retrieving images from S3
- 📊 **Event Streaming** - Real-time Docker event monitoring with RxJS observables
- 🔧 **Registry Authentication** - Seamless authentication with Docker registries

## 📦 Installation

```bash
# Using npm
npm install @apiclient.xyz/docker --save

# Using pnpm (recommended)
pnpm add @apiclient.xyz/docker

# Using yarn
yarn add @apiclient.xyz/docker
```

## 🎯 Quick Start

```typescript
import { DockerHost } from '@apiclient.xyz/docker';

// Connect to local Docker daemon
const docker = new DockerHost();

// Or connect to remote Docker host
const remoteDocker = new DockerHost({
  socketPath: 'tcp://remote-docker-host:2375'
});
```

## 📚 Complete API Guide

### 🐳 DockerHost - Your Gateway to Docker

The `DockerHost` class is your primary interface to interact with the Docker daemon.

```typescript
import { DockerHost } from '@apiclient.xyz/docker';

// Initialize with default local socket
const docker = new DockerHost();

// Custom initialization options
const customDocker = new DockerHost({
  socketPath: '/var/run/docker.sock', // Unix socket path
  // or
  socketPath: 'tcp://192.168.1.100:2375' // TCP connection
});

// Start and stop (for lifecycle management)
await docker.start();
// ... do your work
await docker.stop();
```

### 📦 Container Management

#### List All Containers

```typescript
// Get all containers (including stopped ones)
const allContainers = await docker.getContainers();

// Each container includes detailed information
allContainers.forEach(container => {
  console.log(`Container: ${container.Names[0]}`);
  console.log(`  ID: ${container.Id}`);
  console.log(`  Status: ${container.Status}`);
  console.log(`  Image: ${container.Image}`);
  console.log(`  State: ${container.State}`);
});
```

#### Create and Manage Containers

```typescript
import { DockerContainer } from '@apiclient.xyz/docker';

// Create a container with detailed configuration
const container = await DockerContainer.create(docker, {
  Image: 'nginx:latest',
  name: 'my-nginx-server',
  HostConfig: {
    PortBindings: {
      '80/tcp': [{ HostPort: '8080' }]
    },
    RestartPolicy: {
      Name: 'unless-stopped'
    },
    Memory: 512 * 1024 * 1024, // 512MB memory limit
  },
  Env: [
    'NODE_ENV=production',
    'LOG_LEVEL=info'
  ],
  Labels: {
    'app': 'web-server',
    'environment': 'production'
  }
});

console.log(`Container created: ${container.Id}`);

// Container operations (these would need to be implemented)
// await container.start();
// await container.stop();
// await container.remove();
```

#### Get Container by ID

```typescript
const container = await DockerContainer.getContainerById(docker, 'container-id-here');
if (container) {
  console.log(`Found container: ${container.Names[0]}`);
}
```

### 🖼️ Image Management

#### Pull Images from Registry

```typescript
import { DockerImage } from '@apiclient.xyz/docker';

// Pull an image from Docker Hub
const image = await DockerImage.createFromRegistry(docker, {
  imageName: 'node',
  imageTag: '18-alpine',
  // Optional: provide registry authentication
  authToken: 'your-registry-auth-token'
});

console.log(`Image pulled: ${image.RepoTags[0]}`);
console.log(`Size: ${(image.Size / 1024 / 1024).toFixed(2)} MB`);
```

#### Import Images from Tar

```typescript
import * as fs from 'fs';

// Import from a tar stream
const tarStream = fs.createReadStream('./my-image.tar');
const importedImage = await DockerImage.createFromTarStream(docker, {
  tarStream,
  imageUrl: 'file://./my-image.tar',
  imageTag: 'my-app:v1.0.0'
});
```

#### Export Images to Tar

```typescript
// Export an image to a tar stream
const image = await DockerImage.getImageByName(docker, 'nginx:latest');
const exportStream = await image.exportToTarStream();

// Save to file
const writeStream = fs.createWriteStream('./nginx-export.tar');
exportStream.pipe(writeStream);
```

#### Tag Images

```typescript
// Tag an existing image
await DockerImage.tagImageByIdOrName(docker, 'node:18-alpine', {
  registry: 'myregistry.com',
  imageName: 'my-node-app',
  imageTag: 'v2.0.0'
});
// Result: myregistry.com/my-node-app:v2.0.0
```

### 🌐 Network Management

#### Create Custom Networks

```typescript
import { DockerNetwork } from '@apiclient.xyz/docker';

// Create a bridge network
const network = await DockerNetwork.createNetwork(docker, {
  Name: 'my-app-network',
  Driver: 'bridge',
  EnableIPv6: false,
  IPAM: {
    Driver: 'default',
    Config: [{
      Subnet: '172.28.0.0/16',
      Gateway: '172.28.0.1'
    }]
  },
  Labels: {
    'project': 'my-app',
    'environment': 'production'
  }
});

console.log(`Network created: ${network.Id}`);
```

#### List and Inspect Networks

```typescript
// Get all networks
const networks = await docker.getNetworks();
networks.forEach(net => {
  console.log(`Network: ${net.Name} (${net.Driver})`);
  console.log(`  Scope: ${net.Scope}`);
  console.log(`  Internal: ${net.Internal}`);
});

// Get specific network
const appNetwork = await DockerNetwork.getNetworkByName(docker, 'my-app-network');

// Get containers on network
const containers = await appNetwork.getContainersOnNetwork();
console.log(`Containers on network: ${containers.length}`);
```

### 🎭 Service Management (Swarm Mode)

#### Deploy Services

```typescript
import { DockerService } from '@apiclient.xyz/docker';

// Create a replicated service
const service = await DockerService.createService(docker, {
  name: 'web-api',
  image: 'my-api:latest',
  replicas: 3,
  ports: [{
    Protocol: 'tcp',
    PublishedPort: 80,
    TargetPort: 3000
  }],
  networks: ['my-app-network'],
  labels: {
    'app': 'api',
    'version': '2.0.0'
  },
  resources: {
    limits: {
      Memory: 256 * 1024 * 1024, // 256MB
      CPUs: 0.5
    }
  },
  secrets: ['api-key', 'db-password'],
  mounts: [{
    Target: '/data',
    Source: 'app-data',
    Type: 'volume'
  }]
});

console.log(`Service deployed: ${service.ID}`);
```

#### Manage Services

```typescript
// List all services
const services = await docker.getServices();
services.forEach(service => {
  console.log(`Service: ${service.Spec.Name}`);
  console.log(`  Replicas: ${service.Spec.Mode.Replicated.Replicas}`);
  console.log(`  Image: ${service.Spec.TaskTemplate.ContainerSpec.Image}`);
});

// Get service by name
const myService = await DockerService.getServiceByName(docker, 'web-api');

// Check if service needs update
const needsUpdate = await myService.needsUpdate();
if (needsUpdate) {
  console.log('Service configuration has changed, update needed');
}

// Remove service
await myService.remove();
```

### 🔐 Secrets Management

```typescript
import { DockerSecret } from '@apiclient.xyz/docker';

// Create a secret
const secret = await DockerSecret.createSecret(docker, {
  name: 'api-key',
  data: Buffer.from('super-secret-key-123').toString('base64'),
  labels: {
    'app': 'my-app',
    'type': 'api-key'
  }
});

console.log(`Secret created: ${secret.ID}`);

// List secrets
const secrets = await DockerSecret.getSecrets(docker);
secrets.forEach(secret => {
  console.log(`Secret: ${secret.Spec.Name}`);
});

// Get secret by name
const apiKeySecret = await DockerSecret.getSecretByName(docker, 'api-key');

// Update secret
await apiKeySecret.update({
  data: Buffer.from('new-secret-key-456').toString('base64')
});

// Remove secret
await apiKeySecret.remove();
```

### 💾 S3 Image Storage

Store and retrieve Docker images from S3-compatible storage:

```typescript
// Configure S3 storage
await docker.addS3Storage({
  endpoint: 's3.amazonaws.com',
  accessKeyId: 'your-access-key',
  secretAccessKey: 'your-secret-key',
  bucket: 'docker-images'
});

// Store an image to S3
const imageStore = docker.imageStore;
await imageStore.storeImage('my-app:v1.0.0');

// Retrieve an image from S3
const retrievedImage = await imageStore.getImage('my-app:v1.0.0');
```

### 📊 Event Monitoring

Monitor Docker events in real-time using RxJS observables:

```typescript
// Subscribe to Docker events
const eventStream = docker.getEventObservable();

const subscription = eventStream.subscribe({
  next: (event) => {
    console.log(`Event: ${event.Type} - ${event.Action}`);
    console.log(`Actor: ${event.Actor.ID}`);
    console.log(`Time: ${new Date(event.time * 1000).toISOString()}`);
  },
  error: (err) => console.error('Event stream error:', err),
  complete: () => console.log('Event stream completed')
});

// Unsubscribe when done
subscription.unsubscribe();
```

### 🔧 Registry Authentication

Authenticate with Docker registries for private images:

```typescript
// Authenticate with Docker Hub
await docker.auth({
  username: 'your-username',
  password: 'your-password',
  serveraddress: 'https://index.docker.io/v1/'
});

// Or use existing Docker config
const authToken = await docker.getAuthTokenFromDockerConfig('myregistry.com');

// Use auth token when pulling images
const privateImage = await DockerImage.createFromRegistry(docker, {
  imageName: 'myregistry.com/private/image',
  imageTag: 'latest',
  authToken
});
```

### 🔄 Swarm Mode

Initialize and manage Docker Swarm:

```typescript
// Initialize swarm mode
await docker.activateSwarm({
  ListenAddr: '0.0.0.0:2377',
  AdvertiseAddr: '192.168.1.100:2377',
  ForceNewCluster: false
});

// Now you can create services, secrets, and use swarm features
const service = await DockerService.createService(docker, {
  name: 'my-swarm-service',
  image: 'nginx:latest',
  replicas: 5
  // ... more service config
});
```

## 🏗️ Advanced Examples

### Complete Application Stack

```typescript
async function deployStack() {
  const docker = new DockerHost();

  // Create network
  const network = await DockerNetwork.createNetwork(docker, {
    Name: 'app-network',
    Driver: 'overlay' // for swarm mode
  });

  // Create secrets
  const dbPassword = await DockerSecret.createSecret(docker, {
    name: 'db-password',
    data: Buffer.from('strong-password').toString('base64')
  });

  // Deploy database service
  const dbService = await DockerService.createService(docker, {
    name: 'postgres',
    image: 'postgres:14',
    networks: ['app-network'],
    secrets: ['db-password'],
    env: ['POSTGRES_PASSWORD_FILE=/run/secrets/db-password']
  });

  // Deploy application service
  const appService = await DockerService.createService(docker, {
    name: 'web-app',
    image: 'my-app:latest',
    replicas: 3,
    networks: ['app-network'],
    ports: [{ Protocol: 'tcp', PublishedPort: 80, TargetPort: 3000 }]
  });

  console.log('Stack deployed successfully!');
}
```

## 🔍 TypeScript Support

This package provides comprehensive TypeScript definitions for all Docker API entities:

```typescript
import type {
  IContainerCreationDescriptor,
  IServiceCreationDescriptor,
  INetworkCreationDescriptor,
  IImageCreationDescriptor,
  ISecretCreationDescriptor
} from '@apiclient.xyz/docker';

// Full IntelliSense support for all configuration options
const containerConfig: IContainerCreationDescriptor = {
  Image: 'node:18',
  // Your IDE will provide full autocomplete here
};
```

## 🤝 Contributing

We welcome contributions! Please feel free to submit issues and pull requests.

## 📖 API Documentation

For complete API documentation, visit [https://apiclient.xyz/docker](https://apiclient.xyz/docker)

For Docker Remote API reference, see [Docker Engine API Documentation](https://docs.docker.com/engine/api/latest/)

## License and Legal Information

@@ -149,4 +508,4 @@ Registered at District court Bremen HRB 35230 HB, Germany

For any legal inquiries or if you require further information, please contact us via email at hello@task.vc.

By using this repository, you acknowledge that you have read this section, agree to comply with its terms, and understand that the licensing of the code does not imply endorsement by Task Venture Capital GmbH of any derivative works.
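Note on the rewritten readme: the individual sections above can be combined into a small health-check script. The sketch below is illustrative only; it assumes the `DockerHost`, `DockerImage.createFromRegistry`, and `getContainers` APIs exactly as documented in the new readme, and `inspectHost` is a hypothetical helper name, not part of the package.

```typescript
import { DockerHost, DockerImage } from '@apiclient.xyz/docker';

// Hypothetical helper combining the calls documented above:
// connect, pull an image, then list what the daemon knows about.
async function inspectHost() {
  const docker = new DockerHost();
  await docker.start();

  // Pull a small public image (assumes registry access)
  await DockerImage.createFromRegistry(docker, {
    imageName: 'hello-world',
    imageTag: 'latest',
  });

  // List containers known to the daemon
  const containers = await docker.getContainers();
  console.log(`Found ${containers.length} containers`);

  await docker.stop();
}

inspectHost().catch(console.error);
```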
40  test-stream.js  Normal file

@@ -0,0 +1,40 @@
const { SmartRequest } = require('@push.rocks/smartrequest');

async function test() {
  try {
    const response = await SmartRequest.create()
      .url('http://unix:/run/user/1000/docker.sock:/images/hello-world:latest/get')
      .header('Host', 'docker.sock')
      .get();

    console.log('Response status:', response.status);
    console.log('Response type:', typeof response);

    const stream = response.streamNode();
    console.log('Stream type:', typeof stream);
    console.log('Has on method:', typeof stream.on);

    if (stream) {
      let chunks = 0;
      stream.on('data', (chunk) => {
        chunks++;
        if (chunks <= 3) console.log('Got chunk', chunks, chunk.length);
      });
      stream.on('end', () => {
        console.log('Stream ended, total chunks:', chunks);
        process.exit(0);
      });
      stream.on('error', (err) => {
        console.error('Stream error:', err);
        process.exit(1);
      });
    } else {
      console.log('No stream available');
    }
  } catch (error) {
    console.error('Error:', error);
    process.exit(1);
  }
}

test();
46  test-stream.mjs  Normal file

@@ -0,0 +1,46 @@
import { SmartRequest } from '@push.rocks/smartrequest';

async function test() {
  try {
    const response = await SmartRequest.create()
      .url('http://unix:/run/user/1000/docker.sock:/images/hello-world:latest/get')
      .header('Host', 'docker.sock')
      .get();

    console.log('Response status:', response.status);
    console.log('Response type:', typeof response);

    const stream = response.streamNode();
    console.log('Stream type:', typeof stream);
    console.log('Has on method:', typeof stream.on);

    if (stream) {
      let chunks = 0;
      stream.on('data', (chunk) => {
        chunks++;
        if (chunks <= 3) console.log('Got chunk', chunks, chunk.length);
      });
      stream.on('end', () => {
        console.log('Stream ended, total chunks:', chunks);
        process.exit(0);
      });
      stream.on('error', (err) => {
        console.error('Stream error:', err);
        process.exit(1);
      });

      // Set a timeout in case stream doesn't end
      setTimeout(() => {
        console.log('Timeout after 5 seconds');
        process.exit(1);
      }, 5000);
    } else {
      console.log('No stream available');
    }
  } catch (error) {
    console.error('Error:', error);
    process.exit(1);
  }
}

test();
@@ -1,4 +1,4 @@
-import { expect, tap } from '@push.rocks/tapbundle';
+import { expect, tap } from '@git.zone/tstest/tapbundle';
 import { Qenv } from '@push.rocks/qenv';

 const testQenv = new Qenv('./', './.nogit/');

@@ -139,17 +139,17 @@ tap.test('should export images', async (toolsArg) => {
   await done.promise;
 });

-tap.test('should import images', async (toolsArg) => {
-  const done = toolsArg.defer();
+tap.test('should import images', async () => {
   const fsReadStream = plugins.smartfile.fsStream.createReadStream(
     plugins.path.join(paths.nogitDir, 'testimage.tar')
   );
-  await docker.DockerImage.createFromTarStream(testDockerHost, {
+  const importedImage = await docker.DockerImage.createFromTarStream(testDockerHost, {
     tarStream: fsReadStream,
     creationObject: {
       imageUrl: 'code.foss.global/host.today/ht-docker-node:latest',
     }
-  })
+  });
+  expect(importedImage).toBeInstanceOf(docker.DockerImage);
 });

 tap.test('should expose a working DockerImageStore', async () => {

@@ -163,7 +163,11 @@ tap.test('should expose a working DockerImageStore', async () => {
   await testDockerHost.addS3Storage(s3Descriptor);

   //
-  await testDockerHost.imageStore.storeImage('hello', plugins.smartfile.fsStream.createReadStream(plugins.path.join(paths.nogitDir, 'testimage.tar')));
+  await testDockerHost.imageStore.storeImage('hello2', plugins.smartfile.fsStream.createReadStream(plugins.path.join(paths.nogitDir, 'testimage.tar')));
+})
+
+tap.test('cleanup', async () => {
+  await testDockerHost.stop();
 })

 export default tap.start();
@@ -3,6 +3,6 @@
  */
 export const commitinfo = {
   name: '@apiclient.xyz/docker',
-  version: '1.2.8',
+  version: '1.3.4',
   description: 'Provides easy communication with Docker remote API from Node.js, with TypeScript support.'
 }
@@ -226,54 +226,151 @@ export class DockerHost {
   */
  public async request(methodArg: string, routeArg: string, dataArg = {}) {
    const requestUrl = `${this.socketPath}${routeArg}`;
-    const response = await plugins.smartrequest.request(requestUrl, {
-      method: methodArg,
-      headers: {
-        'Content-Type': 'application/json',
-        'X-Registry-Auth': this.registryToken,
-        Host: 'docker.sock',
-      },
-      requestBody: dataArg,
-      keepAlive: false,
-    });
-    if (response.statusCode !== 200) {
-      console.log(response.body);
-    }
-    return response;
+
+    // Build the request using the fluent API
+    const smartRequest = plugins.smartrequest.SmartRequest.create()
+      .url(requestUrl)
+      .header('Content-Type', 'application/json')
+      .header('X-Registry-Auth', this.registryToken)
+      .header('Host', 'docker.sock')
+      .options({ keepAlive: false });
+
+    // Add body for methods that support it
+    if (dataArg && Object.keys(dataArg).length > 0) {
+      smartRequest.json(dataArg);
+    }
+
+    // Execute the request based on method
+    let response;
+    switch (methodArg.toUpperCase()) {
+      case 'GET':
+        response = await smartRequest.get();
+        break;
+      case 'POST':
+        response = await smartRequest.post();
+        break;
+      case 'PUT':
+        response = await smartRequest.put();
+        break;
+      case 'DELETE':
+        response = await smartRequest.delete();
+        break;
+      default:
+        throw new Error(`Unsupported HTTP method: ${methodArg}`);
+    }
+
+    // Parse the response body based on content type
+    let body;
+    const contentType = response.headers['content-type'] || '';
+
+    // Docker's streaming endpoints (like /images/create) return newline-delimited JSON
+    // which can't be parsed as a single JSON object
+    const isStreamingEndpoint = routeArg.includes('/images/create') ||
+      routeArg.includes('/images/load') ||
+      routeArg.includes('/build');
+
+    if (contentType.includes('application/json') && !isStreamingEndpoint) {
+      body = await response.json();
+    } else {
+      body = await response.text();
+      // Try to parse as JSON if it looks like JSON and is not a streaming response
+      if (!isStreamingEndpoint && body && (body.startsWith('{') || body.startsWith('['))) {
+        try {
+          body = JSON.parse(body);
+        } catch {
+          // Keep as text if parsing fails
+        }
+      }
+    }
+
+    // Create a response object compatible with existing code
+    const legacyResponse = {
+      statusCode: response.status,
+      body: body,
+      headers: response.headers
+    };
+
+    if (response.status !== 200) {
+      console.log(body);
+    }
+
+    return legacyResponse;
  }

  public async requestStreaming(methodArg: string, routeArg: string, readStream?: plugins.smartstream.stream.Readable) {
    const requestUrl = `${this.socketPath}${routeArg}`;
-    const response = await plugins.smartrequest.request(
-      requestUrl,
-      {
-        method: methodArg,
-        headers: {
-          'Content-Type': 'application/json',
-          'X-Registry-Auth': this.registryToken,
-          Host: 'docker.sock',
-        },
-        requestBody: null,
-        keepAlive: false,
-      },
-      true,
-      (readStream ? reqArg => {
-        let counter = 0;
-        const smartduplex = new plugins.smartstream.SmartDuplex({
-          writeFunction: async (chunkArg) => {
-            if (counter % 1000 === 0) {
-              console.log(`posting chunk ${counter}`);
-            }
-            counter++;
-            return chunkArg;
-          }
-        });
-        readStream.pipe(smartduplex).pipe(reqArg);
-      } : null),
-    );
-    console.log(response.statusCode);
-    console.log(response.body);
-    return response;
+
+    // Build the request using the fluent API
+    const smartRequest = plugins.smartrequest.SmartRequest.create()
+      .url(requestUrl)
+      .header('Content-Type', 'application/json')
+      .header('X-Registry-Auth', this.registryToken)
+      .header('Host', 'docker.sock')
+      .timeout(30000)
+      .options({ keepAlive: false, autoDrain: true }); // Enable auto-drain for streaming
+
+    // If we have a readStream, use the new stream method with logging
+    if (readStream) {
+      let counter = 0;
+      const smartduplex = new plugins.smartstream.SmartDuplex({
+        writeFunction: async (chunkArg) => {
+          if (counter % 1000 === 0) {
+            console.log(`posting chunk ${counter}`);
+          }
+          counter++;
+          return chunkArg;
+        }
+      });
+
+      // Pipe through the logging duplex stream
+      const loggedStream = readStream.pipe(smartduplex);
+
+      // Use the new stream method to stream the data
+      smartRequest.stream(loggedStream, 'application/octet-stream');
+    }
+
+    // Execute the request based on method
+    let response;
+    switch (methodArg.toUpperCase()) {
+      case 'GET':
+        response = await smartRequest.get();
+        break;
+      case 'POST':
+        response = await smartRequest.post();
+        break;
+      case 'PUT':
+        response = await smartRequest.put();
+        break;
+      case 'DELETE':
+        response = await smartRequest.delete();
+        break;
+      default:
+        throw new Error(`Unsupported HTTP method: ${methodArg}`);
+    }
+
+    console.log(response.status);
+
+    // For streaming responses, get the Node.js stream
+    const nodeStream = response.streamNode();
+
+    if (!nodeStream) {
+      // If no stream is available, consume the body as text
+      const body = await response.text();
+      console.log(body);
+
+      // Return a compatible response object
+      return {
+        statusCode: response.status,
+        body: body,
+        headers: response.headers
+      };
+    }
+
+    // For streaming responses, return the stream with added properties
+    (nodeStream as any).statusCode = response.status;
+    (nodeStream as any).body = ''; // For compatibility
+
+    return nodeStream;
  }

  /**

@@ -250,6 +250,12 @@ export class DockerImage {
  public async exportToTarStream(): Promise<plugins.smartstream.stream.Readable> {
    logger.log('info', `Exporting image ${this.RepoTags[0]} to tar stream.`);
    const response = await this.dockerHost.requestStreaming('GET', `/images/${encodeURIComponent(this.RepoTags[0])}/get`);
+
+    // Check if response is a Node.js stream
+    if (!response || typeof response.on !== 'function') {
+      throw new Error('Failed to get streaming response for image export');
+    }
+
    let counter = 0;
    const webduplexStream = new plugins.smartstream.SmartDuplex({
      writeFunction: async (chunk, tools) => {

@@ -259,17 +265,25 @@
        return chunk;
      }
    });
+
    response.on('data', (chunk) => {
      if (!webduplexStream.write(chunk)) {
        response.pause();
        webduplexStream.once('drain', () => {
          response.resume();
-        })
-      };
+        });
+      }
    });
+
    response.on('end', () => {
      webduplexStream.end();
-    })
+    });
+
+    response.on('error', (error) => {
+      logger.log('error', `Error during image export: ${error.message}`);
+      webduplexStream.destroy(error);
+    });
+
    return webduplexStream;
  }
 }
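Taken together, the refactored `request()` still resolves to a `{ statusCode, body, headers }` object so existing call sites keep working, while `requestStreaming()` now resolves to a Node.js Readable (with `statusCode` patched on) whenever the daemon streams data, which is what the stream check in `exportToTarStream()` relies on. A hedged sketch of caller code against these shapes; the `docker` instance, the chosen route, and the output path are illustrative:

```typescript
import * as fs from 'fs';
import { DockerHost, DockerImage } from '@apiclient.xyz/docker';

async function demo(docker: DockerHost) {
  // Non-streaming call: body is pre-parsed for application/json responses
  const list = await docker.request('GET', '/containers/json');
  if (list.statusCode === 200) {
    console.log(`running containers: ${list.body.length}`);
  }

  // Streaming path via the public API: pipe an exported image to disk and
  // rely on errors destroyed on the duplex (see diff above) propagating here
  const image = await DockerImage.getImageByName(docker, 'hello-world:latest');
  const tarStream = await image.exportToTarStream();
  await new Promise<void>((resolve, reject) => {
    const out = fs.createWriteStream('./hello-world.tar'); // illustrative path
    tarStream.pipe(out);
    tarStream.on('error', reject);
    out.on('error', reject);
    out.on('finish', resolve);
  });
}
```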
@@ -89,6 +89,11 @@ export class DockerService {
      }> = [];

      for (const network of serviceCreationDescriptor.networks) {
+        // Skip null networks (can happen if network creation fails)
+        if (!network) {
+          logger.log('warn', 'Skipping null network in service creation');
+          continue;
+        }
        networkArray.push({
          Target: network.Name,
          Aliases: [serviceCreationDescriptor.networkAlias],
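The guard matters when a caller builds the `networks` array from operations that are allowed to fail. A hedged sketch of the calling pattern this protects; the network name and the partial service descriptor are illustrative, not part of the diff:

```typescript
import { DockerHost, DockerNetwork, DockerService } from '@apiclient.xyz/docker';

async function deployTolerantService(docker: DockerHost) {
  // Network creation is allowed to fail without aborting the deployment
  const maybeNetwork = await DockerNetwork
    .createNetwork(docker, { Name: 'optional-net' })
    .catch(() => null);

  // Before the guard, a null entry here would crash on `network.Name`
  // inside createService; now it is skipped with a warning.
  await DockerService.createService(docker, {
    name: 'tolerant-service',
    image: 'nginx:latest',
    replicas: 1,
    networks: [maybeNetwork],
    networkAlias: 'tolerant-service',
  } as any); // cast: the full descriptor shape is defined by the package's types
}
```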