chore(deps): modernize coreflow tooling

This commit is contained in:
2026-04-28 12:02:22 +00:00
parent 9cce79f040
commit 6ba5e36f4f
20 changed files with 6311 additions and 6166 deletions
+23 -23
View File
@@ -6,13 +6,13 @@ on:
- '**' - '**'
env: env:
IMAGE: code.foss.global/hosttoday/ht-docker-node:npmci IMAGE: code.foss.global/host.today/ht-docker-node:szci
NPMCI_COMPUTED_REPOURL: https://${{gitea.repository_owner}}:${{secrets.GITEA_TOKEN}}@/${{gitea.repository}}.git SZCI_COMPUTED_REPOURL: https://${{gitea.repository_owner}}:${{secrets.GITEA_TOKEN}}@/${{gitea.repository}}.git
NPMCI_TOKEN_NPM: ${{secrets.NPMCI_TOKEN_NPM}} SZCI_TOKEN_NPM: ${{secrets.SZCI_TOKEN_NPM}}
NPMCI_TOKEN_NPM2: ${{secrets.NPMCI_TOKEN_NPM2}} SZCI_TOKEN_NPM2: ${{secrets.SZCI_TOKEN_NPM2}}
NPMCI_GIT_GITHUBTOKEN: ${{secrets.NPMCI_GIT_GITHUBTOKEN}} SZCI_GIT_GITHUBTOKEN: ${{secrets.SZCI_GIT_GITHUBTOKEN}}
NPMCI_LOGIN_DOCKER_GITEA: ${{ github.server_url }}|${{ gitea.repository_owner }}|${{ secrets.GITEA_TOKEN }} SZCI_LOGIN_DOCKER_GITEA: ${{ github.server_url }}|${{ gitea.repository_owner }}|${{ secrets.GITEA_TOKEN }}
NPMCI_LOGIN_DOCKER_DOCKERREGISTRY: ${{ secrets.NPMCI_LOGIN_DOCKER_DOCKERREGISTRY }} SZCI_LOGIN_DOCKER_DOCKERREGISTRY: ${{ secrets.SZCI_LOGIN_DOCKER_DOCKERREGISTRY }}
jobs: jobs:
security: security:
@@ -24,22 +24,22 @@ jobs:
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v3
- name: Install pnpm and npmci - name: Install pnpm and szci
run: | run: |
pnpm install -g pnpm pnpm install -g pnpm
pnpm install -g @ship.zone/npmci pnpm install -g @ship.zone/szci
npmci npm prepare szci npm prepare
- name: Audit production dependencies - name: Audit production dependencies
run: | run: |
npmci command npm config set registry https://registry.npmjs.org npm config set registry https://registry.npmjs.org
npmci command pnpm audit --audit-level=high --prod pnpm audit --audit-level=high --prod
continue-on-error: true continue-on-error: true
- name: Audit development dependencies - name: Audit development dependencies
run: | run: |
npmci command npm config set registry https://registry.npmjs.org npm config set registry https://registry.npmjs.org
npmci command pnpm audit --audit-level=high --dev pnpm audit --audit-level=high --dev
continue-on-error: true continue-on-error: true
test: test:
@@ -54,18 +54,18 @@ jobs:
- name: Prepare - name: Prepare
run: | run: |
pnpm install -g pnpm pnpm install -g pnpm
pnpm install -g @ship.zone/npmci pnpm install -g @ship.zone/szci
npmci npm prepare szci npm prepare
- name: Test stable - name: Test stable
run: | run: |
npmci node install stable szci node install stable
npmci npm install szci npm install
npmci npm test szci npm test
- name: Test build - name: Test build
run: | run: |
npmci npm prepare szci npm prepare
npmci node install stable szci node install stable
npmci npm install szci npm install
npmci command npm run build npm run build
+27 -41
View File
@@ -6,13 +6,13 @@ on:
- '*' - '*'
env: env:
IMAGE: code.foss.global/hosttoday/ht-docker-node:npmci IMAGE: code.foss.global/host.today/ht-docker-node:szci
NPMCI_COMPUTED_REPOURL: https://${{gitea.repository_owner}}:${{secrets.GITEA_TOKEN}}@/${{gitea.repository}}.git SZCI_COMPUTED_REPOURL: https://${{gitea.repository_owner}}:${{secrets.GITEA_TOKEN}}@/${{gitea.repository}}.git
NPMCI_TOKEN_NPM: ${{secrets.NPMCI_TOKEN_NPM}} SZCI_TOKEN_NPM: ${{secrets.SZCI_TOKEN_NPM}}
NPMCI_TOKEN_NPM2: ${{secrets.NPMCI_TOKEN_NPM2}} SZCI_TOKEN_NPM2: ${{secrets.SZCI_TOKEN_NPM2}}
NPMCI_GIT_GITHUBTOKEN: ${{secrets.NPMCI_GIT_GITHUBTOKEN}} SZCI_GIT_GITHUBTOKEN: ${{secrets.SZCI_GIT_GITHUBTOKEN}}
NPMCI_LOGIN_DOCKER_GITEA: ${{ github.server_url }}|${{ gitea.repository_owner }}|${{ secrets.GITEA_TOKEN }} SZCI_LOGIN_DOCKER_GITEA: ${{ github.server_url }}|${{ gitea.repository_owner }}|${{ secrets.GITEA_TOKEN }}
NPMCI_LOGIN_DOCKER_DOCKERREGISTRY: ${{ secrets.NPMCI_LOGIN_DOCKER_DOCKERREGISTRY }} SZCI_LOGIN_DOCKER_DOCKERREGISTRY: ${{ secrets.SZCI_LOGIN_DOCKER_DOCKERREGISTRY }}
jobs: jobs:
security: security:
@@ -27,19 +27,19 @@ jobs:
- name: Prepare - name: Prepare
run: | run: |
pnpm install -g pnpm pnpm install -g pnpm
pnpm install -g @ship.zone/npmci pnpm install -g @ship.zone/szci
npmci npm prepare szci npm prepare
- name: Audit production dependencies - name: Audit production dependencies
run: | run: |
npmci command npm config set registry https://registry.npmjs.org npm config set registry https://registry.npmjs.org
npmci command pnpm audit --audit-level=high --prod pnpm audit --audit-level=high --prod
continue-on-error: true continue-on-error: true
- name: Audit development dependencies - name: Audit development dependencies
run: | run: |
npmci command npm config set registry https://registry.npmjs.org npm config set registry https://registry.npmjs.org
npmci command pnpm audit --audit-level=high --dev pnpm audit --audit-level=high --dev
continue-on-error: true continue-on-error: true
test: test:
@@ -54,27 +54,27 @@ jobs:
- name: Prepare - name: Prepare
run: | run: |
pnpm install -g pnpm pnpm install -g pnpm
pnpm install -g @ship.zone/npmci pnpm install -g @ship.zone/szci
npmci npm prepare szci npm prepare
- name: Test stable - name: Test stable
run: | run: |
npmci node install stable szci node install stable
npmci npm install szci npm install
npmci npm test szci npm test
- name: Test build - name: Test build
run: | run: |
npmci node install stable szci node install stable
npmci npm install szci npm install
npmci command npm run build npm run build
release: release:
needs: test needs: test
if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/')
runs-on: ubuntu-latest runs-on: ubuntu-latest
container: container:
image: code.foss.global/hosttoday/ht-docker-dbase:npmci image: code.foss.global/host.today/ht-docker-node:dbase
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v3
@@ -82,25 +82,11 @@ jobs:
- name: Prepare - name: Prepare
run: | run: |
pnpm install -g pnpm pnpm install -g pnpm
pnpm install -g @ship.zone/npmci pnpm install -g @ship.zone/szci
- name: Release - name: Release
run: | run: |
npmci docker login szci docker login
npmci docker build szci docker build
npmci docker test szci docker test
# npmci docker push szci docker push
npmci docker push
metadata:
needs: test
if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/')
runs-on: ubuntu-latest
container:
image: ${{ env.IMAGE }}
steps:
- uses: actions/checkout@v3
- name: Trigger
run: npmci trigger
+55
View File
@@ -0,0 +1,55 @@
{
"@ship.zone/szci": {
"npmGlobalTools": [],
"npmAccessLevel": "public",
"npmRegistryUrl": "registry.npmjs.org"
},
"@git.zone/tsdocker": {
"registryRepoMap": {
"registry.gitlab.com": "losslessone/services/servezone/coreflow"
},
"buildArgEnvMap": {
"SZCI_TOKEN_NPM2": "SZCI_TOKEN_NPM2"
}
},
"@git.zone/cli": {
"projectType": "service",
"module": {
"githost": "code.foss.global",
"gitscope": "serve.zone",
"gitrepo": "coreflow",
"description": "A comprehensive tool for managing Docker-based applications and services, enabling efficient scaling, network management, and integration with cloud services.",
"npmPackagename": "@serve.zone/coreflow",
"license": "MIT",
"keywords": [
"Docker",
"Service scaling",
"Networking",
"Server management",
"Continuous deployment",
"Microservices",
"Load balancing",
"Task scheduling",
"Web services",
"Container management",
"Cluster configuration",
"DevOps",
"Cloud integration",
"Security",
"High availability",
"Multi-cloud",
"Service discovery",
"Distributed systems",
"Docker Swarm",
"Traffic management",
"Service provisioning",
"Monitoring",
"Observability",
"TypeScript"
]
}
},
"@git.zone/tsdoc": {
"legal": "\n## License and Legal Information\n\nThis repository contains open-source code that is licensed under the MIT License. A copy of the MIT License can be found in the [license](license) file within this repository. \n\n**Please note:** The MIT License does not grant permission to use the trade names, trademarks, service marks, or product names of the project, except as required for reasonable and customary use in describing the origin of the work and reproducing the content of the NOTICE file.\n\n### Trademarks\n\nThis project is owned and maintained by Task Venture Capital GmbH. The names and logos associated with Task Venture Capital GmbH and any related products or services are trademarks of Task Venture Capital GmbH and are not included within the scope of the MIT license granted herein. Use of these trademarks must comply with Task Venture Capital GmbH's Trademark Guidelines, and any usage must be approved in writing by Task Venture Capital GmbH.\n\n### Company Information\n\nTask Venture Capital GmbH \nRegistered at District court Bremen HRB 35230 HB, Germany\n\nFor any legal inquiries or if you require further information, please contact us via email at hello@task.vc.\n\nBy using this repository, you acknowledge that you have read this section, agree to comply with its terms, and understand that the licensing of the code does not imply endorsement by Task Venture Capital GmbH of any derivative works.\n"
}
}
+5 -5
View File
@@ -1,17 +1,17 @@
{ {
"json.schemas": [ "json.schemas": [
{ {
"fileMatch": ["/npmextra.json"], "fileMatch": ["/.smartconfig.json"],
"schema": { "schema": {
"type": "object", "type": "object",
"properties": { "properties": {
"npmci": { "@ship.zone/szci": {
"type": "object", "type": "object",
"description": "settings for npmci" "description": "settings for szci"
}, },
"gitzone": { "@git.zone/cli": {
"type": "object", "type": "object",
"description": "settings for gitzone", "description": "settings for git.zone CLI",
"properties": { "properties": {
"projectType": { "projectType": {
"type": "string", "type": "string",
+13 -13
View File
@@ -1,40 +1,40 @@
# gitzone dockerfile_service # gitzone dockerfile_service
## STAGE 1 // BUILD ## STAGE 1 // BUILD
FROM code.foss.global/host.today/ht-docker-node:npmci as node1 FROM code.foss.global/host.today/ht-docker-node:szci as node1
COPY ./ /app COPY ./ /app
WORKDIR /app WORKDIR /app
ARG NPMCI_TOKEN_NPM2 ARG SZCI_TOKEN_NPM2
ENV NPMCI_TOKEN_NPM2 $NPMCI_TOKEN_NPM2 ENV SZCI_TOKEN_NPM2 $SZCI_TOKEN_NPM2
RUN npmci npm prepare RUN szci npm prepare
RUN pnpm config set store-dir .pnpm-store RUN pnpm config set store-dir .pnpm-store
RUN rm -rf node_modules && pnpm install RUN rm -rf node_modules && pnpm install
RUN pnpm run build RUN pnpm run build
# gitzone dockerfile_service # gitzone dockerfile_service
## STAGE 2 // install production ## STAGE 2 // install production
FROM code.foss.global/host.today/ht-docker-node:npmci as node2 FROM code.foss.global/host.today/ht-docker-node:szci as node2
WORKDIR /app WORKDIR /app
COPY --from=node1 /app /app COPY --from=node1 /app /app
RUN rm -rf .pnpm-store RUN rm -rf .pnpm-store
ARG NPMCI_TOKEN_NPM2 ARG SZCI_TOKEN_NPM2
ENV NPMCI_TOKEN_NPM2 $NPMCI_TOKEN_NPM2 ENV SZCI_TOKEN_NPM2 $SZCI_TOKEN_NPM2
RUN npmci npm prepare RUN szci npm prepare
RUN pnpm config set store-dir .pnpm-store RUN pnpm config set store-dir .pnpm-store
RUN rm -rf node_modules/ && pnpm install --prod RUN rm -rf node_modules/ && pnpm install --prod
## STAGE 3 // rebuild dependencies for alpine ## STAGE 3 // rebuild dependencies for alpine
FROM code.foss.global/host.today/ht-docker-node:alpinenpmci as node3 FROM code.foss.global/host.today/ht-docker-node:alpine-szci as node3
WORKDIR /app WORKDIR /app
COPY --from=node2 /app /app COPY --from=node2 /app /app
ARG NPMCI_TOKEN_NPM2 ARG SZCI_TOKEN_NPM2
ENV NPMCI_TOKEN_NPM2 $NPMCI_TOKEN_NPM2 ENV SZCI_TOKEN_NPM2 $SZCI_TOKEN_NPM2
RUN npmci npm prepare RUN szci npm prepare
RUN pnpm config set store-dir .pnpm-store RUN pnpm config set store-dir .pnpm-store
RUN pnpm rebuild -r RUN pnpm rebuild -r
## STAGE 4 // the final production image with all dependencies in place ## STAGE 4 // the final production image with all dependencies in place
FROM code.foss.global/host.today/ht-docker-node:alpine as node4 FROM code.foss.global/host.today/ht-docker-node:alpine-node as node4
WORKDIR /app WORKDIR /app
COPY --from=node3 /app /app COPY --from=node3 /app /app
-50
View File
@@ -1,50 +0,0 @@
{
"npmci": {
"npmGlobalTools": [],
"npmAccessLevel": "public",
"npmRegistryUrl": "registry.npmjs.org",
"dockerRegistryRepoMap": {
"registry.gitlab.com": "losslessone/services/servezone/coreflow"
},
"dockerBuildargEnvMap": {
"NPMCI_TOKEN_NPM2": "NPMCI_TOKEN_NPM2"
}
},
"gitzone": {
"projectType": "service",
"module": {
"githost": "code.foss.global",
"gitscope": "serve.zone",
"gitrepo": "coreflow",
"description": "A comprehensive tool for managing Docker-based applications and services, enabling efficient scaling, network management, and integration with cloud services.",
"npmPackagename": "@serve.zone/coreflow",
"license": "MIT",
"keywords": [
"Docker",
"Service scaling",
"Networking",
"Server management",
"Continuous deployment",
"Microservices",
"Load balancing",
"Task scheduling",
"Web services",
"Container management",
"Cluster configuration",
"DevOps",
"Cloud integration",
"Security",
"High availability",
"Multi-cloud",
"Service discovery",
"Distributed systems",
"Docker Swarm",
"Traffic management",
"Service provisioning",
"Monitoring",
"Observability",
"TypeScript"
]
}
}
}
+28 -28
View File
@@ -53,36 +53,36 @@
}, },
"homepage": "https://gitlab.com/losslessone/services/servezone/coreflow#readme", "homepage": "https://gitlab.com/losslessone/services/servezone/coreflow#readme",
"devDependencies": { "devDependencies": {
"@git.zone/tsbuild": "^2.2.0", "@git.zone/tsbuild": "^4.4.0",
"@git.zone/tsrun": "^1.3.3", "@git.zone/tsrun": "^2.0.2",
"@git.zone/tstest": "^1.0.90", "@git.zone/tstest": "^3.6.3",
"@git.zone/tswatch": "^2.0.37", "@git.zone/tswatch": "^3.3.2"
"@push.rocks/tapbundle": "^5.5.3"
}, },
"dependencies": { "dependencies": {
"@api.global/typedrequest": "^3.1.10", "@api.global/typedrequest": "^3.3.0",
"@api.global/typedsocket": "^3.0.1", "@api.global/typedsocket": "^4.1.2",
"@apiclient.xyz/docker": "^1.3.0", "@apiclient.xyz/docker": "^5.1.2",
"@push.rocks/early": "^4.0.3", "@push.rocks/early": "^4.0.4",
"@push.rocks/lik": "^6.1.0", "@push.rocks/lik": "^6.4.0",
"@push.rocks/projectinfo": "^5.0.1", "@push.rocks/projectinfo": "^5.1.0",
"@push.rocks/qenv": "^6.1.0", "@push.rocks/qenv": "^6.1.3",
"@push.rocks/smartcli": "^4.0.11", "@push.rocks/smartcli": "^4.0.20",
"@push.rocks/smartdelay": "^3.0.1", "@push.rocks/smartdelay": "^3.0.5",
"@push.rocks/smartlog": "^3.0.7", "@push.rocks/smartlog": "^3.2.2",
"@push.rocks/smartnetwork": "3.0.2", "@push.rocks/smartnetwork": "4.7.0",
"@push.rocks/smartpath": "^5.0.18", "@push.rocks/smartpath": "^6.0.0",
"@push.rocks/smartpromise": "^4.0.4", "@push.rocks/smartpromise": "^4.2.3",
"@push.rocks/smartrequest": "^2.0.23", "@push.rocks/smartrequest": "^5.0.1",
"@push.rocks/smartrx": "^3.0.2", "@push.rocks/smartrx": "^3.0.10",
"@push.rocks/smartstate": "^2.0.19", "@push.rocks/smartserve": "^2.0.3",
"@push.rocks/smartstream": "^3.2.5", "@push.rocks/smartstate": "^2.3.0",
"@push.rocks/smartstring": "^4.0.15", "@push.rocks/smartstream": "^3.4.0",
"@push.rocks/taskbuffer": "^3.0.10", "@push.rocks/smartstring": "^4.1.0",
"@push.rocks/taskbuffer": "^8.0.2",
"@serve.zone/api": "^5.3.1", "@serve.zone/api": "^5.3.1",
"@serve.zone/interfaces": "^5.4.3", "@serve.zone/interfaces": "^5.4.4",
"@tsclass/tsclass": "^4.2.0", "@tsclass/tsclass": "^9.5.0",
"@types/node": "22.10.2" "@types/node": "25.6.0"
}, },
"private": true, "private": true,
"files": [ "files": [
@@ -94,7 +94,7 @@
"dist_ts_web/**/*", "dist_ts_web/**/*",
"assets/**/*", "assets/**/*",
"cli.js", "cli.js",
"npmextra.json", ".smartconfig.json",
"readme.md" "readme.md"
], ],
"browserslist": [ "browserslist": [
+5868 -5568
View File
File diff suppressed because it is too large Load Diff
+164 -294
View File
@@ -1,344 +1,214 @@
# @serve.zone/coreflow # @serve.zone/coreflow
A comprehensive solution for managing Docker and scaling applications across servers, handling tasks from service provisioning to network traffic management. Coreflow is the Docker Swarm reconciliation engine for the serve.zone platform. It runs inside a cluster, connects back to Cloudly, reads the desired cluster state, provisions the base runtime services, deploys workload services, and pushes reverse-proxy routing updates to Coretraffic.
## Install ## Issue Reporting and Security
To install @serve.zone/coreflow, you can use npm with the following command: For reporting bugs, issues, or security vulnerabilities, please visit [community.foss.global/](https://community.foss.global/). This is the central community hub for all issue reporting. Developers who sign and comply with our contribution agreement and go through identification can also get a [code.foss.global/](https://code.foss.global/) account to submit Pull Requests directly.
## What Coreflow Does
Coreflow sits between Cloudly and the Docker Swarm runtime:
- Connects to Cloudly over the `@serve.zone/api` WebSocket client and registers as `coreflow`.
- Authenticates with Cloudly using the cluster jump code token.
- Reads the cluster configuration and service definitions managed by Cloudly.
- Ensures base Docker networks exist for traffic and platform communication.
- Deploys and updates base services such as `coretraffic` and `corelog`.
- Deploys workload services from Cloudly image definitions.
- Creates Docker secrets from Cloudly secret bundles and attaches them to services.
- Builds reverse proxy configs from service domains, Docker task IPs, and Cloudly certificates.
- Sends routing updates to Coretraffic through the internal TypedSocket server.
- Reconciles state initially, on Cloudly config updates, and on a scheduled hourly task.
## Runtime Model
Coreflow is not a general-purpose application framework. It is a long-running cluster component designed to be started as a service or Docker container on a Docker Swarm manager node.
```text
Cloudly
-> Coreflow
-> local Docker Engine / Swarm
-> Coretraffic via internal TypedSocket
```
Coreflow never waits for Cloudly to call it. It connects outward to Cloudly, keeps the connection tagged as a `coreflow` client, and reacts to config update events from that connection.
## Requirements
- Node.js runtime compatible with the project toolchain.
- pnpm for dependency management.
- A Docker Swarm manager with access to the local Docker socket.
- A reachable Cloudly instance.
- A valid Cloudly jump code for the target cluster.
## Configuration
Coreflow reads runtime configuration through `@push.rocks/qenv` from the project environment and `.nogit` overlays.
Required environment variables:
| Variable | Purpose |
| --- | --- |
| `CLOUDLY_URL` | WebSocket/HTTP endpoint of the Cloudly control plane. |
| `JUMPCODE` | Cloudly token used to authenticate this Coreflow instance and tag the connection. |
Example `.nogit/.env`:
```env
CLOUDLY_URL=https://cloudly.example.com
JUMPCODE=cluster-machine-token
```
## Installation
This package is private and normally deployed as part of the serve.zone platform image pipeline.
```sh ```sh
npm install @serve.zone/coreflow --save pnpm install
pnpm run build
pnpm start
``` ```
Given that this is a private package, make sure you have access to the required npm registry and that you are authenticated properly. The package also exposes the `coreflow` binary after build through `dist/cli.js`, while the repository entrypoint is `cli.js`.
## Usage ## Programmatic Startup
Coreflow is designed as an advanced tool for managing Docker-based applications and services, enabling efficient scaling across servers, and handling multiple aspects of service provisioning and network traffic management. Below are examples and explanations to illustrate its capabilities and how you can leverage Coreflow in your infrastructure. Note that these examples are based on TypeScript and use ESM syntax. The CLI path imports `runCli()` from `dist_ts/index.js`. For direct TypeScript usage inside this repository, instantiate the main class and call `start()`:
### Prerequisites ```ts
import { Coreflow } from './ts/coreflow.classes.coreflow.js';
Before you start, ensure you have Docker and Docker Swarm configured in your environment as Coreflow operates on top of these technologies. Additionally, verify that your environment variables are properly set up for accessing Coreflow's functionalities. const coreflow = new Coreflow();
await coreflow.start();
### Setting Up Coreflow process.on('SIGTERM', async () => {
await coreflow.stop();
To get started, you need to import and initialize Coreflow within your application. Here's an example of how to do this in a TypeScript module:
```typescript
import { Coreflow } from '@serve.zone/coreflow';
// Initialize Coreflow
const coreflowInstance = new Coreflow();
// Start Coreflow
await coreflowInstance.start();
// Example: Add your logic here for handling Docker events
coreflowInstance.handleDockerEvents().then(() => {
console.log('Docker events are being handled.');
});
// Stop Coreflow when done
await coreflowInstance.stop();
```
In the above example:
- The Coreflow instance is initialized.
- Coreflow is started, which internally initializes various managers and connectors.
- The method `handleDockerEvents` is used to handle Docker events.
- Finally, Coreflow is stopped gracefully.
### Configuring Service Connections
Coreflow manages applications and services, often requiring direct interactions with other services like a database, message broker, or external API. Coreflow simplifies these connections through its configuration and service discovery layers.
```typescript
// Assuming coreflowInstance is already started as per previous examples
const serviceConnection = coreflowInstance.createServiceConnection({
serviceName: 'myDatabaseService',
servicePort: 3306,
});
serviceConnection.connect().then(() => {
console.log('Successfully connected to the service');
}); });
``` ```
### Scaling Your Application `start()` initializes components in this order:
Coreflow excels in scaling applications across multiple servers. This involves not just replicating services, but also ensuring they are properly networked, balanced, and monitored. 1. `InternalServer` starts a SmartServe server on port `3000` with TypedSocket support.
2. `CloudlyConnector` connects to Cloudly and resolves the cluster identity.
3. `ClusterManager` reads initial Cloudly config and subscribes to config updates.
4. `PlatformManager` starts its placeholder lifecycle hook.
5. `CoreflowTaskmanager` schedules the initial and recurring reconciliation tasks.
```typescript ## Reconciliation Flow
const scalingPolicy = {
serviceName: 'apiService',
replicaCount: 5, // Target number of replicas
maxReplicaCount: 10, // Maximum number of replicas
minReplicaCount: 2, // Minimum number of replicas
};
coreflowInstance.applyScalingPolicy(scalingPolicy).then(() => { The task manager coordinates the runtime work as a task chain:
console.log('Scaling policy applied successfully.');
```text
updateBaseServices
-> updateWorkloadServices
-> updateTrafficRouting
```
`updateBaseServices` ensures the base Docker networks and platform services exist:
- `sznwebgateway` for public web routing.
- `szncorechat` for internal base-service communication.
- `coretraffic` attached to both networks with host ports `80` and `443` mapped to its service ports.
- `corelog` attached to the internal network.
`updateWorkloadServices` fetches Cloudly services, skips non-workload service categories, pulls or imports the configured Docker image, creates a Docker secret from the assigned secret bundle, and creates or replaces the Docker service when an update is required.
`updateTrafficRouting` inspects Docker services on the web gateway network, resolves container IPs, fetches certificates for configured domains, and sends `IReverseProxyConfig[]` updates to Coretraffic with the `updateRouting` typed request.
The same base-service task is triggered when Cloudly emits a config update. After the initial delayed run, it is also scheduled hourly.
## Cloudly Integration
`CloudlyConnector` wraps `CloudlyApiClient` from `@serve.zone/api`:
```ts
this.cloudlyApiClient = new CloudlyApiClient({
registerAs: 'coreflow',
cloudlyUrl,
}); });
``` ```
In the above example: After connection, Coreflow authenticates with `JUMPCODE` and requests a stateful, tagged identity. That identity is then used to fetch cluster configuration and certificates.
- A scaling policy is defined with target, maximum, and minimum replica counts for the `apiService`. Coreflow depends on these Cloudly-side resources being present and valid:
- The `applyScalingPolicy` method of the Coreflow instance is used to apply this scaling policy.
### Managing Network Traffic - Cluster configuration for the authenticated identity.
- Service records with image, resource, domain, port, and secret bundle references.
- Image records pointing either to internal Cloudly image storage or an external registry.
- Secret bundles that can be flattened into environment key/value data.
- SSL certificates for all routed domains.
One of Coreflow's key features is its ability to manage network traffic, ensuring that it is efficiently distributed among various services based on load, priority, and other custom rules. ## Coretraffic Integration
```typescript Coreflow starts an internal SmartServe/TypedSocket server on port `3000`. Coretraffic is expected to connect to that server and tag its connection as `coretraffic`.
import { TrafficRule } from '@serve.zone/coreflow';
const rule: TrafficRule = { When routing changes are computed, Coreflow sends:
serviceName: 'webService',
externalPort: 80,
internalPort: 3000,
protocol: 'http',
};
coreflowInstance.applyTrafficRule(rule).then(() => { ```ts
console.log('Traffic rule applied successfully.'); const request = typedsocketServer.createTypedRequest('updateRouting', coretrafficConnection);
}); await request.fire({ reverseConfigs });
``` ```
In the above example: Each reverse config contains destination container IPs, destination ports, hostname, and certificate material.
- A traffic rule is defined for the `webService`, redirecting external traffic from port 80 to the service's internal port 3000. ## Docker Image Handling
- The `applyTrafficRule` method is used to enforce this rule.
### Continuous Deployment Coreflow supports two image sources from Cloudly image metadata:
Coreflow integrates continuous integration and deployment processes, allowing seamless updates and rollbacks for your services: - Internal Cloudly images: Coreflow pulls the requested image version stream from Cloudly and imports it into Docker from a tar stream.
- External registry images: Coreflow authenticates against the configured registry, pulls the image, and updates the local Docker image.
```typescript Invalid or incomplete image location data causes reconciliation to fail for that service, which is intentional: Coreflow only deploys services with complete desired-state data.
const deploymentConfig = {
serviceName: 'userAuthService',
image: 'myregistry.com/userauthservice:latest',
updatePolicy: 'rolling', // or "recreate"
};
coreflowInstance.deployService(deploymentConfig).then(() => { ## Development
console.log('Service deployed successfully.');
}); Common commands:
```sh
pnpm install
pnpm run build
pnpm test
pnpm run watch
``` ```
In the above example: Project layout:
- A deployment configuration is created for the `userAuthService` using the latest image from the specified registry. | Path | Purpose |
- The `deployService` method is then used to deploy the service using the specified update policy (e.g., rolling updates or recreating the service). | --- | --- |
| `ts/index.ts` | CLI startup wrapper and lifecycle entrypoints. |
| `ts/coreflow.classes.coreflow.ts` | Main coordinator class. |
| `ts/coreflow.connector.cloudlyconnector.ts` | Cloudly API connection and identity handling. |
| `ts/coreflow.classes.clustermanager.ts` | Docker network, service, secret, image, and routing reconciliation. |
| `ts/coreflow.classes.taskmanager.ts` | Buffered and scheduled reconciliation task chain. |
| `ts/coreflow.connector.coretrafficconnector.ts` | TypedSocket routing updates to Coretraffic. |
| `ts/coreflow.classes.internalserver.ts` | Internal SmartServe and TypedSocket server. |
### Observability and Monitoring ## Operational Notes
To keep track of your applications' health and performance, Coreflow provides tools for logging, monitoring, and alerting. - Coreflow expects Docker access through the local Docker socket by default.
- Reconciliation removes and recreates services when the Docker service reports that it needs an update.
- Workload services must be attached to `sznwebgateway` for routing to be generated.
- The current routing logic uses the first available container IP for a service.
- `PlatformManager` currently provides lifecycle hooks but does not reconcile platform services yet.
```typescript ## License and Legal Information
coreflowInstance.monitorService('webService').on('serviceHealthUpdate', (healthStatus) => {
console.log(`Received health update for webService: ${healthStatus}`);
});
```
In the above example: This repository contains open-source code licensed under the MIT License. A copy of the license can be found in the [license](./license) file.
- The `monitorService` method is used to monitor the health status of the `webService`. **Please note:** The MIT License does not grant permission to use the trade names, trademarks, service marks, or product names of the project, except as required for reasonable and customary use in describing the origin of the work and reproducing the content of the NOTICE file.
- When a health update event is received, it is logged to the console.
### Detailed Example: Setting Up and Managing Coreflow ### Trademarks
Here is a detailed example that covers various features, from setup to scaling and traffic management. This project is owned and maintained by Task Venture Capital GmbH. The names and logos associated with Task Venture Capital GmbH and any related products or services are trademarks of Task Venture Capital GmbH or third parties, and are not included within the scope of the MIT license granted herein.
#### Step 1: Initialize Coreflow Use of these trademarks must comply with Task Venture Capital GmbH's Trademark Guidelines or the guidelines of the respective third-party owners, and any usage must be approved in writing. Third-party trademarks used herein are the property of their respective owners and used only in a descriptive manner, e.g. for an implementation of an API or similar.
```typescript ### Company Information
import { Coreflow } from '@serve.zone/coreflow';
const coreflowInstance = new Coreflow(); Task Venture Capital GmbH
Registered at District Court Bremen HRB 35230 HB, Germany
async function initializeCoreflow() { For any legal inquiries or further information, please contact us via email at hello@task.vc.
await coreflowInstance.start();
console.log('Coreflow initialized.');
await manageServices();
}
initializeCoreflow().catch((error) => { By using this repository, you acknowledge that you have read this section, agree to comply with its terms, and understand that the licensing of the code does not imply endorsement by Task Venture Capital GmbH of any derivative works.
console.error('Error initializing Coreflow:', error);
});
```
#### Step 2: Handling Docker Events
```typescript
coreflowInstance.handleDockerEvents().then(() => {
console.log('Docker events are being handled.');
});
```
#### Step 3: Configuring and Connecting to a Service
```typescript
const serviceConnection = coreflowInstance.createServiceConnection({
serviceName: 'databaseService',
servicePort: 5432,
});
serviceConnection.connect().then(() => {
console.log('Successfully connected to the database service.');
});
```
#### Step 4: Applying a Scaling Policy
```typescript
const scalingPolicy = {
serviceName: 'microserviceA',
replicaCount: 3, // Starting with 3 replicas
maxReplicaCount: 10, // Allowing up to 10 replicas
minReplicaCount: 2, // Ensuring at least 2 replicas
};
coreflowInstance.applyScalingPolicy(scalingPolicy).then(() => {
console.log('Scaling policy applied for microserviceA');
});
```
#### Step 5: Managing Network Traffic
```typescript
import { TrafficRule } from '@serve.zone/coreflow';
const trafficRules: TrafficRule[] = [
{
serviceName: 'frontendService',
externalPort: 80,
internalPort: 3000,
protocol: 'http',
},
{
serviceName: 'apiService',
externalPort: 443,
internalPort: 4000,
protocol: 'https',
},
];
Promise.all(trafficRules.map((rule) => coreflowInstance.applyTrafficRule(rule))).then(() => {
console.log('Traffic rules applied.');
});
```
#### Step 6: Deploying a Service
```typescript
const deploymentConfig = {
serviceName: 'authService',
image: 'myregistry.com/authservice:latest',
updatePolicy: 'rolling', // Performing rolling updates
};
coreflowInstance.deployService(deploymentConfig).then(() => {
console.log('AuthService deployed successfully.');
});
```
#### Step 7: Monitoring a Service
```typescript
coreflowInstance.monitorService('frontendService').on('serviceHealthUpdate', (healthStatus) => {
console.log(`Health update for frontendService: ${healthStatus}`);
});
```
### Advanced Usage: Task Scheduling and Traffic Configuration
In more complex scenarios, you might want to leverage Coreflow's ability to schedule tasks and manage traffic configurations.
#### Scheduling Tasks
Coreflow supports scheduling updates and other tasks using the `taskBuffer` API.
```typescript
import { Task } from '@push.rocks/taskbuffer';
const checkinTask = new Task({
name: 'checkin',
buffered: true,
taskFunction: async () => {
console.log('Running checkin task...');
},
});
const taskManager = coreflowInstance.taskManager;
taskManager.addAndScheduleTask(checkinTask, '0 * * * * *'); // Scheduling task to run every minute
taskManager.start().then(() => {
console.log('Task manager started.');
});
```
#### Managing Traffic Routing
Coreflow can manage complex traffic routing scenarios, such as configuring reverse proxies for different services.
```typescript
import { CoretrafficConnector } from '@serve.zone/coreflow';
// Assume coreflowInstance is already started
const coretrafficConnector = new CoretrafficConnector(coreflowInstance);
const reverseProxyConfigs = [
{
hostName: 'example.com',
destinationIp: '192.168.1.100',
destinationPort: '3000',
privateKey: '<your-private-key>',
publicKey: '<your-public-key>',
},
{
hostName: 'api.example.com',
destinationIp: '192.168.1.101',
destinationPort: '4000',
privateKey: '<your-private-key>',
publicKey: '<your-public-key>',
},
];
coretrafficConnector.setReverseConfigs(reverseProxyConfigs).then(() => {
console.log('Reverse proxy configurations applied.');
});
```
### Integrating with Cloudly
Coreflow is designed to integrate seamlessly with Cloudly, a configuration management and orchestration tool.
#### Starting the Cloudly Connector
```typescript
const cloudlyConnector = coreflowInstance.cloudlyConnector;
cloudlyConnector.start().then(() => {
console.log('Cloudly connector started.');
});
```
#### Retrieving and Applying Configurations from Cloudly
```typescript
cloudlyConnector.getConfigFromCloudly().then((config) => {
console.log('Received configuration from Cloudly:', config);
coreflowInstance.clusterManager.provisionWorkloadServices(config).then(() => {
console.log('Workload services provisioned based on Cloudly config.');
});
});
```
### Conclusion
Coreflow is a powerful and flexible tool for managing Docker-based applications, scaling services, configuring network traffic, handling continuous deployments, and ensuring observability of your infrastructure. The examples provided aim to give a comprehensive understanding of how to use Coreflow in various scenarios, ensuring it meets your DevOps and CI/CD needs.
By leveraging Coreflow's rich feature set, you can optimize your infrastructure for high availability, scalability, and efficient operation across multiple servers and environments.
+10 -2
View File
@@ -1,22 +1,30 @@
import { tap, expect } from '@push.rocks/tapbundle'; import { tap, expect } from '@git.zone/tstest/tapbundle';
delete process.env.CLI_CALL; delete process.env.CLI_CALL;
// process.env.CLOUDLY_TESTURL = 'http://localhost:3000'; // process.env.CLOUDLY_TESTURL = 'http://localhost:3000';
import * as coreflow from '../ts/index.js'; import * as coreflow from '../ts/index.js';
const shouldRunStartupTest = Boolean(process.env.CLOUDLY_URL && process.env.JUMPCODE);
if (process.env.CI) { if (process.env.CI) {
tap.start(); tap.start();
process.exit(0); process.exit(0);
} }
tap.test('should startup correctly', async () => { tap.test('should startup correctly', async () => {
if (!shouldRunStartupTest) {
return;
}
await coreflow.runCli(); await coreflow.runCli();
}); });
tap.test('should end correctly', async (tools) => { tap.test('should end correctly', async (tools) => {
if (!shouldRunStartupTest) {
return;
}
await tools.delayFor(2000); await tools.delayFor(2000);
await coreflow.stop(); await coreflow.stop();
}); });
tap.start(); export default tap.start();
+84 -123
View File
@@ -4,9 +4,7 @@ import { Coreflow } from './coreflow.classes.coreflow.js';
export class ClusterManager { export class ClusterManager {
public coreflowRef: Coreflow; public coreflowRef: Coreflow;
public configSubscription: plugins.smartrx.rxjs.Subscription; public configSubscription?: plugins.smartrx.rxjs.Subscription;
public containerSubscription: plugins.smartrx.rxjs.Subscription;
public containerVersionSubscription: plugins.smartrx.rxjs.Subscription;
public readyDeferred = plugins.smartpromise.defer(); public readyDeferred = plugins.smartpromise.defer();
@@ -51,42 +49,34 @@ export class ClusterManager {
public async provisionBaseServices() { public async provisionBaseServices() {
// swarm should be enabled by lower level serverconfig package // swarm should be enabled by lower level serverconfig package
// get current situation // get current situation
const networks = await this.coreflowRef.dockerHost.getNetworks(); const networks = await this.coreflowRef.dockerHost.listNetworks();
logger.log('info', 'There are currently ' + networks.length + ' networks'); logger.log('info', 'There are currently ' + networks.length + ' networks');
for (const network of networks) { for (const network of networks) {
logger.log('info', 'Network: ' + network.Name); logger.log('info', 'Network: ' + network.Name);
} }
// make sure there is a network for the webgateway // make sure there is a network for the webgateway
let sznWebgatewayNetwork = await plugins.docker.DockerNetwork.getNetworkByName( let sznWebgatewayNetwork = await this.coreflowRef.dockerHost.getNetworkByName(
this.coreflowRef.dockerHost,
this.commonDockerData.networkNames.sznWebgateway, this.commonDockerData.networkNames.sznWebgateway,
); );
if (!sznWebgatewayNetwork) { if (!sznWebgatewayNetwork) {
logger.log('info', 'Creating network: ' + this.commonDockerData.networkNames.sznWebgateway); logger.log('info', 'Creating network: ' + this.commonDockerData.networkNames.sznWebgateway);
sznWebgatewayNetwork = await plugins.docker.DockerNetwork.createNetwork( sznWebgatewayNetwork = await this.coreflowRef.dockerHost.createNetwork({
this.coreflowRef.dockerHost, Name: this.commonDockerData.networkNames.sznWebgateway,
{ });
Name: this.commonDockerData.networkNames.sznWebgateway,
},
);
} else { } else {
logger.log('ok', 'sznWebgateway is already present'); logger.log('ok', 'sznWebgateway is already present');
} }
// corechat network so base services can talk to each other // corechat network so base services can talk to each other
let sznCorechatNetwork = await plugins.docker.DockerNetwork.getNetworkByName( let sznCorechatNetwork = await this.coreflowRef.dockerHost.getNetworkByName(
this.coreflowRef.dockerHost,
this.commonDockerData.networkNames.sznCorechat, this.commonDockerData.networkNames.sznCorechat,
); );
if (!sznCorechatNetwork) { if (!sznCorechatNetwork) {
sznCorechatNetwork = await plugins.docker.DockerNetwork.createNetwork( sznCorechatNetwork = await this.coreflowRef.dockerHost.createNetwork({
this.coreflowRef.dockerHost, Name: this.commonDockerData.networkNames.sznCorechat,
{ });
Name: this.commonDockerData.networkNames.sznCorechat,
},
);
} else { } else {
logger.log('ok', 'sznCorechat is already present'); logger.log('ok', 'sznCorechat is already present');
} }
@@ -99,30 +89,19 @@ export class ClusterManager {
// Images // Images
logger.log('info', `now updating docker images of base services...`); logger.log('info', `now updating docker images of base services...`);
const coretrafficImage = await plugins.docker.DockerImage.createFromRegistry( const coretrafficImage = await this.coreflowRef.dockerHost.createImageFromRegistry({
this.coreflowRef.dockerHost, imageUrl: 'code.foss.global/serve.zone/coretraffic',
{ });
creationObject: {
imageUrl: 'code.foss.global/serve.zone/coretraffic',
},
},
);
const corelogImage = await plugins.docker.DockerImage.createFromRegistry( const corelogImage = await this.coreflowRef.dockerHost.createImageFromRegistry({
this.coreflowRef.dockerHost, imageUrl: 'code.foss.global/serve.zone/corelog',
{ });
creationObject: {
imageUrl: 'code.foss.global/serve.zone/corelog',
},
},
);
// SERVICES // SERVICES
// lets deploy the base services // lets deploy the base services
// coretraffic // coretraffic
let coretrafficService: plugins.docker.DockerService; let coretrafficService: plugins.docker.DockerService | null;
coretrafficService = await plugins.docker.DockerService.getServiceByName( coretrafficService = await this.coreflowRef.dockerHost.getServiceByName(
this.coreflowRef.dockerHost,
'coretraffic', 'coretraffic',
); );
@@ -135,22 +114,19 @@ export class ClusterManager {
} }
if (!coretrafficService) { if (!coretrafficService) {
coretrafficService = await plugins.docker.DockerService.createService( coretrafficService = await this.coreflowRef.dockerHost.createService({
this.coreflowRef.dockerHost, image: coretrafficImage,
{ labels: {},
image: coretrafficImage, name: 'coretraffic',
labels: {}, networks: [sznCorechatNetwork, sznWebgatewayNetwork],
name: 'coretraffic', networkAlias: 'coretraffic',
networks: [sznCorechatNetwork, sznWebgatewayNetwork], ports: ['80:7999', '443:8000'],
networkAlias: 'coretraffic', secrets: [],
ports: ['80:7999', '443:8000'], resources: {
secrets: [], memorySizeMB: 1100,
resources: { volumeMounts: [],
memorySizeMB: 1100,
volumeMounts: [],
},
}, },
); });
} else { } else {
logger.log('ok', 'coretraffic service is already present'); logger.log('ok', 'coretraffic service is already present');
} }
@@ -158,9 +134,8 @@ export class ClusterManager {
await plugins.smartdelay.delayFor(10000); await plugins.smartdelay.delayFor(10000);
// corelog // corelog
let corelogService: plugins.docker.DockerService; let corelogService: plugins.docker.DockerService | null;
corelogService = await plugins.docker.DockerService.getServiceByName( corelogService = await this.coreflowRef.dockerHost.getServiceByName(
this.coreflowRef.dockerHost,
'corelog', 'corelog',
); );
@@ -172,22 +147,19 @@ export class ClusterManager {
} }
if (!corelogService) { if (!corelogService) {
corelogService = await plugins.docker.DockerService.createService( corelogService = await this.coreflowRef.dockerHost.createService({
this.coreflowRef.dockerHost, image: corelogImage,
{ labels: {},
image: corelogImage, name: 'corelog',
labels: {}, networks: [sznCorechatNetwork],
name: 'corelog', networkAlias: 'corelog',
networks: [sznCorechatNetwork], ports: [],
networkAlias: 'corelog', secrets: [],
ports: [], resources: {
secrets: [], memorySizeMB: 120,
resources: { volumeMounts: [],
memorySizeMB: 120,
volumeMounts: [],
},
}, },
); });
} else { } else {
logger.log('ok', 'corelog service is already present'); logger.log('ok', 'corelog service is already present');
} }
@@ -219,15 +191,11 @@ export class ClusterManager {
const imageStream = await containerImageFromCloudly.pullImageVersion( const imageStream = await containerImageFromCloudly.pullImageVersion(
serviceArgFromCloudly.data.imageVersion, serviceArgFromCloudly.data.imageVersion,
); );
localDockerImage = await plugins.docker.DockerImage.createFromTarStream( localDockerImage = await this.coreflowRef.dockerHost.createImageFromTarStream(
this.coreflowRef.dockerHost, plugins.smartstream.nodewebhelpers.convertWebReadableToNodeReadable(imageStream),
{ {
creationObject: { imageUrl: containerImageFromCloudly.id,
imageUrl: containerImageFromCloudly.id, imageTag: serviceArgFromCloudly.data.imageVersion,
imageTag: serviceArgFromCloudly.data.imageVersion,
},
tarStream:
plugins.smartstream.nodewebhelpers.convertWebReadableToNodeReadable(imageStream),
}, },
); );
} else if ( } else if (
@@ -240,42 +208,40 @@ export class ClusterManager {
); );
// Lets authenticate against the external registry // Lets authenticate against the external registry
// TODO: deduplicate this, check whether we are already authenticated // TODO: deduplicate this, check whether we are already authenticated
if (!externalRegistry.data.username || !externalRegistry.data.password) {
throw new Error(`External registry ${externalRegistry.id} is missing credentials`);
}
await this.coreflowRef.dockerHost.auth({ await this.coreflowRef.dockerHost.auth({
username: externalRegistry.data.username, username: externalRegistry.data.username,
password: externalRegistry.data.password, password: externalRegistry.data.password,
serveraddress: externalRegistry.data.url, serveraddress: externalRegistry.data.url,
}); });
localDockerImage = await plugins.docker.DockerImage.createFromRegistry( localDockerImage = await this.coreflowRef.dockerHost.createImageFromRegistry({
this.coreflowRef.dockerHost, imageUrl: containerImageFromCloudly.id,
{ imageTag: serviceArgFromCloudly.data.imageVersion,
creationObject: { });
imageUrl: containerImageFromCloudly.id,
imageTag: serviceArgFromCloudly.data.imageVersion,
},
},
);
await localDockerImage.pullLatestImageFromRegistry(); await localDockerImage.pullLatestImageFromRegistry();
} else { } else {
throw new Error('Invalid image location'); throw new Error('Invalid image location');
} }
let containerService = await plugins.docker.DockerService.getServiceByName( let containerService: plugins.docker.DockerService | null = await this.coreflowRef.dockerHost.getServiceByName(
this.coreflowRef.dockerHost,
serviceArgFromCloudly.data.name, serviceArgFromCloudly.data.name,
); );
this.coreflowRef.cloudlyConnector.cloudlyApiClient; this.coreflowRef.cloudlyConnector.cloudlyApiClient;
const dockerSecretName = `${serviceArgFromCloudly.id}_${serviceArgFromCloudly.data.name}_Secret`; const dockerSecretName = `${serviceArgFromCloudly.id}_${serviceArgFromCloudly.data.name}_Secret`;
let containerSecret = await plugins.docker.DockerSecret.getSecretByName( let containerSecret: plugins.docker.DockerSecret | undefined | null = await this.coreflowRef.dockerHost.getSecretByName(
this.coreflowRef.dockerHost,
dockerSecretName, dockerSecretName,
); );
// existing network to connect to // existing network to connect to
const webGatewayNetwork = await plugins.docker.DockerNetwork.getNetworkByName( const webGatewayNetwork = await this.coreflowRef.dockerHost.getNetworkByName(
this.coreflowRef.dockerHost,
this.commonDockerData.networkNames.sznWebgateway, this.commonDockerData.networkNames.sznWebgateway,
); );
if (!webGatewayNetwork) {
throw new Error(`Missing required Docker network ${this.commonDockerData.networkNames.sznWebgateway}`);
}
if (containerService && (await containerService.needsUpdate())) { if (containerService && (await containerService.needsUpdate())) {
await containerService.remove(); await containerService.remove();
@@ -287,8 +253,7 @@ export class ClusterManager {
} }
if (!containerService) { if (!containerService) {
containerSecret = await plugins.docker.DockerSecret.getSecretByName( containerSecret = await this.coreflowRef.dockerHost.getSecretByName(
this.coreflowRef.dockerHost,
dockerSecretName, dockerSecretName,
); );
if (containerSecret) { if (containerSecret) {
@@ -301,29 +266,23 @@ export class ClusterManager {
); );
// lets create the relevant stuff on the docker side // lets create the relevant stuff on the docker side
containerSecret = await plugins.docker.DockerSecret.createSecret( containerSecret = await this.coreflowRef.dockerHost.createSecret({
this.coreflowRef.dockerHost, name: dockerSecretName,
{ contentArg: JSON.stringify(await secretBundle.getFlatKeyValueObjectForEnvironment()),
name: dockerSecretName, labels: {},
contentArg: JSON.stringify(await secretBundle.getFlatKeyValueObjectForEnvironment()), version: serviceArgFromCloudly.data.imageVersion,
labels: {}, });
version: serviceArgFromCloudly.data.imageVersion, containerService = await this.coreflowRef.dockerHost.createService({
}, name: serviceArgFromCloudly.data.name,
); image: localDockerImage,
containerService = await plugins.docker.DockerService.createService( networks: [webGatewayNetwork],
this.coreflowRef.dockerHost, secrets: [containerSecret],
{ ports: [],
name: serviceArgFromCloudly.data.name, labels: {},
image: localDockerImage, resources: serviceArgFromCloudly.data.resources,
networks: [webGatewayNetwork], // TODO: introduce a clean name here, that is guaranteed to work with APIs.
secrets: [containerSecret], networkAlias: serviceArgFromCloudly.data.name,
ports: [], });
labels: {},
resources: serviceArgFromCloudly.data.resources,
// TODO: introduce a clean name here, that is guaranteed to work with APIs.
networkAlias: serviceArgFromCloudly.data.name,
},
);
} }
} }
@@ -346,11 +305,13 @@ export class ClusterManager {
public async updateTrafficRouting( public async updateTrafficRouting(
_clusterConfigArg: plugins.servezoneInterfaces.data.ICluster, _clusterConfigArg: plugins.servezoneInterfaces.data.ICluster,
) { ) {
const services = await this.coreflowRef.dockerHost.getServices(); const services = await this.coreflowRef.dockerHost.listServices();
const webGatewayNetwork = await plugins.docker.DockerNetwork.getNetworkByName( const webGatewayNetwork = await this.coreflowRef.dockerHost.getNetworkByName(
this.coreflowRef.dockerHost,
this.commonDockerData.networkNames.sznWebgateway, this.commonDockerData.networkNames.sznWebgateway,
); );
if (!webGatewayNetwork) {
throw new Error(`Missing required Docker network ${this.commonDockerData.networkNames.sznWebgateway}`);
}
const reverseProxyConfigs: plugins.servezoneInterfaces.data.IReverseProxyConfig[] = []; const reverseProxyConfigs: plugins.servezoneInterfaces.data.IReverseProxyConfig[] = [];
const pushProxyConfig = async ( const pushProxyConfig = async (
+1 -1
View File
@@ -11,7 +11,7 @@ import { PlatformManager } from './coreflow.classes.platformmanager.js';
* the main Coreflow class * the main Coreflow class
*/ */
export class Coreflow { export class Coreflow {
public typedrouter = new plugins.typedrequest.TypedRouter(); public typedrouter: plugins.typedrequest.TypedRouter = new plugins.typedrequest.TypedRouter();
public internalServer: InternalServer; public internalServer: InternalServer;
public serviceQenv: plugins.qenv.Qenv; public serviceQenv: plugins.qenv.Qenv;
+12 -2
View File
@@ -3,19 +3,29 @@ import * as plugins from './coreflow.plugins.js';
export class InternalServer { export class InternalServer {
public coreflowRef: Coreflow; public coreflowRef: Coreflow;
public typedsocketServer: plugins.typedsocket.TypedSocket; public smartServe: plugins.smartserve.SmartServe;
public typedsocketServer!: plugins.typedsocket.TypedSocket;
constructor(coreflowRefArg: Coreflow) { constructor(coreflowRefArg: Coreflow) {
this.coreflowRef = coreflowRefArg; this.coreflowRef = coreflowRefArg;
this.smartServe = new plugins.smartserve.SmartServe({
port: 3000,
websocket: {
typedRouter: this.coreflowRef.typedrouter,
},
});
} }
public async start() { public async start() {
this.typedsocketServer = await plugins.typedsocket.TypedSocket.createServer( await this.smartServe.start();
this.typedsocketServer = plugins.typedsocket.TypedSocket.fromSmartServe(
this.smartServe,
this.coreflowRef.typedrouter, this.coreflowRef.typedrouter,
); );
} }
public async stop() { public async stop() {
await this.typedsocketServer.stop(); await this.typedsocketServer.stop();
await this.smartServe.stop();
} }
} }
-5
View File
@@ -6,9 +6,6 @@ export class CoreflowTaskmanager {
public coreflowRef: Coreflow; public coreflowRef: Coreflow;
public taskmanager: plugins.taskbuffer.TaskManager; public taskmanager: plugins.taskbuffer.TaskManager;
// checkin tasks
public checkinTask: plugins.taskbuffer.Task;
// event based tasks // event based tasks
/** /**
* updates baseservices * updates baseservices
@@ -19,8 +16,6 @@ export class CoreflowTaskmanager {
// timed // timed
public updateTrafficRoutingTask: plugins.taskbuffer.Task; public updateTrafficRoutingTask: plugins.taskbuffer.Task;
public updateConfigTask: plugins.taskbuffer.Task;
constructor(coreflowRefArg: Coreflow) { constructor(coreflowRefArg: Coreflow) {
this.coreflowRef = coreflowRefArg; this.coreflowRef = coreflowRefArg;
this.taskmanager = new plugins.taskbuffer.TaskManager(); this.taskmanager = new plugins.taskbuffer.TaskManager();
+13 -5
View File
@@ -7,21 +7,29 @@ import { Coreflow } from './coreflow.classes.coreflow.js';
export class CloudlyConnector { export class CloudlyConnector {
public coreflowRef: Coreflow; public coreflowRef: Coreflow;
public cloudlyApiClient: plugins.servezoneApi.CloudlyApiClient; public cloudlyApiClient!: plugins.servezoneApi.CloudlyApiClient;
public coreflowJumpCode: string; public coreflowJumpCode!: string;
public identity: plugins.servezoneInterfaces.data.IIdentity; public identity!: plugins.servezoneInterfaces.data.IIdentity;
constructor(coreflowRefArg: Coreflow) { constructor(coreflowRefArg: Coreflow) {
this.coreflowRef = coreflowRefArg; this.coreflowRef = coreflowRefArg;
} }
public async start() { public async start() {
const cloudlyUrl = await this.coreflowRef.serviceQenv.getEnvVarOnDemand('CLOUDLY_URL');
if (!cloudlyUrl) {
throw new Error('Missing required CLOUDLY_URL environment variable');
}
this.cloudlyApiClient = new plugins.servezoneApi.CloudlyApiClient({ this.cloudlyApiClient = new plugins.servezoneApi.CloudlyApiClient({
registerAs: 'coreflow', registerAs: 'coreflow',
cloudlyUrl: await this.coreflowRef.serviceQenv.getEnvVarOnDemand('CLOUDLY_URL'), cloudlyUrl,
}); });
await this.cloudlyApiClient.start(); await this.cloudlyApiClient.start();
this.coreflowJumpCode = await this.coreflowRef.serviceQenv.getEnvVarOnDemand('JUMPCODE'); const coreflowJumpCode = await this.coreflowRef.serviceQenv.getEnvVarOnDemand('JUMPCODE');
if (!coreflowJumpCode) {
throw new Error('Missing required JUMPCODE environment variable');
}
this.coreflowJumpCode = coreflowJumpCode;
// get identity and tag connection (second parameter is true -> tags the connection) // get identity and tag connection (second parameter is true -> tags the connection)
this.identity = await this.cloudlyApiClient.getIdentityByToken(this.coreflowJumpCode, { this.identity = await this.cloudlyApiClient.getIdentityByToken(this.coreflowJumpCode, {
@@ -22,8 +22,8 @@ export class CoretrafficConnector {
const reactionRequest = const reactionRequest =
this.coreflowRef.internalServer.typedsocketServer.createTypedRequest<plugins.servezoneInterfaces.requests.routing.IRequest_Coreflow_Coretraffic_RoutingUpdate>( this.coreflowRef.internalServer.typedsocketServer.createTypedRequest<plugins.servezoneInterfaces.requests.routing.IRequest_Coreflow_Coretraffic_RoutingUpdate>(
'updateRouting', 'updateRouting',
await this.coreflowRef.internalServer.typedsocketServer.findTargetConnection( await this.coreflowRef.internalServer.typedsocketServer.findTargetConnectionByTag(
async (targetConnection) => targetConnection.alias === 'coretraffic', 'coretraffic',
), ),
); );
const response = await reactionRequest.fire({ const response = await reactionRequest.fire({
+1 -1
View File
@@ -1,4 +1,4 @@
import * as plugins from './coreflow.plugins.js'; import * as plugins from './coreflow.plugins.js';
import * as paths from './coreflow.paths.js'; import * as paths from './coreflow.paths.js';
export const projectInfoNpm = new plugins.projectinfo.ProjectinfoNpm(paths.packageDir); export const projectInfoNpm = await plugins.projectinfo.ProjectinfoNpm.create(paths.packageDir);
-1
View File
@@ -1,5 +1,4 @@
import * as plugins from './coreflow.plugins.js'; import * as plugins from './coreflow.plugins.js';
import { projectInfoNpm } from './coreflow.info.js';
export const logger = new plugins.smartlog.Smartlog({ export const logger = new plugins.smartlog.Smartlog({
logContext: { logContext: {
+3 -1
View File
@@ -15,7 +15,7 @@ import * as typedsocket from '@api.global/typedsocket';
export { typedrequest, typedsocket }; export { typedrequest, typedsocket };
// @pushrocks scope // @push.rocks scope
import * as lik from '@push.rocks/lik'; import * as lik from '@push.rocks/lik';
import * as projectinfo from '@push.rocks/projectinfo'; import * as projectinfo from '@push.rocks/projectinfo';
import * as qenv from '@push.rocks/qenv'; import * as qenv from '@push.rocks/qenv';
@@ -26,6 +26,7 @@ import * as smartnetwork from '@push.rocks/smartnetwork';
import * as smartpath from '@push.rocks/smartpath'; import * as smartpath from '@push.rocks/smartpath';
import * as smartpromise from '@push.rocks/smartpromise'; import * as smartpromise from '@push.rocks/smartpromise';
import * as smartrequest from '@push.rocks/smartrequest'; import * as smartrequest from '@push.rocks/smartrequest';
import * as smartserve from '@push.rocks/smartserve';
import * as smartrx from '@push.rocks/smartrx'; import * as smartrx from '@push.rocks/smartrx';
import * as smartstate from '@push.rocks/smartstate'; import * as smartstate from '@push.rocks/smartstate';
import * as smartstream from '@push.rocks/smartstream'; import * as smartstream from '@push.rocks/smartstream';
@@ -43,6 +44,7 @@ export {
smartpath, smartpath,
smartpromise, smartpromise,
smartrequest, smartrequest,
smartserve,
smartrx, smartrx,
smartstate, smartstate,
smartstream, smartstream,
+1
View File
@@ -7,6 +7,7 @@
"moduleResolution": "NodeNext", "moduleResolution": "NodeNext",
"esModuleInterop": true, "esModuleInterop": true,
"verbatimModuleSyntax": true, "verbatimModuleSyntax": true,
"ignoreDeprecations": "6.0",
"baseUrl": ".", "baseUrl": ".",
"paths": {} "paths": {}
}, },