Compare commits

16 Commits

14 changed files with 4433 additions and 1892 deletions

changelog.md (new file)

@@ -0,0 +1,40 @@
# Changelog

## 2024-12-20 - 1.2.2 - fix(core)
Refactored configuration management classes and improved service update handling

- Replaced SparkLocalConfig with SparkConfig for configuration management.
- Improved service handling and update check logic.
- Consolidated service definition and update logic for better maintainability.

## 2024-12-19 - 1.2.1 - fix(taskmanager)
Remove checkinSlackTask from SparkTaskManager for streamlined task management

- checkinSlackTask has been removed from the task manager class.
- Removal of the Slack check-in task lets the system focus on essential update tasks.

## 2024-12-18 - 1.2.0 - feat(core)
Initial commit of the Spark project with core functionalities for server management and integration with Docker.

- Add core functionalities for server maintenance and configuration.
- Integrate Docker for advanced task scheduling and service management.
- Provide CLI commands for daemon management and task execution.

## 2024-12-18 - 1.1.0 - feat(core)
Update package dependencies and improve API integration.

- Updated devDependencies and dependencies in package.json.
- Integrated new package @serve.zone/api.
- Updated identityArg in SparkLocalConfig for the userHomeDir kvStore.

## 2024-06-13 - 1.0.85 to 1.0.90 - Core Updates
Routine updates and fixes to core functionality.

- Updated core components across versions 1.0.85 to 1.0.90 for enhanced stability.

## 2024-05-08 - 1.0.82 to 1.0.85 - Core Enhancements
Consistent updates made to improve core operations.

- Updates focused on core functionality for improved performance, applied across versions 1.0.82 to 1.0.85.

npmextra.json

@@ -5,7 +5,7 @@
"githost": "gitlab.com",
"gitscope": "losslessone/services/initzone",
"gitrepo": "spark",
"description": "A tool to maintain and configure servers on the base OS level for the Servezone infrastructure.",
"description": "A comprehensive tool for maintaining and configuring servers, integrating with Docker and supporting advanced task scheduling, targeted at the Servezone infrastructure and used by @serve.zone/cloudly as a cluster node server system manager.",
"npmPackagename": "@losslessone_private/spark",
"license": "MIT",
"projectDomain": "https://lossless.one",
@@ -20,7 +20,15 @@
"continuous deployment",
"deployment automation",
"service orchestration",
"node.js"
"node.js",
"task scheduling",
"CLI",
"logging",
"server maintenance",
"serve.zone",
"cluster management",
"system manager",
"server configuration"
]
}
},
@@ -28,5 +36,8 @@
"npmGlobalTools": [],
"npmAccessLevel": "private",
"npmRegistryUrl": "verdaccio.lossless.one"
},
"tsdoc": {
"legal": "\n## License and Legal Information\n\nThis repository contains open-source code that is licensed under the MIT License. A copy of the MIT License can be found in the [license](license) file within this repository. \n\n**Please note:** The MIT License does not grant permission to use the trade names, trademarks, service marks, or product names of the project, except as required for reasonable and customary use in describing the origin of the work and reproducing the content of the NOTICE file.\n\n### Trademarks\n\nThis project is owned and maintained by Task Venture Capital GmbH. The names and logos associated with Task Venture Capital GmbH and any related products or services are trademarks of Task Venture Capital GmbH and are not included within the scope of the MIT license granted herein. Use of these trademarks must comply with Task Venture Capital GmbH's Trademark Guidelines, and any usage must be approved in writing by Task Venture Capital GmbH.\n\n### Company Information\n\nTask Venture Capital GmbH \nRegistered at District court Bremen HRB 35230 HB, Germany\n\nFor any legal inquiries or if you require further information, please contact us via email at hello@task.vc.\n\nBy using this repository, you acknowledge that you have read this section, agree to comply with its terms, and understand that the licensing of the code does not imply endorsement by Task Venture Capital GmbH of any derivative works.\n"
}
}

package.json

@@ -1,8 +1,8 @@
{
"name": "@serve.zone/spark",
"version": "1.0.86",
"version": "1.2.2",
"private": false,
"description": "A tool to maintain and configure servers on the base OS level for the Servezone infrastructure.",
"description": "A comprehensive tool for maintaining and configuring servers, integrating with Docker and supporting advanced task scheduling, targeted at the Servezone infrastructure and used by @serve.zone/cloudly as a cluster node server system manager.",
"main": "dist_ts/index.js",
"typings": "dist_ts/index.d.ts",
"author": "Task Venture Capital GmbH",
@@ -16,29 +16,30 @@
"spark": "./cli.js"
},
"devDependencies": {
"@git.zone/tsbuild": "^2.1.80",
"@git.zone/tsrun": "^1.2.39",
"@git.zone/tsbuild": "^2.2.0",
"@git.zone/tsrun": "^1.3.3",
"@git.zone/tstest": "^1.0.60",
"@push.rocks/tapbundle": "^5.0.4",
"@types/node": "20.14.2"
"@push.rocks/tapbundle": "^5.5.3",
"@types/node": "22.10.2"
},
"dependencies": {
"@apiclient.xyz/docker": "^1.2.2",
"@push.rocks/npmextra": "^5.0.17",
"@apiclient.xyz/docker": "^1.2.7",
"@push.rocks/npmextra": "^5.1.2",
"@push.rocks/projectinfo": "^5.0.1",
"@push.rocks/qenv": "^6.0.5",
"@push.rocks/qenv": "^6.1.0",
"@push.rocks/smartcli": "^4.0.11",
"@push.rocks/smartdaemon": "^2.0.3",
"@push.rocks/smartdelay": "^3.0.5",
"@push.rocks/smartfile": "^11.0.20",
"@push.rocks/smartfile": "^11.0.23",
"@push.rocks/smartjson": "^5.0.20",
"@push.rocks/smartlog": "^3.0.7",
"@push.rocks/smartlog-destination-local": "^9.0.0",
"@push.rocks/smartpath": "^5.0.5",
"@push.rocks/smartshell": "^3.0.5",
"@push.rocks/smartshell": "^3.2.2",
"@push.rocks/smartupdate": "^2.0.4",
"@push.rocks/taskbuffer": "^3.0.10",
"@serve.zone/interfaces": "^1.0.74"
"@serve.zone/api": "^4.5.1",
"@serve.zone/interfaces": "^4.5.1"
},
"files": [
"ts/**/*",
@@ -67,6 +68,14 @@
"continuous deployment",
"deployment automation",
"service orchestration",
"node.js"
"node.js",
"task scheduling",
"CLI",
"logging",
"server maintenance",
"serve.zone",
"cluster management",
"system manager",
"server configuration"
]
}

pnpm-lock.yaml (generated)

File diff suppressed because it is too large.

readme.hints.md

@@ -1,2 +1,2 @@
- this package is part of serve.zone
- it is used to maintain and configure servers on the base OS level
- make sure to mention that this package is part of serve.zone and **spark's main purpose** is to be used by @serve.zone/cloudly as a cluster node server system manager.
- it is used to maintain and configure servers on the base OS level

readme.md

@@ -1,31 +1,35 @@
# @serve.zone/spark
sparks the servezone services
A comprehensive tool for maintaining and configuring servers, integrating with Docker and supporting advanced task scheduling, targeted at the serve.zone infrastructure. It is mainly designed to be used by @serve.zone/cloudly as a cluster node server system manager, maintaining and configuring servers at the base OS level.
## Install
To install `@serve.zone/spark`, run the following command in your terminal:
```sh
npm install @serve.zone/spark --save
```
Ensure you have both Node.js and npm installed on your machine.
## Usage
### Getting Started
To use `@serve.zone/spark` in your project, you need to include and initiate it in your TypeScript project. Ensure you have TypeScript and the necessary build tools set up in your project.
First, import `@serve.zone/spark`:
```typescript
import { Spark } from '@serve.zone/spark';
```
### Initializing Spark
Create an instance of the `Spark` class to start using Spark. This instance will serve as the main entry point for interacting with the Spark functionalities.
Create an instance of the `Spark` class to start using Spark. This instance will serve as the main entry point for interacting with Spark functionalities.
```typescript
const sparkInstance = new Spark();
```
### Running Spark as a Daemon
To run Spark as a daemon, which is useful for maintaining and configuring servers on the base OS level, use the CLI feature bundled with Spark. This should ideally be handled outside of your code through a command-line terminal but can also be automated within your Node.js scripts if required.
To run Spark as a daemon, which is useful for maintaining and configuring servers at the OS level, you can use the CLI feature bundled with Spark. This should ideally be handled outside of your code through a command-line terminal but can also be automated within your Node.js scripts if required.
```shell
spark installdaemon
@@ -41,7 +45,7 @@ await sparkInstance.sparkUpdateManager.updateServices();
```
### Managing Configuration and Logging
Spark allows for extensive configuration and logging customization. Use the `SparkLocalConfig` and logging features to tailor Spark's operation to your needs.
Spark allows extensive configuration and logging customization. Use the `SparkLocalConfig` and logging features to tailor Spark's operation to your needs.
```typescript
// Accessing the local configuration
@@ -54,7 +58,7 @@ logger.log('info', 'Custom log message');
```
### Advanced Usage
`@serve.zone/spark` offers a suite of tools for detailed server and service management, including but not limited to task scheduling, daemon management, and service updates. Explore the `SparkTaskManager` for scheduling specific tasks, `SparkUpdateManager` for handling service updates, and `SparkLocalConfig` for configuration.
`@serve.zone/spark` offers tools for detailed server and service management, including but not limited to task scheduling, daemon management, and service updates. Explore the `SparkTaskManager` for scheduling specific tasks, `SparkUpdateManager` for handling service updates, and `SparkLocalConfig` for configuration.
### Example: Scheduling Custom Tasks
```typescript
@@ -73,8 +77,8 @@ sparkInstance.sparkTaskManager.taskmanager.addAndScheduleTask(myTask, '* * * * *
The example above creates a simple task that logs a message every second, demonstrating how to use Spark's task manager for custom scheduled tasks.
### Advanced Configuration
For advanced configurations, including Docker and service management:
### Detailed Service Management
For advanced configurations, including Docker and service management, you can utilize the following patterns:
- Use `SparkUpdateManager` to handle Docker image updates, service creation, and management.
- Access and modify Docker and service configurations through Spark's integration with configuration files and environment variables.
@@ -88,6 +92,209 @@ const newServiceDefinition = {...};
await sparkInstance.sparkUpdateManager.createService(newServiceDefinition);
```
### Conclusion
`@serve.zone/spark` provides a comprehensive toolkit for orchestrating and managing server environments and Docker-based services. By leveraging its CLI and programmatic interfaces, you can automate and streamline server operations, configuration, updates, and task scheduling, keeping your infrastructure responsive, up to date, and efficiently maintained.
### CLI Commands
Spark provides several CLI commands to interact with and manage the system services:
#### Installing Spark as a Daemon
```shell
spark installdaemon
```
Sets up Spark as a system service to maintain server configurations automatically.
#### Updating the Daemon
```shell
spark updatedaemon
```
Updates the daemon service if a new version is available.
#### Running Spark as a Daemon
```shell
spark asdaemon
```
Runs Spark in daemon mode, which is suitable for executing automated tasks.
#### Viewing Logs
```shell
spark logs
```
Views the logs of the Spark daemon service.
#### Cleaning Up Services
```shell
spark prune
```
Stops and cleans up all Docker services (stacks, networks, secrets, etc.) and prunes the Docker system.
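A rough manual equivalent using the plain Docker CLI might look like the sketch below; exactly which resources Spark removes is an assumption based on the description above.
```shell
# hedged sketch: approximate manual equivalent of `spark prune`
for stack in $(docker stack ls --format '{{.Name}}'); do
  docker stack rm "$stack"                            # remove every swarm stack
done
docker secret rm $(docker secret ls -q) 2>/dev/null   # drop remaining secrets
docker network prune -f                               # remove unused networks
docker system prune -af                               # prune images, containers, build cache
```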
### Programmatic Daemon Management
You can also manage the daemon programmatically:
```typescript
import { SmartDaemon } from '@push.rocks/smartdaemon';
import { Spark } from '@serve.zone/spark';
const sparkInstance = new Spark();
const smartDaemon = new SmartDaemon();
const startDaemon = async () => {
const sparkService = await smartDaemon.addService({
name: 'spark',
version: sparkInstance.sparkInfo.projectInfo.version,
command: 'spark asdaemon',
description: 'Spark daemon service',
workingDir: '/path/to/project',
});
await sparkService.save();
await sparkService.enable();
await sparkService.start();
};
const updateDaemon = async () => {
const sparkService = await smartDaemon.addService({
name: 'spark',
version: sparkInstance.sparkInfo.projectInfo.version,
command: 'spark asdaemon',
description: 'Spark daemon service',
workingDir: '/path/to/project',
});
await sparkService.reload();
};
startDaemon();
updateDaemon();
```
This illustrates how to initiate and update the Spark daemon using the `SmartDaemon` class from `@push.rocks/smartdaemon`.
### Configuration Management
Extensive configuration management is possible through the `SparkLocalConfig` and other configuration classes. This feature allows you to make your application's behavior adaptable based on different environments and requirements.
```typescript
// Example on setting local config
import { SparkLocalConfig } from '@serve.zone/spark';
const localConfig = new SparkLocalConfig(sparkInstance);
await localConfig.kvStore.set('someKey', 'someValue');
// Retrieving a value from local config
const someConfigValue = await localConfig.kvStore.get('someKey');
console.log(someConfigValue); // Outputs: someValue
```
### Detailed Log Management
Logging is a crucial aspect of any automation tool, and `@serve.zone/spark` offers rich logging functionality through its built-in logging library.
```typescript
import { logger, Spark } from '@serve.zone/spark';
const sparkInstance = new Spark();
logger.log('info', 'Spark instance created.');
// Using logger in various levels of severity
logger.log('debug', 'This is a debug message');
logger.log('warn', 'This is a warning message');
logger.log('error', 'This is an error message');
logger.log('ok', 'This is a success message');
```
### Real-World Scenarios
#### Automated System Update and Restart
In real-world scenarios, you might want to automate system updates and reboots to ensure your services are running the latest security patches and features.
```typescript
import { Spark } from '@serve.zone/spark';
import { SmartShell } from '@push.rocks/smartshell';
const sparkInstance = new Spark();
const shell = new SmartShell({ executor: 'bash' });
const updateAndRestart = async () => {
await shell.exec('apt-get update && apt-get upgrade -y');
console.log('System updated.');
await shell.exec('reboot');
};
sparkInstance.sparkTaskManager.taskmanager.addAndScheduleTask(
{ name: 'updateAndRestart', taskFunction: updateAndRestart },
'0 3 * * 7' // Every Sunday at 3 AM
);
```
This example demonstrates creating and scheduling a task to update and restart the server every Sunday at 3 AM using Spark's task management capabilities.
#### Integrating with Docker for Service Deployment
Spark's tight integration with Docker makes it an excellent tool for deploying containerized applications across your infrastructure.
```typescript
import { Spark } from '@serve.zone/spark';
import { DockerHost } from '@apiclient.xyz/docker';
const sparkInstance = new Spark();
const dockerHost = new DockerHost({});
const deployService = async () => {
const image = await dockerHost.pullImage('my-docker-repo/my-service:latest');
const newService = await dockerHost.createService({
name: 'my-service',
image,
ports: ['80:8080'],
environmentVariables: {
NODE_ENV: 'production',
},
});
console.log(`Service ${newService.name} deployed.`);
};
deployService();
```
This example demonstrates how to pull a Docker image and deploy it as a new service in your infrastructure using Spark's Docker integration.
### Managing Secrets
Managing secrets and sensitive data is crucial in any configuration and automation tool. Spark's integration with Docker allows you to handle secrets securely.
```typescript
import { Spark, SparkUpdateManager } from '@serve.zone/spark';
import { DockerSecret } from '@apiclient.xyz/docker';
const sparkInstance = new Spark();
const updateManager = new SparkUpdateManager(sparkInstance);
const createDockerSecret = async () => {
const secret = await DockerSecret.createSecret(updateManager.dockerHost, {
name: 'dbPassword',
contentArg: 'superSecretPassword',
});
console.log(`Secret ${secret.Spec.Name} created.`);
};
createDockerSecret();
```
This example shows how to create a Docker secret using Spark's `SparkUpdateManager` class, ensuring that sensitive information is securely stored and managed.
## License and Legal Information
This repository contains open-source code that is licensed under the MIT License. A copy of the MIT License can be found in the [license](license) file within this repository.
**Please note:** The MIT License does not grant permission to use the trade names, trademarks, service marks, or product names of the project, except as required for reasonable and customary use in describing the origin of the work and reproducing the content of the NOTICE file.
### Trademarks
This project is owned and maintained by Task Venture Capital GmbH. The names and logos associated with Task Venture Capital GmbH and any related products or services are trademarks of Task Venture Capital GmbH and are not included within the scope of the MIT license granted herein. Use of these trademarks must comply with Task Venture Capital GmbH's Trademark Guidelines, and any usage must be approved in writing by Task Venture Capital GmbH.
### Company Information
Task Venture Capital GmbH
Registered at District court Bremen HRB 35230 HB, Germany
For any legal inquiries or if you require further information, please contact us via email at hello@task.vc.
By using this repository, you acknowledge that you have read this section, agree to comply with its terms, and understand that the licensing of the code does not imply endorsement by Task Venture Capital GmbH of any derivative works.

ts/00_commitinfo_data.ts

@@ -1,8 +1,8 @@
/**
* autocreated commitinfo by @pushrocks/commitinfo
* autocreated commitinfo by @push.rocks/commitinfo
*/
export const commitinfo = {
name: '@serve.zone/spark',
version: '1.0.86',
description: 'A tool to maintain and configure servers on the base OS level for the Servezone infrastructure.'
version: '1.2.2',
description: 'A comprehensive tool for maintaining and configuring servers, integrating with Docker and supporting advanced task scheduling, targeted at the Servezone infrastructure and used by @serve.zone/cloudly as a cluster node server system manager.'
}

ts/spark.classes.config.ts

@@ -3,7 +3,12 @@ import { Spark } from './index.js';
export class SparkConfig {
public sparkRef: Spark;
public kvStore: plugins.npmextra.KeyValueStore;
constructor(sparkRefArg: Spark) {
this.sparkRef = sparkRefArg;
this.kvStore = new plugins.npmextra.KeyValueStore({
typeArg: 'userHomeDir',
identityArg: 'servezone_spark',
});
}
}
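For context, a hedged usage sketch: the `writeKey`/`readKey` accessors used on this store by the CLI changes further below would be driven like this.
```typescript
// hedged sketch: persisting and reading the daemon mode through the new
// SparkConfig kvStore (mirrors the calls made in the CLI diff below)
import { Spark } from '@serve.zone/spark';

const sparkInstance = new Spark();
await sparkInstance.sparkConfig.kvStore.writeKey('mode', 'cloudly');
const mode = await sparkInstance.sparkConfig.kvStore.readKey('mode');
console.log(mode); // 'cloudly'
```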

ts/spark.classes.localconfig.ts (deleted)

@@ -1,15 +0,0 @@
import * as plugins from './spark.plugins.js';
import { Spark } from './index.js';
export class SparkLocalConfig {
public sparkRef: Spark;
private kvStore: plugins.npmextra.KeyValueStore;
constructor(sparkRefArg: Spark) {
this.sparkRef = sparkRefArg;
this.kvStore = new plugins.npmextra.KeyValueStore({
typeArg: 'userHomeDir',
identityArg: 'spark',
});
}
}

ts/spark.classes.spark.ts

@@ -1,23 +1,23 @@
import * as plugins from './spark.plugins.js';
import { SparkTaskManager } from './spark.classes.taskmanager.js';
import { SparkInfo } from './spark.classes.info.js';
import { SparkUpdateManager } from './spark.classes.updatemanager.js';
import { SparkServicesManager } from './spark.classes.updatemanager.js';
import { logger } from './spark.logging.js';
import { SparkLocalConfig } from './spark.classes.localconfig.js';
import { SparkConfig } from './spark.classes.config.js';
export class Spark {
public smartdaemon: plugins.smartdaemon.SmartDaemon;
public sparkLocalConfig: SparkLocalConfig;
public sparkConfig: SparkConfig;
public sparkTaskManager: SparkTaskManager;
public sparkInfo: SparkInfo;
public sparkUpdateManager: SparkUpdateManager;
public sparkUpdateManager: SparkServicesManager;
constructor() {
this.smartdaemon = new plugins.smartdaemon.SmartDaemon();
this.sparkLocalConfig = new SparkLocalConfig(this);
this.sparkConfig = new SparkConfig(this);
this.sparkInfo = new SparkInfo(this);
this.sparkTaskManager = new SparkTaskManager(this);
this.sparkUpdateManager = new SparkUpdateManager(this);
this.sparkUpdateManager = new SparkServicesManager(this);
}
public async daemonStart() {

ts/spark.classes.taskmanager.ts

@@ -8,25 +8,14 @@ export class SparkTaskManager {
public taskmanager: plugins.taskbuffer.TaskManager;
// tasks
public checkinSlackTask: plugins.taskbuffer.Task;
public updateSpark: plugins.taskbuffer.Task;
public updateHost: plugins.taskbuffer.Task;
public updateCloudly: plugins.taskbuffer.Task;
public updateServices: plugins.taskbuffer.Task;
constructor(sparkRefArg: Spark) {
this.sparkRef = sparkRefArg;
this.taskmanager = new plugins.taskbuffer.TaskManager();
// checkinOnSlack
this.checkinSlackTask = new plugins.taskbuffer.Task({
name: 'checkinSlack',
taskFunction: async () => {
logger.log('ok', 'running hourly checkin now');
logger.log('info', 'completed hourly checkin');
},
});
// updateSpark
this.updateSpark = new plugins.taskbuffer.Task({
name: 'updateSpark',
@@ -67,7 +56,10 @@
},
});
this.updateCloudly = new plugins.taskbuffer.Task({
/**
* only being run when mode is cloudly
*/
this.updateServices = new plugins.taskbuffer.Task({
name: 'updateCloudly',
taskFunction: async () => {
logger.log('info', 'now running updateCloudly task');
@@ -80,10 +72,9 @@
* start the taskmanager
*/
public async start() {
this.taskmanager.addAndScheduleTask(this.checkinSlackTask, '0 0 * * * *');
this.taskmanager.addAndScheduleTask(this.updateServices, '30 */2 * * * *');
this.taskmanager.addAndScheduleTask(this.updateSpark, '0 * * * * *');
this.taskmanager.addAndScheduleTask(this.updateHost, '0 0 0 * * *');
this.taskmanager.addAndScheduleTask(this.updateCloudly, '30 */2 * * * *');
this.taskmanager.start();
}
@@ -91,10 +82,9 @@
* stops the taskmanager
*/
public async stop() {
this.taskmanager.descheduleTask(this.checkinSlackTask);
this.taskmanager.descheduleTask(this.updateSpark);
this.taskmanager.descheduleTask(this.updateHost);
this.taskmanager.descheduleTask(this.updateCloudly);
this.taskmanager.descheduleTask(this.updateServices);
this.taskmanager.stop();
}
}
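For reference, the schedules above use taskbuffer's six-field cron syntax with a leading seconds field: `'30 */2 * * * *'` fires at second 30 of every second minute, `'0 * * * * *'` at the start of every minute, and `'0 0 0 * * *'` at midnight. A minimal standalone sketch of the same pattern:
```typescript
// minimal sketch of the six-field cron scheduling used above,
// using the same @push.rocks/taskbuffer APIs this class relies on
import * as taskbuffer from '@push.rocks/taskbuffer';

const taskmanager = new taskbuffer.TaskManager();
const demoTask = new taskbuffer.Task({
  name: 'demoTask',
  taskFunction: async () => {
    console.log('fires at second 30 of every second minute');
  },
});
taskmanager.addAndScheduleTask(demoTask, '30 */2 * * * *');
taskmanager.start();
```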

ts/spark.classes.updatemanager.ts

@@ -3,10 +3,26 @@ import * as paths from './spark.paths.js';
import { Spark } from './spark.classes.spark.js';
import { logger } from './spark.logging.js';
export class SparkUpdateManager {
/**
* this class takes care of updating the services that are managed by spark
*/
export class SparkServicesManager {
public sparkRef: Spark;
public dockerHost: plugins.docker.DockerHost;
public smartupdate: plugins.smartupdate.SmartUpdate;
/**
* the services that are managed by spark
*/
services: Array<{
name: string;
image: string;
url: string;
port: string;
environment: string;
secretJson: any;
}> = [];
constructor(sparkrefArg: Spark) {
this.sparkRef = sparkrefArg;
this.dockerHost = new plugins.docker.DockerHost({});
@@ -21,109 +37,58 @@ export class SparkUpdateManager {
}
public async updateServices() {
if (
plugins.smartfile.fs.isDirectory(plugins.path.join(paths.homeDir, 'serve.zone/spark')) &&
(await plugins.smartfile.fs.fileExists(
plugins.path.join(paths.homeDir, 'serve.zone/spark/spark.json')
))
) {
const services: Array<{
name: string;
image: string;
url: string;
port: string;
environment: string;
secretJson: any;
}> = [];
// lets add coreflow
services.push({
name: `coreflow`,
image: `code.foss.global/serve.zone/coreflow`,
url: `coreflow`,
environment: `production`,
port: `3000`,
secretJson: {
SERVEZONE_PORT: `3000`,
SERVEZONE_ENVIRONMENT: `production`,
},
});
services.push({
name: `coretraffic`,
image: `code.foss.global/serve.zone/coretraffic`,
url: `coreflow`,
environment: `production`,
port: `3000`,
secretJson: {
SERVEZONE_PORT: `3000`,
SERVEZONE_ENVIRONMENT: `production`,
},
});
services.push({
name: `corelog`,
image: `code.foss.global/serve.zone/corelog`,
url: `coreflow`,
environment: `production`,
port: `3000`,
secretJson: {
SERVEZONE_PORT: `3000`,
SERVEZONE_ENVIRONMENT: `production`,
},
});
// lets add coretraffic
for (const service of services) {
const existingService = await plugins.docker.DockerService.getServiceByName(
this.dockerHost,
service.name
);
const existingServiceSecret = await plugins.docker.DockerSecret.getSecretByName(
this.dockerHost,
`${service.name}Secret`
);
if (existingService) {
const needsUpdate: boolean = await existingService.needsUpdate();
if (!needsUpdate) {
logger.log('info', `not needing update.`);
// we simply return here to end the functions
return;
}
logger.log('ok', `${service.name} needs to be updated!`);
await existingService.remove();
await existingServiceSecret.remove();
for (const service of this.services) {
const existingService = await plugins.docker.DockerService.getServiceByName(
this.dockerHost,
service.name
);
const existingServiceSecret = await plugins.docker.DockerSecret.getSecretByName(
this.dockerHost,
`${service.name}Secret`
);
if (existingService) {
const needsUpdate: boolean = await existingService.needsUpdate();
if (!needsUpdate) {
logger.log('info', `service >>${service.name}<< not needing update.`);
// we simply return here to end the functions
return;
}
if (!existingService && existingServiceSecret) {
await existingServiceSecret.remove();
}
const newServiceImage = await plugins.docker.DockerImage.createFromRegistry(
this.dockerHost,
{
creationObject: {
imageUrl: service.image,
},
}
);
const newServiceSecret = await plugins.docker.DockerSecret.createSecret(this.dockerHost, {
name: `${service.name}Secret`,
contentArg: plugins.smartjson.stringify(service.secretJson),
version: await newServiceImage.getVersion(),
labels: {},
});
const newService = await plugins.docker.DockerService.createService(this.dockerHost, {
image: newServiceImage,
labels: {},
name: service.name,
networkAlias: service.name,
networks: [],
secrets: [newServiceSecret],
ports: [`${service.port}:${service.secretJson.SERVEZONE_PORT}`],
});
logger.log('ok', `updated service >>${newService.Spec.Name}<<!`);
// continuing here means we need to update the service
logger.log('ok', `${service.name} needs to be updated!`);
await existingService.remove();
await existingServiceSecret.remove();
}
logger.log('success', `updated ${services.length} services!`);
if (!existingService && existingServiceSecret) {
await existingServiceSecret.remove();
}
const newServiceImage = await plugins.docker.DockerImage.createFromRegistry(
this.dockerHost,
{
creationObject: {
imageUrl: service.image,
},
}
);
const newServiceSecret = await plugins.docker.DockerSecret.createSecret(this.dockerHost, {
name: `${service.name}Secret`,
contentArg: plugins.smartjson.stringify(service.secretJson),
version: await newServiceImage.getVersion(),
labels: {},
});
const newService = await plugins.docker.DockerService.createService(this.dockerHost, {
image: newServiceImage,
labels: {},
name: service.name,
networkAlias: service.name,
networks: [],
secrets: [newServiceSecret],
ports: [`${service.port}:${service.secretJson.SERVEZONE_PORT}`],
});
logger.log('ok', `updated service >>${newService.Spec.Name}<<!`);
}
logger.log('success', `updated ${this.services.length} services!`);
}
}
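With service definitions now living on the `services` array instead of being hard-coded in `updateServices()`, callers register entries before triggering an update. A hedged sketch mirroring the coreflow entry pushed in the CLI diff below:
```typescript
// hedged sketch: register a service definition, then run the update pass
// (field values mirror the coreflow entry pushed in the CLI diff below)
import { Spark } from '@serve.zone/spark';

const sparkInstance = new Spark();
sparkInstance.sparkUpdateManager.services.push({
  name: 'coreflow',
  image: 'code.foss.global/serve.zone/coreflow',
  url: 'coreflow',
  environment: 'production',
  port: '3000',
  secretJson: {
    SERVEZONE_PORT: '3000',
    SERVEZONE_ENVIRONMENT: 'production',
  },
});
await sparkInstance.sparkUpdateManager.updateServices();
```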

ts/spark.cli.ts

@@ -45,6 +45,50 @@ export const runCli = async () => {
smartcliInstance.addCommand('asdaemon').subscribe(async (argvArg) => {
logger.log('success', 'looks like we are running as daemon now');
logger.log('info', 'starting spark in daemon mode');
// lets determine the mode if specified
let mode = argvArg.mode;
if (mode === 'cloudly') {
await sparkInstance.sparkConfig.kvStore.writeKey('mode', 'cloudly');
} else if (mode === 'coreflow-node') {
await sparkInstance.sparkConfig.kvStore.writeKey('mode', 'coreflow-node');
} else if (mode) {
logger.log('error', 'unknown mode specified');
process.exit(1);
} else {
// mode is not specified by cli, lets get it from the config
mode = await sparkInstance.sparkConfig.kvStore.readKey('mode');
}
if (!mode) {
logger.log('error', 'no mode specified by either cli or config');
process.exit(1);
} else if (mode === 'cloudly') {
sparkInstance.sparkUpdateManager.services.push({
name: `coreflow`,
image: `code.foss.global/serve.zone/cloudly`,
url: `cloudly`,
environment: `production`,
port: `3000`,
secretJson: {
SERVEZONE_PORT: `3000`,
SERVEZONE_ENVIRONMENT: `production`,
},
});
} else if (mode === 'coreflow-node') {
sparkInstance.sparkUpdateManager.services.push({
name: `coreflow`,
image: `code.foss.global/serve.zone/coreflow`,
url: `coreflow`,
environment: `production`,
port: `3000`,
secretJson: {
SERVEZONE_PORT: `3000`,
SERVEZONE_ENVIRONMENT: `production`,
},
});
}
await sparkInstance.daemonStart();
});
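Assuming smartcli's conventional argv parsing (`argvArg.mode` implying a `--mode` flag is an assumption), the new mode handling would be exercised roughly like this:
```shell
# hedged usage sketch: the --mode flag name is inferred from argvArg.mode
spark asdaemon --mode cloudly        # persist the mode and register the cloudly service
spark asdaemon --mode coreflow-node  # persist the mode and register the coreflow service
spark asdaemon                       # no flag: reuse the mode stored in the kvStore
```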

ts/spark.plugins.ts

@@ -5,8 +5,9 @@
// @serve.zone scope
import * as servezoneInterfaces from '@serve.zone/interfaces';
import * as servezoneApi from '@serve.zone/api';
export { servezoneInterfaces };
export { servezoneInterfaces, servezoneApi };
// @apiclient.xyz scope
import * as docker from '@apiclient.xyz/docker';