Compare commits


10 Commits

14 changed files with 4207 additions and 1887 deletions

changelog.md (new file)

@@ -0,0 +1,40 @@
# Changelog
## 2024-12-20 - 1.2.2 - fix(core)
Refactored configuration management classes and improved service update handling
- Replaced SparkLocalConfig with SparkConfig for configuration management.
- Improved service handling and update check logic.
- Consolidated service definition and update logic for better maintainability.
## 2024-12-19 - 1.2.1 - fix(taskmanager)
Remove checkinSlackTask from SparkTaskManager for streamlined task management
- checkinSlackTask has been removed from the task manager class.
- Removal of the slack check-in task allows the system to focus on essential update tasks.
## 2024-12-18 - 1.2.0 - feat(core)
Initial commit of the Spark project with core functionalities for server management and integration with Docker.
- Add core functionalities for server maintenance and configuration.
- Integrate Docker for advanced task scheduling and service management.
- Provide CLI commands for daemon management and task execution.
## 2024-12-18 - 1.1.0 - feat(core)
Update package dependencies and improve API integration.
- Updated devDependencies and dependencies in package.json.
- Integrated new package @serve.zone/api.
- Updated identityArg in SparkLocalConfig for userHomeDir kvStore.
## 2024-06-13 - 1.0.85 to 1.0.90 - Core Updates
Routine updates and fixes to core functionality.
- Updated core component throughout versions for enhanced stability
- Incremental improvements applied on versions 1.0.85 to 1.0.90
## 2024-05-08 - 1.0.82 to 1.0.85 - Core Enhancements
Consistent updates made to improve core operations.
- Updates focused on core functionality for improved performance
- Series of updates applied from versions 1.0.82 to 1.0.85

npmextra.json

@@ -5,7 +5,7 @@
"githost": "gitlab.com",
"gitscope": "losslessone/services/initzone",
"gitrepo": "spark",
"description": "A comprehensive tool for maintaining and configuring servers, integrating with Docker and supporting advanced task scheduling, targeted at the Servezone infrastructure.",
"description": "A comprehensive tool for maintaining and configuring servers, integrating with Docker and supporting advanced task scheduling, targeted at the Servezone infrastructure and used by @serve.zone/cloudly as a cluster node server system manager.",
"npmPackagename": "@losslessone_private/spark",
"license": "MIT",
"projectDomain": "https://lossless.one",
@@ -24,7 +24,11 @@
"task scheduling",
"CLI",
"logging",
"server maintenance"
"server maintenance",
"serve.zone",
"cluster management",
"system manager",
"server configuration"
]
}
},

package.json

@@ -1,8 +1,8 @@
{
"name": "@serve.zone/spark",
"version": "1.0.89",
"version": "1.2.2",
"private": false,
"description": "A comprehensive tool for maintaining and configuring servers, integrating with Docker and supporting advanced task scheduling, targeted at the Servezone infrastructure.",
"description": "A comprehensive tool for maintaining and configuring servers, integrating with Docker and supporting advanced task scheduling, targeted at the Servezone infrastructure and used by @serve.zone/cloudly as a cluster node server system manager.",
"main": "dist_ts/index.js",
"typings": "dist_ts/index.d.ts",
"author": "Task Venture Capital GmbH",
@@ -16,29 +16,30 @@
"spark": "./cli.js"
},
"devDependencies": {
"@git.zone/tsbuild": "^2.1.80",
"@git.zone/tsrun": "^1.2.39",
"@git.zone/tsbuild": "^2.2.0",
"@git.zone/tsrun": "^1.3.3",
"@git.zone/tstest": "^1.0.60",
"@push.rocks/tapbundle": "^5.0.4",
"@types/node": "20.14.2"
"@push.rocks/tapbundle": "^5.5.3",
"@types/node": "22.10.2"
},
"dependencies": {
"@apiclient.xyz/docker": "^1.2.2",
"@push.rocks/npmextra": "^5.0.17",
"@apiclient.xyz/docker": "^1.2.7",
"@push.rocks/npmextra": "^5.1.2",
"@push.rocks/projectinfo": "^5.0.1",
"@push.rocks/qenv": "^6.0.5",
"@push.rocks/qenv": "^6.1.0",
"@push.rocks/smartcli": "^4.0.11",
"@push.rocks/smartdaemon": "^2.0.3",
"@push.rocks/smartdelay": "^3.0.5",
"@push.rocks/smartfile": "^11.0.20",
"@push.rocks/smartfile": "^11.0.23",
"@push.rocks/smartjson": "^5.0.20",
"@push.rocks/smartlog": "^3.0.7",
"@push.rocks/smartlog-destination-local": "^9.0.0",
"@push.rocks/smartpath": "^5.0.5",
"@push.rocks/smartshell": "^3.0.5",
"@push.rocks/smartshell": "^3.2.2",
"@push.rocks/smartupdate": "^2.0.4",
"@push.rocks/taskbuffer": "^3.0.10",
"@serve.zone/interfaces": "^1.0.74"
"@serve.zone/api": "^4.5.1",
"@serve.zone/interfaces": "^4.5.1"
},
"files": [
"ts/**/*",
@@ -71,6 +72,10 @@
"task scheduling",
"CLI",
"logging",
"server maintenance"
"server maintenance",
"serve.zone",
"cluster management",
"system manager",
"server configuration"
]
}

pnpm-lock.yaml (generated)

File diff suppressed because it is too large.

readme.hints.md

@@ -1,2 +1,2 @@
- this package is part of serve.zone and spark's main purpose is to be used by @serve.zone/cloudly as a cluster node server system manager.
- make sure to mention that this package is part of serve.zone and **spark's main purpose** is to be used by @serve.zone/cloudly as a cluster node server system manager.
- it is used to maintain and configure servers on the base OS level

readme.md

@@ -1,5 +1,5 @@
# @serve.zone/spark
A comprehensive tool for maintaining and configuring servers, integrating with Docker and supporting advanced task scheduling, targeted at the Servezone infrastructure.
A comprehensive tool for maintaining and configuring servers, integrating with Docker and supporting advanced task scheduling, targeted at the serve.zone infrastructure. It's mainly designed to be utilized by @serve.zone/cloudly as a cluster node server system manager, maintaining and configuring servers on the base OS level.
## Install
To install `@serve.zone/spark`, run the following command in your terminal:
@@ -22,7 +22,7 @@ import { Spark } from '@serve.zone/spark';
```
### Initializing Spark
Create an instance of the `Spark` class to start using Spark. This instance will serve as the main entry point for interacting with the Spark functionalities.
Create an instance of the `Spark` class to start using Spark. This instance will serve as the main entry point for interacting with Spark functionalities.
```typescript
const sparkInstance = new Spark();
@@ -45,7 +45,7 @@ await sparkInstance.sparkUpdateManager.updateServices();
```
### Managing Configuration and Logging
Spark allows for extensive configuration and logging customization. Use the `SparkLocalConfig` and logging features to tailor Spark's operation to your needs.
Spark allows extensive configuration and logging customization. Use the `SparkLocalConfig` and logging features to tailor Spark's operation to your needs.
```typescript
// Accessing the local configuration
@@ -280,9 +280,6 @@ createDockerSecret();
This example shows how to create a Docker secret using Spark's `SparkUpdateManager` class, ensuring that sensitive information is securely stored and managed.
## Conclusion
`@serve.zone/spark` is a comprehensive toolkit for orchestrating and managing server environments and Docker-based services. By leveraging its CLI and programmatic interfaces, you can automate and streamline server operations, configurations, updates, and task scheduling, ensuring your infrastructure is responsive, updated, and maintained efficiently.
## License and Legal Information
This repository contains open-source code that is licensed under the MIT License. A copy of the MIT License can be found in the [license](license) file within this repository.

ts/00_commitinfo_data.ts

@@ -1,8 +1,8 @@
/**
* autocreated commitinfo by @pushrocks/commitinfo
* autocreated commitinfo by @push.rocks/commitinfo
*/
export const commitinfo = {
name: '@serve.zone/spark',
version: '1.0.89',
description: 'A comprehensive tool for maintaining and configuring servers, integrating with Docker and supporting advanced task scheduling, targeted at the Servezone infrastructure.'
version: '1.2.2',
description: 'A comprehensive tool for maintaining and configuring servers, integrating with Docker and supporting advanced task scheduling, targeted at the Servezone infrastructure and used by @serve.zone/cloudly as a cluster node server system manager.'
}

ts/spark.classes.config.ts

@@ -3,7 +3,12 @@ import { Spark } from './index.js';
export class SparkConfig {
public sparkRef: Spark;
public kvStore: plugins.npmextra.KeyValueStore;
constructor(sparkRefArg: Spark) {
this.sparkRef = sparkRefArg;
this.kvStore = new plugins.npmextra.KeyValueStore({
typeArg: 'userHomeDir',
identityArg: 'servezone_spark',
});
}
}

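The renamed class is reached through the Spark instance, and its kvStore is the same one the CLI diff further below writes the daemon mode into. A minimal usage sketch, assuming only the `writeKey`/`readKey` calls that appear in this changeset (the key name here is hypothetical):

```typescript
import { Spark } from '@serve.zone/spark';

const sparkInstance = new Spark();

// persist a small piece of state in the user-home kvStore
await sparkInstance.sparkConfig.kvStore.writeKey('exampleKey', 'exampleValue'); // hypothetical key

// read it back later, e.g. on the next daemon start
const storedValue = await sparkInstance.sparkConfig.kvStore.readKey('exampleKey');
```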
ts/spark.classes.localconfig.ts

@@ -1,15 +0,0 @@
import * as plugins from './spark.plugins.js';
import { Spark } from './index.js';
export class SparkLocalConfig {
public sparkRef: Spark;
private kvStore: plugins.npmextra.KeyValueStore;
constructor(sparkRefArg: Spark) {
this.sparkRef = sparkRefArg;
this.kvStore = new plugins.npmextra.KeyValueStore({
typeArg: 'userHomeDir',
identityArg: 'spark',
});
}
}

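Worth noting when comparing the new `SparkConfig` with the deleted `SparkLocalConfig`: the replacement store uses a different identity, so state persisted by the old class lives under a separate key space and is not picked up automatically. The two constructor calls side by side, copied from the diffs above:

```typescript
// before (SparkLocalConfig): state keyed under the identity 'spark'
new plugins.npmextra.KeyValueStore({ typeArg: 'userHomeDir', identityArg: 'spark' });

// after (SparkConfig): state keyed under the identity 'servezone_spark'
new plugins.npmextra.KeyValueStore({ typeArg: 'userHomeDir', identityArg: 'servezone_spark' });
```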
ts/spark.classes.spark.ts

@@ -1,23 +1,23 @@
import * as plugins from './spark.plugins.js';
import { SparkTaskManager } from './spark.classes.taskmanager.js';
import { SparkInfo } from './spark.classes.info.js';
import { SparkUpdateManager } from './spark.classes.updatemanager.js';
import { SparkServicesManager } from './spark.classes.updatemanager.js';
import { logger } from './spark.logging.js';
import { SparkLocalConfig } from './spark.classes.localconfig.js';
import { SparkConfig } from './spark.classes.config.js';
export class Spark {
public smartdaemon: plugins.smartdaemon.SmartDaemon;
public sparkLocalConfig: SparkLocalConfig;
public sparkConfig: SparkConfig;
public sparkTaskManager: SparkTaskManager;
public sparkInfo: SparkInfo;
public sparkUpdateManager: SparkUpdateManager;
public sparkUpdateManager: SparkServicesManager;
constructor() {
this.smartdaemon = new plugins.smartdaemon.SmartDaemon();
this.sparkLocalConfig = new SparkLocalConfig(this);
this.sparkConfig = new SparkConfig(this);
this.sparkInfo = new SparkInfo(this);
this.sparkTaskManager = new SparkTaskManager(this);
this.sparkUpdateManager = new SparkUpdateManager(this);
this.sparkUpdateManager = new SparkServicesManager(this);
}
public async daemonStart() {

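The rename ripples through the public surface of `Spark`: `sparkLocalConfig` becomes `sparkConfig`, while `sparkUpdateManager` keeps its property name but is now typed as `SparkServicesManager`. A short sketch of what consumers see after this change (property and method names taken from the diff above):

```typescript
const sparkInstance = new Spark();

// was: sparkInstance.sparkLocalConfig
const config = sparkInstance.sparkConfig;

// same property name as before, now an instance of SparkServicesManager
const servicesManager = sparkInstance.sparkUpdateManager;

await sparkInstance.daemonStart();
```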
ts/spark.classes.taskmanager.ts

@@ -8,25 +8,14 @@ export class SparkTaskManager {
public taskmanager: plugins.taskbuffer.TaskManager;
// tasks
public checkinSlackTask: plugins.taskbuffer.Task;
public updateSpark: plugins.taskbuffer.Task;
public updateHost: plugins.taskbuffer.Task;
public updateCloudly: plugins.taskbuffer.Task;
public updateServices: plugins.taskbuffer.Task;
constructor(sparkRefArg: Spark) {
this.sparkRef = sparkRefArg;
this.taskmanager = new plugins.taskbuffer.TaskManager();
// checkinOnSlack
this.checkinSlackTask = new plugins.taskbuffer.Task({
name: 'checkinSlack',
taskFunction: async () => {
logger.log('ok', 'running hourly checkin now');
logger.log('info', 'completed hourly checkin');
},
});
// updateSpark
this.updateSpark = new plugins.taskbuffer.Task({
name: 'updateSpark',
@@ -67,7 +56,10 @@
},
});
this.updateCloudly = new plugins.taskbuffer.Task({
/**
* only being run when mode is cloudly
*/
this.updateServices = new plugins.taskbuffer.Task({
name: 'updateCloudly',
taskFunction: async () => {
logger.log('info', 'now running updateCloudly task');
@@ -80,10 +72,9 @@
* start the taskmanager
*/
public async start() {
this.taskmanager.addAndScheduleTask(this.checkinSlackTask, '0 0 * * * *');
this.taskmanager.addAndScheduleTask(this.updateServices, '30 */2 * * * *');
this.taskmanager.addAndScheduleTask(this.updateSpark, '0 * * * * *');
this.taskmanager.addAndScheduleTask(this.updateHost, '0 0 0 * * *');
this.taskmanager.addAndScheduleTask(this.updateCloudly, '30 */2 * * * *');
this.taskmanager.start();
}
@@ -91,10 +82,9 @@
* stops the taskmanager
*/
public async stop() {
this.taskmanager.descheduleTask(this.checkinSlackTask);
this.taskmanager.descheduleTask(this.updateSpark);
this.taskmanager.descheduleTask(this.updateHost);
this.taskmanager.descheduleTask(this.updateCloudly);
this.taskmanager.descheduleTask(this.updateServices);
this.taskmanager.stop();
}
}

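For readers unfamiliar with taskbuffer's cron strings: they use six fields (second, minute, hour, day, month, weekday), so `'30 */2 * * * *'` fires at second 30 of every second minute, `'0 * * * * *'` at the top of every minute, and `'0 0 0 * * *'` once a day at midnight. A minimal scheduling sketch using only the calls shown in the diff above:

```typescript
import * as taskbuffer from '@push.rocks/taskbuffer';

const taskManager = new taskbuffer.TaskManager();

const demoTask = new taskbuffer.Task({
  name: 'demoTask',
  taskFunction: async () => {
    console.log('fires at second 30 of every second minute');
  },
});

taskManager.addAndScheduleTask(demoTask, '30 */2 * * * *');
taskManager.start();
```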
ts/spark.classes.updatemanager.ts

@@ -3,10 +3,26 @@ import * as paths from './spark.paths.js';
import { Spark } from './spark.classes.spark.js';
import { logger } from './spark.logging.js';
export class SparkUpdateManager {
/**
* this class takes care of updating the services that are managed by spark
*/
export class SparkServicesManager {
public sparkRef: Spark;
public dockerHost: plugins.docker.DockerHost;
public smartupdate: plugins.smartupdate.SmartUpdate;
/**
* the services that are managed by spark
*/
services: Array<{
name: string;
image: string;
url: string;
port: string;
environment: string;
secretJson: any;
}> = [];
constructor(sparkrefArg: Spark) {
this.sparkRef = sparkrefArg;
this.dockerHost = new plugins.docker.DockerHost({});
@@ -21,109 +37,58 @@
}
public async updateServices() {
if (
plugins.smartfile.fs.isDirectory(plugins.path.join(paths.homeDir, 'serve.zone/spark')) &&
(await plugins.smartfile.fs.fileExists(
plugins.path.join(paths.homeDir, 'serve.zone/spark/spark.json')
))
) {
const services: Array<{
name: string;
image: string;
url: string;
port: string;
environment: string;
secretJson: any;
}> = [];
// lets add coreflow
services.push({
name: `coreflow`,
image: `code.foss.global/serve.zone/coreflow`,
url: `coreflow`,
environment: `production`,
port: `3000`,
secretJson: {
SERVEZONE_PORT: `3000`,
SERVEZONE_ENVIRONMENT: `production`,
},
});
services.push({
name: `coretraffic`,
image: `code.foss.global/serve.zone/coretraffic`,
url: `coreflow`,
environment: `production`,
port: `3000`,
secretJson: {
SERVEZONE_PORT: `3000`,
SERVEZONE_ENVIRONMENT: `production`,
},
});
services.push({
name: `corelog`,
image: `code.foss.global/serve.zone/corelog`,
url: `coreflow`,
environment: `production`,
port: `3000`,
secretJson: {
SERVEZONE_PORT: `3000`,
SERVEZONE_ENVIRONMENT: `production`,
},
});
// lets add coretraffic
for (const service of services) {
const existingService = await plugins.docker.DockerService.getServiceByName(
this.dockerHost,
service.name
);
const existingServiceSecret = await plugins.docker.DockerSecret.getSecretByName(
this.dockerHost,
`${service.name}Secret`
);
if (existingService) {
const needsUpdate: boolean = await existingService.needsUpdate();
if (!needsUpdate) {
logger.log('info', `not needing update.`);
// we simply return here to end the functions
return;
}
logger.log('ok', `${service.name} needs to be updated!`);
await existingService.remove();
await existingServiceSecret.remove();
for (const service of this.services) {
const existingService = await plugins.docker.DockerService.getServiceByName(
this.dockerHost,
service.name
);
const existingServiceSecret = await plugins.docker.DockerSecret.getSecretByName(
this.dockerHost,
`${service.name}Secret`
);
if (existingService) {
const needsUpdate: boolean = await existingService.needsUpdate();
if (!needsUpdate) {
logger.log('info', `service >>${service.name}<< not needing update.`);
// we simply return here to end the functions
return;
}
if (!existingService && existingServiceSecret) {
await existingServiceSecret.remove();
}
const newServiceImage = await plugins.docker.DockerImage.createFromRegistry(
this.dockerHost,
{
creationObject: {
imageUrl: service.image,
},
}
);
const newServiceSecret = await plugins.docker.DockerSecret.createSecret(this.dockerHost, {
name: `${service.name}Secret`,
contentArg: plugins.smartjson.stringify(service.secretJson),
version: await newServiceImage.getVersion(),
labels: {},
});
const newService = await plugins.docker.DockerService.createService(this.dockerHost, {
image: newServiceImage,
labels: {},
name: service.name,
networkAlias: service.name,
networks: [],
secrets: [newServiceSecret],
ports: [`${service.port}:${service.secretJson.SERVEZONE_PORT}`],
});
logger.log('ok', `updated service >>${newService.Spec.Name}<<!`);
// continuing here means we need to update the service
logger.log('ok', `${service.name} needs to be updated!`);
await existingService.remove();
await existingServiceSecret.remove();
}
logger.log('success', `updated ${services.length} services!`);
if (!existingService && existingServiceSecret) {
await existingServiceSecret.remove();
}
const newServiceImage = await plugins.docker.DockerImage.createFromRegistry(
this.dockerHost,
{
creationObject: {
imageUrl: service.image,
},
}
);
const newServiceSecret = await plugins.docker.DockerSecret.createSecret(this.dockerHost, {
name: `${service.name}Secret`,
contentArg: plugins.smartjson.stringify(service.secretJson),
version: await newServiceImage.getVersion(),
labels: {},
});
const newService = await plugins.docker.DockerService.createService(this.dockerHost, {
image: newServiceImage,
labels: {},
name: service.name,
networkAlias: service.name,
networks: [],
secrets: [newServiceSecret],
ports: [`${service.port}:${service.secretJson.SERVEZONE_PORT}`],
});
logger.log('ok', `updated service >>${newService.Spec.Name}<<!`);
}
logger.log('success', `updated ${this.services.length} services!`);
}
}

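With the hard-coded service definitions gone, callers are expected to seed the `services` array before the scheduled task runs; the CLI diff below does exactly that per mode. A condensed sketch of the new flow, with the service shape copied from the class above:

```typescript
const sparkInstance = new Spark();

// register a service to be managed; the object shape matches the services array above
sparkInstance.sparkUpdateManager.services.push({
  name: `coreflow`,
  image: `code.foss.global/serve.zone/coreflow`,
  url: `coreflow`,
  environment: `production`,
  port: `3000`,
  secretJson: {
    SERVEZONE_PORT: `3000`,
    SERVEZONE_ENVIRONMENT: `production`,
  },
});

// pulls the image, rotates the service secret, and (re)creates the Docker service
await sparkInstance.sparkUpdateManager.updateServices();
```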
ts/spark.cli.ts

@@ -45,6 +45,50 @@ export const runCli = async () => {
smartcliInstance.addCommand('asdaemon').subscribe(async (argvArg) => {
logger.log('success', 'looks like we are running as daemon now');
logger.log('info', 'starting spark in daemon mode');
// lets determine the mode if specified
let mode = argvArg.mode;
if (mode === 'cloudly') {
await sparkInstance.sparkConfig.kvStore.writeKey('mode', 'cloudly');
} else if (mode === 'coreflow-node') {
await sparkInstance.sparkConfig.kvStore.writeKey('mode', 'coreflow-node');
} else if (mode) {
logger.log('error', 'unknown mode specified');
process.exit(1);
} else {
// mode is not specified by cli, lets get it from the config
mode = await sparkInstance.sparkConfig.kvStore.readKey('mode');
}
if (!mode) {
logger.log('error', 'no mode specified by either cli or config');
process.exit(1);
} else if (mode === 'cloudly') {
sparkInstance.sparkUpdateManager.services.push({
name: `coreflow`,
image: `code.foss.global/serve.zone/cloudly`,
url: `cloudly`,
environment: `production`,
port: `3000`,
secretJson: {
SERVEZONE_PORT: `3000`,
SERVEZONE_ENVIRONMENT: `production`,
},
});
} else if (mode === 'coreflow-node') {
sparkInstance.sparkUpdateManager.services.push({
name: `coreflow`,
image: `code.foss.global/serve.zone/coreflow`,
url: `coreflow`,
environment: `production`,
port: `3000`,
secretJson: {
SERVEZONE_PORT: `3000`,
SERVEZONE_ENVIRONMENT: `production`,
},
});
}
await sparkInstance.daemonStart();
});

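The net effect of the new block: the mode passed on the first daemon start is persisted, and later starts fall back to the stored value. Sketched below; the `--mode` flag spelling is an assumption, since the diff only shows that smartcli exposes a parsed `argvArg.mode`:

```typescript
// hypothetical first start: `spark asdaemon --mode cloudly` (flag spelling assumed)
// the handler persists the chosen mode:
await sparkInstance.sparkConfig.kvStore.writeKey('mode', 'cloudly');

// hypothetical later start: `spark asdaemon` with no mode given;
// the handler restores it from the kvStore before seeding the services array:
const mode = await sparkInstance.sparkConfig.kvStore.readKey('mode'); // 'cloudly'
```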
ts/spark.plugins.ts

@@ -5,8 +5,9 @@ export { path };
// @serve.zone scope
import * as servezoneInterfaces from '@serve.zone/interfaces';
import * as servezoneApi from '@serve.zone/api';
export { servezoneInterfaces };
export { servezoneInterfaces, servezoneApi };
// @apiclient.xyz scope
import * as docker from '@apiclient.xyz/docker';