Files
spark/ts/spark.cli.ts
Juergen Kunz 526b4f46dd
All checks were successful
CI / Type Check & Lint (push) Successful in 13s
CI / Build Test (Current Platform) (push) Successful in 19s
CI / Build All Platforms (push) Successful in 1m48s
feat(migration): Migrate from Node.js to Deno runtime
Major migration to Deno runtime following the nupst project pattern:

Core Changes:
- Created deno.json configuration with tasks, imports, and settings
- Created mod.ts as main entry point with Deno permissions
- Updated all TypeScript imports from .js to .ts extensions
- Replaced Node.js APIs (process.exit) with Deno equivalents (Deno.exit)
- Updated path imports to use @std/path from JSR

Dependencies:
- Migrated all npm dependencies to use npm: prefix in import map
- Added Deno standard library imports (@std/path, @std/assert)
- Configured import aliases for all @push.rocks and @serve.zone packages

Build & Distribution:
- Created install.sh for downloading pre-compiled binaries
- Created uninstall.sh for clean system removal
- Created scripts/compile-all.sh for multi-platform compilation
- Supports Linux (x64, ARM64), macOS (x64, ARM64), Windows (x64)

Testing:
- Migrated tests to Deno test framework using @std/assert
- Created test.simple.ts for basic verification
- Updated test structure to use Deno.test instead of tap

CI/CD:
- Created .gitea/workflows/ci.yml for type checking, linting, and builds
- Created .gitea/workflows/release.yml for automated releases
- Created .gitea/release-template.md for release documentation

Cleanup:
- Removed package.json, pnpm-lock.yaml, tsconfig.json
- Removed Node.js CLI files (cli.js, cli.child.ts, cli.ts.js)
- Removed dist_ts/ compiled output directory
- Removed npmextra.json configuration

This migration enables standalone binary distribution without Node.js
runtime dependency while maintaining all existing functionality.
2025-10-23 23:22:16 +00:00

140 lines
4.9 KiB
TypeScript

import * as plugins from './spark.plugins.ts';
import * as paths from './spark.paths.ts';
import { Spark } from './spark.classes.spark.ts';
import { logger } from './spark.logging.ts';
/**
 * Builds the systemd service definition for the spark daemon and registers it
 * with smartdaemon. Shared by the `installdaemon` and `updatedaemon` commands
 * so the two definitions can never drift apart.
 */
const registerSparkDaemonService = async (sparkInstance: Spark) => {
  return sparkInstance.smartdaemon.addService({
    name: 'spark',
    version: sparkInstance.sparkInfo.projectInfo.version,
    command: 'spark asdaemon',
    description: 'spark daemon service',
    workingDir: paths.packageDir,
  });
};

/**
 * Entry point of the spark CLI.
 *
 * Registers all commands (installdaemon, updatedaemon, asdaemon, logs, prune)
 * on a Smartcli instance and starts argument parsing. The `asdaemon` command
 * terminates the process via Deno.exit(1) when the mode is unknown, or when
 * no mode is supplied by either the CLI or the persisted config.
 */
export const runCli = async () => {
  const smartshellInstance = new plugins.smartshell.Smartshell({
    executor: 'bash',
  });
  const sparkInstance = new Spark();
  const smartcliInstance = new plugins.smartcli.Smartcli();

  // Default command: print usage hints when no action is specified.
  smartcliInstance.standardCommand().subscribe(async () => {
    logger.log('info', 'no action specified! you can type:');
    logger.log('info', '* installdaemon');
  });

  // installdaemon: create the systemd service, then enable and start it.
  smartcliInstance.addCommand('installdaemon').subscribe(async (argvArg) => {
    logger.log('ok', 'we are apparently not running as daemon yet');
    logger.log('info', 'trying to set this up now');
    const sparkService = await registerSparkDaemonService(sparkInstance);
    await sparkService.save();
    await sparkService.enable();
    await sparkService.start();
  });

  // updatedaemon: re-register the service definition and reload it.
  smartcliInstance.addCommand('updatedaemon').subscribe(async (argvArg) => {
    logger.log('ok', 'we are apparently trying to update the daemon for spark');
    logger.log('info', 'trying to set this up now');
    const sparkService = await registerSparkDaemonService(sparkInstance);
    await sparkService.reload();
  });

  // asdaemon: run spark in daemon mode. The mode comes from the CLI
  // (and is then persisted) or, failing that, from the stored config.
  smartcliInstance.addCommand('asdaemon').subscribe(async (argvArg) => {
    logger.log('success', 'looks like we are running as daemon now');
    logger.log('info', 'starting spark in daemon mode');
    let mode = argvArg.mode;
    if (mode === 'cloudly') {
      await sparkInstance.sparkConfig.kvStore.writeKey('mode', 'cloudly');
    } else if (mode === 'coreflow-node') {
      await sparkInstance.sparkConfig.kvStore.writeKey('mode', 'coreflow-node');
    } else if (mode) {
      logger.log('error', 'unknown mode specified');
      Deno.exit(1);
    } else {
      // mode was not specified on the cli, fall back to the persisted config
      mode = await sparkInstance.sparkConfig.kvStore.readKey('mode');
    }
    if (!mode) {
      logger.log('error', 'no mode specified by either cli or config');
      Deno.exit(1);
    } else if (mode === 'cloudly') {
      sparkInstance.sparkUpdateManager.services.push({
        // FIX: this service was previously registered under the name
        // `coreflow` (copy-paste from the branch below) although image and
        // url both point at cloudly.
        name: `cloudly`,
        image: `code.foss.global/serve.zone/cloudly`,
        url: `cloudly`,
        environment: `production`,
        port: `3000`,
        secretJson: {
          SERVEZONE_PORT: `3000`,
          SERVEZONE_ENVIRONMENT: `production`,
        },
      });
    } else if (mode === 'coreflow-node') {
      sparkInstance.sparkUpdateManager.services.push({
        name: `coreflow`,
        image: `code.foss.global/serve.zone/coreflow`,
        url: `coreflow`,
        environment: `production`,
        port: `3000`,
        secretJson: {
          SERVEZONE_PORT: `3000`,
          SERVEZONE_ENVIRONMENT: `production`,
        },
      });
    }
    await sparkInstance.daemonStart();
  });

  // logs: follow the daemon's journal output.
  smartcliInstance.addCommand('logs').subscribe(async (argvArg) => {
    // journalctl -f streams until interrupted; intentionally not awaited so
    // the subscriber does not block on a never-ending process.
    smartshellInstance.exec(`journalctl -u smartdaemon_spark -f`);
  });

  // prune: stop the daemon, wipe all docker swarm state, restart docker,
  // then hand control back to the daemon.
  smartcliInstance.addCommand('prune').subscribe(async (argvArg) => {
    // Runs a shell command, logs a confirmation, then waits 5s so docker has
    // time to settle before the next destructive step.
    const step = async (command: string, okMessage: string) => {
      await smartshellInstance.exec(command);
      logger.log('ok', okMessage);
      await plugins.smartdelay.delayFor(5000);
    };
    // daemon
    await step(`systemctl stop smartdaemon_spark`, 'stopped serverconfig daemon');
    // stacks
    await step(`docker stack rm $(docker stack ls -q)`, 'removed docker stacks');
    // services
    await step(`docker service rm $(docker service ls -q)`, 'removed docker services');
    // secrets
    await step(`docker secret rm $(docker secret ls -q)`, 'removed docker secrets');
    // networks
    await step(`docker network rm szncorechat sznwebgateway`, 'removed docker networks');
    await step(`docker system prune -af`, 'pruned docker system');
    // restart docker itself
    await step(`systemctl restart docker`, 'restarted the docker service');
    // serverconfig daemon takes over again (no trailing delay needed)
    await smartshellInstance.exec(`systemctl start smartdaemon_spark`);
    logger.log('ok', 'handed over control back to serverconfig daemon');
  });

  smartcliInstance.startParse();
};