Compare commits

...

8 Commits
v6.1.2 ... main

Author SHA1 Message Date
587cfffbe4 v8.0.2 2026-03-23 14:59:59 +00:00
820811cca2 fix(servicemanager): cancel startup timeout once service initialization completes 2026-03-23 14:59:59 +00:00
e25271f9db v8.0.1 2026-03-23 14:40:17 +00:00
709f9fa894 fix(servicemanager): cancel shutdown timeouts after services stop 2026-03-23 14:40:17 +00:00
99430fbde4 v8.0.0 2026-03-21 10:57:27 +00:00
0f93e86cc1 BREAKING CHANGE(service): expand service lifecycle management with instance-aware hooks, startup timeouts, labels, readiness waits, and auto-restart support 2026-03-21 10:57:27 +00:00
0b78b05101 v7.0.0 2026-03-20 15:24:16 +00:00
e91e782113 feat(service): add Service and ServiceManager for component lifecycle management
Adds two new classes:
- Service: long-running component with start/stop lifecycle, health checks, builder pattern and subclass support
- ServiceManager: orchestrates multiple services with dependency-ordered startup, failure isolation, retry with backoff, and reverse-order shutdown
2026-03-20 15:24:12 +00:00
12 changed files with 4557 additions and 2878 deletions

View File

@@ -1,5 +1,24 @@
# Changelog
## 2026-03-23 - 8.0.2 - fix(servicemanager)
cancel startup timeout once service initialization completes
- Replaces the startup timeout race delay with a cancellable Timeout instance
- Prevents the global startup timeout from lingering after startup finishes or fails
## 2026-03-23 - 8.0.1 - fix(servicemanager)
cancel shutdown timeouts after services stop
- Replace the shutdown race delay with a cancellable Timeout in ServiceManager.
- Prevent timeout handlers from lingering after a service stops successfully during shutdown.
## 2026-03-20 - 8.0.0 - BREAKING CHANGE(service)
expand service lifecycle management with instance-aware hooks, startup timeouts, labels, readiness waits, and auto-restart support
- Change service stop and health check callbacks to receive the started instance and expose it via service.instance
- Add per-service and global startup timeout handling plus waitForState, waitForRunning, and waitForStopped readiness helpers
- Support service labels, label-based manager queries, and auto-restart lifecycle events with configurable backoff
## 2026-02-15 - 6.1.2 - fix(deps)
bump @push.rocks/smarttime to ^4.2.3

View File

@@ -1,13 +1,13 @@
{
"name": "@push.rocks/taskbuffer",
"version": "6.1.2",
"version": "8.0.2",
"private": false,
"description": "A flexible task management library supporting TypeScript, allowing for task buffering, scheduling, and execution with dependency management.",
"main": "dist_ts/index.js",
"typings": "dist_ts/index.d.ts",
"type": "module",
"scripts": {
"test": "(tstest test/ --verbose --logfile --timeout 120)",
"test": "(tstest test/ --verbose --logfile --timeout 300)",
"build": "(tsbuild tsfolders)",
"buildDocs": "tsdoc"
},
@@ -34,21 +34,21 @@
},
"homepage": "https://code.foss.global/push.rocks/taskbuffer#readme",
"dependencies": {
"@design.estate/dees-element": "^2.1.6",
"@push.rocks/lik": "^6.2.2",
"@design.estate/dees-element": "^2.2.3",
"@push.rocks/lik": "^6.3.1",
"@push.rocks/smartdelay": "^3.0.5",
"@push.rocks/smartlog": "^3.1.11",
"@push.rocks/smartlog": "^3.2.1",
"@push.rocks/smartpromise": "^4.2.3",
"@push.rocks/smartrx": "^3.0.10",
"@push.rocks/smarttime": "^4.2.3",
"@push.rocks/smartunique": "^3.0.9"
},
"devDependencies": {
"@git.zone/tsbuild": "^4.1.2",
"@git.zone/tsbundle": "^2.8.3",
"@git.zone/tsbuild": "^4.3.0",
"@git.zone/tsbundle": "^2.9.1",
"@git.zone/tsrun": "^2.0.1",
"@git.zone/tstest": "^3.1.8",
"@types/node": "^25.2.3"
"@git.zone/tstest": "^3.5.0",
"@types/node": "^25.5.0"
},
"files": [
"ts/**/*",

5094
pnpm-lock.yaml generated

File diff suppressed because it is too large Load Diff

View File

@@ -94,17 +94,45 @@
accessor count = 0;
```
## Dependencies (as of v4.2.0)
- `@design.estate/dees-element` ^2.1.6 - TC39 decorators with `accessor` keyword
- `@push.rocks/lik` ^6.2.2 - Data structures
## Service Lifecycle System (v7.0.0+)
### Service<T>
- `new Service<T>(name)` or `new Service<T>(options: IServiceOptions<T>)`
- Builder pattern: `.critical()`, `.optional()`, `.dependsOn(...)`, `.withStart(fn)`, `.withStop(fn)`, `.withHealthCheck(fn, config?)`, `.withRetry(config)`, `.withStartupTimeout(ms)`, `.withLabels(labels)`
- Subclass pattern: override `serviceStart()`, `serviceStop()`, `serviceHealthCheck()`
- State machine: `stopped` → `starting` → `running` → `degraded` → `failed` → `stopping`
- `service.instance` — stores the value returned from `start()` (e.g. a database pool)
- `withStop(fn)` and `withHealthCheck(fn)` receive the instance as argument: `(instance: T) => Promise<void>`
- `waitForState(target, timeoutMs?)`, `waitForRunning(timeoutMs?)`, `waitForStopped(timeoutMs?)` — programmatic readiness gates
- Per-service startup timeout via `withStartupTimeout(ms)` or `startupTimeoutMs` in options
- Labels: `setLabel`, `getLabel`, `removeLabel`, `hasLabel`, `withLabels`
- Health checks: configurable via `IHealthCheckConfig` with `intervalMs`, `timeoutMs`, `failuresBeforeDegraded`, `failuresBeforeFailed`
- Auto-restart on health failure: `autoRestart: true` in `IHealthCheckConfig` with `maxAutoRestarts`, `autoRestartDelayMs`, `autoRestartBackoffFactor`
- Events via `eventSubject`: `'started'`, `'stopped'`, `'failed'`, `'degraded'`, `'recovered'`, `'retrying'`, `'healthCheck'`, `'autoRestarting'`
### ServiceManager
- `manager.addService(service)` / `manager.addServiceFromOptions(options)` / `manager.removeService(name)`
- Dependency-ordered startup via topological sort (Kahn's algorithm), level-by-level parallel
- Critical service failure aborts startup with rollback; optional service failure continues
- Retry with exponential backoff + jitter
- Reverse-dependency-ordered shutdown with per-service timeout
- `restartService(name)` — cascade stops dependents, restarts target, restarts dependents
- `getHealth()` — aggregated `'healthy' | 'degraded' | 'unhealthy'` status
- `getServicesByLabel(key, value)` / `getServicesStatusByLabel(key, value)` — label-based queries
- Circular dependency detection
- Global startup timeout enforcement via `startupTimeoutMs` in `IServiceManagerOptions`
## Dependencies (as of v7.0.0)
- `@design.estate/dees-element` ^2.2.3 - TC39 decorators with `accessor` keyword
- `@push.rocks/lik` ^6.3.1 - Data structures
- `@push.rocks/smartdelay` ^3.0.5 - Delay utilities
- `@push.rocks/smartlog` ^3.1.11 - Logging
- `@push.rocks/smartlog` ^3.2.1 - Logging
- `@push.rocks/smartpromise` ^4.2.3 - Promise utilities
- `@push.rocks/smartrx` ^3.0.10 - RxJS wrapper
- `@push.rocks/smarttime` ^4.1.1 - Time/cron utilities
- `@push.rocks/smarttime` ^4.2.3 - Time/cron utilities
- `@push.rocks/smartunique` ^3.0.9 - Unique ID generation
- `@git.zone/tsbuild` ^4.1.2 - Build tool
- `@git.zone/tsbundle` ^2.8.3 - Bundler (for browser tests)
- `@git.zone/tsbuild` ^4.3.0 - Build tool
- `@git.zone/tsbundle` ^2.9.1 - Bundler (for browser tests)
- `@git.zone/tsrun` ^2.0.1 - TypeScript runner
- `@git.zone/tstest` ^3.1.8 - Test runner (supports `.chromium.ts` files)
- `@types/node` ^25.2.3 - Node.js type definitions
- `@git.zone/tstest` ^3.5.0 - Test runner (supports `.chromium.ts` files)
- `@types/node` ^25.5.0 - Node.js type definitions

269
readme.md
View File

@@ -1,6 +1,6 @@
# @push.rocks/taskbuffer 🚀
> **Modern TypeScript task orchestration with constraint-based concurrency, smart buffering, scheduling, labels, and real-time event streaming**
> **Modern TypeScript task orchestration and service lifecycle management with constraint-based concurrency, smart buffering, scheduling, health checks, and real-time event streaming**
[![npm version](https://img.shields.io/npm/v/@push.rocks/taskbuffer.svg)](https://www.npmjs.com/package/@push.rocks/taskbuffer)
[![TypeScript](https://img.shields.io/badge/TypeScript-5.x-blue.svg)](https://www.typescriptlang.org/)
@@ -21,6 +21,7 @@ For reporting bugs, issues, or security vulnerabilities, please visit [community
- **🏷️ Labels** — Attach arbitrary `Record<string, string>` metadata (userId, tenantId, etc.) for multi-tenant filtering
- **📡 Push-Based Events** — rxjs `Subject<ITaskEvent>` on every Task and TaskManager for real-time state change notifications
- **🛡️ Error Handling** — Configurable error propagation with `catchErrors`, error tracking, and clear error state
- **🩺 Service Lifecycle Management** — `Service` and `ServiceManager` for long-running components (databases, servers, queues) with health checks, auto-restart, dependency ordering, and instance access
- **🎨 Web Component Dashboard** — Built-in Lit-based dashboard for real-time task visualization
- **🌐 Distributed Coordination** — Abstract coordinator for multi-instance task deduplication
@@ -788,6 +789,196 @@ manager.descheduleTaskByName('Deploy'); // Remove cron schedule only
manager.removeConstraintGroup('domain-mutex'); // By name
```
## 🩺 Service Lifecycle Management
For long-running components like database connections, HTTP servers, and message queues, taskbuffer provides `Service` and `ServiceManager` — a complete lifecycle management system with health checks, dependency ordering, retry, auto-restart, and typed instance access.
### Basic Service — Builder Pattern
```typescript
import { Service, ServiceManager } from '@push.rocks/taskbuffer';
const dbService = new Service<DatabasePool>('Database')
.critical()
.withStart(async () => {
const pool = new DatabasePool({ host: 'localhost', port: 5432 });
await pool.connect();
return pool; // stored as service.instance
})
.withStop(async (pool) => {
await pool.disconnect(); // receives the instance from start
})
.withHealthCheck(async (pool) => {
return await pool.ping(); // receives the instance too
});
await dbService.start();
dbService.instance!.query('SELECT 1'); // typed access to the pool
await dbService.stop();
```
The `start()` return value is stored as `service.instance` and automatically passed to `stop()` and `healthCheck()` functions — no need for external closures or shared variables.
### Service with Dependencies & Health Checks
```typescript
const cacheService = new Service('Redis')
.optional()
.withStart(async () => new RedisClient())
.withStop(async (client) => client.quit())
.withHealthCheck(async (client) => client.isReady, {
intervalMs: 10000, // check every 10s
timeoutMs: 3000, // 3s timeout per check
failuresBeforeDegraded: 3, // 3 consecutive failures → 'degraded'
failuresBeforeFailed: 5, // 5 consecutive failures → 'failed'
autoRestart: true, // auto-restart when failed
maxAutoRestarts: 5, // give up after 5 restart attempts
autoRestartDelayMs: 2000, // start with 2s delay
autoRestartBackoffFactor: 2, // double delay each attempt
});
const apiService = new Service('API')
.critical()
.dependsOn('Database', 'Redis')
.withStart(async () => {
const server = createServer();
await server.listen(3000);
return server;
})
.withStop(async (server) => server.close())
.withStartupTimeout(10000); // fail if start takes > 10s
```
### ServiceManager — Orchestration
`ServiceManager` handles dependency-ordered startup, failure isolation, and aggregated health reporting:
```typescript
const manager = new ServiceManager({
name: 'MyApp',
startupTimeoutMs: 60000, // global startup timeout
shutdownTimeoutMs: 15000, // per-service shutdown timeout
defaultRetry: { maxRetries: 3, baseDelayMs: 1000, backoffFactor: 2 },
});
manager.addService(dbService);
manager.addService(cacheService);
manager.addService(apiService);
await manager.start();
// ✅ Starts Database first, then Redis (parallel with DB since independent),
// then API (after both deps are running)
// ❌ If Database (critical) fails → rollback, stop everything, throw
// ⚠️ If Redis (optional) fails → log warning, continue, health = 'degraded'
// Health aggregation
const health = manager.getHealth();
// { overall: 'healthy', services: [...], startedAt: 1706284800000, uptime: 42000 }
// Cascade restart — stops dependents first, restarts target, then restarts dependents
await manager.restartService('Database');
// Graceful reverse-order shutdown
await manager.stop();
```
### Subclass Pattern
For complex services, extend `Service` and override the lifecycle hooks:
```typescript
class PostgresService extends Service<Pool> {
constructor(private config: PoolConfig) {
super('Postgres');
this.critical();
}
protected async serviceStart(): Promise<Pool> {
const pool = new Pool(this.config);
await pool.connect();
return pool;
}
protected async serviceStop(): Promise<void> {
await this.instance?.end();
}
protected async serviceHealthCheck(): Promise<boolean> {
const result = await this.instance?.query('SELECT 1');
return result?.rows.length === 1;
}
}
```
### Waiting for Service Readiness
Programmatically wait for a service to reach a specific state:
```typescript
// Wait for the service to be running (with timeout)
await dbService.waitForRunning(10000);
// Wait for any state
await service.waitForState(['running', 'degraded'], 5000);
// Wait for shutdown
await service.waitForStopped();
```
### Service Labels
Tag services with metadata for filtering and grouping:
```typescript
const service = new Service('Redis')
.withLabels({ type: 'cache', env: 'production', region: 'eu-west' })
.withStart(async () => new RedisClient())
.withStop(async (client) => client.quit());
// Query by label in ServiceManager
const caches = manager.getServicesByLabel('type', 'cache');
const prodStatuses = manager.getServicesStatusByLabel('env', 'production');
```
### Service Events
Every `Service` emits events via an rxjs `Subject<IServiceEvent>`:
```typescript
service.eventSubject.subscribe((event) => {
console.log(`[${event.type}] ${event.serviceName} → ${event.state}`);
});
// [started] Database → running
// [healthCheck] Database → running
// [degraded] Database → degraded
// [autoRestarting] Database → failed
// [started] Database → running
// [recovered] Database → running
// [stopped] Database → stopped
```
| Event Type | When |
| --- | --- |
| `'started'` | Service started successfully |
| `'stopped'` | Service stopped |
| `'failed'` | Service start failed or health check threshold exceeded |
| `'degraded'` | Health check failures exceeded `failuresBeforeDegraded` |
| `'recovered'` | Health check succeeded while in degraded state |
| `'retrying'` | ServiceManager retrying a failed start attempt |
| `'healthCheck'` | Health check completed (success or failure) |
| `'autoRestarting'` | Auto-restart scheduled after health check failure |
`ServiceManager.serviceSubject` aggregates events from all registered services.
### Service State Machine
```
stopped → starting → running → degraded → failed
↑ ↓ ↓ ↓
└── stopping ←───────────────────┴─────────┘
(auto-restart)
```
## 🎨 Web Component Dashboard
Visualize your tasks in real-time with the included Lit-based web component:
@@ -970,6 +1161,8 @@ const acmeTasks = manager.getTasksMetadataByLabel('tenantId', 'acme');
| `TaskOnce` | Single-execution guard |
| `TaskDebounced` | Debounced task using rxjs |
| `TaskStep` | Step tracking unit (internal, exposed via metadata) |
| `Service<T>` | Long-running component with start/stop lifecycle, health checks, auto-restart, and typed instance access |
| `ServiceManager` | Service orchestrator with dependency ordering, failure isolation, retry, and health aggregation |
### Task Constructor Options
@@ -1080,10 +1273,72 @@ const acmeTasks = manager.getTasksMetadataByLabel('tenantId', 'acme');
| `taskMap` | `ObjectMap<Task>` | Internal task registry |
| `constraintGroups` | `TaskConstraintGroup[]` | Registered constraint groups |
### Service Builder Methods
| Method | Returns | Description |
| --- | --- | --- |
| `critical()` | `this` | Mark as critical (startup failure aborts ServiceManager) |
| `optional()` | `this` | Mark as optional (startup failure is tolerated) |
| `dependsOn(...names)` | `this` | Declare dependencies by service name |
| `withStart(fn)` | `this` | Set start function: `() => Promise<T>` |
| `withStop(fn)` | `this` | Set stop function: `(instance: T) => Promise<void>` |
| `withHealthCheck(fn, config?)` | `this` | Set health check: `(instance: T) => Promise<boolean>` |
| `withRetry(config)` | `this` | Set retry config: `{ maxRetries, baseDelayMs, maxDelayMs, backoffFactor }` |
| `withStartupTimeout(ms)` | `this` | Per-service startup timeout |
| `withLabels(labels)` | `this` | Attach key-value labels |
### Service Methods
| Method | Returns | Description |
| --- | --- | --- |
| `start()` | `Promise<T>` | Start the service (no-op if already running) |
| `stop()` | `Promise<void>` | Stop the service (no-op if already stopped) |
| `checkHealth()` | `Promise<boolean \| undefined>` | Run health check manually |
| `waitForState(target, timeoutMs?)` | `Promise<void>` | Wait for service to reach a state |
| `waitForRunning(timeoutMs?)` | `Promise<void>` | Wait for `'running'` state |
| `waitForStopped(timeoutMs?)` | `Promise<void>` | Wait for `'stopped'` state |
| `getStatus()` | `IServiceStatus` | Full status snapshot |
| `setLabel(key, value)` | `void` | Set a label |
| `getLabel(key)` | `string \| undefined` | Get a label value |
| `removeLabel(key)` | `boolean` | Remove a label |
| `hasLabel(key, value?)` | `boolean` | Check label existence / value |
### Service Properties
| Property | Type | Description |
| --- | --- | --- |
| `name` | `string` | Service identifier |
| `state` | `TServiceState` | Current state (`stopped`, `starting`, `running`, `degraded`, `failed`, `stopping`) |
| `instance` | `T \| undefined` | The value returned from `start()` |
| `criticality` | `TServiceCriticality` | `'critical'` or `'optional'` |
| `dependencies` | `string[]` | Dependency names |
| `labels` | `Record<string, string>` | Attached labels |
| `eventSubject` | `Subject<IServiceEvent>` | rxjs Subject emitting lifecycle events |
| `errorCount` | `number` | Total error count |
| `retryCount` | `number` | Retry attempts during last startup |
### ServiceManager Methods
| Method | Returns | Description |
| --- | --- | --- |
| `addService(service)` | `void` | Register a service |
| `addServiceFromOptions(options)` | `Service<T>` | Create and register from options |
| `removeService(name)` | `void` | Remove service (throws if others depend on it) |
| `start()` | `Promise<void>` | Start all services in dependency order |
| `stop()` | `Promise<void>` | Stop all services in reverse order |
| `restartService(name)` | `Promise<void>` | Cascade restart with dependents |
| `getService(name)` | `Service \| undefined` | Look up by name |
| `getServiceStatus(name)` | `IServiceStatus \| undefined` | Single service status |
| `getAllStatuses()` | `IServiceStatus[]` | All service statuses |
| `getHealth()` | `IServiceManagerHealth` | Aggregated health report |
| `getServicesByLabel(key, value)` | `Service[]` | Filter services by label |
| `getServicesStatusByLabel(key, value)` | `IServiceStatus[]` | Filter statuses by label |
### Exported Types
```typescript
import type {
// Task types
ITaskMetadata,
ITaskExecutionReport,
ITaskExecution,
@@ -1096,6 +1351,18 @@ import type {
IRateLimitConfig,
TResultSharingMode,
StepNames,
// Service types
IServiceOptions,
IServiceStatus,
IServiceEvent,
IServiceManagerOptions,
IServiceManagerHealth,
IRetryConfig,
IHealthCheckConfig,
TServiceState,
TServiceCriticality,
TServiceEventType,
TOverallHealth,
} from '@push.rocks/taskbuffer';
```

View File

@@ -0,0 +1,894 @@
import { expect, tap } from '@git.zone/tstest/tapbundle';
import * as taskbuffer from '../ts/index.js';
import * as smartdelay from '@push.rocks/smartdelay';
// ── Test 1: Basic service start/stop lifecycle ─────────────
tap.test('should start and stop a simple service', async () => {
  // Track both lifecycle callbacks through a single flags object.
  const flags = { start: false, stop: false };
  const service = new taskbuffer.Service('TestService')
    .withStart(async () => { flags.start = true; })
    .withStop(async () => { flags.stop = true; });
  // A freshly built service begins in the 'stopped' state.
  expect(service.state).toEqual('stopped');
  await service.start();
  expect(service.state).toEqual('running');
  expect(flags.start).toBeTrue();
  await service.stop();
  expect(service.state).toEqual('stopped');
  expect(flags.stop).toBeTrue();
});
// ── Test 2: Builder pattern chaining ───────────────────────
tap.test('should support builder pattern chaining', async () => {
  // Every builder method returns `this`, so one chain yields a fully
  // configured Service instance.
  const chained = new taskbuffer.Service('ChainedService')
    .critical()
    .dependsOn('Dep1', 'Dep2')
    .withStart(async () => 'result')
    .withStop(async () => {})
    .withRetry({ maxRetries: 5, baseDelayMs: 100 });
  expect(chained.criticality).toEqual('critical');
  expect(chained.dependencies).toEqual(['Dep1', 'Dep2']);
  expect(chained.retryConfig).toBeTruthy();
  expect(chained.retryConfig!.maxRetries).toEqual(5);
});
// ── Test 3: Subclass pattern ───────────────────────────────
tap.test('should support subclass pattern', async () => {
  // Subclasses override serviceStart/serviceStop instead of using the
  // builder's withStart/withStop callbacks.
  class MyService extends taskbuffer.Service<string> {
    public startCalled = false;
    public stopCalled = false;
    constructor() {
      super('MySubclassService');
      this.optional();
    }
    protected async serviceStart(): Promise<string> {
      this.startCalled = true;
      return 'hello';
    }
    protected async serviceStop(): Promise<void> {
      this.stopCalled = true;
    }
  }
  const service = new MyService();
  const result = await service.start();
  // Fix: `result` was previously captured but never checked — assert that
  // the value returned from serviceStart() is propagated through start().
  expect(result).toEqual('hello');
  expect(service.startCalled).toBeTrue();
  expect(service.state).toEqual('running');
  await service.stop();
  expect(service.stopCalled).toBeTrue();
  expect(service.state).toEqual('stopped');
});
// ── Test 4: Constructor with options object ────────────────
tap.test('should accept options object in constructor', async () => {
  let didStart = false;
  // Instead of builder chaining, everything is supplied up front.
  const optionsService = new taskbuffer.Service({
    name: 'OptionsService',
    criticality: 'critical',
    dependencies: ['A', 'B'],
    start: async () => { didStart = true; },
    stop: async () => {},
    retry: { maxRetries: 10 },
  });
  expect(optionsService.name).toEqual('OptionsService');
  expect(optionsService.criticality).toEqual('critical');
  expect(optionsService.dependencies).toEqual(['A', 'B']);
  await optionsService.start();
  expect(didStart).toBeTrue();
  await optionsService.stop();
});
// ── Test 5: ServiceManager dependency ordering ─────────────
tap.test('should start services in dependency order', async () => {
  const startSequence: string[] = [];
  const manager = new taskbuffer.ServiceManager({ name: 'TestManager' });
  // Helper: build a service that records its own name when it starts.
  const makeTracked = (name: string, ...deps: string[]) => {
    const svc = new taskbuffer.Service(name)
      .withStart(async () => { startSequence.push(name); })
      .withStop(async () => {});
    return deps.length > 0 ? svc.dependsOn(...deps) : svc;
  };
  // Registered deliberately out of order; the manager must sort topologically.
  manager.addService(makeTracked('C', 'B'));
  manager.addService(makeTracked('A'));
  manager.addService(makeTracked('B', 'A'));
  await manager.start();
  // A must come before B, B must come before C
  expect(startSequence.indexOf('A')).toBeLessThan(startSequence.indexOf('B'));
  expect(startSequence.indexOf('B')).toBeLessThan(startSequence.indexOf('C'));
  await manager.stop();
});
// ── Test 6: Critical service failure aborts startup ────────
tap.test('should abort startup when a critical service fails', async () => {
  const manager = new taskbuffer.ServiceManager({ name: 'CriticalTest' });
  // Fix: renamed from `serviceCStopped` — this flag records that the
  // dependent service's START ran, not that anything stopped.
  let afterBrokenStarted = false;
  manager.addService(
    new taskbuffer.Service('Working')
      .critical()
      .withStart(async () => {})
      .withStop(async () => {}),
  );
  manager.addService(
    new taskbuffer.Service('Broken')
      .critical()
      .withStart(async () => { throw new Error('boom'); })
      .withStop(async () => {})
      .withRetry({ maxRetries: 0 }),
  );
  manager.addService(
    new taskbuffer.Service('AfterBroken')
      .dependsOn('Broken')
      .withStart(async () => { afterBrokenStarted = true; })
      .withStop(async () => {}),
  );
  let caught = false;
  try {
    await manager.start();
  } catch (err) {
    caught = true;
    // The startup error should name the failing critical service.
    expect((err as Error).message).toInclude('Broken');
  }
  expect(caught).toBeTrue();
  // AfterBroken should never have started because Broken (its dep) failed
  expect(afterBrokenStarted).toBeFalse();
});
// ── Test 7: Optional service failure continues startup ─────
tap.test('should continue startup when an optional service fails', async () => {
  const manager = new taskbuffer.ServiceManager({ name: 'OptionalTest' });
  let criticalCameUp = false;
  manager.addService(
    new taskbuffer.Service('Critical')
      .critical()
      .withStart(async () => { criticalCameUp = true; })
      .withStop(async () => {})
      .withRetry({ maxRetries: 0 }),
  );
  manager.addService(
    new taskbuffer.Service('Optional')
      .optional()
      .withStart(async () => { throw new Error('oops'); })
      .withStop(async () => {})
      .withRetry({ maxRetries: 0 }),
  );
  // An optional failure must not reject the overall startup.
  await manager.start();
  expect(criticalCameUp).toBeTrue();
  // Aggregate health is degraded while a registered service is failed.
  expect(manager.getHealth().overall).toEqual('degraded');
  expect(manager.getServiceStatus('Optional')!.state).toEqual('failed');
  await manager.stop();
});
// ── Test 8: Retry with backoff for optional services ───────
tap.test('should retry failed optional services', async () => {
  const manager = new taskbuffer.ServiceManager({ name: 'RetryTest' });
  let startAttempts = 0;
  // Fails twice, then succeeds on the third attempt.
  const flaky = new taskbuffer.Service('Flaky')
    .optional()
    .withStart(async () => {
      startAttempts += 1;
      if (startAttempts < 3) {
        throw new Error(`attempt ${startAttempts} failed`);
      }
    })
    .withStop(async () => {})
    .withRetry({ maxRetries: 5, baseDelayMs: 50, maxDelayMs: 100, backoffFactor: 1 });
  manager.addService(flaky);
  await manager.start();
  expect(startAttempts).toEqual(3);
  expect(manager.getServiceStatus('Flaky')!.state).toEqual('running');
  await manager.stop();
});
// ── Test 9: Reverse-order shutdown ─────────────────────────
tap.test('should stop services in reverse dependency order', async () => {
  const stopSequence: string[] = [];
  const manager = new taskbuffer.ServiceManager({ name: 'ShutdownTest' });
  // Chain: Base ← Middle ← Top; shutdown must unwind Top → Middle → Base.
  const chain: Array<[string, string | undefined]> = [
    ['Base', undefined],
    ['Middle', 'Base'],
    ['Top', 'Middle'],
  ];
  for (const [name, dep] of chain) {
    const svc = new taskbuffer.Service(name)
      .withStart(async () => {})
      .withStop(async () => { stopSequence.push(name); });
    manager.addService(dep ? svc.dependsOn(dep) : svc);
  }
  await manager.start();
  await manager.stop();
  // Top should stop before Middle, Middle before Base
  expect(stopSequence.indexOf('Top')).toBeLessThan(stopSequence.indexOf('Middle'));
  expect(stopSequence.indexOf('Middle')).toBeLessThan(stopSequence.indexOf('Base'));
});
// ── Test 10: Circular dependency detection ─────────────────
tap.test('should throw on circular dependency', async () => {
  const manager = new taskbuffer.ServiceManager({ name: 'CycleTest' });
  // A ↔ B form a two-node cycle that topological sort cannot resolve.
  for (const [name, dep] of [['A', 'B'], ['B', 'A']] as const) {
    manager.addService(
      new taskbuffer.Service(name)
        .dependsOn(dep)
        .withStart(async () => {})
        .withStop(async () => {}),
    );
  }
  let sawCycleError = false;
  try {
    await manager.start();
  } catch (err) {
    sawCycleError = true;
    expect((err as Error).message).toInclude('Circular dependency');
  }
  expect(sawCycleError).toBeTrue();
});
// ── Test 11: restartService cascades to dependents ─────────
tap.test('should restart service and its dependents', async () => {
  const startedNames: string[] = [];
  const stoppedNames: string[] = [];
  const manager = new taskbuffer.ServiceManager({ name: 'RestartTest' });
  const pairs: Array<[string, string | undefined]> = [
    ['Base', undefined],
    ['Dep', 'Base'],
  ];
  for (const [name, dep] of pairs) {
    const svc = new taskbuffer.Service(name)
      .withStart(async () => { startedNames.push(name); })
      .withStop(async () => { stoppedNames.push(name); })
      .withRetry({ maxRetries: 0 });
    manager.addService(dep ? svc.dependsOn(dep) : svc);
  }
  await manager.start();
  expect(startedNames).toEqual(['Base', 'Dep']);
  // Reset tracking before the restart so only the cascade is observed.
  startedNames.length = 0;
  stoppedNames.length = 0;
  await manager.restartService('Base');
  // Cascade: dependent stops first, then the target; target restarts,
  // then the dependent restarts.
  expect(stoppedNames).toEqual(['Dep', 'Base']);
  expect(startedNames).toEqual(['Base', 'Dep']);
  await manager.stop();
});
// ── Test 12: getHealth returns correct aggregated status ───
tap.test('should return correct health aggregation', async () => {
  const manager = new taskbuffer.ServiceManager({ name: 'HealthTest' });
  const noop = async () => {};
  manager.addService(
    new taskbuffer.Service('OK')
      .critical()
      .withStart(noop)
      .withStop(noop)
      .withRetry({ maxRetries: 0 }),
  );
  manager.addService(
    new taskbuffer.Service('AlsoOK')
      .optional()
      .withStart(noop)
      .withStop(noop)
      .withRetry({ maxRetries: 0 }),
  );
  await manager.start();
  const health = manager.getHealth();
  expect(health.overall).toEqual('healthy');
  expect(health.services.length).toEqual(2);
  expect(health.startedAt).toBeTruthy();
  expect(health.uptime).toBeGreaterThanOrEqual(0);
  await manager.stop();
});
// ── Test 13: Events emitted on state transitions ───────────
tap.test('should emit events on state transitions', async () => {
  const observed: taskbuffer.IServiceEvent[] = [];
  const manager = new taskbuffer.ServiceManager({ name: 'EventTest' });
  manager.addService(
    new taskbuffer.Service('Svc')
      .withStart(async () => {})
      .withStop(async () => {})
      .withRetry({ maxRetries: 0 }),
  );
  // serviceSubject aggregates events from every registered service.
  manager.serviceSubject.subscribe((event) => observed.push(event));
  await manager.start();
  await manager.stop();
  const seenTypes = observed.map((e) => e.type);
  expect(seenTypes).toContain('started');
  expect(seenTypes).toContain('stopped');
});
// ── Test 14: Parallel startup of independent services ──────
tap.test('should start independent services in parallel', async () => {
  const manager = new taskbuffer.ServiceManager({ name: 'ParallelTest' });
  const startedAt: Record<string, number> = {};
  for (const name of ['A', 'B']) {
    manager.addService(
      new taskbuffer.Service(name)
        .withStart(async () => {
          startedAt[name] = Date.now();
          await smartdelay.delayFor(100);
        })
        .withStop(async () => {}),
    );
  }
  await manager.start();
  // Both should start at roughly the same time (within 50ms)
  expect(Math.abs(startedAt['A'] - startedAt['B'])).toBeLessThan(50);
  await manager.stop();
});
// ── Test 15: getStatus snapshot ────────────────────────────
tap.test('should return accurate status snapshot', async () => {
  const statusService = new taskbuffer.Service('StatusTest')
    .critical()
    .dependsOn('X')
    .withStart(async () => {})
    .withStop(async () => {});
  // Snapshot before start reflects the configured-but-idle service.
  const before = statusService.getStatus();
  expect(before.state).toEqual('stopped');
  expect(before.name).toEqual('StatusTest');
  expect(before.criticality).toEqual('critical');
  expect(before.dependencies).toEqual(['X']);
  expect(before.errorCount).toEqual(0);
  await statusService.start();
  // Snapshot after start gains runtime fields (startedAt, uptime).
  const after = statusService.getStatus();
  expect(after.state).toEqual('running');
  expect(after.startedAt).toBeTruthy();
  expect(after.uptime).toBeGreaterThanOrEqual(0);
  await statusService.stop();
});
// ── Test 16: Missing dependency detection ──────────────────
tap.test('should throw when a dependency is not registered', async () => {
  const manager = new taskbuffer.ServiceManager({ name: 'MissingDepTest' });
  manager.addService(
    new taskbuffer.Service('Lonely')
      .dependsOn('Ghost') // 'Ghost' is never added to the manager
      .withStart(async () => {})
      .withStop(async () => {}),
  );
  let sawError = false;
  try {
    await manager.start();
  } catch (err) {
    sawError = true;
    const message = (err as Error).message;
    expect(message).toInclude('Ghost');
    expect(message).toInclude('not registered');
  }
  expect(sawError).toBeTrue();
});
// ── Test 17: No-op on double start/stop ────────────────────
tap.test('should be a no-op when starting an already-running service', async () => {
  let startCount = 0;
  let stopCount = 0;
  const service = new taskbuffer.Service('DoubleStart')
    .withStart(async () => { startCount++; })
    .withStop(async () => { stopCount++; });
  await service.start();
  await service.start(); // should be no-op
  expect(startCount).toEqual(1);
  await service.stop();
  await service.stop(); // should be no-op
  // Fix: the double-stop no-op was previously claimed in a comment but
  // never asserted — verify the stop callback ran exactly once.
  expect(stopCount).toEqual(1);
});
// ── Test 18: addServiceFromOptions convenience ─────────────
tap.test('should support addServiceFromOptions', async () => {
const manager = new taskbuffer.ServiceManager({ name: 'ConvenienceTest' });
let started = false;
const service = manager.addServiceFromOptions({
name: 'Easy',
start: async () => { started = true; },
stop: async () => {},
});
expect(service).toBeInstanceOf(taskbuffer.Service);
expect(service.name).toEqual('Easy');
await manager.start();
expect(started).toBeTrue();
await manager.stop();
});
// ═══════════════════════════════════════════════════════════
// NEW TESTS: Improvements 1-5
// ═══════════════════════════════════════════════════════════
// ── Test 19: service.instance stores start result ──────────
tap.test('should store start result as service.instance', async () => {
  const pool = { query: (sql: string) => `result: ${sql}` };
  const service = new taskbuffer.Service<typeof pool>('DB')
    .withStart(async () => pool)
    .withStop(async (inst) => {});
  // No instance until start() resolves.
  expect(service.instance).toBeUndefined();
  const result = await service.start();
  expect(result).toEqual(pool);
  expect(service.instance).toEqual(pool);
  expect(service.instance!.query('SELECT 1')).toEqual('result: SELECT 1');
  // Status should report hasInstance
  expect(service.getStatus().hasInstance).toBeTrue();
  await service.stop();
  // stop() clears the instance again.
  expect(service.instance).toBeUndefined();
  expect(service.getStatus().hasInstance).toBeFalse();
});
// ── Test 20: stop receives the instance ────────────────────
tap.test('should pass instance to stop function', async () => {
  let receivedInstance: any = null;
  const service = new taskbuffer.Service<{ id: number }>('InstanceStop')
    .withStart(async () => ({ id: 42 }))
    .withStop(async (inst) => { receivedInstance = inst; });
  await service.start();
  await service.stop();
  // The stop callback must receive the exact object that start produced.
  expect(receivedInstance).toBeTruthy();
  expect(receivedInstance.id).toEqual(42);
});
// ── Test 21: healthCheck receives the instance ─────────────
tap.test('should pass instance to health check function', async () => {
  let checkedInstance: any = null;
  const service = new taskbuffer.Service<{ healthy: boolean }>('InstanceHealth')
    .withStart(async () => ({ healthy: true }))
    .withStop(async () => {})
    .withHealthCheck(async (inst) => {
      checkedInstance = inst;
      return inst.healthy;
    }, { intervalMs: 60000 }); // long interval so it doesn't auto-run
  await service.start();
  // Manually trigger health check
  const ok = await service.checkHealth();
  expect(ok).toBeTrue();
  expect(checkedInstance).toBeTruthy();
  expect(checkedInstance.healthy).toBeTrue();
  await service.stop();
});
// ── Test 22: double start returns existing instance ────────
tap.test('should return existing instance on double start', async () => {
  let callCount = 0;
  const service = new taskbuffer.Service<number>('DoubleInstance')
    .withStart(async () => { callCount++; return callCount; })
    .withStop(async () => {});
  const first = await service.start();
  const second = await service.start();
  expect(first).toEqual(1);
  expect(second).toEqual(1); // should return same instance, not call start again
  expect(callCount).toEqual(1);
  await service.stop();
});
// ── Test 23: Labels on Service ─────────────────────────────
tap.test('should support labels on services', async () => {
  const service = new taskbuffer.Service('LabeledService')
    .withLabels({ type: 'database', env: 'production' })
    .withStart(async () => {})
    .withStop(async () => {});
  // hasLabel with one arg checks key presence; with two args checks the pair.
  expect(service.hasLabel('type')).toBeTrue();
  expect(service.hasLabel('type', 'database')).toBeTrue();
  expect(service.hasLabel('type', 'cache')).toBeFalse();
  expect(service.getLabel('env')).toEqual('production');
  service.setLabel('region', 'eu-west');
  expect(service.hasLabel('region')).toBeTrue();
  service.removeLabel('env');
  expect(service.hasLabel('env')).toBeFalse();
  // Labels in status
  const status = service.getStatus();
  expect(status.labels).toBeTruthy();
  expect(status.labels!.type).toEqual('database');
  expect(status.labels!.region).toEqual('eu-west');
});
// ── Test 24: Labels via IServiceOptions ────────────────────
tap.test('should accept labels in options constructor', async () => {
  const service = new taskbuffer.Service({
    name: 'OptionsLabeled',
    start: async () => {},
    stop: async () => {},
    labels: { kind: 'cache' },
  });
  expect(service.hasLabel('kind', 'cache')).toBeTrue();
});
// ── Test 25: ServiceManager.getServicesByLabel ─────────────
tap.test('should query services by label in ServiceManager', async () => {
  const manager = new taskbuffer.ServiceManager({ name: 'LabelQueryTest' });
  manager.addService(
    new taskbuffer.Service('Redis')
      .withLabels({ type: 'cache', tier: 'fast' })
      .withStart(async () => {})
      .withStop(async () => {}),
  );
  manager.addService(
    new taskbuffer.Service('Memcached')
      .withLabels({ type: 'cache', tier: 'fast' })
      .withStart(async () => {})
      .withStop(async () => {}),
  );
  manager.addService(
    new taskbuffer.Service('Postgres')
      .withLabels({ type: 'database', tier: 'slow' })
      .withStart(async () => {})
      .withStop(async () => {}),
  );
  const caches = manager.getServicesByLabel('type', 'cache');
  expect(caches.length).toEqual(2);
  const databases = manager.getServicesByLabel('type', 'database');
  expect(databases.length).toEqual(1);
  expect(databases[0].name).toEqual('Postgres');
  const statuses = manager.getServicesStatusByLabel('tier', 'fast');
  expect(statuses.length).toEqual(2);
  await manager.stop();
});
// ── Test 26: waitForState / waitForRunning ──────────────────
tap.test('should support waitForState and waitForRunning', async () => {
  const service = new taskbuffer.Service('WaitService')
    .withStart(async () => {
      await smartdelay.delayFor(50);
    })
    .withStop(async () => {});
  // Start in background
  const startPromise = service.start();
  // Wait for running state
  await service.waitForRunning(2000);
  expect(service.state).toEqual('running');
  await startPromise;
  // Now wait for stopped
  const stopPromise = service.stop();
  await service.waitForStopped(2000);
  expect(service.state).toEqual('stopped');
  await stopPromise;
});
// ── Test 27: waitForState timeout ──────────────────────────
tap.test('should timeout when waiting for a state that never comes', async () => {
  const service = new taskbuffer.Service('TimeoutWait')
    .withStart(async () => {})
    .withStop(async () => {});
  // Service is stopped, wait for 'running' with a short timeout
  let caught = false;
  try {
    await service.waitForState('running', 100);
  } catch (err) {
    caught = true;
    // Timeout error names both the awaited state and the fact it timed out.
    expect((err as Error).message).toInclude('timed out');
    expect((err as Error).message).toInclude('running');
  }
  expect(caught).toBeTrue();
});
// ── Test 28: waitForState already in target state ──────────
tap.test('should resolve immediately if already in target state', async () => {
  const service = new taskbuffer.Service('AlreadyThere')
    .withStart(async () => {})
    .withStop(async () => {});
  // Already stopped
  await service.waitForState('stopped', 100); // should not throw
  await service.start();
  await service.waitForState('running', 100); // should not throw
  await service.stop();
});
// ── Test 29: Per-service startup timeout ───────────────────
tap.test('should timeout when serviceStart hangs', async () => {
  const service = new taskbuffer.Service('SlowStart')
    .withStart(async () => {
      await smartdelay.delayFor(5000); // very slow
    })
    .withStop(async () => {})
    .withStartupTimeout(100); // 100ms timeout
  let caught = false;
  try {
    await service.start();
  } catch (err) {
    caught = true;
    expect((err as Error).message).toInclude('startup timed out');
    expect((err as Error).message).toInclude('100ms');
  }
  expect(caught).toBeTrue();
  // A startup timeout leaves the service in 'failed', not 'stopped'.
  expect(service.state).toEqual('failed');
});
// ── Test 30: Startup timeout via options ────────────────────
tap.test('should accept startupTimeoutMs in options constructor', async () => {
  const service = new taskbuffer.Service({
    name: 'TimeoutOptions',
    start: async () => { await smartdelay.delayFor(5000); },
    stop: async () => {},
    startupTimeoutMs: 100,
  });
  let caught = false;
  try {
    await service.start();
  } catch (err) {
    caught = true;
    expect((err as Error).message).toInclude('startup timed out');
  }
  expect(caught).toBeTrue();
});
// ── Test 31: Auto-restart on health check failure ──────────
tap.test('should auto-restart when health checks fail', async () => {
  let startCount = 0;
  let healthy = false;
  const service = new taskbuffer.Service('AutoRestart')
    .withStart(async () => {
      startCount++;
      // After first restart, become healthy
      if (startCount >= 2) {
        healthy = true;
      }
    })
    .withStop(async () => {})
    .withHealthCheck(async () => healthy, {
      intervalMs: 30000, // we'll call checkHealth manually
      failuresBeforeDegraded: 1,
      failuresBeforeFailed: 2,
      autoRestart: true,
      maxAutoRestarts: 3,
      autoRestartDelayMs: 50,
      autoRestartBackoffFactor: 1,
    });
  await service.start();
  expect(startCount).toEqual(1);
  // Fail health checks manually to push to failed
  await service.checkHealth(); // 1 failure -> degraded
  expect(service.state).toEqual('degraded');
  await service.checkHealth(); // 2 failures -> failed + auto-restart scheduled
  expect(service.state).toEqual('failed');
  // Wait for auto-restart to complete
  await service.waitForRunning(2000);
  expect(service.state).toEqual('running');
  expect(startCount).toEqual(2);
  await service.stop();
});
// ── Test 32: Auto-restart max attempts ─────────────────────
tap.test('should stop auto-restarting after max attempts', async () => {
  let startCount = 0;
  const events: string[] = [];
  const service = new taskbuffer.Service('MaxRestart')
    .withStart(async () => {
      startCount++;
      // Every restart attempt after the initial start fails on purpose.
      if (startCount > 1) {
        throw new Error('always fails');
      }
    })
    .withStop(async () => {})
    .withHealthCheck(async () => false, {
      intervalMs: 60000,
      failuresBeforeDegraded: 1,
      failuresBeforeFailed: 2,
      autoRestart: true,
      maxAutoRestarts: 2,
      autoRestartDelayMs: 50,
      autoRestartBackoffFactor: 1,
    });
  service.eventSubject.subscribe((e) => events.push(e.type));
  await service.start();
  expect(startCount).toEqual(1);
  // Push to failed
  await service.checkHealth();
  await service.checkHealth();
  // Wait for auto-restart attempts to exhaust
  await smartdelay.delayFor(500);
  // Should have emitted autoRestarting events
  expect(events).toContain('autoRestarting');
  // Service should be in failed state (restarts exhausted)
  expect(service.state).toEqual('failed');
  // Clean up
  await service.stop();
});
// ── Test 33: Builder chaining with new methods ─────────────
tap.test('should chain all new builder methods', async () => {
  const service = new taskbuffer.Service('FullBuilder')
    .critical()
    .dependsOn('A')
    .withStart(async () => ({ conn: true }))
    .withStop(async (inst) => {})
    .withHealthCheck(async (inst) => inst.conn)
    .withRetry({ maxRetries: 3 })
    .withStartupTimeout(5000)
    .withLabels({ env: 'test' });
  expect(service.criticality).toEqual('critical');
  expect(service.startupTimeoutMs).toEqual(5000);
  expect(service.hasLabel('env', 'test')).toBeTrue();
});
export default tap.start();

View File

@@ -3,6 +3,6 @@
*/
export const commitinfo = {
name: '@push.rocks/taskbuffer',
version: '6.1.2',
version: '8.0.2',
description: 'A flexible task management library supporting TypeScript, allowing for task buffering, scheduling, and execution with dependency management.'
}

View File

@@ -14,5 +14,22 @@ export type { ITaskStep } from './taskbuffer.classes.taskstep.js';
// Metadata interfaces
export type { ITaskMetadata, ITaskExecutionReport, IScheduledTaskInfo, ITaskEvent, TTaskEventType, ITaskConstraintGroupOptions, ITaskExecution, IRateLimitConfig, TResultSharingMode } from './taskbuffer.interfaces.js';
// Service lifecycle system
export { Service } from './taskbuffer.classes.service.js';
export { ServiceManager } from './taskbuffer.classes.servicemanager.js';
export type {
IServiceOptions,
IServiceStatus,
IServiceEvent,
IServiceManagerOptions,
IServiceManagerHealth,
IRetryConfig,
IHealthCheckConfig,
TServiceState,
TServiceCriticality,
TServiceEventType,
TOverallHealth,
} from './taskbuffer.interfaces.js';
import * as distributedCoordination from './taskbuffer.classes.distributedcoordinator.js';
export { distributedCoordination };

View File

@@ -0,0 +1,528 @@
import * as plugins from './taskbuffer.plugins.js';
import { logger } from './taskbuffer.logging.js';
import type {
TServiceState,
TServiceCriticality,
IServiceEvent,
IServiceStatus,
IRetryConfig,
IHealthCheckConfig,
IServiceOptions,
} from './taskbuffer.interfaces.js';
/**
* Service represents a long-running component with start/stop lifecycle,
* health checking, and retry capabilities.
*
* Use via builder pattern:
* new Service('MyService')
* .critical()
* .dependsOn('Database')
* .withStart(async () => { ... })
* .withStop(async (instance) => { ... })
* .withHealthCheck(async (instance) => { ... })
*
* Or extend for complex services:
* class MyService extends Service {
* protected async serviceStart() { ... }
* protected async serviceStop() { ... }
* }
*/
export class Service<T = any> {
  public readonly name: string;
  /** Stream of lifecycle events (started, stopped, failed, degraded, healthCheck, ...). */
  public readonly eventSubject = new plugins.smartrx.rxjs.Subject<IServiceEvent>();
  // ── Internal state ─────────────────────────────────
  private _state: TServiceState = 'stopped';
  private _criticality: TServiceCriticality = 'optional';
  private _dependencies: string[] = [];
  private _retryConfig: IRetryConfig | undefined;
  private _healthCheckConfig: IHealthCheckConfig | undefined;
  private _startupTimeoutMs: number | undefined;
  // Builder-provided functions
  private _startFn: (() => Promise<T>) | undefined;
  private _stopFn: ((instance: T) => Promise<void>) | undefined;
  private _healthCheckFn: ((instance: T) => Promise<boolean>) | undefined;
  // Instance: the resolved start result (undefined while not running)
  private _instance: T | undefined;
  // Labels: free-form key/value metadata, queryable via ServiceManager
  public labels: Record<string, string> = {};
  // Runtime tracking
  private _startedAt: number | undefined;
  private _stoppedAt: number | undefined;
  private _errorCount = 0;
  private _lastError: string | undefined;
  private _retryCount = 0;
  // Health check tracking
  private _healthCheckTimer: ReturnType<typeof setTimeout> | undefined;
  private _lastHealthCheck: number | undefined;
  private _healthCheckOk: boolean | undefined;
  private _consecutiveHealthFailures = 0;
  // Auto-restart tracking
  private _autoRestartCount = 0;
  private _autoRestartTimer: ReturnType<typeof setTimeout> | undefined;

  /**
   * @param nameOrOptions either the service name (builder style) or a full
   *   IServiceOptions bundle supplying start/stop/healthCheck plus metadata.
   */
  constructor(nameOrOptions: string | IServiceOptions<T>) {
    if (typeof nameOrOptions === 'string') {
      this.name = nameOrOptions;
    } else {
      this.name = nameOrOptions.name;
      this._startFn = nameOrOptions.start;
      this._stopFn = nameOrOptions.stop;
      this._healthCheckFn = nameOrOptions.healthCheck;
      this._criticality = nameOrOptions.criticality || 'optional';
      this._dependencies = nameOrOptions.dependencies || [];
      this._retryConfig = nameOrOptions.retry;
      this._healthCheckConfig = nameOrOptions.healthCheckConfig;
      this._startupTimeoutMs = nameOrOptions.startupTimeoutMs;
      if (nameOrOptions.labels) {
        this.labels = { ...nameOrOptions.labels };
      }
    }
  }

  // ── Builder methods ──────────────────────────────────
  /** Mark this service as critical: its startup failure aborts manager startup. */
  public critical(): this {
    this._criticality = 'critical';
    return this;
  }
  /** Mark this service as optional (the default): failures are isolated. */
  public optional(): this {
    this._criticality = 'optional';
    return this;
  }
  /** Declare services that must start before this one. */
  public dependsOn(...serviceNames: string[]): this {
    this._dependencies.push(...serviceNames);
    return this;
  }
  /** Provide the start function; its resolved value becomes `instance`. */
  public withStart(fn: () => Promise<T>): this {
    this._startFn = fn;
    return this;
  }
  /** Provide the stop function; it receives the started instance. */
  public withStop(fn: (instance: T) => Promise<void>): this {
    this._stopFn = fn;
    return this;
  }
  /** Provide a health check (receives the instance) and optional config. */
  public withHealthCheck(fn: (instance: T) => Promise<boolean>, config?: IHealthCheckConfig): this {
    this._healthCheckFn = fn;
    if (config) {
      this._healthCheckConfig = config;
    }
    return this;
  }
  /** Configure retry behavior used by ServiceManager when starting this service. */
  public withRetry(config: IRetryConfig): this {
    this._retryConfig = config;
    return this;
  }
  /** Fail start() with an error if serviceStart takes longer than `ms`. */
  public withStartupTimeout(ms: number): this {
    this._startupTimeoutMs = ms;
    return this;
  }
  /** Merge the given labels into this service's label set. */
  public withLabels(labelsArg: Record<string, string>): this {
    Object.assign(this.labels, labelsArg);
    return this;
  }

  // ── Label helpers ──────────────────────────────────
  public setLabel(key: string, value: string): void {
    this.labels[key] = value;
  }
  public getLabel(key: string): string | undefined {
    return this.labels[key];
  }
  /** @returns true if the label existed and was removed. */
  public removeLabel(key: string): boolean {
    if (key in this.labels) {
      delete this.labels[key];
      return true;
    }
    return false;
  }
  /** Check for a label key, or (when `value` is given) an exact key/value pair. */
  public hasLabel(key: string, value?: string): boolean {
    if (value !== undefined) {
      return this.labels[key] === value;
    }
    return key in this.labels;
  }

  // ── Overridable hooks (for subclassing) ──────────────
  protected async serviceStart(): Promise<T> {
    if (this._startFn) {
      return this._startFn();
    }
    throw new Error(`Service '${this.name}': no start function provided. Use withStart() or override serviceStart().`);
  }
  protected async serviceStop(): Promise<void> {
    if (this._stopFn) {
      return this._stopFn(this._instance as T);
    }
    // Default: no-op stop is fine (some services don't need explicit cleanup)
  }
  protected async serviceHealthCheck(): Promise<boolean> {
    if (this._healthCheckFn) {
      return this._healthCheckFn(this._instance as T);
    }
    // No health check configured — assume healthy if running
    return this._state === 'running';
  }

  // ── Lifecycle (called by ServiceManager) ─────────────
  /**
   * Starts the service and returns the instance produced by serviceStart.
   * Idempotent: when already running, the existing instance is returned
   * without invoking serviceStart again. On failure the state becomes
   * 'failed', a 'failed' event is emitted and the error is rethrown.
   */
  public async start(): Promise<T> {
    if (this._state === 'running') {
      return this._instance as T;
    }
    this.setState('starting');
    try {
      let result: T;
      if (this._startupTimeoutMs) {
        // Race startup against a timeout. The timer handle is captured and
        // always cleared so it cannot linger and keep the event loop alive
        // after startup settles — the same lingering-timeout leak the
        // 8.0.1/8.0.2 releases fixed in ServiceManager. Note: on timeout,
        // serviceStart keeps running in the background; its eventual result
        // is discarded.
        let startupTimer: ReturnType<typeof setTimeout> | undefined;
        try {
          result = await Promise.race([
            this.serviceStart(),
            new Promise<never>((_, reject) => {
              startupTimer = setTimeout(
                () => reject(new Error(`Service '${this.name}': startup timed out after ${this._startupTimeoutMs}ms`)),
                this._startupTimeoutMs,
              );
            }),
          ]);
        } finally {
          if (startupTimer) {
            clearTimeout(startupTimer);
          }
        }
      } else {
        result = await this.serviceStart();
      }
      this._instance = result;
      this._startedAt = Date.now();
      this._stoppedAt = undefined;
      this._consecutiveHealthFailures = 0;
      this._healthCheckOk = true;
      this._autoRestartCount = 0;
      this.setState('running');
      this.emitEvent('started');
      this.startHealthCheckTimer();
      return result;
    } catch (err) {
      this._errorCount++;
      this._lastError = err instanceof Error ? err.message : String(err);
      this.setState('failed');
      this.emitEvent('failed', { error: this._lastError });
      throw err;
    }
  }

  /**
   * Stops the service: cancels the health-check and auto-restart timers,
   * invokes serviceStop (errors there are logged, not rethrown), clears the
   * instance and emits 'stopped'. Idempotent while stopped/stopping.
   */
  public async stop(): Promise<void> {
    if (this._state === 'stopped' || this._state === 'stopping') {
      return;
    }
    this.stopHealthCheckTimer();
    this.clearAutoRestartTimer();
    this.setState('stopping');
    try {
      await this.serviceStop();
    } catch (err) {
      logger.log('warn', `Service '${this.name}' error during stop: ${err instanceof Error ? err.message : String(err)}`);
    }
    this._instance = undefined;
    this._stoppedAt = Date.now();
    this.setState('stopped');
    this.emitEvent('stopped');
  }

  /**
   * Runs one health check, bounded by healthCheckConfig.timeoutMs (default
   * 5000ms). Returns undefined when no health check is configured, otherwise
   * the check result; a rejection or timeout counts as a failure. Consecutive
   * failures drive degraded/failed transitions via handleHealthFailure.
   */
  public async checkHealth(): Promise<boolean | undefined> {
    if (!this._healthCheckFn && !this.hasOverriddenHealthCheck()) {
      return undefined;
    }
    // Cancellable timeout: cleared in `finally` so it never outlives the
    // check (previously the race left the timer pending for up to timeoutMs).
    let healthTimer: ReturnType<typeof setTimeout> | undefined;
    try {
      const config = this._healthCheckConfig;
      const timeoutMs = config?.timeoutMs ?? 5000;
      const result = await Promise.race([
        this.serviceHealthCheck(),
        new Promise<boolean>((_, reject) => {
          healthTimer = setTimeout(() => reject(new Error('Health check timed out')), timeoutMs);
        }),
      ]);
      this._lastHealthCheck = Date.now();
      this._healthCheckOk = result;
      if (result) {
        this._consecutiveHealthFailures = 0;
        if (this._state === 'degraded') {
          this.setState('running');
          this.emitEvent('recovered');
        }
      } else {
        this._consecutiveHealthFailures++;
        this.handleHealthFailure();
      }
      this.emitEvent('healthCheck');
      return result;
    } catch (err) {
      // Both a rejecting health check and a timeout land here.
      this._lastHealthCheck = Date.now();
      this._healthCheckOk = false;
      this._consecutiveHealthFailures++;
      this.handleHealthFailure();
      this.emitEvent('healthCheck');
      return false;
    } finally {
      if (healthTimer) {
        clearTimeout(healthTimer);
      }
    }
  }

  // ── Wait / readiness ──────────────────────────────────
  /**
   * Resolves once the service reaches one of the target states; rejects
   * after `timeoutMs` when given. Relies on emitted events (plus a re-check
   * of the current state right after subscribing to close the race window),
   * so it can only observe states that produce events — e.g. 'running' via
   * 'started' and 'stopped' via 'stopped'.
   */
  public async waitForState(
    targetState: TServiceState | TServiceState[],
    timeoutMs?: number,
  ): Promise<void> {
    const states = Array.isArray(targetState) ? targetState : [targetState];
    // Already in target state
    if (states.includes(this._state)) {
      return;
    }
    return new Promise<void>((resolve, reject) => {
      let timer: ReturnType<typeof setTimeout> | undefined;
      let settled = false;
      // Single settle path: unsubscribe + clear timer exactly once.
      const settle = (fn: () => void) => {
        if (settled) return;
        settled = true;
        subscription.unsubscribe();
        if (timer) clearTimeout(timer);
        fn();
      };
      const subscription = this.eventSubject.subscribe((event) => {
        if (states.includes(event.state)) {
          settle(resolve);
        }
      });
      // Re-check after subscribing to close the race window
      if (states.includes(this._state)) {
        settle(resolve);
        return;
      }
      if (timeoutMs !== undefined) {
        timer = setTimeout(() => {
          settle(() =>
            reject(
              new Error(
                `Service '${this.name}': timed out waiting for state [${states.join(', ')}] after ${timeoutMs}ms (current: ${this._state})`,
              ),
            ),
          );
        }, timeoutMs);
      }
    });
  }
  public async waitForRunning(timeoutMs?: number): Promise<void> {
    return this.waitForState('running', timeoutMs);
  }
  public async waitForStopped(timeoutMs?: number): Promise<void> {
    return this.waitForState('stopped', timeoutMs);
  }

  // ── State ────────────────────────────────────────────
  public get state(): TServiceState {
    return this._state;
  }
  public get criticality(): TServiceCriticality {
    return this._criticality;
  }
  /** Defensive copy — mutating the returned array does not alter the service. */
  public get dependencies(): string[] {
    return [...this._dependencies];
  }
  public get retryConfig(): IRetryConfig | undefined {
    return this._retryConfig;
  }
  public get startupTimeoutMs(): number | undefined {
    return this._startupTimeoutMs;
  }
  /** The value resolved by serviceStart, or undefined while not running. */
  public get instance(): T | undefined {
    return this._instance;
  }
  public get errorCount(): number {
    return this._errorCount;
  }
  public get retryCount(): number {
    return this._retryCount;
  }
  public set retryCount(value: number) {
    this._retryCount = value;
  }

  /** Point-in-time status snapshot; collections are copied, safe to mutate. */
  public getStatus(): IServiceStatus {
    return {
      name: this.name,
      state: this._state,
      criticality: this._criticality,
      startedAt: this._startedAt,
      stoppedAt: this._stoppedAt,
      lastHealthCheck: this._lastHealthCheck,
      healthCheckOk: this._healthCheckOk,
      uptime: this._startedAt && this._state === 'running'
        ? Date.now() - this._startedAt
        : undefined,
      errorCount: this._errorCount,
      lastError: this._lastError,
      retryCount: this._retryCount,
      dependencies: [...this._dependencies],
      labels: { ...this.labels },
      hasInstance: this._instance !== undefined,
    };
  }

  // ── Internal helpers ─────────────────────────────────
  private setState(state: TServiceState): void {
    // State changes do not emit on their own; emitEvent carries the state.
    this._state = state;
  }
  private emitEvent(type: IServiceEvent['type'], extra?: Partial<IServiceEvent>): void {
    this.eventSubject.next({
      type,
      serviceName: this.name,
      state: this._state,
      timestamp: Date.now(),
      ...extra,
    });
  }
  /** Applies degraded/failed thresholds after a failed health check. */
  private handleHealthFailure(): void {
    const config = this._healthCheckConfig;
    const failuresBeforeDegraded = config?.failuresBeforeDegraded ?? 3;
    const failuresBeforeFailed = config?.failuresBeforeFailed ?? 5;
    if (this._state === 'running' && this._consecutiveHealthFailures >= failuresBeforeDegraded) {
      this.setState('degraded');
      this.emitEvent('degraded');
    }
    if (this._consecutiveHealthFailures >= failuresBeforeFailed) {
      this.setState('failed');
      this._lastError = `Health check failed ${this._consecutiveHealthFailures} consecutive times`;
      this.emitEvent('failed', { error: this._lastError });
      this.stopHealthCheckTimer();
      // Auto-restart if configured
      if (config?.autoRestart) {
        this.scheduleAutoRestart();
      }
    }
  }
  /**
   * Schedules a delayed restart attempt with exponential backoff (capped at
   * 60s). Gives up once maxAutoRestarts (default 3) attempts are exhausted.
   */
  private scheduleAutoRestart(): void {
    const config = this._healthCheckConfig;
    const maxRestarts = config?.maxAutoRestarts ?? 3;
    if (maxRestarts > 0 && this._autoRestartCount >= maxRestarts) {
      logger.log('warn', `Service '${this.name}': max auto-restarts (${maxRestarts}) exceeded`);
      return;
    }
    const baseDelay = config?.autoRestartDelayMs ?? 5000;
    const factor = config?.autoRestartBackoffFactor ?? 2;
    const delay = Math.min(baseDelay * Math.pow(factor, this._autoRestartCount), 60000);
    this._autoRestartCount++;
    this.emitEvent('autoRestarting', { attempt: this._autoRestartCount });
    this._autoRestartTimer = setTimeout(async () => {
      this._autoRestartTimer = undefined;
      try {
        // Stop first to clean up, then start fresh
        this._instance = undefined;
        this._stoppedAt = Date.now();
        this.setState('stopped');
        await this.start();
        // Success — reset counter
        this._autoRestartCount = 0;
      } catch (err) {
        logger.log('warn', `Service '${this.name}': auto-restart attempt ${this._autoRestartCount} failed: ${err instanceof Error ? err.message : String(err)}`);
        // Schedule another attempt
        this.scheduleAutoRestart();
      }
    }, delay);
    // unref (where available, i.e. Node) so a pending restart never blocks exit
    if (this._autoRestartTimer && typeof this._autoRestartTimer === 'object' && 'unref' in this._autoRestartTimer) {
      (this._autoRestartTimer as any).unref();
    }
  }
  private clearAutoRestartTimer(): void {
    if (this._autoRestartTimer) {
      clearTimeout(this._autoRestartTimer);
      this._autoRestartTimer = undefined;
    }
  }
  /** Starts the periodic health-check loop (default interval 30s). */
  private startHealthCheckTimer(): void {
    if (!this._healthCheckFn && !this.hasOverriddenHealthCheck()) {
      return;
    }
    const config = this._healthCheckConfig;
    const intervalMs = config?.intervalMs ?? 30000;
    this.stopHealthCheckTimer();
    const tick = () => {
      // The loop ends silently once the service leaves running/degraded.
      if (this._state !== 'running' && this._state !== 'degraded') {
        return;
      }
      this.checkHealth().catch(() => {});
      this._healthCheckTimer = setTimeout(tick, intervalMs);
      if (this._healthCheckTimer && typeof this._healthCheckTimer === 'object' && 'unref' in this._healthCheckTimer) {
        (this._healthCheckTimer as any).unref();
      }
    };
    this._healthCheckTimer = setTimeout(tick, intervalMs);
    if (this._healthCheckTimer && typeof this._healthCheckTimer === 'object' && 'unref' in this._healthCheckTimer) {
      (this._healthCheckTimer as any).unref();
    }
  }
  private stopHealthCheckTimer(): void {
    if (this._healthCheckTimer) {
      clearTimeout(this._healthCheckTimer);
      this._healthCheckTimer = undefined;
    }
  }
  /** True when a subclass overrides serviceHealthCheck (builder fn absent). */
  private hasOverriddenHealthCheck(): boolean {
    return this.serviceHealthCheck !== Service.prototype.serviceHealthCheck;
  }
}

View File

@@ -0,0 +1,436 @@
import * as plugins from './taskbuffer.plugins.js';
import { logger } from './taskbuffer.logging.js';
import { Service } from './taskbuffer.classes.service.js';
import type {
IServiceEvent,
IServiceStatus,
IServiceManagerOptions,
IServiceManagerHealth,
IServiceOptions,
IRetryConfig,
TOverallHealth,
} from './taskbuffer.interfaces.js';
/**
* ServiceManager orchestrates multiple Service instances with:
* - Dependency-ordered startup (topological sort, level-by-level parallel)
* - Failure isolation (optional services don't crash the system)
* - Retry with exponential backoff for failed optional services
* - Reverse-dependency-ordered shutdown
* - Aggregated health status
*/
export class ServiceManager {
public readonly name: string;
public readonly serviceSubject = new plugins.smartrx.rxjs.Subject<IServiceEvent>();
private services = new Map<string, Service>();
private startupOrder: string[][] = []; // levels of service names
private options: Required<IServiceManagerOptions>;
private _startedAt: number | undefined;
private subscriptions: plugins.smartrx.rxjs.Subscription[] = [];
constructor(options?: IServiceManagerOptions) {
this.name = options?.name || 'ServiceManager';
this.options = {
name: this.name,
defaultRetry: options?.defaultRetry || { maxRetries: 3, baseDelayMs: 1000, maxDelayMs: 30000, backoffFactor: 2 },
defaultHealthCheck: options?.defaultHealthCheck || {},
startupTimeoutMs: options?.startupTimeoutMs ?? 120000,
shutdownTimeoutMs: options?.shutdownTimeoutMs ?? 30000,
};
}
// ── Service Registration ─────────────────────────────
public addService(service: Service): void {
if (this.services.has(service.name)) {
throw new Error(`Service '${service.name}' is already registered`);
}
this.services.set(service.name, service);
// Forward service events to the aggregated subject
const sub = service.eventSubject.subscribe((event) => {
this.serviceSubject.next(event);
});
this.subscriptions.push(sub);
}
public addServiceFromOptions<T>(options: IServiceOptions<T>): Service<T> {
const service = new Service<T>(options);
this.addService(service);
return service;
}
public removeService(name: string): void {
// Check no other service depends on this one
for (const [svcName, svc] of this.services) {
if (svc.dependencies.includes(name)) {
throw new Error(`Cannot remove service '${name}': service '${svcName}' depends on it`);
}
}
this.services.delete(name);
}
  // ── Lifecycle ────────────────────────────────────────
  /**
   * Starts all registered services level-by-level in topological dependency
   * order, optionally bounded by a global startup timeout. The timeout uses
   * a cancellable smartdelay Timeout that is cancelled in the finally block
   * so it cannot linger after startup settles (8.0.2 fix).
   *
   * @throws if dependency resolution fails (e.g. a dependency is not
   *   registered), if a critical service fails to start, or if the global
   *   startup timeout elapses first.
   */
  public async start(): Promise<void> {
    // Build startup order via topological sort
    this.startupOrder = this.topologicalSort();
    this._startedAt = Date.now();
    const startupPromise = this.startAllLevels();
    // Enforce global startup timeout
    if (this.options.startupTimeoutMs) {
      const timeout = new plugins.smartdelay.Timeout(this.options.startupTimeoutMs);
      // Turn timer expiry into a rejection that competes in the race below.
      const timeoutPromise = timeout.promise.then(() => {
        throw new Error(`${this.name}: global startup timeout exceeded (${this.options.startupTimeoutMs}ms)`);
      });
      try {
        await Promise.race([startupPromise, timeoutPromise]);
      } finally {
        timeout.cancel(); // always stop the timer, on success or failure
      }
    } else {
      await startupPromise;
    }
  }
  /**
   * Starts services level by level; services within a level start in
   * parallel via Promise.allSettled. Failures of optional services are
   * logged and tolerated; a critical-service failure rolls back every
   * service started so far (in reverse start order) and aborts startup
   * with an error.
   */
  private async startAllLevels(): Promise<void> {
    const startedServices: string[] = [];
    logger.log('info', `${this.name}: starting ${this.services.size} services in ${this.startupOrder.length} levels`);
    for (let levelIdx = 0; levelIdx < this.startupOrder.length; levelIdx++) {
      const level = this.startupOrder[levelIdx];
      logger.log('info', `${this.name}: starting level ${levelIdx}: [${level.join(', ')}]`);
      const results = await Promise.allSettled(
        level.map(async (name) => {
          const service = this.services.get(name)!;
          try {
            await this.startServiceWithRetry(service);
            startedServices.push(name);
          } catch (err) {
            if (service.criticality === 'critical') {
              // Rethrow so the allSettled result below shows 'rejected'.
              throw err;
            }
            // Optional service — log and continue
            logger.log('warn', `${this.name}: optional service '${name}' failed to start: ${err instanceof Error ? err.message : String(err)}`);
          }
        }),
      );
      // Check if any critical service failed
      for (let i = 0; i < results.length; i++) {
        const result = results[i];
        if (result.status === 'rejected') {
          // results[i] corresponds positionally to level[i].
          const name = level[i];
          const service = this.services.get(name);
          if (service && service.criticality === 'critical') {
            logger.log('error', `${this.name}: critical service '${name}' failed, aborting startup`);
            // Rollback: stop all started services in reverse order
            await this.stopServices(startedServices.reverse());
            throw new Error(`Critical service '${name}' failed to start: ${result.reason instanceof Error ? result.reason.message : String(result.reason)}`);
          }
        }
      }
    }
    const statuses = this.getAllStatuses();
    const running = statuses.filter((s) => s.state === 'running').length;
    const failed = statuses.filter((s) => s.state === 'failed').length;
    logger.log('info', `${this.name}: startup complete — ${running} running, ${failed} failed`);
  }
  /**
   * Stops all services level by level in reverse startup order. Each stop is
   * bounded by shutdownTimeoutMs via a cancellable smartdelay Timeout that is
   * cancelled once the service has stopped, so no timeout handler lingers
   * (8.0.1 fix). Stop errors are logged, never rethrown. Finally tears down
   * the event-forwarding subscriptions and resets the start timestamp.
   */
  public async stop(): Promise<void> {
    logger.log('info', `${this.name}: stopping all services`);
    // Stop in reverse startup order
    const reversedLevels = [...this.startupOrder].reverse();
    for (const level of reversedLevels) {
      // Only services that are not already stopped need a stop call.
      const runningInLevel = level.filter((name) => {
        const svc = this.services.get(name);
        return svc && svc.state !== 'stopped';
      });
      if (runningInLevel.length === 0) continue;
      await Promise.allSettled(
        runningInLevel.map(async (name) => {
          const service = this.services.get(name)!;
          try {
            const timeout = new plugins.smartdelay.Timeout(this.options.shutdownTimeoutMs);
            const timeoutPromise = timeout.promise.then(() => {
              throw new Error(`Timeout stopping service '${name}'`);
            });
            try {
              await Promise.race([service.stop(), timeoutPromise]);
            } finally {
              timeout.cancel(); // don't let the timeout outlive the stop
            }
          } catch (err) {
            logger.log('warn', `${this.name}: error stopping '${name}': ${err instanceof Error ? err.message : String(err)}`);
          }
        }),
      );
    }
    // Clean up subscriptions
    for (const sub of this.subscriptions) {
      sub.unsubscribe();
    }
    this.subscriptions = [];
    this._startedAt = undefined;
    logger.log('info', `${this.name}: all services stopped`);
  }
// ── Querying ─────────────────────────────────────────
public getService(name: string): Service | undefined {
return this.services.get(name);
}
public getServiceStatus(name: string): IServiceStatus | undefined {
return this.services.get(name)?.getStatus();
}
public getAllStatuses(): IServiceStatus[] {
return Array.from(this.services.values()).map((s) => s.getStatus());
}
public getServicesByLabel(key: string, value: string): Service[] {
return Array.from(this.services.values()).filter((s) => s.labels[key] === value);
}
public getServicesStatusByLabel(key: string, value: string): IServiceStatus[] {
return this.getServicesByLabel(key, value).map((s) => s.getStatus());
}
/**
 * Aggregate manager health: 'unhealthy' when any critical service is down,
 * 'degraded' when any service at all is down, otherwise 'healthy'.
 * A service counts as "down" when it is neither running nor cleanly stopped.
 */
public getHealth(): IServiceManagerHealth {
  const services = this.getAllStatuses();
  const isDown = (s: IServiceStatus) => s.state !== 'running' && s.state !== 'stopped';
  let overall: TOverallHealth;
  if (services.some((s) => s.criticality === 'critical' && isDown(s))) {
    overall = 'unhealthy';
  } else if (services.some(isDown)) {
    overall = 'degraded';
  } else {
    overall = 'healthy';
  }
  return {
    overall,
    services,
    startedAt: this._startedAt,
    uptime: this._startedAt ? Date.now() - this._startedAt : undefined,
  };
}
// ── Runtime Operations ───────────────────────────────
/**
 * Restart a service together with everything that transitively depends on it.
 *
 * Dependents are stopped in reverse dependency order, the target is stopped
 * and started (with retry), and then dependents are started again in forward
 * dependency order. Only dependents that were actually running before this
 * call are restarted — services that were deliberately stopped beforehand
 * stay stopped (previously they were started as a side effect).
 *
 * @param name the registered service name to restart
 * @throws Error when no service with that name is registered, or when the
 *         target itself fails to start after all retries
 */
public async restartService(name: string): Promise<void> {
  const service = this.services.get(name);
  if (!service) {
    throw new Error(`Service '${name}' not found`);
  }
  // Find all transitive dependents
  const dependents = this.getTransitiveDependents(name);
  // Record which dependents are currently up, so the restart phase does not
  // start services that were intentionally stopped before this call.
  const wasRunning = new Set(
    dependents.filter((depName) => {
      const svc = this.services.get(depName);
      return svc !== undefined && svc.state !== 'stopped';
    }),
  );
  // Stop dependents in reverse dependency order
  const dependentLevels = this.getLevelsForServices(dependents).reverse();
  for (const level of dependentLevels) {
    await Promise.allSettled(
      level.map((depName) => {
        const svc = this.services.get(depName);
        return svc && svc.state !== 'stopped' ? svc.stop() : Promise.resolve();
      }),
    );
  }
  // Stop the target service
  await service.stop();
  // Start the target service (propagates failure to the caller)
  await this.startServiceWithRetry(service);
  // Restart previously-running dependents in dependency order
  const dependentLevelsForward = this.getLevelsForServices(dependents);
  for (const level of dependentLevelsForward) {
    await Promise.allSettled(
      level
        .filter((depName) => wasRunning.has(depName))
        .map(async (depName) => {
          const svc = this.services.get(depName)!;
          try {
            await this.startServiceWithRetry(svc);
          } catch (err) {
            // Dependent failures are isolated: logged, not propagated.
            logger.log('warn', `${this.name}: failed to restart dependent '${depName}': ${err instanceof Error ? err.message : String(err)}`);
          }
        }),
    );
  }
}
// ── Internal helpers ─────────────────────────────────
/**
 * Start a service, retrying on failure with capped exponential backoff and
 * ±20% jitter. Emits a 'retrying' event before each delay and rethrows the
 * last error once all attempts are exhausted.
 */
private async startServiceWithRetry(service: Service): Promise<void> {
  const cfg = service.retryConfig || this.options.defaultRetry;
  const maxRetries = cfg.maxRetries ?? 3;
  const baseDelay = cfg.baseDelayMs ?? 1000;
  const maxDelay = cfg.maxDelayMs ?? 30000;
  const factor = cfg.backoffFactor ?? 2;
  let lastError: Error | undefined;
  let attempt = 0;
  while (attempt <= maxRetries) {
    try {
      await service.start();
      return;
    } catch (err) {
      lastError = err instanceof Error ? err : new Error(String(err));
      service.retryCount = attempt + 1;
      if (attempt < maxRetries) {
        // Exponential backoff, capped at maxDelay, jittered to avoid
        // synchronized retries across services.
        const capped = Math.min(baseDelay * Math.pow(factor, attempt), maxDelay);
        const actualDelay = Math.floor(capped * (0.8 + Math.random() * 0.4));
        service.eventSubject.next({
          type: 'retrying',
          serviceName: service.name,
          state: service.state,
          timestamp: Date.now(),
          error: lastError.message,
          attempt: attempt + 1,
        });
        logger.log('info', `${this.name}: retrying '${service.name}' in ${actualDelay}ms (attempt ${attempt + 1}/${maxRetries})`);
        await plugins.smartdelay.delayFor(actualDelay);
      }
    }
    attempt++;
  }
  throw lastError!;
}
private async stopServices(names: string[]): Promise<void> {
await Promise.allSettled(
names.map(async (name) => {
const service = this.services.get(name);
if (service && service.state !== 'stopped') {
try {
await service.stop();
} catch (err) {
logger.log('warn', `${this.name}: error stopping '${name}' during rollback: ${err instanceof Error ? err.message : String(err)}`);
}
}
}),
);
}
/**
 * Topological sort using Kahn's algorithm.
 * Returns levels of service names — services within a level can start in parallel.
 * @throws Error when a dependency is not registered or a cycle is detected.
 */
private topologicalSort(): string[][] {
  const names = new Set(this.services.keys());
  const inDegree = new Map<string, number>();
  const dependents = new Map<string, string[]>(); // dep -> services that depend on it
  // Seed every node with zero in-degree and no dependents.
  for (const name of names) {
    inDegree.set(name, 0);
    dependents.set(name, []);
  }
  // Add one edge per declared dependency.
  for (const [name, service] of this.services) {
    for (const dep of service.dependencies) {
      if (!names.has(dep)) {
        throw new Error(`Service '${name}' depends on '${dep}', which is not registered`);
      }
      inDegree.set(name, (inDegree.get(name) || 0) + 1);
      dependents.get(dep)!.push(name);
    }
  }
  // Peel off levels: each level is every node with no unmet dependencies.
  const levels: string[][] = [];
  const remaining = new Set(names);
  while (remaining.size > 0) {
    const level = Array.from(remaining).filter((name) => (inDegree.get(name) || 0) === 0);
    if (level.length === 0) {
      // No startable node but services remain: must be a dependency cycle.
      const cycleNodes = Array.from(remaining).join(', ');
      throw new Error(`Circular dependency detected involving services: ${cycleNodes}`);
    }
    for (const name of level) {
      remaining.delete(name);
      for (const dependent of dependents.get(name) || []) {
        inDegree.set(dependent, (inDegree.get(dependent) || 0) - 1);
      }
    }
    levels.push(level);
  }
  return levels;
}
/**
 * Get all services that transitively depend on the given service.
 * Breadth-first walk over reverse dependency edges; the result excludes
 * `name` itself.
 */
private getTransitiveDependents(name: string): string[] {
  const found = new Set<string>();
  const frontier: string[] = [name];
  while (frontier.length > 0) {
    const current = frontier.shift()!;
    for (const [svcName, svc] of this.services) {
      if (!found.has(svcName) && svc.dependencies.includes(current)) {
        found.add(svcName);
        frontier.push(svcName);
      }
    }
  }
  return Array.from(found);
}
/**
 * Organize a set of service names into dependency-ordered levels by
 * filtering the precomputed global startup order down to the given names.
 */
private getLevelsForServices(names: string[]): string[][] {
  if (names.length === 0) {
    return [];
  }
  const wanted = new Set(names);
  const levels: string[][] = [];
  for (const level of this.startupOrder) {
    const subset = level.filter((n) => wanted.has(n));
    if (subset.length > 0) {
      levels.push(subset);
    }
  }
  return levels;
}
}

View File

@@ -79,4 +79,114 @@ export interface ITaskEvent {
timestamp: number;
stepName?: string; // present when type === 'step'
error?: string; // present when type === 'failed'
}
// ── Service Lifecycle Types ──────────────────────────────────────
/** Lifecycle states a service moves through. */
export type TServiceState =
| 'stopped'
| 'starting'
| 'running'
| 'degraded'
| 'failed'
| 'stopping';
/** Whether a service's failure makes the whole manager unhealthy ('critical') or merely degraded ('optional'). */
export type TServiceCriticality = 'critical' | 'optional';
/** Kinds of lifecycle events a service can emit. */
export type TServiceEventType =
| 'started'
| 'stopped'
| 'failed'
| 'degraded'
| 'recovered'
| 'retrying'
| 'healthCheck'
| 'autoRestarting';
/** A single lifecycle event emitted by a service. */
export interface IServiceEvent {
type: TServiceEventType;
serviceName: string;
/** Service state at the moment the event fired. */
state: TServiceState;
/** Epoch milliseconds when the event fired. */
timestamp: number;
/** Error message — present for failure-related events. */
error?: string;
/** Retry attempt number — present when type === 'retrying'. */
attempt?: number;
}
/** Point-in-time snapshot of a single service's state and bookkeeping counters. */
export interface IServiceStatus {
name: string;
state: TServiceState;
criticality: TServiceCriticality;
/** Epoch ms of the most recent successful start, if any. */
startedAt?: number;
/** Epoch ms of the most recent stop, if any. */
stoppedAt?: number;
/** Epoch ms of the most recent health check, if any. */
lastHealthCheck?: number;
/** Result of the most recent health check, if one has run. */
healthCheckOk?: boolean;
/** Milliseconds since startedAt — presumably only set while running; TODO confirm. */
uptime?: number;
errorCount: number;
lastError?: string;
retryCount: number;
/** Names of services this one depends on. */
dependencies: string[];
labels?: Record<string, string>;
/** Whether start() has produced an instance that is currently held. */
hasInstance?: boolean;
}
/** Exponential-backoff retry settings for service startup. */
export interface IRetryConfig {
/** Maximum retry attempts. 0 = no retries. Default: 3 */
maxRetries?: number;
/** Base delay in ms. Default: 1000 */
baseDelayMs?: number;
/** Maximum delay cap in ms. Default: 30000 */
maxDelayMs?: number;
/** Multiplier per attempt. Default: 2 */
backoffFactor?: number;
}
/** Periodic health-check settings, including optional auto-restart on failure. */
export interface IHealthCheckConfig {
/** Interval in ms between health checks. Default: 30000 */
intervalMs?: number;
/** Timeout for a single health check call. Default: 5000 */
timeoutMs?: number;
/** Consecutive failures before marking degraded. Default: 3 */
failuresBeforeDegraded?: number;
/** Consecutive failures before marking failed. Default: 5 */
failuresBeforeFailed?: number;
/** Auto-restart the service when it transitions to failed. Default: false */
autoRestart?: boolean;
/** Maximum number of auto-restart attempts. 0 = unlimited. Default: 3 */
maxAutoRestarts?: number;
/** Base delay in ms before first auto-restart. Default: 5000 */
autoRestartDelayMs?: number;
/** Backoff multiplier per auto-restart attempt. Default: 2 */
autoRestartBackoffFactor?: number;
}
/**
 * Constructor options for a Service.
 * T is the instance type produced by start() and handed back to stop()
 * and healthCheck().
 */
export interface IServiceOptions<T = any> {
name: string;
/** Starts the service and returns its instance. */
start: () => Promise<T>;
/** Stops the service; receives the instance returned by start(). */
stop: (instance: T) => Promise<void>;
/** Optional liveness probe; receives the started instance. */
healthCheck?: (instance: T) => Promise<boolean>;
criticality?: TServiceCriticality;
/** Names of services that must be running before this one starts. */
dependencies?: string[];
/** Per-service retry override; falls back to the manager default. */
retry?: IRetryConfig;
/** Per-service health-check override; falls back to the manager default. */
healthCheckConfig?: IHealthCheckConfig;
/** Timeout in ms for this service's start() call. */
startupTimeoutMs?: number;
/** Arbitrary key/value labels, queryable via getServicesByLabel(). */
labels?: Record<string, string>;
}
/** Constructor options for a ServiceManager. */
export interface IServiceManagerOptions {
name?: string;
/** Default retry settings for services without their own. */
defaultRetry?: IRetryConfig;
/** Default health-check settings for services without their own. */
defaultHealthCheck?: IHealthCheckConfig;
/** Timeout in ms for the entire startup sequence. Default: 120000 */
startupTimeoutMs?: number;
/** Timeout in ms for the entire shutdown sequence. Default: 30000 */
shutdownTimeoutMs?: number;
}
/** Aggregate health across all managed services. */
export type TOverallHealth = 'healthy' | 'degraded' | 'unhealthy';
/** Snapshot returned by ServiceManager.getHealth(). */
export interface IServiceManagerHealth {
overall: TOverallHealth;
/** Per-service status snapshots. */
services: IServiceStatus[];
/** Epoch ms when the manager finished starting, if it has started. */
startedAt?: number;
/** Milliseconds since startedAt, if started. */
uptime?: number;
}

View File

@@ -3,6 +3,6 @@
*/
/** Auto-generated build/commit metadata for this package release. */
export const commitinfo = {
  name: '@push.rocks/taskbuffer',
  // Duplicate stale `version: '6.1.2'` entry removed — an object literal
  // must not declare the same property twice; 8.0.2 is the current release.
  version: '8.0.2',
  description: 'A flexible task management library supporting TypeScript, allowing for task buffering, scheduling, and execution with dependency management.'
}