Compare commits

..

10 Commits

13 changed files with 1386 additions and 126 deletions

View File

@@ -1,5 +1,45 @@
# Changelog
## 2026-02-15 - 6.1.2 - fix(deps)
bump @push.rocks/smarttime to ^4.2.3
- Updated @push.rocks/smarttime from ^4.1.1 to ^4.2.3
- Non-breaking dependency version bump; increment patch version
## 2026-02-15 - 6.1.1 - fix(tests)
improve buffered task tests: add chain, concurrency and queue behavior tests
- Replace tools.delayFor with @push.rocks/smartdelay for more deterministic timing in tests
- Add tests for afterTask chaining, bufferMax concurrency, queued-run limits, and re-trigger behavior
- Rename tasks to descriptive names and fix afterTask chaining order to avoid circular references
- Change test runner invocation to export default tap.start() instead of calling tap.start() directly
## 2026-02-15 - 6.1.0 - feat(taskbuffer)
add sliding-window rate limiting and result-sharing to TaskConstraintGroup and integrate with TaskManager
- Added IRateLimitConfig and TResultSharingMode types and exported them from the public index
- TaskConstraintGroup: added rateLimit and resultSharingMode options, internal completion timestamp tracking, and last-result storage
- TaskConstraintGroup: new helpers - pruneCompletionTimestamps, getRateLimitDelay, getNextAvailableDelay, recordResult, getLastResult, hasResultSharing
- TaskConstraintGroup: rate-limit logic enforces maxPerWindow (counts running + completions) and composes with cooldown/maxConcurrent
- TaskManager: records successful task results to constraint groups and resolves queued entries immediately when a shared result exists
- TaskManager: queue drain now considers unified next-available delay (cooldown + rate limit) when scheduling retries
- Documentation updated: README and hints with usage examples for sliding-window rate limiting and result sharing
- Comprehensive tests added for rate limiting, concurrency interaction, and result-sharing behavior
## 2026-02-15 - 6.0.1 - fix(taskbuffer)
no changes to commit
- Git diff shows no changes
- package.json current version is 6.0.0; no version bump required
## 2026-02-15 - 6.0.0 - BREAKING CHANGE(constraints)
make TaskConstraintGroup constraint matcher input-aware and add shouldExecute pre-execution hook
- Rename ITaskConstraintGroupOptions.constraintKeyForTask -> constraintKeyForExecution(task, input?) and update TaskConstraintGroup.getConstraintKey signature
- Add optional shouldExecute(task, input?) hook; TaskManager checks shouldExecute before immediate runs, after acquiring slots, and when draining the constraint queue (queued tasks are skipped when shouldExecute returns false)
- Export ITaskExecution type and store constraintKeys on queued entries (IConstrainedTaskEntry.constraintKeys)
- Documentation and tests updated to demonstrate input-aware constraint keys and shouldExecute pruning
## 2026-02-15 - 5.0.1 - fix(tests)
add and tighten constraint-related tests covering return values, error propagation, concurrency, cooldown timing, and constraint removal

View File

@@ -1,6 +1,6 @@
{
"name": "@push.rocks/taskbuffer",
"version": "5.0.1",
"version": "6.1.2",
"private": false,
"description": "A flexible task management library supporting TypeScript, allowing for task buffering, scheduling, and execution with dependency management.",
"main": "dist_ts/index.js",
@@ -40,7 +40,7 @@
"@push.rocks/smartlog": "^3.1.11",
"@push.rocks/smartpromise": "^4.2.3",
"@push.rocks/smartrx": "^3.0.10",
"@push.rocks/smarttime": "^4.1.1",
"@push.rocks/smarttime": "^4.2.3",
"@push.rocks/smartunique": "^3.0.9"
},
"devDependencies": {

52
pnpm-lock.yaml generated
View File

@@ -27,8 +27,8 @@ importers:
specifier: ^3.0.10
version: 3.0.10
'@push.rocks/smarttime':
specifier: ^4.1.1
version: 4.1.1
specifier: ^4.2.3
version: 4.2.3
'@push.rocks/smartunique':
specifier: ^3.0.9
version: 3.0.9
@@ -863,8 +863,8 @@ packages:
'@push.rocks/smartstring@4.1.0':
resolution: {integrity: sha512-Q4py/Nm3KTDhQ9EiC75yBtSTLR0KLMwhKM+8gGcutgKotZT6wJ3gncjmtD8LKFfNhb4lSaFMgPJgLrCHTOH6Iw==}
'@push.rocks/smarttime@4.1.1':
resolution: {integrity: sha512-Ha/3J/G+zfTl4ahpZgF6oUOZnUjpLhrBja0OQ2cloFxF9sKT8I1COaSqIfBGDtoK2Nly4UD4aTJ3JcJNOg/kgA==}
'@push.rocks/smarttime@4.2.3':
resolution: {integrity: sha512-8gMg8RUkrCG4p9NcEUZV7V6KpL24+jAMK02g7qyhfA6giz/JJWD0+8w8xjSR+G7qe16KVQ2y3RbvAL9TxmO36g==}
'@push.rocks/smartunique@3.0.9':
resolution: {integrity: sha512-q6DYQgT7/dqdWi9HusvtWCjdsFzLFXY9LTtaZV6IYNJt6teZOonoygxTdNt9XLn6niBSbLYrHSKvJNTRH/uK+g==}
@@ -1795,8 +1795,8 @@ packages:
typescript:
optional: true
croner@9.1.0:
resolution: {integrity: sha512-p9nwwR4qyT5W996vBZhdvBCnMhicY5ytZkR4D1Xj0wuTDEiMnjwR57Q3RXYY/s0EpX6Ay3vgIcfaR+ewGHsi+g==}
croner@10.0.1:
resolution: {integrity: sha512-ixNtAJndqh173VQ4KodSdJEI6nuioBWI0V1ITNKhZZsO0pEMoDxz539T4FTTbSZ/xIOSuDnzxLVRqBVSvPNE2g==}
engines: {node: '>=18.0'}
cross-spawn@7.0.6:
@@ -1814,8 +1814,8 @@ packages:
date-fns@4.1.0:
resolution: {integrity: sha512-Ukq0owbQXxa/U3EGtsdVBkR1w7KOQ5gIBqdH2hkvknzZPYvBxb/aa6E8L7tmjFtkwZBu3UXBbjIgPo/Ez4xaNg==}
dayjs@1.11.13:
resolution: {integrity: sha512-oaMBel6gjolK862uaPQOVTA7q3TZhuSvuMQAAglQDOWYO9A91IrAOUJEyKVlqJlHE0vq5p5UXxzdPfMH/x6xNg==}
dayjs@1.11.19:
resolution: {integrity: sha512-t5EcLVS6QPBNqM2z8fakk/NKel+Xzshgt8FFKAn+qwlD1pzZWxh0nVCrvFK7ZDb6XucZeF9z8C7CBWTRIVApAw==}
debug@4.3.7:
resolution: {integrity: sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ==}
@@ -2928,8 +2928,8 @@ packages:
resolution: {integrity: sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==}
engines: {node: '>=8'}
pretty-ms@9.2.0:
resolution: {integrity: sha512-4yf0QO/sllf/1zbZWYnvWw3NxCQwLXKzIj0G849LSufP15BXKM0rbD2Z3wVnkMfjdn/CB0Dpp444gYAACdsplg==}
pretty-ms@9.3.0:
resolution: {integrity: sha512-gjVS5hOP+M3wMm5nmNOucbIrqudzs9v/57bWRHQWLYklXqoXKrVfYW2W9+glfGsqtPgpiz5WwyEEB+ksXIx3gQ==}
engines: {node: '>=18'}
progress@2.0.3:
@@ -3584,7 +3584,7 @@ snapshots:
'@push.rocks/smartrx': 3.0.10
'@push.rocks/smartsitemap': 2.0.4
'@push.rocks/smartstream': 3.2.5
'@push.rocks/smarttime': 4.1.1
'@push.rocks/smarttime': 4.2.3
'@push.rocks/taskbuffer': 3.5.0
'@push.rocks/webrequest': 3.0.37
'@push.rocks/webstore': 2.0.20
@@ -4412,7 +4412,7 @@ snapshots:
'@push.rocks/smartrequest': 5.0.1
'@push.rocks/smarts3': 3.0.3
'@push.rocks/smartshell': 3.3.0
'@push.rocks/smarttime': 4.1.1
'@push.rocks/smarttime': 4.2.3
'@types/ws': 8.18.1
figures: 6.1.0
ws: 8.19.0
@@ -4777,7 +4777,7 @@ snapshots:
'@push.rocks/smartmatch': 2.0.0
'@push.rocks/smartpromise': 4.2.3
'@push.rocks/smartrx': 3.0.10
'@push.rocks/smarttime': 4.1.1
'@push.rocks/smarttime': 4.2.3
'@types/minimatch': 5.1.2
'@types/symbol-tree': 3.2.5
symbol-tree: 3.2.4
@@ -4899,7 +4899,7 @@ snapshots:
'@push.rocks/smarterror': 2.0.1
'@push.rocks/smarthash': 3.2.6
'@push.rocks/smartpromise': 4.2.3
'@push.rocks/smarttime': 4.1.1
'@push.rocks/smarttime': 4.2.3
'@push.rocks/smartchok@1.2.0':
dependencies:
@@ -4942,7 +4942,7 @@ snapshots:
'@push.rocks/smartpromise': 4.2.3
'@push.rocks/smartrx': 3.0.10
'@push.rocks/smartstring': 4.1.0
'@push.rocks/smarttime': 4.1.1
'@push.rocks/smarttime': 4.2.3
'@push.rocks/smartunique': 3.0.9
'@push.rocks/taskbuffer': 3.5.0
'@tsclass/tsclass': 9.3.0
@@ -5120,7 +5120,7 @@ snapshots:
'@push.rocks/smartfile': 11.2.7
'@push.rocks/smarthash': 3.2.6
'@push.rocks/smartpromise': 4.2.3
'@push.rocks/smarttime': 4.1.1
'@push.rocks/smarttime': 4.2.3
'@push.rocks/webrequest': 4.0.1
'@tsclass/tsclass': 9.3.0
@@ -5193,7 +5193,7 @@ snapshots:
'@push.rocks/smartpath': 6.0.0
'@push.rocks/smartpromise': 4.2.3
'@push.rocks/smartrequest': 4.4.2
'@push.rocks/smarttime': 4.1.1
'@push.rocks/smarttime': 4.2.3
'@push.rocks/smartversion': 3.0.5
package-json: 8.1.1
transitivePeerDependencies:
@@ -5362,7 +5362,7 @@ snapshots:
'@push.rocks/smartlog': 3.1.11
'@push.rocks/smartpromise': 4.2.3
'@push.rocks/smartrx': 3.0.10
'@push.rocks/smarttime': 4.1.1
'@push.rocks/smarttime': 4.2.3
engine.io: 6.6.4
socket.io: 4.8.1
socket.io-client: 4.8.1
@@ -5414,16 +5414,16 @@ snapshots:
dependencies:
'@push.rocks/isounique': 1.0.5
'@push.rocks/smarttime@4.1.1':
'@push.rocks/smarttime@4.2.3':
dependencies:
'@push.rocks/lik': 6.2.2
'@push.rocks/smartdelay': 3.0.5
'@push.rocks/smartpromise': 4.2.3
croner: 9.1.0
croner: 10.0.1
date-fns: 4.1.0
dayjs: 1.11.13
dayjs: 1.11.19
is-nan: 1.3.2
pretty-ms: 9.2.0
pretty-ms: 9.3.0
'@push.rocks/smartunique@3.0.9':
dependencies:
@@ -5459,7 +5459,7 @@ snapshots:
'@push.rocks/smartlog': 3.1.11
'@push.rocks/smartpromise': 4.2.3
'@push.rocks/smartrx': 3.0.10
'@push.rocks/smarttime': 4.1.1
'@push.rocks/smarttime': 4.2.3
'@push.rocks/smartunique': 3.0.9
transitivePeerDependencies:
- '@nuxt/kit'
@@ -6474,7 +6474,7 @@ snapshots:
optionalDependencies:
typescript: 5.9.3
croner@9.1.0: {}
croner@10.0.1: {}
cross-spawn@7.0.6:
dependencies:
@@ -6490,7 +6490,7 @@ snapshots:
date-fns@4.1.0: {}
dayjs@1.11.13: {}
dayjs@1.11.19: {}
debug@4.3.7:
dependencies:
@@ -7866,7 +7866,7 @@ snapshots:
dependencies:
find-up: 4.1.0
pretty-ms@9.2.0:
pretty-ms@9.3.0:
dependencies:
parse-ms: 4.0.0

View File

@@ -12,11 +12,30 @@
- Typed data bag accessible as `task.data`
### TaskConstraintGroup
- `new TaskConstraintGroup<TData>({ name, constraintKeyForTask, maxConcurrent?, cooldownMs? })`
- `constraintKeyForTask(task)` returns a string key (constraint applies) or `null` (skip)
- `new TaskConstraintGroup<TData>({ name, constraintKeyForExecution, maxConcurrent?, cooldownMs?, shouldExecute?, rateLimit?, resultSharingMode? })`
- `constraintKeyForExecution(task, input?)` returns a string key (constraint applies) or `null` (skip). Receives both task and runtime input.
- `shouldExecute(task, input?)` — optional pre-execution check. Returns `false` to skip (deferred resolves `undefined`). Can be async.
- `maxConcurrent` (default: `Infinity`) — max concurrent tasks per key
- `cooldownMs` (default: `0`) — minimum ms gap between completions per key
- Methods: `canRun(key)`, `acquireSlot(key)`, `releaseSlot(key)`, `getCooldownRemaining(key)`, `getRunningCount(key)`, `reset()`
- `rateLimit` (optional) — `{ maxPerWindow: number, windowMs: number }` sliding window rate limiter. Counts both running + completed tasks in window.
- `resultSharingMode` (default: `'none'`) — `'none'` | `'share-latest'`. When `'share-latest'`, queued tasks for the same key resolve with the first task's result without executing.
- Methods: `getConstraintKey(task, input?)`, `checkShouldExecute(task, input?)`, `canRun(key)`, `acquireSlot(key)`, `releaseSlot(key)`, `getCooldownRemaining(key)`, `getRateLimitDelay(key)`, `getNextAvailableDelay(key)`, `getRunningCount(key)`, `recordResult(key, result)`, `getLastResult(key)`, `hasResultSharing()`, `reset()`
- `ITaskExecution<TData>` type exported from index — `{ task, input }` tuple
### Rate Limiting (v6.1.0+)
- Sliding window rate limiter: `rateLimit: { maxPerWindow: N, windowMs: ms }`
- Counts running + completed tasks against the window cap
- Per-key independence: saturating key A doesn't block key B
- Composable with `maxConcurrent` and `cooldownMs`
- `getNextAvailableDelay(key)` returns `Math.max(cooldownRemaining, rateLimitDelay)` — unified "how long until I can run" answer
- Drain timer auto-schedules based on shortest delay across all constraints
### Result Sharing (v6.1.0+)
- `resultSharingMode: 'share-latest'` — queued tasks for the same key get the first task's result without executing
- Only successful results are shared (errors from `catchErrors: true` or thrown errors are NOT shared)
- `shouldExecute` is NOT called for shared results (the task's purpose was already fulfilled)
- `lastResults` persists until `reset()` — for time-bounded sharing, use `shouldExecute` to control staleness
- Composable with rate limiting: rate-limited waiters get shared result without waiting for the window
### TaskManager Constraint Integration
- `manager.addConstraintGroup(group)` / `manager.removeConstraintGroup(name)`
@@ -26,7 +45,7 @@
### Exported from index.ts
- `TaskConstraintGroup` class
- `ITaskConstraintGroupOptions` type
- `ITaskConstraintGroupOptions`, `IRateLimitConfig`, `TResultSharingMode` types
## Error Handling (v3.6.0+)
- `Task` now has `catchErrors` constructor option (default: `false`)

239
readme.md
View File

@@ -13,7 +13,7 @@ For reporting bugs, issues, or security vulnerabilities, please visit [community
## 🌟 Features
- **🎯 Type-Safe Task Management** — Full TypeScript support with generics and type inference
- **🔒 Constraint-Based Concurrency** — Per-key mutual exclusion, group concurrency limits, and cooldown enforcement via `TaskConstraintGroup`
- **🔒 Constraint-Based Concurrency** — Per-key mutual exclusion, group concurrency limits, cooldown enforcement, sliding-window rate limiting, and result sharing via `TaskConstraintGroup`
- **📊 Real-Time Progress Tracking** — Step-based progress with percentage weights
- **⚡ Smart Buffering** — Intelligent request debouncing and batching
- **⏰ Cron Scheduling** — Schedule tasks with cron expressions
@@ -120,7 +120,7 @@ const manager = new TaskManager();
const domainMutex = new TaskConstraintGroup<{ domain: string }>({
name: 'domain-mutex',
maxConcurrent: 1,
constraintKeyForTask: (task) => task.data.domain,
constraintKeyForExecution: (task, input?) => task.data.domain,
});
manager.addConstraintGroup(domainMutex);
@@ -156,7 +156,7 @@ Cap how many tasks can run concurrently across a group:
const dnsLimit = new TaskConstraintGroup<{ group: string }>({
name: 'dns-concurrency',
maxConcurrent: 3,
constraintKeyForTask: (task) =>
constraintKeyForExecution: (task) =>
task.data.group === 'dns' ? 'dns' : null, // null = skip constraint
});
@@ -173,7 +173,7 @@ const rateLimiter = new TaskConstraintGroup<{ domain: string }>({
name: 'api-rate-limit',
maxConcurrent: 1,
cooldownMs: 11000,
constraintKeyForTask: (task) => task.data.domain,
constraintKeyForExecution: (task) => task.data.domain,
});
manager.addConstraintGroup(rateLimiter);
@@ -187,7 +187,7 @@ Limit total concurrent tasks system-wide:
const globalCap = new TaskConstraintGroup({
name: 'global-cap',
maxConcurrent: 10,
constraintKeyForTask: () => 'all', // same key = shared limit
constraintKeyForExecution: () => 'all', // same key = shared limit
});
manager.addConstraintGroup(globalCap);
@@ -208,26 +208,219 @@ await manager.triggerTask(dnsTask);
### Selective Constraints
Return `null` from `constraintKeyForTask` to exempt a task from a constraint group:
Return `null` from `constraintKeyForExecution` to exempt a task from a constraint group:
```typescript
const constraint = new TaskConstraintGroup<{ priority: string }>({
name: 'low-priority-limit',
maxConcurrent: 2,
constraintKeyForTask: (task) =>
constraintKeyForExecution: (task) =>
task.data.priority === 'low' ? 'low-priority' : null, // high priority tasks skip this constraint
});
```
### Input-Aware Constraints 🎯
The `constraintKeyForExecution` function receives both the **task** and the **runtime input** passed to `trigger(input)`. This means the same task triggered with different inputs can be constrained independently:
```typescript
const extractTLD = (domain: string) => {
const parts = domain.split('.');
return parts.slice(-2).join('.');
};
// Same TLD → serialized. Different TLDs → parallel.
const tldMutex = new TaskConstraintGroup({
name: 'tld-mutex',
maxConcurrent: 1,
constraintKeyForExecution: (task, input?: string) => {
if (!input) return null;
return extractTLD(input); // "example.com", "other.org", etc.
},
});
manager.addConstraintGroup(tldMutex);
// These two serialize (same TLD "example.com")
const p1 = manager.triggerTaskConstrained(getCert, 'app.example.com');
const p2 = manager.triggerTaskConstrained(getCert, 'api.example.com');
// This runs in parallel (different TLD "other.org")
const p3 = manager.triggerTaskConstrained(getCert, 'my.other.org');
```
You can also combine `task.data` and `input` for composite keys:
```typescript
const providerDomain = new TaskConstraintGroup<{ provider: string }>({
name: 'provider-domain',
maxConcurrent: 1,
constraintKeyForExecution: (task, input?: string) => {
return `${task.data.provider}:${input || 'default'}`;
},
});
```
### Pre-Execution Check with `shouldExecute` ✅
The `shouldExecute` callback runs right before a queued task executes. If it returns `false`, the task is skipped and its promise resolves with `undefined`. This is perfect for scenarios where a prior execution's outcome makes subsequent queued tasks unnecessary:
```typescript
const certCache = new Map<string, string>();
const certConstraint = new TaskConstraintGroup({
name: 'cert-mutex',
maxConcurrent: 1,
constraintKeyForExecution: (task, input?: string) => {
if (!input) return null;
return extractTLD(input);
},
shouldExecute: (task, input?: string) => {
if (!input) return true;
// Skip if a wildcard cert already covers this TLD
return certCache.get(extractTLD(input)) !== 'wildcard';
},
});
const getCert = new Task({
name: 'get-certificate',
taskFunction: async (domain: string) => {
const cert = await acme.getCert(domain);
if (cert.isWildcard) certCache.set(extractTLD(domain), 'wildcard');
return cert;
},
});
manager.addConstraintGroup(certConstraint);
manager.addTask(getCert);
const r1 = manager.triggerTaskConstrained(getCert, 'app.example.com'); // runs, gets wildcard
const r2 = manager.triggerTaskConstrained(getCert, 'api.example.com'); // queued → skipped!
const r3 = manager.triggerTaskConstrained(getCert, 'my.other.org'); // parallel (different TLD)
const [cert1, cert2, cert3] = await Promise.all([r1, r2, r3]);
// cert2 === undefined (skipped because wildcard already covers example.com)
```
**`shouldExecute` semantics:**
- Runs right before execution (after slot acquisition, before `trigger()`)
- Also checked on immediate (non-queued) triggers
- Returns `false` → skip execution, deferred resolves with `undefined`
- Can be async (return `Promise<boolean>`)
- Has closure access to external state modified by prior executions
- If multiple constraint groups have `shouldExecute`, **all** must return `true`
### Sliding Window Rate Limiting
Enforce "N completions per time window" with burst capability. Unlike `cooldownMs` (which forces even spacing between executions), `rateLimit` allows bursts up to the cap, then blocks until the window slides:
```typescript
// Let's Encrypt style: 300 new orders per 3 hours
const acmeRateLimit = new TaskConstraintGroup({
name: 'acme-rate',
constraintKeyForExecution: () => 'acme-account',
rateLimit: {
maxPerWindow: 300,
windowMs: 3 * 60 * 60 * 1000, // 3 hours
},
});
manager.addConstraintGroup(acmeRateLimit);
// All 300 can burst immediately. The 301st waits until the oldest
// completion falls out of the 3-hour window.
for (const domain of domains) {
manager.triggerTaskConstrained(certTask, { domain });
}
```
Compose multiple rate limits for layered protection:
```typescript
// Per-domain weekly cap AND global order rate
const perDomainWeekly = new TaskConstraintGroup({
name: 'per-domain-weekly',
constraintKeyForExecution: (task, input) => input.registeredDomain,
rateLimit: { maxPerWindow: 50, windowMs: 7 * 24 * 60 * 60 * 1000 },
});
const globalOrderRate = new TaskConstraintGroup({
name: 'global-order-rate',
constraintKeyForExecution: () => 'global',
rateLimit: { maxPerWindow: 300, windowMs: 3 * 60 * 60 * 1000 },
});
manager.addConstraintGroup(perDomainWeekly);
manager.addConstraintGroup(globalOrderRate);
```
Combine with `maxConcurrent` and `cooldownMs` for fine-grained control:
```typescript
const throttled = new TaskConstraintGroup({
name: 'acme-throttle',
constraintKeyForExecution: () => 'acme',
maxConcurrent: 5, // max 5 concurrent requests
cooldownMs: 1000, // 1s gap after each completion
rateLimit: {
maxPerWindow: 300,
windowMs: 3 * 60 * 60 * 1000,
},
});
```
### Result Sharing — Deduplication for Concurrent Requests
When multiple callers request the same resource concurrently, `resultSharingMode: 'share-latest'` ensures only one execution occurs. All queued waiters receive the same result:
```typescript
const certMutex = new TaskConstraintGroup({
name: 'cert-per-tld',
constraintKeyForExecution: (task, input) => extractTld(input.domain),
maxConcurrent: 1,
resultSharingMode: 'share-latest',
});
manager.addConstraintGroup(certMutex);
const certTask = new Task({
name: 'obtain-cert',
taskFunction: async (input) => {
return await acmeClient.obtainWildcard(input.domain);
},
});
manager.addTask(certTask);
// Three requests for *.example.com arrive simultaneously
const [cert1, cert2, cert3] = await Promise.all([
manager.triggerTaskConstrained(certTask, { domain: 'api.example.com' }),
manager.triggerTaskConstrained(certTask, { domain: 'www.example.com' }),
manager.triggerTaskConstrained(certTask, { domain: 'mail.example.com' }),
]);
// Only ONE ACME request was made.
// cert1 === cert2 === cert3 — all callers got the same cert object.
```
**Result sharing semantics:**
- `shouldExecute` is NOT called for shared results (the task's purpose was already fulfilled)
- Error results are NOT shared — queued tasks execute independently after a failure
- `lastResults` persists until `reset()` — for time-bounded sharing, use `shouldExecute` to control staleness
- Composable with rate limiting: rate-limited waiters get shared results without waiting for the window
### How It Works
When you trigger a task through `TaskManager` (via `triggerTask`, `triggerTaskByName`, `addExecuteRemoveTask`, or cron), the manager:
1. Evaluates all registered constraint groups against the task
2. If no constraints apply (all matchers return `null`) → runs immediately
3. If all applicable constraints have capacity → acquires slots and runs
1. Evaluates all registered constraint groups against the task and input
2. If no constraints apply (all matchers return `null`) → checks `shouldExecute` → runs or skips
3. If all applicable constraints have capacity → acquires slots → checks `shouldExecute` → runs or skips
4. If any constraint blocks → enqueues the task; when a running task completes, the queue is drained
5. Cooldown-blocked tasks auto-retry after the shortest remaining cooldown expires
5. Cooldown/rate-limit-blocked tasks auto-retry after the shortest remaining delay expires
6. Queued tasks check for shared results first (if any group has `resultSharingMode: 'share-latest'`)
7. Queued tasks re-check `shouldExecute` when their turn comes — stale work is automatically pruned
## 🎯 Core Concepts
@@ -732,7 +925,7 @@ const manager = new TaskManager();
const tenantLimit = new TaskConstraintGroup<{ tenantId: string }>({
name: 'tenant-concurrency',
maxConcurrent: 2,
constraintKeyForTask: (task) => task.data.tenantId,
constraintKeyForExecution: (task, input?) => task.data.tenantId,
});
manager.addConstraintGroup(tenantLimit);
@@ -829,21 +1022,30 @@ const acmeTasks = manager.getTasksMetadataByLabel('tenantId', 'acme');
| Option | Type | Default | Description |
| --- | --- | --- | --- |
| `name` | `string` | *required* | Constraint group identifier |
| `constraintKeyForTask` | `(task) => string \| null` | *required* | Returns key for grouping, or `null` to skip |
| `constraintKeyForExecution` | `(task, input?) => string \| null` | *required* | Returns key for grouping, or `null` to skip. Receives both the task and runtime input. |
| `maxConcurrent` | `number` | `Infinity` | Max concurrent tasks per key |
| `cooldownMs` | `number` | `0` | Minimum ms between completions per key |
| `shouldExecute` | `(task, input?) => boolean \| Promise<boolean>` | — | Pre-execution check. Return `false` to skip; deferred resolves `undefined`. |
| `rateLimit` | `IRateLimitConfig` | — | Sliding window: `{ maxPerWindow, windowMs }`. Counts running + completed tasks. |
| `resultSharingMode` | `TResultSharingMode` | `'none'` | `'none'` or `'share-latest'`. Queued tasks get first task's result without executing. |
### TaskConstraintGroup Methods
| Method | Returns | Description |
| --- | --- | --- |
| `getConstraintKey(task)` | `string \| null` | Get the constraint key for a task |
| `canRun(key)` | `boolean` | Check if a slot is available |
| `getConstraintKey(task, input?)` | `string \| null` | Get the constraint key for a task + input |
| `checkShouldExecute(task, input?)` | `Promise<boolean>` | Run the `shouldExecute` callback (defaults to `true`) |
| `canRun(key)` | `boolean` | Check if a slot is available (considers concurrency, cooldown, and rate limit) |
| `acquireSlot(key)` | `void` | Claim a running slot |
| `releaseSlot(key)` | `void` | Release a slot and record completion time |
| `releaseSlot(key)` | `void` | Release a slot and record completion time + rate-limit timestamp |
| `getCooldownRemaining(key)` | `number` | Milliseconds until cooldown expires |
| `getRateLimitDelay(key)` | `number` | Milliseconds until a rate-limit slot opens |
| `getNextAvailableDelay(key)` | `number` | Max of cooldown and rate-limit delay (whichever is longer, not their sum) — unified "when can I run" |
| `getRunningCount(key)` | `number` | Current running count for key |
| `reset()` | `void` | Clear all state |
| `recordResult(key, result)` | `void` | Store result for sharing (no-op if mode is `'none'`) |
| `getLastResult(key)` | `{result, timestamp} \| undefined` | Get last shared result for key |
| `hasResultSharing()` | `boolean` | Whether result sharing is enabled |
| `reset()` | `void` | Clear all state (running counts, cooldowns, rate-limit timestamps, shared results) |
### TaskManager Methods
@@ -884,12 +1086,15 @@ const acmeTasks = manager.getTasksMetadataByLabel('tenantId', 'acme');
import type {
ITaskMetadata,
ITaskExecutionReport,
ITaskExecution,
IScheduledTaskInfo,
ITaskEvent,
TTaskEventType,
ITaskStep,
ITaskFunction,
ITaskConstraintGroupOptions,
IRateLimitConfig,
TResultSharingMode,
StepNames,
} from '@push.rocks/taskbuffer';
```

View File

@@ -54,7 +54,7 @@ tap.test('should enforce group concurrency limit', async () => {
const constraint = new taskbuffer.TaskConstraintGroup<{ group: string }>({
name: 'concurrency-test',
maxConcurrent: 2,
constraintKeyForTask: (task) =>
constraintKeyForExecution: (task) =>
task.data.group === 'workers' ? 'workers' : null,
});
manager.addConstraintGroup(constraint);
@@ -97,7 +97,7 @@ tap.test('should enforce key-based mutual exclusion', async () => {
const constraint = new taskbuffer.TaskConstraintGroup<{ domain: string }>({
name: 'domain-mutex',
maxConcurrent: 1,
constraintKeyForTask: (task) => task.data.domain,
constraintKeyForExecution: (task) => task.data.domain,
});
manager.addConstraintGroup(constraint);
@@ -149,7 +149,7 @@ tap.test('should enforce cooldown between task executions', async () => {
name: 'cooldown-test',
maxConcurrent: 1,
cooldownMs: 300,
constraintKeyForTask: (task) => task.data.key,
constraintKeyForExecution: (task) => task.data.key,
});
manager.addConstraintGroup(constraint);
@@ -194,13 +194,13 @@ tap.test('should apply multiple constraint groups to one task', async () => {
const globalConstraint = new taskbuffer.TaskConstraintGroup({
name: 'global',
maxConcurrent: 3,
constraintKeyForTask: () => 'all',
constraintKeyForExecution: () => 'all',
});
const groupConstraint = new taskbuffer.TaskConstraintGroup<{ group: string }>({
name: 'group',
maxConcurrent: 1,
constraintKeyForTask: (task) => task.data.group,
constraintKeyForExecution: (task) => task.data.group,
});
manager.addConstraintGroup(globalConstraint);
@@ -242,7 +242,7 @@ tap.test('should run task unconstrained when matcher returns null', async () =>
const constraint = new taskbuffer.TaskConstraintGroup<{ skip: boolean }>({
name: 'selective',
maxConcurrent: 1,
constraintKeyForTask: (task) => (task.data.skip ? null : 'constrained'),
constraintKeyForExecution: (task) => (task.data.skip ? null : 'constrained'),
});
manager.addConstraintGroup(constraint);
@@ -269,7 +269,7 @@ tap.test('should release slot and drain queue when task fails', async () => {
const constraint = new taskbuffer.TaskConstraintGroup<{ key: string }>({
name: 'error-drain',
maxConcurrent: 1,
constraintKeyForTask: (task) => task.data.key,
constraintKeyForExecution: (task) => task.data.key,
});
manager.addConstraintGroup(constraint);
@@ -313,7 +313,7 @@ tap.test('should route triggerTaskByName through constraints', async () => {
const constraint = new taskbuffer.TaskConstraintGroup({
name: 'manager-integration',
maxConcurrent: 1,
constraintKeyForTask: () => 'all',
constraintKeyForExecution: () => 'all',
});
manager.addConstraintGroup(constraint);
@@ -356,7 +356,7 @@ tap.test('should remove a constraint group by name', async () => {
const constraint = new taskbuffer.TaskConstraintGroup({
name: 'removable',
maxConcurrent: 1,
constraintKeyForTask: () => 'all',
constraintKeyForExecution: () => 'all',
});
manager.addConstraintGroup(constraint);
@@ -373,7 +373,7 @@ tap.test('should reset constraint group state', async () => {
name: 'resettable',
maxConcurrent: 2,
cooldownMs: 1000,
constraintKeyForTask: () => 'key',
constraintKeyForExecution: () => 'key',
});
// Simulate usage
@@ -395,7 +395,7 @@ tap.test('should return correct result from queued tasks', async () => {
const constraint = new taskbuffer.TaskConstraintGroup({
name: 'return-value-test',
maxConcurrent: 1,
constraintKeyForTask: () => 'shared',
constraintKeyForExecution: () => 'shared',
});
manager.addConstraintGroup(constraint);
@@ -434,7 +434,7 @@ tap.test('should propagate errors from queued tasks (catchErrors: false)', async
const constraint = new taskbuffer.TaskConstraintGroup({
name: 'error-propagation',
maxConcurrent: 1,
constraintKeyForTask: () => 'shared',
constraintKeyForExecution: () => 'shared',
});
manager.addConstraintGroup(constraint);
@@ -484,7 +484,7 @@ tap.test('should route triggerTask() through constraints', async () => {
const constraint = new taskbuffer.TaskConstraintGroup({
name: 'trigger-task-test',
maxConcurrent: 1,
constraintKeyForTask: () => 'all',
constraintKeyForExecution: () => 'all',
});
manager.addConstraintGroup(constraint);
@@ -523,7 +523,7 @@ tap.test('should route addExecuteRemoveTask() through constraints', async () =>
const constraint = new taskbuffer.TaskConstraintGroup({
name: 'add-execute-remove-test',
maxConcurrent: 1,
constraintKeyForTask: () => 'all',
constraintKeyForExecution: () => 'all',
});
manager.addConstraintGroup(constraint);
@@ -561,7 +561,7 @@ tap.test('should execute queued tasks in FIFO order', async () => {
const constraint = new taskbuffer.TaskConstraintGroup({
name: 'fifo-test',
maxConcurrent: 1,
constraintKeyForTask: () => 'shared',
constraintKeyForExecution: () => 'shared',
});
manager.addConstraintGroup(constraint);
@@ -603,7 +603,7 @@ tap.test('should enforce both concurrency and cooldown together', async () => {
name: 'combined-test',
maxConcurrent: 2,
cooldownMs: 200,
constraintKeyForTask: () => 'shared',
constraintKeyForExecution: () => 'shared',
});
manager.addConstraintGroup(constraint);
@@ -645,7 +645,7 @@ tap.test('should unblock queued tasks when constraint group is removed', async (
const constraint = new taskbuffer.TaskConstraintGroup({
name: 'removable-constraint',
maxConcurrent: 1,
constraintKeyForTask: () => 'shared',
constraintKeyForExecution: () => 'shared',
});
manager.addConstraintGroup(constraint);
@@ -690,4 +690,746 @@ tap.test('should unblock queued tasks when constraint group is removed', async (
await manager.stop();
});
// Test 20: Intra-task concurrency by input — same task, different inputs, key extracts TLD
tap.test('should serialize same-TLD inputs and parallelize different-TLD inputs', async () => {
  const manager = new taskbuffer.TaskManager();
  const log: string[] = [];
  // Reduce a domain to its last two labels, e.g. "a.example.com" → "example.com".
  const extractTLD = (domain: string) => {
    const parts = domain.split('.');
    return parts.slice(-2).join('.');
  };
  const constraint = new taskbuffer.TaskConstraintGroup({
    name: 'tld-mutex',
    maxConcurrent: 1,
    // Returning null means "no constraint applies"; otherwise mutex per TLD.
    constraintKeyForExecution: (_task, input?: string) => {
      if (!input) return null;
      return extractTLD(input);
    },
  });
  manager.addConstraintGroup(constraint);
  const getCert = new taskbuffer.Task({
    name: 'get-cert',
    taskFunction: async (domain: string) => {
      log.push(`${domain}-start`);
      await smartdelay.delayFor(100);
      log.push(`${domain}-end`);
    },
  });
  manager.addTask(getCert);
  await Promise.all([
    manager.triggerTaskConstrained(getCert, 'a.example.com'),
    manager.triggerTaskConstrained(getCert, 'b.example.com'),
    manager.triggerTaskConstrained(getCert, 'c.other.org'),
  ]);
  // a.example.com and b.example.com share TLD "example.com" → serialized
  const aEndIdx = log.indexOf('a.example.com-end');
  const bStartIdx = log.indexOf('b.example.com-start');
  expect(bStartIdx).toBeGreaterThanOrEqual(aEndIdx);
  // c.other.org has different TLD → runs in parallel with a.example.com
  // NOTE(review): aStartIdx is unused below — candidate for removal.
  const aStartIdx = log.indexOf('a.example.com-start');
  const cStartIdx = log.indexOf('c.other.org-start');
  expect(cStartIdx).toBeLessThan(aEndIdx);
  await manager.stop();
});
// Test 21: shouldExecute skips queued task based on external state
tap.test('should skip queued task when shouldExecute returns false', async () => {
  const manager = new taskbuffer.TaskManager();
  const execLog: string[] = [];
  const certCache = new Map<string, string>();
  // Reduce a domain to its last two labels, e.g. "app.example.com" → "example.com".
  const extractTLD = (domain: string) => {
    const parts = domain.split('.');
    return parts.slice(-2).join('.');
  };
  const constraint = new taskbuffer.TaskConstraintGroup({
    name: 'cert-mutex',
    maxConcurrent: 1,
    constraintKeyForExecution: (_task, input?: string) => {
      if (!input) return null;
      return extractTLD(input);
    },
    // Skip execution once a wildcard cert for the TLD is already cached.
    shouldExecute: (_task, input?: string) => {
      if (!input) return true;
      return certCache.get(extractTLD(input)) !== 'wildcard';
    },
  });
  manager.addConstraintGroup(constraint);
  const getCert = new taskbuffer.Task({
    name: 'get-cert-skip',
    taskFunction: async (domain: string) => {
      execLog.push(domain);
      // First execution sets wildcard in cache
      certCache.set(extractTLD(domain), 'wildcard');
      await smartdelay.delayFor(100);
      return `cert-for-${domain}`;
    },
  });
  manager.addTask(getCert);
  const [r1, r2] = await Promise.all([
    manager.triggerTaskConstrained(getCert, 'app.example.com'),
    manager.triggerTaskConstrained(getCert, 'api.example.com'),
  ]);
  // First ran, second was skipped
  expect(execLog).toEqual(['app.example.com']);
  expect(r1).toEqual('cert-for-app.example.com');
  // Skipped executions resolve with undefined rather than rejecting.
  expect(r2).toEqual(undefined);
  await manager.stop();
});
// Test 22: shouldExecute on immediate (non-queued) trigger
tap.test('should skip immediate trigger when shouldExecute returns false', async () => {
  const taskManager = new taskbuffer.TaskManager();
  let didRun = false;
  // Group whose shouldExecute unconditionally vetoes execution.
  const alwaysSkipGroup = new taskbuffer.TaskConstraintGroup({
    name: 'always-skip',
    maxConcurrent: 10,
    constraintKeyForExecution: () => 'all',
    shouldExecute: () => false,
  });
  taskManager.addConstraintGroup(alwaysSkipGroup);
  const guardedTask = new taskbuffer.Task({
    name: 'skip-immediate',
    taskFunction: async () => {
      didRun = true;
      return 'should-not-see';
    },
  });
  taskManager.addTask(guardedTask);
  // Slot is free (maxConcurrent: 10), so this is the immediate (non-queued) path.
  const outcome = await taskManager.triggerTaskConstrained(guardedTask);
  expect(didRun).toBeFalse();
  expect(outcome).toEqual(undefined);
  await taskManager.stop();
});
// Test 23: Mixed task.data + input constraint key
tap.test('should use both task.data and input in constraint key', async () => {
  const manager = new taskbuffer.TaskManager();
  let running = 0;
  let maxRunning = 0; // peak concurrency observed across all three tasks
  const constraint = new taskbuffer.TaskConstraintGroup<{ provider: string }>({
    name: 'provider-domain',
    maxConcurrent: 1,
    // Key combines static task metadata (task.data.provider) with the runtime input.
    constraintKeyForExecution: (task, input?: string) => {
      return `${task.data.provider}:${input || 'default'}`;
    },
  });
  manager.addConstraintGroup(constraint);
  const makeTask = (name: string, provider: string) =>
    new taskbuffer.Task<undefined, [], { provider: string }>({
      name,
      data: { provider },
      taskFunction: async () => {
        running++;
        maxRunning = Math.max(maxRunning, running);
        await smartdelay.delayFor(100);
        running--;
      },
    });
  // Same provider + same domain input → should serialize
  const t1 = makeTask('mixed-1', 'acme');
  const t2 = makeTask('mixed-2', 'acme');
  // Different provider + same domain → parallel
  const t3 = makeTask('mixed-3', 'cloudflare');
  manager.addTask(t1);
  manager.addTask(t2);
  manager.addTask(t3);
  await Promise.all([
    manager.triggerTaskConstrained(t1, 'example.com'),
    manager.triggerTaskConstrained(t2, 'example.com'),
    manager.triggerTaskConstrained(t3, 'example.com'),
  ]);
  // t1 and t2 share key "acme:example.com" → serialized (max 1 at a time)
  // t3 has key "cloudflare:example.com" → parallel with whichever acme task runs
  // → peak concurrency is exactly 2. Assert equality directly instead of the
  //   redundant <=2 / >=2 pair.
  expect(maxRunning).toEqual(2);
  await manager.stop();
});
// =============================================================================
// Rate Limiting Tests
// =============================================================================
// Test 24: Basic N-per-window rate limiting
tap.test('should enforce N-per-window rate limit', async () => {
  const manager = new taskbuffer.TaskManager();
  const execTimestamps: number[] = [];
  // Sliding window: at most 3 executions per 1000ms for the shared key;
  // concurrency itself is unconstrained (maxConcurrent: Infinity).
  const constraint = new taskbuffer.TaskConstraintGroup({
    name: 'rate-limit-basic',
    maxConcurrent: Infinity,
    constraintKeyForExecution: () => 'shared',
    rateLimit: {
      maxPerWindow: 3,
      windowMs: 1000,
    },
  });
  manager.addConstraintGroup(constraint);
  const makeTask = (id: number) =>
    new taskbuffer.Task({
      name: `rl-${id}`,
      taskFunction: async () => {
        execTimestamps.push(Date.now());
        return `done-${id}`;
      },
    });
  const tasks = [makeTask(1), makeTask(2), makeTask(3), makeTask(4), makeTask(5)];
  for (const t of tasks) manager.addTask(t);
  const results = await Promise.all(tasks.map((t) => manager.triggerTaskConstrained(t)));
  // All 5 should eventually complete
  expect(results).toEqual(['done-1', 'done-2', 'done-3', 'done-4', 'done-5']);
  // First 3 should execute nearly simultaneously
  const firstBatchSpread = execTimestamps[2] - execTimestamps[0];
  expect(firstBatchSpread).toBeLessThan(100);
  // 4th and 5th should wait for the window to slide (at least ~900ms after first)
  // — 900 rather than 1000 leaves slack for timer jitter.
  const fourthDelay = execTimestamps[3] - execTimestamps[0];
  expect(fourthDelay).toBeGreaterThanOrEqual(900);
  await manager.stop();
});
// Test 25: Rate limit + maxConcurrent interaction
tap.test('should enforce both rate limit and maxConcurrent independently', async () => {
  const manager = new taskbuffer.TaskManager();
  let running = 0;
  let maxRunning = 0; // peak observed concurrency
  const execTimestamps: number[] = [];
  // Two constraints at once: max 2 concurrent AND max 3 per 2s window.
  const constraint = new taskbuffer.TaskConstraintGroup({
    name: 'rate-concurrent',
    maxConcurrent: 2,
    constraintKeyForExecution: () => 'shared',
    rateLimit: {
      maxPerWindow: 3,
      windowMs: 2000,
    },
  });
  manager.addConstraintGroup(constraint);
  const makeTask = (id: number) =>
    new taskbuffer.Task({
      name: `rc-${id}`,
      taskFunction: async () => {
        running++;
        maxRunning = Math.max(maxRunning, running);
        execTimestamps.push(Date.now());
        await smartdelay.delayFor(50);
        running--;
      },
    });
  const tasks = [makeTask(1), makeTask(2), makeTask(3), makeTask(4)];
  for (const t of tasks) manager.addTask(t);
  await Promise.all(tasks.map((t) => manager.triggerTaskConstrained(t)));
  // Concurrency limit should be enforced
  expect(maxRunning).toBeLessThanOrEqual(2);
  // 4th task should wait for rate limit window (only 3 allowed per 2s);
  // 1900 rather than 2000 leaves slack for timer jitter.
  const fourthDelay = execTimestamps[3] - execTimestamps[0];
  expect(fourthDelay).toBeGreaterThanOrEqual(1900);
  await manager.stop();
});
// Test 26: Rate limit + cooldownMs interaction
tap.test('should enforce both rate limit and cooldown together', async () => {
  const manager = new taskbuffer.TaskManager();
  const execTimestamps: number[] = [];
  // Cooldown (200ms between runs) composes with the 2-per-2s sliding window.
  const constraint = new taskbuffer.TaskConstraintGroup({
    name: 'rate-cooldown',
    maxConcurrent: 1,
    cooldownMs: 200,
    constraintKeyForExecution: () => 'shared',
    rateLimit: {
      maxPerWindow: 2,
      windowMs: 2000,
    },
  });
  manager.addConstraintGroup(constraint);
  const makeTask = (id: number) =>
    new taskbuffer.Task({
      name: `rcd-${id}`,
      taskFunction: async () => {
        execTimestamps.push(Date.now());
      },
    });
  const tasks = [makeTask(1), makeTask(2), makeTask(3)];
  for (const t of tasks) manager.addTask(t);
  await Promise.all(tasks.map((t) => manager.triggerTaskConstrained(t)));
  // Cooldown between first and second: at least 200ms
  // (asserted as >=150 to tolerate timer jitter)
  const gap12 = execTimestamps[1] - execTimestamps[0];
  expect(gap12).toBeGreaterThanOrEqual(150);
  // Third task blocked by rate limit (only 2 per 2000ms window) AND cooldown
  const gap13 = execTimestamps[2] - execTimestamps[0];
  expect(gap13).toBeGreaterThanOrEqual(1900);
  await manager.stop();
});
// Test 27: Per-key rate limit independence
tap.test('should apply rate limit per key independently', async () => {
  const manager = new taskbuffer.TaskManager();
  const execLog: string[] = [];
  // One execution per 2s window — tracked separately for every constraint key.
  const constraint = new taskbuffer.TaskConstraintGroup({
    name: 'rate-per-key',
    constraintKeyForExecution: (_task, input?: string) => input,
    rateLimit: {
      maxPerWindow: 1,
      windowMs: 2000,
    },
  });
  manager.addConstraintGroup(constraint);
  const task = new taskbuffer.Task({
    name: 'rate-key-task',
    taskFunction: async (input: string) => {
      execLog.push(input);
    },
  });
  manager.addTask(task);
  // Trigger 2 for key-A and 1 for key-B; results are not inspected,
  // so the previous unused [r1, r2, r3] destructuring is dropped.
  await Promise.all([
    manager.triggerTaskConstrained(task, 'key-A'),
    manager.triggerTaskConstrained(task, 'key-B'),
    manager.triggerTaskConstrained(task, 'key-A'), // should wait for window
  ]);
  // key-A and key-B first calls should both execute immediately
  expect(execLog[0]).toEqual('key-A');
  expect(execLog[1]).toEqual('key-B');
  // key-A's second call must also have executed after the window slid:
  // exactly two 'key-A' entries and three executions total. (The previous
  // toContain('key-A') check was vacuous — execLog[0] already satisfied it.)
  expect(execLog.filter((entry) => entry === 'key-A').length).toEqual(2);
  expect(execLog.length).toEqual(3);
  await manager.stop();
});
// Test 28: getNextAvailableDelay returns correct value
tap.test('should return correct getNextAvailableDelay and canRun after waiting', async () => {
  // Exercises the constraint group directly (no TaskManager needed).
  const constraint = new taskbuffer.TaskConstraintGroup({
    name: 'delay-check',
    constraintKeyForExecution: () => 'key',
    rateLimit: {
      maxPerWindow: 1,
      windowMs: 500,
    },
  });
  // Initially: can run, no delay
  expect(constraint.canRun('key')).toBeTrue();
  expect(constraint.getNextAvailableDelay('key')).toEqual(0);
  // Acquire and release to record a completion
  constraint.acquireSlot('key');
  constraint.releaseSlot('key');
  // Now: rate limit saturated
  expect(constraint.canRun('key')).toBeFalse();
  // Delay must point at when the oldest window entry expires: >0 but <=windowMs.
  const delay = constraint.getNextAvailableDelay('key');
  expect(delay).toBeGreaterThan(0);
  expect(delay).toBeLessThanOrEqual(500);
  // Wait for window to slide
  await smartdelay.delayFor(delay + 50);
  expect(constraint.canRun('key')).toBeTrue();
  expect(constraint.getNextAvailableDelay('key')).toEqual(0);
});
// Test 29: reset() clears rate-limit timestamps
tap.test('should clear rate limit timestamps on reset', async () => {
  // A single completion saturates the 1-per-minute sliding window.
  const group = new taskbuffer.TaskConstraintGroup({
    name: 'reset-rate',
    constraintKeyForExecution: () => 'key',
    rateLimit: { maxPerWindow: 1, windowMs: 60000 },
  });
  group.acquireSlot('key');
  group.releaseSlot('key');
  expect(group.canRun('key')).toBeFalse();
  // reset() must wipe the recorded window history entirely.
  group.reset();
  expect(group.canRun('key')).toBeTrue();
  expect(group.getRateLimitDelay('key')).toEqual(0);
});
// =============================================================================
// Result Sharing Tests
// =============================================================================
// Test 30: Basic result sharing — multiple waiters get first task's result
tap.test('should share result with queued tasks (share-latest mode)', async () => {
  const manager = new taskbuffer.TaskManager();
  let execCount = 0;
  // maxConcurrent: 1 forces t2/t3 into the queue while t1 runs;
  // share-latest then resolves them with t1's result instead of executing.
  const constraint = new taskbuffer.TaskConstraintGroup({
    name: 'share-basic',
    maxConcurrent: 1,
    constraintKeyForExecution: () => 'shared',
    resultSharingMode: 'share-latest',
  });
  manager.addConstraintGroup(constraint);
  const makeTask = (id: number) =>
    new taskbuffer.Task({
      name: `share-${id}`,
      taskFunction: async () => {
        execCount++;
        await smartdelay.delayFor(100);
        return 'shared-result';
      },
    });
  const t1 = makeTask(1);
  const t2 = makeTask(2);
  const t3 = makeTask(3);
  manager.addTask(t1);
  manager.addTask(t2);
  manager.addTask(t3);
  const [r1, r2, r3] = await Promise.all([
    manager.triggerTaskConstrained(t1),
    manager.triggerTaskConstrained(t2),
    manager.triggerTaskConstrained(t3),
  ]);
  // Only 1 execution, all get same result
  expect(execCount).toEqual(1);
  expect(r1).toEqual('shared-result');
  expect(r2).toEqual('shared-result');
  expect(r3).toEqual('shared-result');
  await manager.stop();
});
// Test 31: Different keys get independent results
tap.test('should share results independently per key', async () => {
  const manager = new taskbuffer.TaskManager();
  let execCount = 0;
  // Result sharing is scoped to the constraint key, so distinct inputs
  // (→ distinct keys) must never receive each other's results.
  const constraint = new taskbuffer.TaskConstraintGroup({
    name: 'share-per-key',
    maxConcurrent: 1,
    constraintKeyForExecution: (_task, input?: string) => input,
    resultSharingMode: 'share-latest',
  });
  manager.addConstraintGroup(constraint);
  const task = new taskbuffer.Task({
    name: 'keyed-share',
    taskFunction: async (input: string) => {
      execCount++;
      await smartdelay.delayFor(50);
      return `result-for-${input}`;
    },
  });
  manager.addTask(task);
  const [r1, r2] = await Promise.all([
    manager.triggerTaskConstrained(task, 'key-A'),
    manager.triggerTaskConstrained(task, 'key-B'),
  ]);
  // Different keys → both execute independently
  expect(execCount).toEqual(2);
  expect(r1).toEqual('result-for-key-A');
  expect(r2).toEqual('result-for-key-B');
  await manager.stop();
});
// Test 32: Default mode ('none') — no sharing
tap.test('should not share results when mode is none (default)', async () => {
  const manager = new taskbuffer.TaskManager();
  let execCount = 0;
  const constraint = new taskbuffer.TaskConstraintGroup({
    name: 'no-share',
    maxConcurrent: 1,
    constraintKeyForExecution: () => 'shared',
    // resultSharingMode defaults to 'none'
  });
  manager.addConstraintGroup(constraint);
  const makeTask = (id: number) =>
    new taskbuffer.Task({
      name: `noshare-${id}`,
      taskFunction: async () => {
        execCount++;
        await smartdelay.delayFor(50);
        // Distinct return value per execution lets the assertions below
        // prove both runs actually happened.
        return `result-${execCount}`;
      },
    });
  const t1 = makeTask(1);
  const t2 = makeTask(2);
  manager.addTask(t1);
  manager.addTask(t2);
  const [r1, r2] = await Promise.all([
    manager.triggerTaskConstrained(t1),
    manager.triggerTaskConstrained(t2),
  ]);
  // Both should execute independently
  expect(execCount).toEqual(2);
  expect(r1).toEqual('result-1');
  expect(r2).toEqual('result-2');
  await manager.stop();
});
// Test 33: Sharing takes priority over shouldExecute for queued tasks
tap.test('should not call shouldExecute for shared results', async () => {
  const manager = new taskbuffer.TaskManager();
  let shouldExecuteCalls = 0;
  let execCount = 0;
  const constraint = new taskbuffer.TaskConstraintGroup({
    name: 'share-vs-should',
    maxConcurrent: 1,
    constraintKeyForExecution: () => 'shared',
    resultSharingMode: 'share-latest',
    // Counting wrapper: lets us verify how often the gate is consulted.
    shouldExecute: () => {
      shouldExecuteCalls++;
      return true;
    },
  });
  manager.addConstraintGroup(constraint);
  const makeTask = (id: number) =>
    new taskbuffer.Task({
      name: `svs-${id}`,
      taskFunction: async () => {
        execCount++;
        await smartdelay.delayFor(100);
        return 'shared-value';
      },
    });
  const t1 = makeTask(1);
  const t2 = makeTask(2);
  const t3 = makeTask(3);
  manager.addTask(t1);
  manager.addTask(t2);
  manager.addTask(t3);
  // Snapshot guards against calls made by any earlier setup in this group.
  const initialShouldExecuteCalls = shouldExecuteCalls;
  await Promise.all([
    manager.triggerTaskConstrained(t1),
    manager.triggerTaskConstrained(t2),
    manager.triggerTaskConstrained(t3),
  ]);
  // Only 1 execution
  expect(execCount).toEqual(1);
  // shouldExecute called once for the first task, but not for shared results
  // (t2 and t3 get shared result without going through executeWithConstraintTracking)
  const totalShouldExecuteCalls = shouldExecuteCalls - initialShouldExecuteCalls;
  expect(totalShouldExecuteCalls).toEqual(1);
  await manager.stop();
});
// Test 34: Error results NOT shared — queued task executes after failure
tap.test('should not share error results with queued tasks', async () => {
  const manager = new taskbuffer.TaskManager();
  let execCount = 0;
  const constraint = new taskbuffer.TaskConstraintGroup({
    name: 'share-error',
    maxConcurrent: 1,
    constraintKeyForExecution: () => 'shared',
    resultSharingMode: 'share-latest',
  });
  manager.addConstraintGroup(constraint);
  // First task fails (catchErrors keeps the trigger from rejecting);
  // its outcome must NOT be recorded as a shareable result.
  const failTask = new taskbuffer.Task({
    name: 'fail-share',
    catchErrors: true,
    taskFunction: async () => {
      execCount++;
      await smartdelay.delayFor(50);
      throw new Error('fail');
    },
  });
  const successTask = new taskbuffer.Task({
    name: 'success-share',
    taskFunction: async () => {
      execCount++;
      await smartdelay.delayFor(50);
      return 'success-result';
    },
  });
  manager.addTask(failTask);
  manager.addTask(successTask);
  // The failing task's resolution value is irrelevant here — elide the first
  // binding instead of declaring an unused r1.
  const [, r2] = await Promise.all([
    manager.triggerTaskConstrained(failTask),
    manager.triggerTaskConstrained(successTask),
  ]);
  // Both should have executed (error result not shared)
  expect(execCount).toEqual(2);
  expect(r2).toEqual('success-result');
  await manager.stop();
});
// Test 35: Multiple constraint groups — sharing from one group applies
tap.test('should share result when any applicable group has sharing enabled', async () => {
  const manager = new taskbuffer.TaskManager();
  let execCount = 0;
  // Two groups apply to every task; sharing from either one should suffice.
  const sharingGroup = new taskbuffer.TaskConstraintGroup({
    name: 'sharing-group',
    maxConcurrent: 1,
    constraintKeyForExecution: () => 'shared',
    resultSharingMode: 'share-latest',
  });
  const nonSharingGroup = new taskbuffer.TaskConstraintGroup({
    name: 'non-sharing-group',
    maxConcurrent: 5,
    constraintKeyForExecution: () => 'all',
    // resultSharingMode defaults to 'none'
  });
  manager.addConstraintGroup(sharingGroup);
  manager.addConstraintGroup(nonSharingGroup);
  const makeTask = (id: number) =>
    new taskbuffer.Task({
      name: `multi-share-${id}`,
      taskFunction: async () => {
        execCount++;
        await smartdelay.delayFor(100);
        return 'multi-group-result';
      },
    });
  const t1 = makeTask(1);
  const t2 = makeTask(2);
  manager.addTask(t1);
  manager.addTask(t2);
  const [r1, r2] = await Promise.all([
    manager.triggerTaskConstrained(t1),
    manager.triggerTaskConstrained(t2),
  ]);
  // Only 1 execution due to sharing from the sharing group
  expect(execCount).toEqual(1);
  expect(r1).toEqual('multi-group-result');
  expect(r2).toEqual('multi-group-result');
  await manager.stop();
});
// Test 36: Result sharing + rate limit combo
tap.test('should resolve rate-limited waiters with shared result', async () => {
  const manager = new taskbuffer.TaskManager();
  let execCount = 0;
  // Rate limit would otherwise stall t2/t3 for up to 5s; result sharing
  // should short-circuit that wait.
  const constraint = new taskbuffer.TaskConstraintGroup({
    name: 'share-rate',
    maxConcurrent: 1,
    constraintKeyForExecution: () => 'shared',
    resultSharingMode: 'share-latest',
    rateLimit: {
      maxPerWindow: 1,
      windowMs: 5000,
    },
  });
  manager.addConstraintGroup(constraint);
  const makeTask = (id: number) =>
    new taskbuffer.Task({
      name: `sr-${id}`,
      taskFunction: async () => {
        execCount++;
        await smartdelay.delayFor(50);
        return 'rate-shared-result';
      },
    });
  const t1 = makeTask(1);
  const t2 = makeTask(2);
  const t3 = makeTask(3);
  manager.addTask(t1);
  manager.addTask(t2);
  manager.addTask(t3);
  const startTime = Date.now();
  const [r1, r2, r3] = await Promise.all([
    manager.triggerTaskConstrained(t1),
    manager.triggerTaskConstrained(t2),
    manager.triggerTaskConstrained(t3),
  ]);
  const elapsed = Date.now() - startTime;
  // Only 1 execution; waiters get shared result without waiting for rate limit window
  expect(execCount).toEqual(1);
  expect(r1).toEqual('rate-shared-result');
  expect(r2).toEqual('rate-shared-result');
  expect(r3).toEqual('rate-shared-result');
  // Should complete quickly (not waiting 5s for rate limit window)
  expect(elapsed).toBeLessThan(1000);
  await manager.stop();
});
export default tap.start();

View File

@@ -1,52 +1,151 @@
import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as taskbuffer from '../ts/index.js';
import * as smartdelay from '@push.rocks/smartdelay';
let counter1 = 0;
let counter2 = 0;
let counter3 = 0;
// Test 1: Basic buffered execution with afterTask chain
tap.test('should run buffered tasks with afterTask chain', async () => {
let counter1 = 0;
let counter2 = 0;
let counter3 = 0;
tap.test('should run buffered', async (tools) => {
const task = new taskbuffer.Task({
name: 'a buffered task',
taskFunction: async () => {
counter1++;
await tools.delayFor(2000);
console.log(`task 1 ran ${counter1} times`);
},
buffered: true,
bufferMax: 1,
afterTask: () => {
return task2;
},
});
const task2 = new taskbuffer.Task({
name: 'a buffered task',
taskFunction: async () => {
counter2++;
await tools.delayFor(2000);
console.log(`task2 ran ${counter2} times`);
},
buffered: true,
bufferMax: 1,
afterTask: () => {
return task3;
},
});
const task3 = new taskbuffer.Task({
name: 'a buffered task',
name: 'buffered-chain-3',
taskFunction: async () => {
counter3++;
await tools.delayFor(2000);
console.log(`task3 ran ${counter3} times`);
await smartdelay.delayFor(50);
},
buffered: true,
bufferMax: 1,
});
while (counter1 < 10) {
await tools.delayFor(5000);
task.trigger();
const task2 = new taskbuffer.Task({
name: 'buffered-chain-2',
taskFunction: async () => {
counter2++;
await smartdelay.delayFor(50);
},
buffered: true,
bufferMax: 1,
afterTask: () => task3,
});
const task1 = new taskbuffer.Task({
name: 'buffered-chain-1',
taskFunction: async () => {
counter1++;
await smartdelay.delayFor(50);
},
buffered: true,
bufferMax: 1,
afterTask: () => task2,
});
// Trigger 3 times with enough spacing for the chain to complete
for (let i = 0; i < 3; i++) {
task1.trigger();
await smartdelay.delayFor(250); // enough for chain of 3 x 50ms tasks
}
// Wait for final chain to finish
await smartdelay.delayFor(500);
// Each task in the chain should have run at least once
expect(counter1).toBeGreaterThanOrEqual(1);
expect(counter2).toBeGreaterThanOrEqual(1);
expect(counter3).toBeGreaterThanOrEqual(1);
// afterTask chain means task2 count should match task1 (each trigger chains)
expect(counter2).toEqual(counter1);
expect(counter3).toEqual(counter1);
});
tap.start();
// Test 2: bufferMax limits concurrent buffered executions
tap.test('should respect bufferMax for concurrent buffered calls', async () => {
  let inFlight = 0;
  let peakInFlight = 0;
  let startedRuns = 0;
  const bufferedTask = new taskbuffer.Task({
    name: 'buffer-max-test',
    taskFunction: async () => {
      inFlight += 1;
      peakInFlight = Math.max(peakInFlight, inFlight);
      startedRuns += 1;
      await smartdelay.delayFor(100);
      inFlight -= 1;
    },
    buffered: true,
    bufferMax: 2,
  });
  // Fire a burst of triggers; the buffer caps concurrency at bufferMax.
  let burst = 10;
  while (burst-- > 0) {
    bufferedTask.trigger();
  }
  // Give every buffered execution time to drain.
  await smartdelay.delayFor(1000);
  expect(peakInFlight).toBeLessThanOrEqual(2);
  expect(startedRuns).toBeGreaterThanOrEqual(1);
});
// Test 3: bufferMax limits how many runs are queued during execution
tap.test('should limit queued runs to bufferMax during execution', async () => {
  let runCount = 0;
  const task = new taskbuffer.Task({
    name: 'buffer-queue-test',
    taskFunction: async () => {
      runCount++;
      await smartdelay.delayFor(100);
    },
    buffered: true,
    bufferMax: 2,
  });
  // Rapid-fire 5 triggers — bufferMax:2 means counter caps at 2
  // so only 2 runs will happen (the initial run + 1 buffered rerun)
  task.trigger();
  task.trigger();
  task.trigger();
  task.trigger();
  task.trigger();
  // 500ms comfortably covers both 100ms runs.
  await smartdelay.delayFor(500);
  expect(runCount).toEqual(2);
});
// Test 4: Triggers spaced after completion queue new runs
tap.test('should re-trigger after previous buffered run completes', async () => {
  let executions = 0;
  const task = new taskbuffer.Task({
    name: 'retrigger-test',
    taskFunction: async () => {
      executions += 1;
      await smartdelay.delayFor(50);
    },
    buffered: true,
    bufferMax: 1,
  });
  // Each trigger lands while the task is idle (the 100ms pause outlasts the
  // 50ms run), so every trigger starts a fresh execution.
  for (let round = 0; round < 3; round++) {
    task.trigger();
    await smartdelay.delayFor(100);
  }
  expect(executions).toEqual(3);
});
export default tap.start();

View File

@@ -3,6 +3,6 @@
*/
export const commitinfo = {
name: '@push.rocks/taskbuffer',
version: '5.0.1',
version: '6.1.2',
description: 'A flexible task management library supporting TypeScript, allowing for task buffering, scheduling, and execution with dependency management.'
}

View File

@@ -12,7 +12,7 @@ export { TaskStep } from './taskbuffer.classes.taskstep.js';
export type { ITaskStep } from './taskbuffer.classes.taskstep.js';
// Metadata interfaces
export type { ITaskMetadata, ITaskExecutionReport, IScheduledTaskInfo, ITaskEvent, TTaskEventType, ITaskConstraintGroupOptions } from './taskbuffer.interfaces.js';
export type { ITaskMetadata, ITaskExecutionReport, IScheduledTaskInfo, ITaskEvent, TTaskEventType, ITaskConstraintGroupOptions, ITaskExecution, IRateLimitConfig, TResultSharingMode } from './taskbuffer.interfaces.js';
import * as distributedCoordination from './taskbuffer.classes.distributedcoordinator.js';
export { distributedCoordination };

View File

@@ -1,27 +1,42 @@
import type { Task } from './taskbuffer.classes.task.js';
import type { ITaskConstraintGroupOptions } from './taskbuffer.interfaces.js';
import type { ITaskConstraintGroupOptions, IRateLimitConfig, TResultSharingMode } from './taskbuffer.interfaces.js';
export class TaskConstraintGroup<TData extends Record<string, unknown> = Record<string, unknown>> {
public name: string;
public maxConcurrent: number;
public cooldownMs: number;
private constraintKeyForTask: (task: Task<any, any, TData>) => string | null | undefined;
public rateLimit: IRateLimitConfig | null;
public resultSharingMode: TResultSharingMode;
private constraintKeyForExecution: (task: Task<any, any, TData>, input?: any) => string | null | undefined;
private shouldExecuteFn?: (task: Task<any, any, TData>, input?: any) => boolean | Promise<boolean>;
private runningCounts = new Map<string, number>();
private lastCompletionTimes = new Map<string, number>();
private completionTimestamps = new Map<string, number[]>();
private lastResults = new Map<string, { result: any; timestamp: number }>();
constructor(options: ITaskConstraintGroupOptions<TData>) {
this.name = options.name;
this.constraintKeyForTask = options.constraintKeyForTask;
this.constraintKeyForExecution = options.constraintKeyForExecution;
this.maxConcurrent = options.maxConcurrent ?? Infinity;
this.cooldownMs = options.cooldownMs ?? 0;
this.shouldExecuteFn = options.shouldExecute;
this.rateLimit = options.rateLimit ?? null;
this.resultSharingMode = options.resultSharingMode ?? 'none';
}
public getConstraintKey(task: Task<any, any, TData>): string | null {
const key = this.constraintKeyForTask(task);
public getConstraintKey(task: Task<any, any, TData>, input?: any): string | null {
const key = this.constraintKeyForExecution(task, input);
return key ?? null;
}
public async checkShouldExecute(task: Task<any, any, TData>, input?: any): Promise<boolean> {
if (!this.shouldExecuteFn) {
return true;
}
return this.shouldExecuteFn(task, input);
}
public canRun(subGroupKey: string): boolean {
const running = this.runningCounts.get(subGroupKey) ?? 0;
if (running >= this.maxConcurrent) {
@@ -38,6 +53,16 @@ export class TaskConstraintGroup<TData extends Record<string, unknown> = Record<
}
}
if (this.rateLimit) {
this.pruneCompletionTimestamps(subGroupKey);
const timestamps = this.completionTimestamps.get(subGroupKey);
const completedInWindow = timestamps ? timestamps.length : 0;
const running = this.runningCounts.get(subGroupKey) ?? 0;
if (completedInWindow + running >= this.rateLimit.maxPerWindow) {
return false;
}
}
return true;
}
@@ -55,6 +80,12 @@ export class TaskConstraintGroup<TData extends Record<string, unknown> = Record<
this.runningCounts.set(subGroupKey, next);
}
this.lastCompletionTimes.set(subGroupKey, Date.now());
if (this.rateLimit) {
const timestamps = this.completionTimestamps.get(subGroupKey) ?? [];
timestamps.push(Date.now());
this.completionTimestamps.set(subGroupKey, timestamps);
}
}
public getCooldownRemaining(subGroupKey: string): number {
@@ -73,8 +104,61 @@ export class TaskConstraintGroup<TData extends Record<string, unknown> = Record<
return this.runningCounts.get(subGroupKey) ?? 0;
}
// Rate limit helpers
private pruneCompletionTimestamps(subGroupKey: string): void {
const timestamps = this.completionTimestamps.get(subGroupKey);
if (!timestamps || !this.rateLimit) return;
const cutoff = Date.now() - this.rateLimit.windowMs;
let i = 0;
while (i < timestamps.length && timestamps[i] <= cutoff) {
i++;
}
if (i > 0) {
timestamps.splice(0, i);
}
}
public getRateLimitDelay(subGroupKey: string): number {
if (!this.rateLimit) return 0;
this.pruneCompletionTimestamps(subGroupKey);
const timestamps = this.completionTimestamps.get(subGroupKey);
const completedInWindow = timestamps ? timestamps.length : 0;
const running = this.runningCounts.get(subGroupKey) ?? 0;
if (completedInWindow + running < this.rateLimit.maxPerWindow) {
return 0;
}
// If only running tasks fill the window (no completions yet), we can't compute a delay
if (!timestamps || timestamps.length === 0) {
return 1; // minimal delay; drain will re-check after running tasks complete
}
// The oldest timestamp in the window determines when a slot opens
const oldestInWindow = timestamps[0];
const expiry = oldestInWindow + this.rateLimit.windowMs;
return Math.max(0, expiry - Date.now());
}
public getNextAvailableDelay(subGroupKey: string): number {
return Math.max(this.getCooldownRemaining(subGroupKey), this.getRateLimitDelay(subGroupKey));
}
// Result sharing helpers
public recordResult(subGroupKey: string, result: any): void {
if (this.resultSharingMode === 'none') return;
this.lastResults.set(subGroupKey, { result, timestamp: Date.now() });
}
public getLastResult(subGroupKey: string): { result: any; timestamp: number } | undefined {
return this.lastResults.get(subGroupKey);
}
public hasResultSharing(): boolean {
return this.resultSharingMode !== 'none';
}
public reset(): void {
this.runningCounts.clear();
this.lastCompletionTimes.clear();
this.completionTimestamps.clear();
this.lastResults.clear();
}
}

View File

@@ -80,14 +80,18 @@ export class TaskManager {
// Gather applicable constraints
const applicableGroups: Array<{ group: TaskConstraintGroup<any>; key: string }> = [];
for (const group of this.constraintGroups) {
const key = group.getConstraintKey(task);
const key = group.getConstraintKey(task, input);
if (key !== null) {
applicableGroups.push({ group, key });
}
}
// No constraints apply → trigger directly
// No constraints apply → check shouldExecute then trigger directly
if (applicableGroups.length === 0) {
const shouldRun = await this.checkAllShouldExecute(task, input);
if (!shouldRun) {
return undefined;
}
return task.trigger(input);
}
@@ -97,24 +101,56 @@ export class TaskManager {
return this.executeWithConstraintTracking(task, input, applicableGroups);
}
// Blocked → enqueue with deferred promise
// Blocked → enqueue with deferred promise and cached constraint keys
const deferred = plugins.smartpromise.defer<any>();
this.constraintQueue.push({ task, input, deferred });
const constraintKeys = new Map<string, string>();
for (const { group, key } of applicableGroups) {
constraintKeys.set(group.name, key);
}
this.constraintQueue.push({ task, input, deferred, constraintKeys });
return deferred.promise;
}
private async checkAllShouldExecute(task: Task<any, any, any>, input?: any): Promise<boolean> {
for (const group of this.constraintGroups) {
const shouldRun = await group.checkShouldExecute(task, input);
if (!shouldRun) {
return false;
}
}
return true;
}
private async executeWithConstraintTracking(
task: Task<any, any, any>,
input: any,
groups: Array<{ group: TaskConstraintGroup<any>; key: string }>,
): Promise<any> {
// Acquire slots
// Acquire slots synchronously to prevent race conditions
for (const { group, key } of groups) {
group.acquireSlot(key);
}
// Check shouldExecute after acquiring slots
const shouldRun = await this.checkAllShouldExecute(task, input);
if (!shouldRun) {
// Release slots and drain queue
for (const { group, key } of groups) {
group.releaseSlot(key);
}
this.drainConstraintQueue();
return undefined;
}
try {
return await task.trigger(input);
const result = await task.trigger(input);
// Record result for groups with result sharing (only on true success, not caught errors)
if (!task.lastError) {
for (const { group, key } of groups) {
group.recordResult(key, result);
}
}
return result;
} finally {
// Release slots
for (const { group, key } of groups) {
@@ -131,32 +167,51 @@ export class TaskManager {
for (const entry of this.constraintQueue) {
const applicableGroups: Array<{ group: TaskConstraintGroup<any>; key: string }> = [];
for (const group of this.constraintGroups) {
const key = group.getConstraintKey(entry.task);
const key = group.getConstraintKey(entry.task, entry.input);
if (key !== null) {
applicableGroups.push({ group, key });
}
}
// No constraints apply anymore (group removed?) → run directly
// No constraints apply anymore (group removed?) → check shouldExecute then run
if (applicableGroups.length === 0) {
entry.task.trigger(entry.input).then(
(result) => entry.deferred.resolve(result),
(err) => entry.deferred.reject(err),
);
this.checkAllShouldExecute(entry.task, entry.input).then((shouldRun) => {
if (!shouldRun) {
entry.deferred.resolve(undefined);
return;
}
entry.task.trigger(entry.input).then(
(result) => entry.deferred.resolve(result),
(err) => entry.deferred.reject(err),
);
});
continue;
}
// Check result sharing — if any applicable group has a shared result, resolve immediately
const sharingGroups = applicableGroups.filter(({ group }) => group.hasResultSharing());
if (sharingGroups.length > 0) {
const groupWithResult = sharingGroups.find(({ group, key }) =>
group.getLastResult(key) !== undefined
);
if (groupWithResult) {
entry.deferred.resolve(groupWithResult.group.getLastResult(groupWithResult.key)!.result);
continue;
}
}
const allCanRun = applicableGroups.every(({ group, key }) => group.canRun(key));
if (allCanRun) {
// executeWithConstraintTracking handles shouldExecute check internally
this.executeWithConstraintTracking(entry.task, entry.input, applicableGroups).then(
(result) => entry.deferred.resolve(result),
(err) => entry.deferred.reject(err),
);
} else {
stillQueued.push(entry);
// Track shortest cooldown for timer scheduling
// Track shortest delay for timer scheduling (cooldown + rate limit)
for (const { group, key } of applicableGroups) {
const remaining = group.getCooldownRemaining(key);
const remaining = group.getNextAvailableDelay(key);
if (remaining > 0 && remaining < shortestCooldown) {
shortestCooldown = remaining;
}

View File

@@ -1,17 +1,33 @@
import type { ITaskStep } from './taskbuffer.classes.taskstep.js';
import type { Task } from './taskbuffer.classes.task.js';
/**
 * Sliding-window rate limit configuration for a TaskConstraintGroup:
 * at most `maxPerWindow` task completions may occur within any rolling
 * window of `windowMs` milliseconds.
 */
export interface IRateLimitConfig {
  maxPerWindow: number; // max completions allowed within the sliding window
  windowMs: number; // sliding window duration in ms
}
export type TResultSharingMode = 'none' | 'share-latest';
/**
 * Constructor options for a TaskConstraintGroup.
 *
 * NOTE(review): the diff view rendered both the removed single-arg
 * `constraintKeyForTask` and its replacement side by side; only the
 * two-argument `constraintKeyForExecution` is kept here, matching the
 * `getConstraintKey(task, input)` call sites used by the TaskManager.
 */
export interface ITaskConstraintGroupOptions<TData extends Record<string, unknown> = Record<string, unknown>> {
  /** Unique name identifying this constraint group. */
  name: string;
  /**
   * Derives the constraint key for a given execution; return null/undefined
   * when the group does not apply to this task/input combination.
   */
  constraintKeyForExecution: (task: Task<any, any, TData>, input?: any) => string | null | undefined;
  maxConcurrent?: number; // default: Infinity
  cooldownMs?: number; // default: 0
  /** Optional gate evaluated before a run; a falsy result skips execution. */
  shouldExecute?: (task: Task<any, any, TData>, input?: any) => boolean | Promise<boolean>;
  /** Optional sliding-window rate limit (composes with cooldown/maxConcurrent). */
  rateLimit?: IRateLimitConfig;
  resultSharingMode?: TResultSharingMode; // default: 'none'
}
/**
 * A single pending execution: a task paired with the input it was
 * triggered with.
 */
export interface ITaskExecution<TData extends Record<string, unknown> = Record<string, unknown>> {
  task: Task<any, any, TData>;
  input: any;
}
/**
 * Entry in the TaskManager's constraint queue: an execution blocked by one
 * or more constraint groups, together with the constraint keys cached at
 * enqueue time so the drain pass does not have to recompute them.
 */
export interface IConstrainedTaskEntry {
  task: Task<any, any, any>;
  input: any;
  // Resolved with the task result (or a shared result) when the queued
  // execution eventually runs; rejected if the run throws.
  deferred: import('@push.rocks/smartpromise').Deferred<any>;
  constraintKeys: Map<string, string>; // groupName -> key
}
export interface ITaskMetadata {

View File

@@ -3,6 +3,6 @@
*/
/**
 * Auto-generated commit metadata for the published package.
 * The diff view rendered both the old ('5.0.1') and new ('6.1.2') version
 * lines as a duplicate `version` key; only the new value is kept, matching
 * the 6.1.2 release recorded in the changelog.
 */
export const commitinfo = {
  name: '@push.rocks/taskbuffer',
  version: '6.1.2',
  description: 'A flexible task management library supporting TypeScript, allowing for task buffering, scheduling, and execution with dependency management.'
}