// Constraint-group, rate-limiting, and result-sharing tests for the taskbuffer library.
import { expect, tap } from '@git.zone/tstest/tapbundle';
|
|
import * as taskbuffer from '../ts/index.js';
|
|
import * as smartdelay from '@push.rocks/smartdelay';
|
|
|
|
// Test 1: Task data property — typed data accessible
|
|
tap.test('should have typed data property on task', async () => {
|
|
const task = new taskbuffer.Task<undefined, [], { domain: string; priority: number }>({
|
|
name: 'data-task',
|
|
data: { domain: 'example.com', priority: 1 },
|
|
taskFunction: async () => {},
|
|
});
|
|
|
|
expect(task.data.domain).toEqual('example.com');
|
|
expect(task.data.priority).toEqual(1);
|
|
});
|
|
|
|
// Test 2: Task data defaults to empty object
|
|
tap.test('should default data to empty object when not provided', async () => {
|
|
const task = new taskbuffer.Task({
|
|
name: 'no-data-task',
|
|
taskFunction: async () => {},
|
|
});
|
|
|
|
expect(task.data).toBeTruthy();
|
|
expect(typeof task.data).toEqual('object');
|
|
});
|
|
|
|
// Test 3: No-constraint passthrough — behavior unchanged
|
|
tap.test('should run tasks directly when no constraints are configured', async () => {
|
|
const manager = new taskbuffer.TaskManager();
|
|
let executed = false;
|
|
|
|
const task = new taskbuffer.Task({
|
|
name: 'passthrough-task',
|
|
taskFunction: async () => {
|
|
executed = true;
|
|
return 'done';
|
|
},
|
|
});
|
|
|
|
manager.addTask(task);
|
|
const result = await manager.triggerTaskByName('passthrough-task');
|
|
expect(executed).toBeTrue();
|
|
expect(result).toEqual('done');
|
|
await manager.stop();
|
|
});
|
|
|
|
// Test 4: Group concurrency — 3 tasks, max 2 concurrent, 3rd queues
|
|
tap.test('should enforce group concurrency limit', async () => {
|
|
const manager = new taskbuffer.TaskManager();
|
|
let running = 0;
|
|
let maxRunning = 0;
|
|
|
|
const constraint = new taskbuffer.TaskConstraintGroup<{ group: string }>({
|
|
name: 'concurrency-test',
|
|
maxConcurrent: 2,
|
|
constraintKeyForExecution: (task) =>
|
|
task.data.group === 'workers' ? 'workers' : null,
|
|
});
|
|
manager.addConstraintGroup(constraint);
|
|
|
|
const makeTask = (id: number) =>
|
|
new taskbuffer.Task<undefined, [], { group: string }>({
|
|
name: `worker-${id}`,
|
|
data: { group: 'workers' },
|
|
taskFunction: async () => {
|
|
running++;
|
|
maxRunning = Math.max(maxRunning, running);
|
|
await smartdelay.delayFor(200);
|
|
running--;
|
|
},
|
|
});
|
|
|
|
const t1 = makeTask(1);
|
|
const t2 = makeTask(2);
|
|
const t3 = makeTask(3);
|
|
|
|
manager.addTask(t1);
|
|
manager.addTask(t2);
|
|
manager.addTask(t3);
|
|
|
|
await Promise.all([
|
|
manager.triggerTaskConstrained(t1),
|
|
manager.triggerTaskConstrained(t2),
|
|
manager.triggerTaskConstrained(t3),
|
|
]);
|
|
|
|
expect(maxRunning).toBeLessThanOrEqual(2);
|
|
await manager.stop();
|
|
});
|
|
|
|
// Test 5: Key-based mutual exclusion — same key sequential, different keys parallel
|
|
tap.test('should enforce key-based mutual exclusion', async () => {
|
|
const manager = new taskbuffer.TaskManager();
|
|
const log: string[] = [];
|
|
|
|
const constraint = new taskbuffer.TaskConstraintGroup<{ domain: string }>({
|
|
name: 'domain-mutex',
|
|
maxConcurrent: 1,
|
|
constraintKeyForExecution: (task) => task.data.domain,
|
|
});
|
|
manager.addConstraintGroup(constraint);
|
|
|
|
const makeTask = (name: string, domain: string, delayMs: number) =>
|
|
new taskbuffer.Task<undefined, [], { domain: string }>({
|
|
name,
|
|
data: { domain },
|
|
taskFunction: async () => {
|
|
log.push(`${name}-start`);
|
|
await smartdelay.delayFor(delayMs);
|
|
log.push(`${name}-end`);
|
|
},
|
|
});
|
|
|
|
const taskA1 = makeTask('a1', 'a.com', 100);
|
|
const taskA2 = makeTask('a2', 'a.com', 100);
|
|
const taskB1 = makeTask('b1', 'b.com', 100);
|
|
|
|
manager.addTask(taskA1);
|
|
manager.addTask(taskA2);
|
|
manager.addTask(taskB1);
|
|
|
|
await Promise.all([
|
|
manager.triggerTaskConstrained(taskA1),
|
|
manager.triggerTaskConstrained(taskA2),
|
|
manager.triggerTaskConstrained(taskB1),
|
|
]);
|
|
|
|
// a1 and a2 should be sequential (same key)
|
|
const a1EndIdx = log.indexOf('a1-end');
|
|
const a2StartIdx = log.indexOf('a2-start');
|
|
expect(a2StartIdx).toBeGreaterThanOrEqual(a1EndIdx);
|
|
|
|
// b1 should start concurrently with a1 (different key)
|
|
const a1StartIdx = log.indexOf('a1-start');
|
|
const b1StartIdx = log.indexOf('b1-start');
|
|
// Both should start before a1 ends
|
|
expect(b1StartIdx).toBeLessThan(a1EndIdx);
|
|
|
|
await manager.stop();
|
|
});
|
|
|
|
// Test 6: Cooldown enforcement
|
|
tap.test('should enforce cooldown between task executions', async () => {
|
|
const manager = new taskbuffer.TaskManager();
|
|
const timestamps: number[] = [];
|
|
|
|
const constraint = new taskbuffer.TaskConstraintGroup<{ key: string }>({
|
|
name: 'cooldown-test',
|
|
maxConcurrent: 1,
|
|
cooldownMs: 300,
|
|
constraintKeyForExecution: (task) => task.data.key,
|
|
});
|
|
manager.addConstraintGroup(constraint);
|
|
|
|
const makeTask = (name: string) =>
|
|
new taskbuffer.Task<undefined, [], { key: string }>({
|
|
name,
|
|
data: { key: 'shared' },
|
|
taskFunction: async () => {
|
|
timestamps.push(Date.now());
|
|
},
|
|
});
|
|
|
|
const t1 = makeTask('cool-1');
|
|
const t2 = makeTask('cool-2');
|
|
const t3 = makeTask('cool-3');
|
|
|
|
manager.addTask(t1);
|
|
manager.addTask(t2);
|
|
manager.addTask(t3);
|
|
|
|
await Promise.all([
|
|
manager.triggerTaskConstrained(t1),
|
|
manager.triggerTaskConstrained(t2),
|
|
manager.triggerTaskConstrained(t3),
|
|
]);
|
|
|
|
// Each execution should be at least ~300ms apart (with 200ms tolerance)
|
|
for (let i = 1; i < timestamps.length; i++) {
|
|
const gap = timestamps[i] - timestamps[i - 1];
|
|
expect(gap).toBeGreaterThanOrEqual(250); // 300ms cooldown minus 50ms tolerance
|
|
}
|
|
|
|
await manager.stop();
|
|
});
|
|
|
|
// Test 7: Multiple constraint groups on one task
|
|
tap.test('should apply multiple constraint groups to one task', async () => {
|
|
const manager = new taskbuffer.TaskManager();
|
|
let running = 0;
|
|
let maxRunning = 0;
|
|
|
|
const globalConstraint = new taskbuffer.TaskConstraintGroup({
|
|
name: 'global',
|
|
maxConcurrent: 3,
|
|
constraintKeyForExecution: () => 'all',
|
|
});
|
|
|
|
const groupConstraint = new taskbuffer.TaskConstraintGroup<{ group: string }>({
|
|
name: 'group',
|
|
maxConcurrent: 1,
|
|
constraintKeyForExecution: (task) => task.data.group,
|
|
});
|
|
|
|
manager.addConstraintGroup(globalConstraint);
|
|
manager.addConstraintGroup(groupConstraint);
|
|
|
|
const makeTask = (name: string, group: string) =>
|
|
new taskbuffer.Task<undefined, [], { group: string }>({
|
|
name,
|
|
data: { group },
|
|
taskFunction: async () => {
|
|
running++;
|
|
maxRunning = Math.max(maxRunning, running);
|
|
await smartdelay.delayFor(100);
|
|
running--;
|
|
},
|
|
});
|
|
|
|
// Same group - should be serialized by group constraint
|
|
const t1 = makeTask('multi-1', 'A');
|
|
const t2 = makeTask('multi-2', 'A');
|
|
|
|
manager.addTask(t1);
|
|
manager.addTask(t2);
|
|
|
|
await Promise.all([
|
|
manager.triggerTaskConstrained(t1),
|
|
manager.triggerTaskConstrained(t2),
|
|
]);
|
|
|
|
// With group maxConcurrent: 1, only 1 should run at a time
|
|
expect(maxRunning).toBeLessThanOrEqual(1);
|
|
await manager.stop();
|
|
});
|
|
|
|
// Test 8: Matcher returns null — task runs unconstrained
|
|
tap.test('should run task unconstrained when matcher returns null', async () => {
|
|
const manager = new taskbuffer.TaskManager();
|
|
|
|
const constraint = new taskbuffer.TaskConstraintGroup<{ skip: boolean }>({
|
|
name: 'selective',
|
|
maxConcurrent: 1,
|
|
constraintKeyForExecution: (task) => (task.data.skip ? null : 'constrained'),
|
|
});
|
|
manager.addConstraintGroup(constraint);
|
|
|
|
let unconstrained = false;
|
|
const task = new taskbuffer.Task<undefined, [], { skip: boolean }>({
|
|
name: 'skip-task',
|
|
data: { skip: true },
|
|
taskFunction: async () => {
|
|
unconstrained = true;
|
|
},
|
|
});
|
|
|
|
manager.addTask(task);
|
|
await manager.triggerTaskConstrained(task);
|
|
expect(unconstrained).toBeTrue();
|
|
await manager.stop();
|
|
});
|
|
|
|
// Test 9: Error handling — failed task releases slot, queue drains
|
|
tap.test('should release slot and drain queue when task fails', async () => {
|
|
const manager = new taskbuffer.TaskManager();
|
|
const log: string[] = [];
|
|
|
|
const constraint = new taskbuffer.TaskConstraintGroup<{ key: string }>({
|
|
name: 'error-drain',
|
|
maxConcurrent: 1,
|
|
constraintKeyForExecution: (task) => task.data.key,
|
|
});
|
|
manager.addConstraintGroup(constraint);
|
|
|
|
const failTask = new taskbuffer.Task<undefined, [], { key: string }>({
|
|
name: 'fail-task',
|
|
data: { key: 'shared' },
|
|
catchErrors: true,
|
|
taskFunction: async () => {
|
|
log.push('fail');
|
|
throw new Error('intentional');
|
|
},
|
|
});
|
|
|
|
const successTask = new taskbuffer.Task<undefined, [], { key: string }>({
|
|
name: 'success-task',
|
|
data: { key: 'shared' },
|
|
taskFunction: async () => {
|
|
log.push('success');
|
|
},
|
|
});
|
|
|
|
manager.addTask(failTask);
|
|
manager.addTask(successTask);
|
|
|
|
await Promise.all([
|
|
manager.triggerTaskConstrained(failTask),
|
|
manager.triggerTaskConstrained(successTask),
|
|
]);
|
|
|
|
expect(log).toContain('fail');
|
|
expect(log).toContain('success');
|
|
await manager.stop();
|
|
});
|
|
|
|
// Test 10: TaskManager integration — addConstraintGroup + triggerTaskByName
|
|
tap.test('should route triggerTaskByName through constraints', async () => {
|
|
const manager = new taskbuffer.TaskManager();
|
|
let running = 0;
|
|
let maxRunning = 0;
|
|
|
|
const constraint = new taskbuffer.TaskConstraintGroup({
|
|
name: 'manager-integration',
|
|
maxConcurrent: 1,
|
|
constraintKeyForExecution: () => 'all',
|
|
});
|
|
manager.addConstraintGroup(constraint);
|
|
|
|
const t1 = new taskbuffer.Task({
|
|
name: 'managed-1',
|
|
taskFunction: async () => {
|
|
running++;
|
|
maxRunning = Math.max(maxRunning, running);
|
|
await smartdelay.delayFor(100);
|
|
running--;
|
|
},
|
|
});
|
|
|
|
const t2 = new taskbuffer.Task({
|
|
name: 'managed-2',
|
|
taskFunction: async () => {
|
|
running++;
|
|
maxRunning = Math.max(maxRunning, running);
|
|
await smartdelay.delayFor(100);
|
|
running--;
|
|
},
|
|
});
|
|
|
|
manager.addTask(t1);
|
|
manager.addTask(t2);
|
|
|
|
await Promise.all([
|
|
manager.triggerTaskByName('managed-1'),
|
|
manager.triggerTaskByName('managed-2'),
|
|
]);
|
|
|
|
expect(maxRunning).toBeLessThanOrEqual(1);
|
|
await manager.stop();
|
|
});
|
|
|
|
// Test 11: removeConstraintGroup removes by name
|
|
tap.test('should remove a constraint group by name', async () => {
|
|
const manager = new taskbuffer.TaskManager();
|
|
|
|
const constraint = new taskbuffer.TaskConstraintGroup({
|
|
name: 'removable',
|
|
maxConcurrent: 1,
|
|
constraintKeyForExecution: () => 'all',
|
|
});
|
|
|
|
manager.addConstraintGroup(constraint);
|
|
expect(manager.constraintGroups.length).toEqual(1);
|
|
|
|
manager.removeConstraintGroup('removable');
|
|
expect(manager.constraintGroups.length).toEqual(0);
|
|
await manager.stop();
|
|
});
|
|
|
|
// Test 12: TaskConstraintGroup reset clears state
|
|
tap.test('should reset constraint group state', async () => {
|
|
const constraint = new taskbuffer.TaskConstraintGroup({
|
|
name: 'resettable',
|
|
maxConcurrent: 2,
|
|
cooldownMs: 1000,
|
|
constraintKeyForExecution: () => 'key',
|
|
});
|
|
|
|
// Simulate usage
|
|
constraint.acquireSlot('key');
|
|
expect(constraint.getRunningCount('key')).toEqual(1);
|
|
|
|
constraint.releaseSlot('key');
|
|
expect(constraint.getCooldownRemaining('key')).toBeGreaterThan(0);
|
|
|
|
constraint.reset();
|
|
expect(constraint.getRunningCount('key')).toEqual(0);
|
|
expect(constraint.getCooldownRemaining('key')).toEqual(0);
|
|
});
|
|
|
|
// Test 13: Queued task returns correct result
|
|
tap.test('should return correct result from queued tasks', async () => {
|
|
const manager = new taskbuffer.TaskManager();
|
|
|
|
const constraint = new taskbuffer.TaskConstraintGroup({
|
|
name: 'return-value-test',
|
|
maxConcurrent: 1,
|
|
constraintKeyForExecution: () => 'shared',
|
|
});
|
|
manager.addConstraintGroup(constraint);
|
|
|
|
const t1 = new taskbuffer.Task({
|
|
name: 'ret-1',
|
|
taskFunction: async () => {
|
|
await smartdelay.delayFor(100);
|
|
return 'result-A';
|
|
},
|
|
});
|
|
|
|
const t2 = new taskbuffer.Task({
|
|
name: 'ret-2',
|
|
taskFunction: async () => {
|
|
return 'result-B';
|
|
},
|
|
});
|
|
|
|
manager.addTask(t1);
|
|
manager.addTask(t2);
|
|
|
|
const [r1, r2] = await Promise.all([
|
|
manager.triggerTaskConstrained(t1),
|
|
manager.triggerTaskConstrained(t2),
|
|
]);
|
|
|
|
expect(r1).toEqual('result-A');
|
|
expect(r2).toEqual('result-B');
|
|
await manager.stop();
|
|
});
|
|
|
|
// Test 14: Error propagation for queued tasks (catchErrors: false)
|
|
tap.test('should propagate errors from queued tasks (catchErrors: false)', async () => {
|
|
const manager = new taskbuffer.TaskManager();
|
|
|
|
const constraint = new taskbuffer.TaskConstraintGroup({
|
|
name: 'error-propagation',
|
|
maxConcurrent: 1,
|
|
constraintKeyForExecution: () => 'shared',
|
|
});
|
|
manager.addConstraintGroup(constraint);
|
|
|
|
const t1 = new taskbuffer.Task({
|
|
name: 'err-first',
|
|
taskFunction: async () => {
|
|
await smartdelay.delayFor(100);
|
|
return 'ok';
|
|
},
|
|
});
|
|
|
|
const t2 = new taskbuffer.Task({
|
|
name: 'err-second',
|
|
catchErrors: false,
|
|
taskFunction: async () => {
|
|
throw new Error('queued-error');
|
|
},
|
|
});
|
|
|
|
manager.addTask(t1);
|
|
manager.addTask(t2);
|
|
|
|
const r1Promise = manager.triggerTaskConstrained(t1);
|
|
const r2Promise = manager.triggerTaskConstrained(t2);
|
|
|
|
const r1 = await r1Promise;
|
|
expect(r1).toEqual('ok');
|
|
|
|
let caughtError: Error | null = null;
|
|
try {
|
|
await r2Promise;
|
|
} catch (err) {
|
|
caughtError = err as Error;
|
|
}
|
|
|
|
expect(caughtError).toBeTruthy();
|
|
expect(caughtError!.message).toEqual('queued-error');
|
|
await manager.stop();
|
|
});
|
|
|
|
// Test 15: triggerTask() routes through constraints
|
|
tap.test('should route triggerTask() through constraints', async () => {
|
|
const manager = new taskbuffer.TaskManager();
|
|
let running = 0;
|
|
let maxRunning = 0;
|
|
|
|
const constraint = new taskbuffer.TaskConstraintGroup({
|
|
name: 'trigger-task-test',
|
|
maxConcurrent: 1,
|
|
constraintKeyForExecution: () => 'all',
|
|
});
|
|
manager.addConstraintGroup(constraint);
|
|
|
|
const makeTask = (id: number) =>
|
|
new taskbuffer.Task({
|
|
name: `tt-${id}`,
|
|
taskFunction: async () => {
|
|
running++;
|
|
maxRunning = Math.max(maxRunning, running);
|
|
await smartdelay.delayFor(100);
|
|
running--;
|
|
},
|
|
});
|
|
|
|
const t1 = makeTask(1);
|
|
const t2 = makeTask(2);
|
|
|
|
manager.addTask(t1);
|
|
manager.addTask(t2);
|
|
|
|
await Promise.all([
|
|
manager.triggerTask(t1),
|
|
manager.triggerTask(t2),
|
|
]);
|
|
|
|
expect(maxRunning).toBeLessThanOrEqual(1);
|
|
await manager.stop();
|
|
});
|
|
|
|
// Test 16: addExecuteRemoveTask() routes through constraints
|
|
tap.test('should route addExecuteRemoveTask() through constraints', async () => {
|
|
const manager = new taskbuffer.TaskManager();
|
|
let running = 0;
|
|
let maxRunning = 0;
|
|
|
|
const constraint = new taskbuffer.TaskConstraintGroup({
|
|
name: 'add-execute-remove-test',
|
|
maxConcurrent: 1,
|
|
constraintKeyForExecution: () => 'all',
|
|
});
|
|
manager.addConstraintGroup(constraint);
|
|
|
|
const makeTask = (id: number) =>
|
|
new taskbuffer.Task({
|
|
name: `aer-${id}`,
|
|
taskFunction: async () => {
|
|
running++;
|
|
maxRunning = Math.max(maxRunning, running);
|
|
await smartdelay.delayFor(100);
|
|
running--;
|
|
return `done-${id}`;
|
|
},
|
|
});
|
|
|
|
const t1 = makeTask(1);
|
|
const t2 = makeTask(2);
|
|
|
|
const [report1, report2] = await Promise.all([
|
|
manager.addExecuteRemoveTask(t1),
|
|
manager.addExecuteRemoveTask(t2),
|
|
]);
|
|
|
|
expect(maxRunning).toBeLessThanOrEqual(1);
|
|
expect(report1.result).toEqual('done-1');
|
|
expect(report2.result).toEqual('done-2');
|
|
await manager.stop();
|
|
});
|
|
|
|
// Test 17: FIFO ordering of queued tasks
|
|
tap.test('should execute queued tasks in FIFO order', async () => {
|
|
const manager = new taskbuffer.TaskManager();
|
|
const executionOrder: string[] = [];
|
|
|
|
const constraint = new taskbuffer.TaskConstraintGroup({
|
|
name: 'fifo-test',
|
|
maxConcurrent: 1,
|
|
constraintKeyForExecution: () => 'shared',
|
|
});
|
|
manager.addConstraintGroup(constraint);
|
|
|
|
const makeTask = (id: string) =>
|
|
new taskbuffer.Task({
|
|
name: `fifo-${id}`,
|
|
taskFunction: async () => {
|
|
executionOrder.push(id);
|
|
await smartdelay.delayFor(50);
|
|
},
|
|
});
|
|
|
|
const tA = makeTask('A');
|
|
const tB = makeTask('B');
|
|
const tC = makeTask('C');
|
|
|
|
manager.addTask(tA);
|
|
manager.addTask(tB);
|
|
manager.addTask(tC);
|
|
|
|
await Promise.all([
|
|
manager.triggerTaskConstrained(tA),
|
|
manager.triggerTaskConstrained(tB),
|
|
manager.triggerTaskConstrained(tC),
|
|
]);
|
|
|
|
expect(executionOrder).toEqual(['A', 'B', 'C']);
|
|
await manager.stop();
|
|
});
|
|
|
|
// Test 18: Combined concurrency + cooldown
|
|
tap.test('should enforce both concurrency and cooldown together', async () => {
|
|
const manager = new taskbuffer.TaskManager();
|
|
let running = 0;
|
|
let maxRunning = 0;
|
|
const timestamps: number[] = [];
|
|
|
|
const constraint = new taskbuffer.TaskConstraintGroup({
|
|
name: 'combined-test',
|
|
maxConcurrent: 2,
|
|
cooldownMs: 200,
|
|
constraintKeyForExecution: () => 'shared',
|
|
});
|
|
manager.addConstraintGroup(constraint);
|
|
|
|
const makeTask = (id: number) =>
|
|
new taskbuffer.Task({
|
|
name: `combo-${id}`,
|
|
taskFunction: async () => {
|
|
running++;
|
|
maxRunning = Math.max(maxRunning, running);
|
|
timestamps.push(Date.now());
|
|
await smartdelay.delayFor(100);
|
|
running--;
|
|
},
|
|
});
|
|
|
|
const tasks = [makeTask(1), makeTask(2), makeTask(3), makeTask(4)];
|
|
for (const t of tasks) {
|
|
manager.addTask(t);
|
|
}
|
|
|
|
await Promise.all(tasks.map((t) => manager.triggerTaskConstrained(t)));
|
|
|
|
// Concurrency never exceeded 2
|
|
expect(maxRunning).toBeLessThanOrEqual(2);
|
|
|
|
// First 2 tasks start nearly together, 3rd task starts after first batch completes + cooldown
|
|
// First batch completes ~100ms after start, then 200ms cooldown
|
|
const gap = timestamps[2] - timestamps[0];
|
|
expect(gap).toBeGreaterThanOrEqual(250); // 100ms task + 200ms cooldown - 50ms tolerance
|
|
|
|
await manager.stop();
|
|
});
|
|
|
|
// Test 19: Constraint removal unblocks queued tasks
|
|
tap.test('should unblock queued tasks when constraint group is removed', async () => {
|
|
const manager = new taskbuffer.TaskManager();
|
|
const log: string[] = [];
|
|
|
|
const constraint = new taskbuffer.TaskConstraintGroup({
|
|
name: 'removable-constraint',
|
|
maxConcurrent: 1,
|
|
constraintKeyForExecution: () => 'shared',
|
|
});
|
|
manager.addConstraintGroup(constraint);
|
|
|
|
const t1 = new taskbuffer.Task({
|
|
name: 'block-1',
|
|
taskFunction: async () => {
|
|
log.push('t1-start');
|
|
// Remove constraint while t1 is running so t2 runs unconstrained after drain
|
|
manager.removeConstraintGroup('removable-constraint');
|
|
await smartdelay.delayFor(100);
|
|
log.push('t1-end');
|
|
},
|
|
});
|
|
|
|
const t2 = new taskbuffer.Task({
|
|
name: 'block-2',
|
|
taskFunction: async () => {
|
|
log.push('t2-start');
|
|
log.push('t2-end');
|
|
},
|
|
});
|
|
|
|
manager.addTask(t1);
|
|
manager.addTask(t2);
|
|
|
|
await Promise.all([
|
|
manager.triggerTaskConstrained(t1),
|
|
manager.triggerTaskConstrained(t2),
|
|
]);
|
|
|
|
// Both tasks completed (drain didn't deadlock after constraint removal)
|
|
expect(log).toContain('t1-start');
|
|
expect(log).toContain('t1-end');
|
|
expect(log).toContain('t2-start');
|
|
expect(log).toContain('t2-end');
|
|
|
|
// t2 started after t1 completed (drain fires after t1 finishes)
|
|
const t1EndIdx = log.indexOf('t1-end');
|
|
const t2StartIdx = log.indexOf('t2-start');
|
|
expect(t2StartIdx).toBeGreaterThanOrEqual(t1EndIdx);
|
|
|
|
await manager.stop();
|
|
});
|
|
|
|
// Test 20: Intra-task concurrency by input — same task, different inputs, key extracts TLD
|
|
tap.test('should serialize same-TLD inputs and parallelize different-TLD inputs', async () => {
|
|
const manager = new taskbuffer.TaskManager();
|
|
const log: string[] = [];
|
|
|
|
const extractTLD = (domain: string) => {
|
|
const parts = domain.split('.');
|
|
return parts.slice(-2).join('.');
|
|
};
|
|
|
|
const constraint = new taskbuffer.TaskConstraintGroup({
|
|
name: 'tld-mutex',
|
|
maxConcurrent: 1,
|
|
constraintKeyForExecution: (_task, input?: string) => {
|
|
if (!input) return null;
|
|
return extractTLD(input);
|
|
},
|
|
});
|
|
manager.addConstraintGroup(constraint);
|
|
|
|
const getCert = new taskbuffer.Task({
|
|
name: 'get-cert',
|
|
taskFunction: async (domain: string) => {
|
|
log.push(`${domain}-start`);
|
|
await smartdelay.delayFor(100);
|
|
log.push(`${domain}-end`);
|
|
},
|
|
});
|
|
manager.addTask(getCert);
|
|
|
|
await Promise.all([
|
|
manager.triggerTaskConstrained(getCert, 'a.example.com'),
|
|
manager.triggerTaskConstrained(getCert, 'b.example.com'),
|
|
manager.triggerTaskConstrained(getCert, 'c.other.org'),
|
|
]);
|
|
|
|
// a.example.com and b.example.com share TLD "example.com" → serialized
|
|
const aEndIdx = log.indexOf('a.example.com-end');
|
|
const bStartIdx = log.indexOf('b.example.com-start');
|
|
expect(bStartIdx).toBeGreaterThanOrEqual(aEndIdx);
|
|
|
|
// c.other.org has different TLD → runs in parallel with a.example.com
|
|
const aStartIdx = log.indexOf('a.example.com-start');
|
|
const cStartIdx = log.indexOf('c.other.org-start');
|
|
expect(cStartIdx).toBeLessThan(aEndIdx);
|
|
|
|
await manager.stop();
|
|
});
|
|
|
|
// Test 21: shouldExecute skips queued task based on external state
|
|
tap.test('should skip queued task when shouldExecute returns false', async () => {
|
|
const manager = new taskbuffer.TaskManager();
|
|
const execLog: string[] = [];
|
|
const certCache = new Map<string, string>();
|
|
|
|
const extractTLD = (domain: string) => {
|
|
const parts = domain.split('.');
|
|
return parts.slice(-2).join('.');
|
|
};
|
|
|
|
const constraint = new taskbuffer.TaskConstraintGroup({
|
|
name: 'cert-mutex',
|
|
maxConcurrent: 1,
|
|
constraintKeyForExecution: (_task, input?: string) => {
|
|
if (!input) return null;
|
|
return extractTLD(input);
|
|
},
|
|
shouldExecute: (_task, input?: string) => {
|
|
if (!input) return true;
|
|
return certCache.get(extractTLD(input)) !== 'wildcard';
|
|
},
|
|
});
|
|
manager.addConstraintGroup(constraint);
|
|
|
|
const getCert = new taskbuffer.Task({
|
|
name: 'get-cert-skip',
|
|
taskFunction: async (domain: string) => {
|
|
execLog.push(domain);
|
|
// First execution sets wildcard in cache
|
|
certCache.set(extractTLD(domain), 'wildcard');
|
|
await smartdelay.delayFor(100);
|
|
return `cert-for-${domain}`;
|
|
},
|
|
});
|
|
manager.addTask(getCert);
|
|
|
|
const [r1, r2] = await Promise.all([
|
|
manager.triggerTaskConstrained(getCert, 'app.example.com'),
|
|
manager.triggerTaskConstrained(getCert, 'api.example.com'),
|
|
]);
|
|
|
|
// First ran, second was skipped
|
|
expect(execLog).toEqual(['app.example.com']);
|
|
expect(r1).toEqual('cert-for-app.example.com');
|
|
expect(r2).toEqual(undefined);
|
|
|
|
await manager.stop();
|
|
});
|
|
|
|
// Test 22: shouldExecute on immediate (non-queued) trigger
|
|
tap.test('should skip immediate trigger when shouldExecute returns false', async () => {
|
|
const manager = new taskbuffer.TaskManager();
|
|
let executed = false;
|
|
|
|
const constraint = new taskbuffer.TaskConstraintGroup({
|
|
name: 'always-skip',
|
|
maxConcurrent: 10,
|
|
constraintKeyForExecution: () => 'all',
|
|
shouldExecute: () => false,
|
|
});
|
|
manager.addConstraintGroup(constraint);
|
|
|
|
const task = new taskbuffer.Task({
|
|
name: 'skip-immediate',
|
|
taskFunction: async () => {
|
|
executed = true;
|
|
return 'should-not-see';
|
|
},
|
|
});
|
|
manager.addTask(task);
|
|
|
|
const result = await manager.triggerTaskConstrained(task);
|
|
expect(executed).toBeFalse();
|
|
expect(result).toEqual(undefined);
|
|
|
|
await manager.stop();
|
|
});
|
|
|
|
// Test 23: Mixed task.data + input constraint key
|
|
tap.test('should use both task.data and input in constraint key', async () => {
|
|
const manager = new taskbuffer.TaskManager();
|
|
let running = 0;
|
|
let maxRunning = 0;
|
|
|
|
const constraint = new taskbuffer.TaskConstraintGroup<{ provider: string }>({
|
|
name: 'provider-domain',
|
|
maxConcurrent: 1,
|
|
constraintKeyForExecution: (task, input?: string) => {
|
|
return `${task.data.provider}:${input || 'default'}`;
|
|
},
|
|
});
|
|
manager.addConstraintGroup(constraint);
|
|
|
|
const makeTask = (name: string, provider: string) =>
|
|
new taskbuffer.Task<undefined, [], { provider: string }>({
|
|
name,
|
|
data: { provider },
|
|
taskFunction: async () => {
|
|
running++;
|
|
maxRunning = Math.max(maxRunning, running);
|
|
await smartdelay.delayFor(100);
|
|
running--;
|
|
},
|
|
});
|
|
|
|
// Same provider + same domain input → should serialize
|
|
const t1 = makeTask('mixed-1', 'acme');
|
|
const t2 = makeTask('mixed-2', 'acme');
|
|
// Different provider + same domain → parallel
|
|
const t3 = makeTask('mixed-3', 'cloudflare');
|
|
|
|
manager.addTask(t1);
|
|
manager.addTask(t2);
|
|
manager.addTask(t3);
|
|
|
|
await Promise.all([
|
|
manager.triggerTaskConstrained(t1, 'example.com'),
|
|
manager.triggerTaskConstrained(t2, 'example.com'),
|
|
manager.triggerTaskConstrained(t3, 'example.com'),
|
|
]);
|
|
|
|
// t1 and t2 share key "acme:example.com" → serialized (max 1 at a time)
|
|
// t3 has key "cloudflare:example.com" → parallel with t1
|
|
// So maxRunning should be exactly 2 (t1 + t3, or t3 + t2)
|
|
expect(maxRunning).toBeLessThanOrEqual(2);
|
|
expect(maxRunning).toBeGreaterThanOrEqual(2);
|
|
|
|
await manager.stop();
|
|
});
|
|
|
|
// =============================================================================
|
|
// Rate Limiting Tests
|
|
// =============================================================================
|
|
|
|
// Test 24: Basic N-per-window rate limiting
|
|
tap.test('should enforce N-per-window rate limit', async () => {
|
|
const manager = new taskbuffer.TaskManager();
|
|
const execTimestamps: number[] = [];
|
|
|
|
const constraint = new taskbuffer.TaskConstraintGroup({
|
|
name: 'rate-limit-basic',
|
|
maxConcurrent: Infinity,
|
|
constraintKeyForExecution: () => 'shared',
|
|
rateLimit: {
|
|
maxPerWindow: 3,
|
|
windowMs: 1000,
|
|
},
|
|
});
|
|
manager.addConstraintGroup(constraint);
|
|
|
|
const makeTask = (id: number) =>
|
|
new taskbuffer.Task({
|
|
name: `rl-${id}`,
|
|
taskFunction: async () => {
|
|
execTimestamps.push(Date.now());
|
|
return `done-${id}`;
|
|
},
|
|
});
|
|
|
|
const tasks = [makeTask(1), makeTask(2), makeTask(3), makeTask(4), makeTask(5)];
|
|
for (const t of tasks) manager.addTask(t);
|
|
|
|
const results = await Promise.all(tasks.map((t) => manager.triggerTaskConstrained(t)));
|
|
|
|
// All 5 should eventually complete
|
|
expect(results).toEqual(['done-1', 'done-2', 'done-3', 'done-4', 'done-5']);
|
|
|
|
// First 3 should execute nearly simultaneously
|
|
const firstBatchSpread = execTimestamps[2] - execTimestamps[0];
|
|
expect(firstBatchSpread).toBeLessThan(100);
|
|
|
|
// 4th and 5th should wait for the window to slide (at least ~900ms after first)
|
|
const fourthDelay = execTimestamps[3] - execTimestamps[0];
|
|
expect(fourthDelay).toBeGreaterThanOrEqual(900);
|
|
|
|
await manager.stop();
|
|
});
|
|
|
|
// Test 25: Rate limit + maxConcurrent interaction
|
|
tap.test('should enforce both rate limit and maxConcurrent independently', async () => {
|
|
const manager = new taskbuffer.TaskManager();
|
|
let running = 0;
|
|
let maxRunning = 0;
|
|
const execTimestamps: number[] = [];
|
|
|
|
const constraint = new taskbuffer.TaskConstraintGroup({
|
|
name: 'rate-concurrent',
|
|
maxConcurrent: 2,
|
|
constraintKeyForExecution: () => 'shared',
|
|
rateLimit: {
|
|
maxPerWindow: 3,
|
|
windowMs: 2000,
|
|
},
|
|
});
|
|
manager.addConstraintGroup(constraint);
|
|
|
|
const makeTask = (id: number) =>
|
|
new taskbuffer.Task({
|
|
name: `rc-${id}`,
|
|
taskFunction: async () => {
|
|
running++;
|
|
maxRunning = Math.max(maxRunning, running);
|
|
execTimestamps.push(Date.now());
|
|
await smartdelay.delayFor(50);
|
|
running--;
|
|
},
|
|
});
|
|
|
|
const tasks = [makeTask(1), makeTask(2), makeTask(3), makeTask(4)];
|
|
for (const t of tasks) manager.addTask(t);
|
|
|
|
await Promise.all(tasks.map((t) => manager.triggerTaskConstrained(t)));
|
|
|
|
// Concurrency limit should be enforced
|
|
expect(maxRunning).toBeLessThanOrEqual(2);
|
|
|
|
// 4th task should wait for rate limit window (only 3 allowed per 2s)
|
|
const fourthDelay = execTimestamps[3] - execTimestamps[0];
|
|
expect(fourthDelay).toBeGreaterThanOrEqual(1900);
|
|
|
|
await manager.stop();
|
|
});
|
|
|
|
// Test 26: Rate limit + cooldownMs interaction
|
|
tap.test('should enforce both rate limit and cooldown together', async () => {
|
|
const manager = new taskbuffer.TaskManager();
|
|
const execTimestamps: number[] = [];
|
|
|
|
const constraint = new taskbuffer.TaskConstraintGroup({
|
|
name: 'rate-cooldown',
|
|
maxConcurrent: 1,
|
|
cooldownMs: 200,
|
|
constraintKeyForExecution: () => 'shared',
|
|
rateLimit: {
|
|
maxPerWindow: 2,
|
|
windowMs: 2000,
|
|
},
|
|
});
|
|
manager.addConstraintGroup(constraint);
|
|
|
|
const makeTask = (id: number) =>
|
|
new taskbuffer.Task({
|
|
name: `rcd-${id}`,
|
|
taskFunction: async () => {
|
|
execTimestamps.push(Date.now());
|
|
},
|
|
});
|
|
|
|
const tasks = [makeTask(1), makeTask(2), makeTask(3)];
|
|
for (const t of tasks) manager.addTask(t);
|
|
|
|
await Promise.all(tasks.map((t) => manager.triggerTaskConstrained(t)));
|
|
|
|
// Cooldown between first and second: at least 200ms
|
|
const gap12 = execTimestamps[1] - execTimestamps[0];
|
|
expect(gap12).toBeGreaterThanOrEqual(150);
|
|
|
|
// Third task blocked by rate limit (only 2 per 2000ms window) AND cooldown
|
|
const gap13 = execTimestamps[2] - execTimestamps[0];
|
|
expect(gap13).toBeGreaterThanOrEqual(1900);
|
|
|
|
await manager.stop();
|
|
});
|
|
|
|
// Test 27: Per-key rate limit independence
|
|
tap.test('should apply rate limit per key independently', async () => {
|
|
const manager = new taskbuffer.TaskManager();
|
|
const execLog: string[] = [];
|
|
|
|
const constraint = new taskbuffer.TaskConstraintGroup({
|
|
name: 'rate-per-key',
|
|
constraintKeyForExecution: (_task, input?: string) => input,
|
|
rateLimit: {
|
|
maxPerWindow: 1,
|
|
windowMs: 2000,
|
|
},
|
|
});
|
|
manager.addConstraintGroup(constraint);
|
|
|
|
const task = new taskbuffer.Task({
|
|
name: 'rate-key-task',
|
|
taskFunction: async (input: string) => {
|
|
execLog.push(input);
|
|
},
|
|
});
|
|
manager.addTask(task);
|
|
|
|
// Trigger 2 for key-A and 1 for key-B
|
|
const [r1, r2, r3] = await Promise.all([
|
|
manager.triggerTaskConstrained(task, 'key-A'),
|
|
manager.triggerTaskConstrained(task, 'key-B'),
|
|
manager.triggerTaskConstrained(task, 'key-A'), // should wait for window
|
|
]);
|
|
|
|
// key-A and key-B first calls should both execute immediately
|
|
expect(execLog[0]).toEqual('key-A');
|
|
expect(execLog[1]).toEqual('key-B');
|
|
// key-A second call eventually executes
|
|
expect(execLog).toContain('key-A');
|
|
expect(execLog.length).toEqual(3);
|
|
|
|
await manager.stop();
|
|
});
|
|
|
|
// Test 28: getNextAvailableDelay returns correct value
|
|
tap.test('should return correct getNextAvailableDelay and canRun after waiting', async () => {
|
|
const constraint = new taskbuffer.TaskConstraintGroup({
|
|
name: 'delay-check',
|
|
constraintKeyForExecution: () => 'key',
|
|
rateLimit: {
|
|
maxPerWindow: 1,
|
|
windowMs: 500,
|
|
},
|
|
});
|
|
|
|
// Initially: can run, no delay
|
|
expect(constraint.canRun('key')).toBeTrue();
|
|
expect(constraint.getNextAvailableDelay('key')).toEqual(0);
|
|
|
|
// Acquire and release to record a completion
|
|
constraint.acquireSlot('key');
|
|
constraint.releaseSlot('key');
|
|
|
|
// Now: rate limit saturated
|
|
expect(constraint.canRun('key')).toBeFalse();
|
|
const delay = constraint.getNextAvailableDelay('key');
|
|
expect(delay).toBeGreaterThan(0);
|
|
expect(delay).toBeLessThanOrEqual(500);
|
|
|
|
// Wait for window to slide
|
|
await smartdelay.delayFor(delay + 50);
|
|
|
|
expect(constraint.canRun('key')).toBeTrue();
|
|
expect(constraint.getNextAvailableDelay('key')).toEqual(0);
|
|
});
|
|
|
|
// Test 29: reset() clears rate-limit timestamps
|
|
tap.test('should clear rate limit timestamps on reset', async () => {
|
|
const constraint = new taskbuffer.TaskConstraintGroup({
|
|
name: 'reset-rate',
|
|
constraintKeyForExecution: () => 'key',
|
|
rateLimit: {
|
|
maxPerWindow: 1,
|
|
windowMs: 60000,
|
|
},
|
|
});
|
|
|
|
constraint.acquireSlot('key');
|
|
constraint.releaseSlot('key');
|
|
expect(constraint.canRun('key')).toBeFalse();
|
|
|
|
constraint.reset();
|
|
expect(constraint.canRun('key')).toBeTrue();
|
|
expect(constraint.getRateLimitDelay('key')).toEqual(0);
|
|
});
|
|
|
|
// =============================================================================
|
|
// Result Sharing Tests
|
|
// =============================================================================
|
|
|
|
// Test 30: Basic result sharing — multiple waiters get first task's result
|
|
tap.test('should share result with queued tasks (share-latest mode)', async () => {
|
|
const manager = new taskbuffer.TaskManager();
|
|
let execCount = 0;
|
|
|
|
const constraint = new taskbuffer.TaskConstraintGroup({
|
|
name: 'share-basic',
|
|
maxConcurrent: 1,
|
|
constraintKeyForExecution: () => 'shared',
|
|
resultSharingMode: 'share-latest',
|
|
});
|
|
manager.addConstraintGroup(constraint);
|
|
|
|
const makeTask = (id: number) =>
|
|
new taskbuffer.Task({
|
|
name: `share-${id}`,
|
|
taskFunction: async () => {
|
|
execCount++;
|
|
await smartdelay.delayFor(100);
|
|
return 'shared-result';
|
|
},
|
|
});
|
|
|
|
const t1 = makeTask(1);
|
|
const t2 = makeTask(2);
|
|
const t3 = makeTask(3);
|
|
|
|
manager.addTask(t1);
|
|
manager.addTask(t2);
|
|
manager.addTask(t3);
|
|
|
|
const [r1, r2, r3] = await Promise.all([
|
|
manager.triggerTaskConstrained(t1),
|
|
manager.triggerTaskConstrained(t2),
|
|
manager.triggerTaskConstrained(t3),
|
|
]);
|
|
|
|
// Only 1 execution, all get same result
|
|
expect(execCount).toEqual(1);
|
|
expect(r1).toEqual('shared-result');
|
|
expect(r2).toEqual('shared-result');
|
|
expect(r3).toEqual('shared-result');
|
|
|
|
await manager.stop();
|
|
});
|
|
|
|
// Test 31: Different keys get independent results
|
|
tap.test('should share results independently per key', async () => {
|
|
const manager = new taskbuffer.TaskManager();
|
|
let execCount = 0;
|
|
|
|
const constraint = new taskbuffer.TaskConstraintGroup({
|
|
name: 'share-per-key',
|
|
maxConcurrent: 1,
|
|
constraintKeyForExecution: (_task, input?: string) => input,
|
|
resultSharingMode: 'share-latest',
|
|
});
|
|
manager.addConstraintGroup(constraint);
|
|
|
|
const task = new taskbuffer.Task({
|
|
name: 'keyed-share',
|
|
taskFunction: async (input: string) => {
|
|
execCount++;
|
|
await smartdelay.delayFor(50);
|
|
return `result-for-${input}`;
|
|
},
|
|
});
|
|
manager.addTask(task);
|
|
|
|
const [r1, r2] = await Promise.all([
|
|
manager.triggerTaskConstrained(task, 'key-A'),
|
|
manager.triggerTaskConstrained(task, 'key-B'),
|
|
]);
|
|
|
|
// Different keys → both execute independently
|
|
expect(execCount).toEqual(2);
|
|
expect(r1).toEqual('result-for-key-A');
|
|
expect(r2).toEqual('result-for-key-B');
|
|
|
|
await manager.stop();
|
|
});
|
|
|
|
// Test 32: Default mode ('none') — no sharing
|
|
tap.test('should not share results when mode is none (default)', async () => {
|
|
const manager = new taskbuffer.TaskManager();
|
|
let execCount = 0;
|
|
|
|
const constraint = new taskbuffer.TaskConstraintGroup({
|
|
name: 'no-share',
|
|
maxConcurrent: 1,
|
|
constraintKeyForExecution: () => 'shared',
|
|
// resultSharingMode defaults to 'none'
|
|
});
|
|
manager.addConstraintGroup(constraint);
|
|
|
|
const makeTask = (id: number) =>
|
|
new taskbuffer.Task({
|
|
name: `noshare-${id}`,
|
|
taskFunction: async () => {
|
|
execCount++;
|
|
await smartdelay.delayFor(50);
|
|
return `result-${execCount}`;
|
|
},
|
|
});
|
|
|
|
const t1 = makeTask(1);
|
|
const t2 = makeTask(2);
|
|
|
|
manager.addTask(t1);
|
|
manager.addTask(t2);
|
|
|
|
const [r1, r2] = await Promise.all([
|
|
manager.triggerTaskConstrained(t1),
|
|
manager.triggerTaskConstrained(t2),
|
|
]);
|
|
|
|
// Both should execute independently
|
|
expect(execCount).toEqual(2);
|
|
expect(r1).toEqual('result-1');
|
|
expect(r2).toEqual('result-2');
|
|
|
|
await manager.stop();
|
|
});
|
|
|
|
// Test 33: Sharing takes priority over shouldExecute for queued tasks
|
|
tap.test('should not call shouldExecute for shared results', async () => {
|
|
const manager = new taskbuffer.TaskManager();
|
|
let shouldExecuteCalls = 0;
|
|
let execCount = 0;
|
|
|
|
const constraint = new taskbuffer.TaskConstraintGroup({
|
|
name: 'share-vs-should',
|
|
maxConcurrent: 1,
|
|
constraintKeyForExecution: () => 'shared',
|
|
resultSharingMode: 'share-latest',
|
|
shouldExecute: () => {
|
|
shouldExecuteCalls++;
|
|
return true;
|
|
},
|
|
});
|
|
manager.addConstraintGroup(constraint);
|
|
|
|
const makeTask = (id: number) =>
|
|
new taskbuffer.Task({
|
|
name: `svs-${id}`,
|
|
taskFunction: async () => {
|
|
execCount++;
|
|
await smartdelay.delayFor(100);
|
|
return 'shared-value';
|
|
},
|
|
});
|
|
|
|
const t1 = makeTask(1);
|
|
const t2 = makeTask(2);
|
|
const t3 = makeTask(3);
|
|
|
|
manager.addTask(t1);
|
|
manager.addTask(t2);
|
|
manager.addTask(t3);
|
|
|
|
const initialShouldExecuteCalls = shouldExecuteCalls;
|
|
|
|
await Promise.all([
|
|
manager.triggerTaskConstrained(t1),
|
|
manager.triggerTaskConstrained(t2),
|
|
manager.triggerTaskConstrained(t3),
|
|
]);
|
|
|
|
// Only 1 execution
|
|
expect(execCount).toEqual(1);
|
|
|
|
// shouldExecute called once for the first task, but not for shared results
|
|
// (t2 and t3 get shared result without going through executeWithConstraintTracking)
|
|
const totalShouldExecuteCalls = shouldExecuteCalls - initialShouldExecuteCalls;
|
|
expect(totalShouldExecuteCalls).toEqual(1);
|
|
|
|
await manager.stop();
|
|
});
|
|
|
|
// Test 34: Error results NOT shared — queued task executes after failure
|
|
tap.test('should not share error results with queued tasks', async () => {
|
|
const manager = new taskbuffer.TaskManager();
|
|
let execCount = 0;
|
|
|
|
const constraint = new taskbuffer.TaskConstraintGroup({
|
|
name: 'share-error',
|
|
maxConcurrent: 1,
|
|
constraintKeyForExecution: () => 'shared',
|
|
resultSharingMode: 'share-latest',
|
|
});
|
|
manager.addConstraintGroup(constraint);
|
|
|
|
const failTask = new taskbuffer.Task({
|
|
name: 'fail-share',
|
|
catchErrors: true,
|
|
taskFunction: async () => {
|
|
execCount++;
|
|
await smartdelay.delayFor(50);
|
|
throw new Error('fail');
|
|
},
|
|
});
|
|
|
|
const successTask = new taskbuffer.Task({
|
|
name: 'success-share',
|
|
taskFunction: async () => {
|
|
execCount++;
|
|
await smartdelay.delayFor(50);
|
|
return 'success-result';
|
|
},
|
|
});
|
|
|
|
manager.addTask(failTask);
|
|
manager.addTask(successTask);
|
|
|
|
const [r1, r2] = await Promise.all([
|
|
manager.triggerTaskConstrained(failTask),
|
|
manager.triggerTaskConstrained(successTask),
|
|
]);
|
|
|
|
// Both should have executed (error result not shared)
|
|
expect(execCount).toEqual(2);
|
|
expect(r2).toEqual('success-result');
|
|
|
|
await manager.stop();
|
|
});
|
|
|
|
// Test 35: Multiple constraint groups — sharing from one group applies
|
|
tap.test('should share result when any applicable group has sharing enabled', async () => {
|
|
const manager = new taskbuffer.TaskManager();
|
|
let execCount = 0;
|
|
|
|
const sharingGroup = new taskbuffer.TaskConstraintGroup({
|
|
name: 'sharing-group',
|
|
maxConcurrent: 1,
|
|
constraintKeyForExecution: () => 'shared',
|
|
resultSharingMode: 'share-latest',
|
|
});
|
|
|
|
const nonSharingGroup = new taskbuffer.TaskConstraintGroup({
|
|
name: 'non-sharing-group',
|
|
maxConcurrent: 5,
|
|
constraintKeyForExecution: () => 'all',
|
|
// resultSharingMode defaults to 'none'
|
|
});
|
|
|
|
manager.addConstraintGroup(sharingGroup);
|
|
manager.addConstraintGroup(nonSharingGroup);
|
|
|
|
const makeTask = (id: number) =>
|
|
new taskbuffer.Task({
|
|
name: `multi-share-${id}`,
|
|
taskFunction: async () => {
|
|
execCount++;
|
|
await smartdelay.delayFor(100);
|
|
return 'multi-group-result';
|
|
},
|
|
});
|
|
|
|
const t1 = makeTask(1);
|
|
const t2 = makeTask(2);
|
|
|
|
manager.addTask(t1);
|
|
manager.addTask(t2);
|
|
|
|
const [r1, r2] = await Promise.all([
|
|
manager.triggerTaskConstrained(t1),
|
|
manager.triggerTaskConstrained(t2),
|
|
]);
|
|
|
|
// Only 1 execution due to sharing from the sharing group
|
|
expect(execCount).toEqual(1);
|
|
expect(r1).toEqual('multi-group-result');
|
|
expect(r2).toEqual('multi-group-result');
|
|
|
|
await manager.stop();
|
|
});
|
|
|
|
// Test 36: Result sharing + rate limit combo
|
|
tap.test('should resolve rate-limited waiters with shared result', async () => {
|
|
const manager = new taskbuffer.TaskManager();
|
|
let execCount = 0;
|
|
|
|
const constraint = new taskbuffer.TaskConstraintGroup({
|
|
name: 'share-rate',
|
|
maxConcurrent: 1,
|
|
constraintKeyForExecution: () => 'shared',
|
|
resultSharingMode: 'share-latest',
|
|
rateLimit: {
|
|
maxPerWindow: 1,
|
|
windowMs: 5000,
|
|
},
|
|
});
|
|
manager.addConstraintGroup(constraint);
|
|
|
|
const makeTask = (id: number) =>
|
|
new taskbuffer.Task({
|
|
name: `sr-${id}`,
|
|
taskFunction: async () => {
|
|
execCount++;
|
|
await smartdelay.delayFor(50);
|
|
return 'rate-shared-result';
|
|
},
|
|
});
|
|
|
|
const t1 = makeTask(1);
|
|
const t2 = makeTask(2);
|
|
const t3 = makeTask(3);
|
|
|
|
manager.addTask(t1);
|
|
manager.addTask(t2);
|
|
manager.addTask(t3);
|
|
|
|
const startTime = Date.now();
|
|
const [r1, r2, r3] = await Promise.all([
|
|
manager.triggerTaskConstrained(t1),
|
|
manager.triggerTaskConstrained(t2),
|
|
manager.triggerTaskConstrained(t3),
|
|
]);
|
|
const elapsed = Date.now() - startTime;
|
|
|
|
// Only 1 execution; waiters get shared result without waiting for rate limit window
|
|
expect(execCount).toEqual(1);
|
|
expect(r1).toEqual('rate-shared-result');
|
|
expect(r2).toEqual('rate-shared-result');
|
|
expect(r3).toEqual('rate-shared-result');
|
|
|
|
// Should complete quickly (not waiting 5s for rate limit window)
|
|
expect(elapsed).toBeLessThan(1000);
|
|
|
|
await manager.stop();
|
|
});
|
|
|
|
// Start the tap runner; the exported promise lets the test harness await completion.
export default tap.start();
|