test: cover backup orchestration
This commit is contained in:
@@ -7,7 +7,7 @@ Fast package tests stay in each component repo. This repo is for stateful cross-
|
|||||||
## Scenarios
|
## Scenarios
|
||||||
|
|
||||||
- `registry-deploy-on-push`: starts Cloudly with isolated Mongo/S3 helpers, connects Coreflow as a cluster, pushes a Docker image to Cloudly's built-in registry, verifies Cloudly metadata updates, verifies Coreflow creates the workload service, verifies Coretraffic HTTPS routing, then pushes the same tag again and verifies service recreation and routing through the new digest.
|
- `registry-deploy-on-push`: starts Cloudly with isolated Mongo/S3 helpers, connects Coreflow as a cluster, pushes a Docker image to Cloudly's built-in registry, verifies Cloudly metadata updates, verifies Coreflow creates the workload service, verifies Coretraffic HTTPS routing, then pushes the same tag again and verifies service recreation and routing through the new digest.
|
||||||
- `corestore-volume-driver`: starts Corestore with isolated ports and a temporary Docker plugin socket, verifies the VolumeDriver protocol, snapshots/restores a managed volume through `containerarchive`, and checks Coreflow's generated corestore volume mount specs.
|
- `corestore-volume-driver`: starts Corestore with isolated ports and a temporary Docker plugin socket, verifies the VolumeDriver protocol, snapshots/restores a managed volume through `containerarchive`, checks Coreflow's generated corestore volume mount specs, and verifies Coreflow backup/restore orchestration for volume, database, and object-storage snapshots.
|
||||||
- `onebox-basic-lifecycle`: starts Onebox in dev mode, verifies core services, deploys a workload, checks HTTP plus HTTPS routing through ingress, removes the workload, and verifies cleanup.
|
- `onebox-basic-lifecycle`: starts Onebox in dev mode, verifies core services, deploys a workload, checks HTTP plus HTTPS routing through ingress, removes the workload, and verifies cleanup.
|
||||||
|
|
||||||
## Host Run
|
## Host Run
|
||||||
|
|||||||
@@ -321,11 +321,130 @@ const assertCorestoreVolumeDriver = async () => {
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
/**
 * Integration scenario: verifies Coreflow's backup/restore orchestration
 * against a dedicated, isolated Corestore instance.
 *
 * Flow: start Corestore on fresh ports → create a backup-enabled volume and
 * provision database/object-storage resources for a synthetic service → run a
 * full service backup → mutate the volume → restore with `clear: true` →
 * assert the volume contents were rolled back and stale files removed.
 *
 * NOTE(review): relies on module-scope names defined elsewhere in this file
 * (`buildDir`, `apiToken`, `scenarioName`, `getFreePort`, `controlPost`,
 * `pathExists`, `CoreStore`, `Coreflow`, `IService`) — confirm when viewing
 * the full file.
 */
const assertCoreflowBackupOrchestration = async () => {
  // Dedicated plugin socket and data dir so this scenario cannot collide with
  // the Corestore instance used by the volume-driver scenario above.
  const socketPath = join(buildDir, 'backup-plugins', 'corestore.sock');
  const dataDir = join(buildDir, 'backup-data');
  // Only the socket's parent dir is pre-created here; presumably CoreStore
  // creates `dataDir` itself on start — TODO confirm.
  await mkdir(dirname(socketPath), { recursive: true });

  // Fresh ports per run keep the scenario isolated from any other listeners.
  const controlPort = await getFreePort();
  const s3Port = await getFreePort();
  const dbPort = await getFreePort();
  const corestore = new CoreStore({
    dataDir,
    bindAddress: '127.0.0.1',
    publicHost: '127.0.0.1',
    controlPort,
    s3Port,
    dbPort,
    apiToken,
    volumePluginSocketPath: socketPath,
  });
  // Snapshot the current env values so the finally block can restore them
  // exactly (including the "was unset" case) — these variables are how
  // Coreflow's backup manager discovers this Corestore instance.
  const previousControlUrl = process.env.CORESTORE_CONTROL_URL;
  const previousApiToken = process.env.CORESTORE_API_TOKEN;

  try {
    await corestore.start();
    // Point Coreflow at this scenario's Corestore; restored in finally.
    process.env.CORESTORE_CONTROL_URL = `http://127.0.0.1:${controlPort}`;
    process.env.CORESTORE_API_TOKEN = apiToken;

    // Minimal synthetic service with a single backup-enabled volume mounted
    // at /data; only the fields the backup manager reads need real values.
    const volumeName = 'backup-orchestration-data';
    const service: IService = {
      id: 'svc-backup-orchestration',
      data: {
        name: 'backup-orchestration',
        description: 'Coreflow backup orchestration service',
        imageId: 'image-backup-orchestration',
        imageVersion: 'latest',
        environment: {},
        secretBundleId: 'secret-backup-orchestration',
        serviceCategory: 'workload',
        deploymentStrategy: 'custom',
        scaleFactor: 1,
        balancingStrategy: 'round-robin',
        ports: {
          web: 80,
        },
        volumes: [
          {
            source: volumeName,
            // Must match the mountPath sent to /volumes/create below.
            mountPath: '/data',
            backup: true,
          },
        ],
        domains: [],
        deploymentIds: [],
      },
    };

    // Create the managed volume through Corestore's control API; the response
    // carries the host mountpoint we seed with test data.
    const createVolumeResponse = await controlPost<any>(controlPort, '/volumes/create', {
      name: volumeName,
      serviceId: service.id,
      serviceName: service.data.name,
      mountPath: '/data',
      backup: true,
    });
    const mountpoint = createVolumeResponse.volume?.Mountpoint;
    assert(typeof mountpoint === 'string', `Could not create backup test volume: ${JSON.stringify(createVolumeResponse)}`);
    // Defensive: ensure the mountpoint exists on disk before writing to it.
    await mkdir(mountpoint, { recursive: true });
    // Seed pre-backup state; the restore assertion at the end expects exactly
    // this content back.
    await writeFile(join(mountpoint, 'state.txt'), 'before backup\n');

    // Provision the non-volume backup targets (database + object storage)
    // for the same service, so the backup covers all three snapshot types.
    const provisionResponse = await controlPost<any>(controlPort, '/resources/provision', {
      serviceId: service.id,
      serviceName: service.data.name,
      capabilities: ['database', 'objectstorage'],
    });
    assert(provisionResponse.resources?.length === 2, `Resource provisioning failed: ${JSON.stringify(provisionResponse)}`);

    // Coreflow reads CORESTORE_CONTROL_URL / CORESTORE_API_TOKEN set above —
    // presumably at construction or first backup call; verify against Coreflow.
    const coreflow = new Coreflow();
    const backupResult = await coreflow.backupManager.executeServiceBackup({
      backupId: 'backup-orchestration-smoke',
      service,
      tags: {
        scenario: scenarioName,
      },
    });

    // The backup must produce exactly one snapshot per provisioned capability
    // plus the volume; sort before comparing for order independence.
    const snapshotTypes = backupResult.snapshots.map((snapshotArg: any) => snapshotArg.type).sort();
    assert(
      JSON.stringify(snapshotTypes) === JSON.stringify(['database', 'objectstorage', 'volume']),
      `Unexpected backup snapshots: ${JSON.stringify(backupResult)}`,
    );

    // Mutate the volume after the backup: overwrite tracked state and add a
    // file that did not exist at backup time (must be cleared on restore).
    await writeFile(join(mountpoint, 'state.txt'), 'after backup mutation\n');
    await writeFile(join(mountpoint, 'stale.txt'), 'stale\n');

    // Restore all snapshots; `clear: true` asks for the target to be wiped
    // before restoring, which the stale.txt assertion below verifies.
    const restoreResult = await coreflow.backupManager.executeServiceRestore({
      backupId: 'backup-orchestration-smoke',
      service,
      snapshots: backupResult.snapshots,
      clear: true,
    });
    assert(restoreResult.restored.length === 3, `Unexpected restore result: ${JSON.stringify(restoreResult)}`);
    // Volume content must match the pre-backup seed exactly…
    assert(await readFile(join(mountpoint, 'state.txt'), 'utf8') === 'before backup\n', 'Coreflow restore did not restore volume data');
    // …and the post-backup file must be gone (clear semantics).
    assert(!(await pathExists(join(mountpoint, 'stale.txt'))), 'Coreflow restore did not clear stale volume data');
  } finally {
    // Restore the process env exactly as it was, distinguishing "unset"
    // from "set to a value" so other scenarios see an untouched environment.
    if (previousControlUrl === undefined) {
      delete process.env.CORESTORE_CONTROL_URL;
    } else {
      process.env.CORESTORE_CONTROL_URL = previousControlUrl;
    }
    if (previousApiToken === undefined) {
      delete process.env.CORESTORE_API_TOKEN;
    } else {
      process.env.CORESTORE_API_TOKEN = previousApiToken;
    }
    // Best-effort shutdown: stop() may reject (e.g. start() itself failed);
    // log rather than mask the original error from the try block.
    await corestore.stop().catch((errorArg) => {
      console.log(`[${scenarioName}] Failed to stop backup Corestore: ${(errorArg as Error).message}`);
    });
  }
};
|
||||||
|
|
||||||
const main = async () => {
|
const main = async () => {
|
||||||
try {
|
try {
|
||||||
await mkdir(buildDir, { recursive: true });
|
await mkdir(buildDir, { recursive: true });
|
||||||
assertCoreflowVolumeMounts();
|
assertCoreflowVolumeMounts();
|
||||||
await assertCorestoreVolumeDriver();
|
await assertCorestoreVolumeDriver();
|
||||||
|
await assertCoreflowBackupOrchestration();
|
||||||
console.log(`[${scenarioName}] PASS`);
|
console.log(`[${scenarioName}] PASS`);
|
||||||
} finally {
|
} finally {
|
||||||
await rm(buildDir, { recursive: true, force: true });
|
await rm(buildDir, { recursive: true, force: true });
|
||||||
|
|||||||
Reference in New Issue
Block a user