feat: add backup replication targets
This commit is contained in:
@@ -1,9 +1,43 @@
|
||||
import type { Cloudly } from '../classes.cloudly.js';
|
||||
import * as plugins from '../plugins.js';
|
||||
import { BackupRecord } from './classes.backuprecord.js';
|
||||
import { createBackupTargetWriterFromEnv, type IBackupTargetWriter } from './classes.replicationtarget.js';
|
||||
|
||||
export type TBackupStatus = 'pending' | 'running' | 'ready' | 'failed' | 'restoring' | 'restored';
|
||||
export type TBackupStatus =
|
||||
| 'pending'
|
||||
| 'running'
|
||||
| 'replicating'
|
||||
| 'replicated'
|
||||
| 'ready'
|
||||
| 'failed'
|
||||
| 'restoring'
|
||||
| 'restored';
|
||||
/** Kinds of service resources a backup snapshot can cover. */
export type TBackupResourceType = 'volume' | 'database' | 'objectstorage';
|
||||
/** Supported remote replication target kinds (S3-compatible object storage or an SMB share). */
export type TBackupReplicationTargetType = 's3' | 'smb';
|
||||
|
||||
/** A single file inside a replicated backup archive. */
export interface IBackupArchiveObject {
  /** Object path relative to the archive root (POSIX separators). */
  path: string;
  /** Object size in bytes; verified against uploaded/downloaded contents. */
  size: number;
  /** Hex-encoded SHA-256 checksum of the object contents. */
  sha256: string;
}
|
||||
|
||||
/** Manifest describing the full set of objects that make up one backup archive. */
export interface IBackupArchiveManifest {
  /** Manifest schema version; currently fixed at 1. */
  version: 1;
  /** Id of the backup record this manifest belongs to. */
  backupId: string;
  /** Creation timestamp (epoch milliseconds, by convention of the surrounding code). */
  createdAt: number;
  /** Every archive object included in this backup. */
  objects: IBackupArchiveObject[];
  /** Sum of all object sizes in bytes. */
  totalSize: number;
}
|
||||
|
||||
/** Outcome of replicating a backup archive to its remote target. */
export interface IBackupReplicationResult {
  /** Which target kind the archive was written to ('s3' or 'smb'). */
  targetType: TBackupReplicationTargetType;
  /** Remote directory path holding this backup's archive. */
  targetPath: string;
  /** Remote path of the written manifest.json. */
  manifestPath: string;
  /** Hex SHA-256 of the manifest file, used to verify later reads. */
  manifestSha256: string;
  /** Number of archive objects recorded in the manifest. */
  objectCount: number;
  /** Total archive size in bytes, taken from the manifest. */
  totalSize: number;
  /** Completion timestamp (epoch milliseconds). */
  completedAt: number;
}
|
||||
|
||||
export interface IBackupSnapshotData {
|
||||
type: TBackupResourceType;
|
||||
@@ -28,6 +62,7 @@ export interface IBackupRecordData {
|
||||
status: TBackupStatus;
|
||||
trigger: 'manual' | 'scheduled';
|
||||
snapshots: IBackupSnapshotData[];
|
||||
replication?: IBackupReplicationResult;
|
||||
createdAt: number;
|
||||
updatedAt: number;
|
||||
completedAt?: number;
|
||||
@@ -44,6 +79,7 @@ export interface IBackupRecordData {
|
||||
export class CloudlyBackupManager {
|
||||
public typedrouter = new plugins.typedrequest.TypedRouter();
|
||||
public cloudlyRef: Cloudly;
|
||||
private backupTargetWriter?: IBackupTargetWriter;
|
||||
|
||||
get db() {
|
||||
return this.cloudlyRef.mongodbConnector.smartdataDb;
|
||||
@@ -93,6 +129,41 @@ export class CloudlyBackupManager {
|
||||
};
|
||||
}),
|
||||
);
|
||||
|
||||
this.typedrouter.addTypedHandler(
|
||||
new plugins.typedrequest.TypedHandler<any>('prepareBackupReplication', async (requestArg) => {
|
||||
await this.passClusterIdentity(requestArg);
|
||||
return await this.prepareBackupReplication(requestArg);
|
||||
}),
|
||||
);
|
||||
|
||||
this.typedrouter.addTypedHandler(
|
||||
new plugins.typedrequest.TypedHandler<any>('uploadBackupArchiveObject', async (requestArg) => {
|
||||
await this.passClusterIdentity(requestArg);
|
||||
return await this.uploadBackupArchiveObject(requestArg);
|
||||
}),
|
||||
);
|
||||
|
||||
this.typedrouter.addTypedHandler(
|
||||
new plugins.typedrequest.TypedHandler<any>('completeBackupReplication', async (requestArg) => {
|
||||
await this.passClusterIdentity(requestArg);
|
||||
return await this.completeBackupReplication(requestArg);
|
||||
}),
|
||||
);
|
||||
|
||||
this.typedrouter.addTypedHandler(
|
||||
new plugins.typedrequest.TypedHandler<any>('getBackupArchiveManifest', async (requestArg) => {
|
||||
await this.passClusterIdentity(requestArg);
|
||||
return await this.getBackupArchiveManifest(requestArg);
|
||||
}),
|
||||
);
|
||||
|
||||
this.typedrouter.addTypedHandler(
|
||||
new plugins.typedrequest.TypedHandler<any>('downloadBackupArchiveObject', async (requestArg) => {
|
||||
await this.passClusterIdentity(requestArg);
|
||||
return await this.downloadBackupArchiveObject(requestArg);
|
||||
}),
|
||||
);
|
||||
}
|
||||
|
||||
public async start() {
|
||||
@@ -190,9 +261,16 @@ export class CloudlyBackupManager {
|
||||
backupId: backup.id,
|
||||
service: await service.createSavableObject(),
|
||||
tags: requestArg.tags,
|
||||
replication: {
|
||||
enabled: true,
|
||||
},
|
||||
}, backup.clusterId);
|
||||
backup.snapshots = result.snapshots || [];
|
||||
backup.status = 'ready';
|
||||
if (!result.replication) {
|
||||
throw new Error('Coreflow did not complete remote backup replication');
|
||||
}
|
||||
backup.replication = result.replication;
|
||||
backup.status = 'replicated';
|
||||
backup.completedAt = Date.now();
|
||||
backup.updatedAt = Date.now();
|
||||
await backup.save();
|
||||
@@ -218,7 +296,7 @@ export class CloudlyBackupManager {
|
||||
serviceId: serviceIdArg,
|
||||
});
|
||||
const completedBackups = backups
|
||||
.filter((backupArg) => backupArg.status === 'ready' || backupArg.status === 'restored' || backupArg.status === 'failed')
|
||||
.filter((backupArg) => backupArg.status === 'replicated' || backupArg.status === 'restored' || backupArg.status === 'failed')
|
||||
.sort((a, b) => (b.createdAt || 0) - (a.createdAt || 0));
|
||||
for (const backup of completedBackups.slice(keepLast)) {
|
||||
await backup.delete();
|
||||
@@ -235,7 +313,7 @@ export class CloudlyBackupManager {
|
||||
if (!backup) {
|
||||
throw new plugins.typedrequest.TypedResponseError(`Backup ${requestArg.backupId} not found`);
|
||||
}
|
||||
if (backup.status !== 'ready' && backup.status !== 'restored') {
|
||||
if (backup.status !== 'replicated' && backup.status !== 'restored') {
|
||||
throw new plugins.typedrequest.TypedResponseError(`Backup ${backup.id} is not restorable in status ${backup.status}`);
|
||||
}
|
||||
const service = await this.cloudlyRef.serviceManager.CService.getInstance({
|
||||
@@ -245,6 +323,7 @@ export class CloudlyBackupManager {
|
||||
throw new plugins.typedrequest.TypedResponseError(`Service ${backup.serviceId} not found`);
|
||||
}
|
||||
|
||||
const previousStatus = backup.status;
|
||||
backup.status = 'restoring';
|
||||
backup.updatedAt = Date.now();
|
||||
await backup.save();
|
||||
@@ -256,6 +335,9 @@ export class CloudlyBackupManager {
|
||||
snapshots: backup.snapshots || [],
|
||||
clear: requestArg.clear,
|
||||
resourceTypes: requestArg.resourceTypes,
|
||||
replication: {
|
||||
enabled: true,
|
||||
},
|
||||
}, backup.clusterId);
|
||||
backup.status = 'restored';
|
||||
backup.restoreHistory = [
|
||||
@@ -268,7 +350,7 @@ export class CloudlyBackupManager {
|
||||
backup.updatedAt = Date.now();
|
||||
await backup.save();
|
||||
} catch (error) {
|
||||
backup.status = 'ready';
|
||||
backup.status = previousStatus;
|
||||
backup.restoreHistory = [
|
||||
...(backup.restoreHistory || []),
|
||||
{
|
||||
@@ -285,6 +367,188 @@ export class CloudlyBackupManager {
|
||||
return await backup.createSavableObject();
|
||||
}
|
||||
|
||||
/**
 * Returns the writer for the configured replication target,
 * creating it lazily from environment configuration on first use.
 */
private getBackupTargetWriter() {
  this.backupTargetWriter ??= createBackupTargetWriterFromEnv();
  return this.backupTargetWriter;
}
|
||||
|
||||
/**
 * Normalizes a backup target path into a safe, relative POSIX path.
 *
 * Backslashes become forward slashes, the result is collapsed via posix
 * normalization, and leading slashes are stripped. Anything that is empty
 * or would escape the target root is rejected.
 *
 * @param pathArg raw path candidate (may contain backslashes or redundant segments)
 * @returns the normalized relative path
 * @throws Error when the path is empty or resolves outside the target root
 */
private normalizeTargetPath(pathArg: string) {
  const normalized = plugins.path.posix
    .normalize(String(pathArg || '').replace(/\\/g, '/').trim())
    .replace(/^\/+/, '');
  if (
    !normalized ||
    normalized === '.' ||
    // fix: a bare '..' survives posix normalization and previously slipped
    // past the startsWith('../') / includes('/../') traversal checks
    normalized === '..' ||
    normalized.startsWith('../') ||
    normalized.includes('/../')
  ) {
    throw new Error(`Invalid backup target path ${pathArg}`);
  }
  return normalized;
}
|
||||
|
||||
/**
 * Builds the remote directory path for a backup record:
 * <prefix>/clusters/<clusterId>/services/<serviceId>/backups/<backupId>.
 * The prefix comes from CLOUDLY_BACKUP_TARGET_PREFIX (default 'serve.zone-backups').
 */
private getBackupTargetPath(backupArg: BackupRecord) {
  const prefix = process.env.CLOUDLY_BACKUP_TARGET_PREFIX || 'serve.zone-backups';
  const segments = [
    prefix,
    'clusters',
    backupArg.clusterId || 'default',
    'services',
    backupArg.serviceId,
    'backups',
    backupArg.id,
  ];
  const joinedPath = segments.filter(Boolean).join('/');
  return this.normalizeTargetPath(joinedPath);
}
|
||||
|
||||
/** Resolves the remote path of one archive object beneath the backup's 'archive/' directory. */
private getArchiveObjectTargetPath(backupArg: BackupRecord, objectPathArg: string) {
  const basePath = this.getBackupTargetPath(backupArg);
  return this.normalizeTargetPath(basePath + '/archive/' + objectPathArg);
}
|
||||
|
||||
/** Resolves the remote path of the backup's manifest.json file. */
private getManifestTargetPath(backupArg: BackupRecord) {
  const basePath = this.getBackupTargetPath(backupArg);
  return this.normalizeTargetPath(basePath + '/manifest.json');
}
|
||||
|
||||
/** Computes the hex-encoded SHA-256 digest of a buffer. */
private getSha256(contentsArg: Buffer) {
  const hasher = plugins.crypto.createHash('sha256');
  hasher.update(contentsArg);
  return hasher.digest('hex');
}
|
||||
|
||||
/**
 * Verifies that buffer contents match an archive object's declared size and checksum.
 * Size is checked first so the hash is only computed when lengths agree.
 * @throws Error on any mismatch.
 */
private assertObjectMatches(objectArg: IBackupArchiveObject, contentsArg: Buffer) {
  if (contentsArg.length !== objectArg.size) {
    throw new Error(`Backup archive object checksum mismatch for ${objectArg.path}`);
  }
  if (this.getSha256(contentsArg) !== objectArg.sha256) {
    throw new Error(`Backup archive object checksum mismatch for ${objectArg.path}`);
  }
}
|
||||
|
||||
/**
 * Serializes the remote manifest document (backup metadata plus the archive
 * manifest) as pretty-printed JSON with a trailing newline.
 */
private createManifestBuffer(backupArg: BackupRecord, manifestArg: IBackupArchiveManifest) {
  const manifestDocument = {
    version: 1,
    backupId: backupArg.id,
    serviceId: backupArg.serviceId,
    serviceName: backupArg.serviceName,
    clusterId: backupArg.clusterId,
    archive: manifestArg,
  };
  const serialized = JSON.stringify(manifestDocument, null, 2);
  return Buffer.from(serialized + '\n');
}
|
||||
|
||||
/**
 * Loads a backup record for a cluster-originated request and enforces that
 * the requesting cluster owns it (when both cluster ids are present).
 * @throws TypedResponseError when the backup is missing or owned by another cluster.
 */
private async getBackupForClusterRequest(backupIdArg: string, identityArg: plugins.servezoneInterfaces.data.IIdentity) {
  const backup = await BackupRecord.getInstance({ id: backupIdArg });
  if (!backup) {
    throw new plugins.typedrequest.TypedResponseError(`Backup ${backupIdArg} not found`);
  }
  // NOTE(review): clusterId is read off the identity untyped — presumably set
  // for role 'cluster' identities; confirm against the identity interface.
  const requestClusterId = (identityArg as any).clusterId;
  const ownedByOtherCluster =
    Boolean(backup.clusterId) && Boolean(requestClusterId) && backup.clusterId !== requestClusterId;
  if (ownedByOtherCluster) {
    throw new plugins.typedrequest.TypedResponseError(`Backup ${backupIdArg} does not belong to this cluster`);
  }
  return backup;
}
|
||||
|
||||
/**
 * Begins replication of a backup archive: compares the client's manifest
 * against the remote target, marks the backup 'replicating', and returns
 * the objects the client still needs to upload.
 */
public async prepareBackupReplication(requestArg: {
  identity: plugins.servezoneInterfaces.data.IIdentity;
  backupId: string;
  manifest: IBackupArchiveManifest;
}) {
  const backup = await this.getBackupForClusterRequest(requestArg.backupId, requestArg.identity);
  const targetWriter = this.getBackupTargetWriter();
  const manifestObjects = requestArg.manifest.objects || [];
  const missingObjects: IBackupArchiveObject[] = [];
  for (const objectEntry of manifestObjects) {
    const remotePath = this.getArchiveObjectTargetPath(backup, objectEntry.path);
    const alreadyPresent = await targetWriter.hasObject(remotePath, objectEntry);
    if (!alreadyPresent) {
      missingObjects.push(objectEntry);
    }
  }
  backup.status = 'replicating';
  backup.updatedAt = Date.now();
  await backup.save();
  return { missingObjects };
}
|
||||
|
||||
/**
 * Accepts one base64-encoded archive object from a cluster, verifies its
 * size/checksum against the declared object metadata, and writes it to the
 * remote replication target.
 */
public async uploadBackupArchiveObject(requestArg: {
  identity: plugins.servezoneInterfaces.data.IIdentity;
  backupId: string;
  object: IBackupArchiveObject;
  contentsBase64: string;
}) {
  const backup = await this.getBackupForClusterRequest(requestArg.backupId, requestArg.identity);
  const decodedContents = Buffer.from(requestArg.contentsBase64 || '', 'base64');
  this.assertObjectMatches(requestArg.object, decodedContents);
  const targetWriter = this.getBackupTargetWriter();
  const remotePath = this.getArchiveObjectTargetPath(backup, requestArg.object.path);
  await targetWriter.putObject(remotePath, requestArg.object, decodedContents);
  return { accepted: true };
}
|
||||
|
||||
/**
 * Finalizes replication: confirms every manifest object exists on the remote
 * target, writes the manifest file, records the replication result on the
 * backup, and flips its status to 'replicated'.
 * @throws Error when the remote target is missing any declared object.
 */
public async completeBackupReplication(requestArg: {
  identity: plugins.servezoneInterfaces.data.IIdentity;
  backupId: string;
  manifest: IBackupArchiveManifest;
}) {
  const backup = await this.getBackupForClusterRequest(requestArg.backupId, requestArg.identity);
  const targetWriter = this.getBackupTargetWriter();

  // Every object the manifest declares must already be present remotely.
  for (const objectEntry of requestArg.manifest.objects || []) {
    const remotePath = this.getArchiveObjectTargetPath(backup, objectEntry.path);
    const present = await targetWriter.hasObject(remotePath, objectEntry);
    if (!present) {
      throw new Error(`Remote backup target is missing archive object ${objectEntry.path}`);
    }
  }

  // Persist the manifest itself alongside the archive.
  const manifestPath = this.getManifestTargetPath(backup);
  const manifestBuffer = this.createManifestBuffer(backup, requestArg.manifest);
  const manifestSha256 = this.getSha256(manifestBuffer);
  const manifestObject = {
    path: 'manifest.json',
    size: manifestBuffer.length,
    sha256: manifestSha256,
  };
  await targetWriter.putObject(manifestPath, manifestObject, manifestBuffer);

  const finishedAt = Date.now();
  const replication: IBackupReplicationResult = {
    targetType: targetWriter.targetType,
    targetPath: this.getBackupTargetPath(backup),
    manifestPath,
    manifestSha256,
    objectCount: requestArg.manifest.objects.length,
    totalSize: requestArg.manifest.totalSize,
    completedAt: finishedAt,
  };
  backup.replication = replication;
  backup.status = 'replicated';
  backup.completedAt = finishedAt;
  backup.updatedAt = finishedAt;
  await backup.save();
  return { replication };
}
|
||||
|
||||
/**
 * Reads the replicated manifest back from the remote target, verifies its
 * checksum against the recorded replication result, and returns the parsed
 * archive manifest.
 * @throws TypedResponseError when the backup was never replicated.
 * @throws Error on a manifest checksum mismatch.
 */
public async getBackupArchiveManifest(requestArg: {
  identity: plugins.servezoneInterfaces.data.IIdentity;
  backupId: string;
}) {
  const backup = await this.getBackupForClusterRequest(requestArg.backupId, requestArg.identity);
  const replication = backup.replication;
  if (!replication) {
    throw new plugins.typedrequest.TypedResponseError(`Backup ${backup.id} has not been replicated`);
  }
  const manifestBuffer = await this.getBackupTargetWriter().readObject(replication.manifestPath);
  const actualSha256 = this.getSha256(manifestBuffer);
  if (actualSha256 !== replication.manifestSha256) {
    throw new Error(`Remote manifest checksum mismatch for backup ${backup.id}`);
  }
  const parsedManifest = JSON.parse(manifestBuffer.toString('utf8'));
  return { manifest: parsedManifest.archive as IBackupArchiveManifest };
}
|
||||
|
||||
/**
 * Streams one archive object back to a cluster as base64, verifying the
 * remote contents against the object's declared size and checksum first.
 * @throws TypedResponseError when the backup was never replicated.
 */
public async downloadBackupArchiveObject(requestArg: {
  identity: plugins.servezoneInterfaces.data.IIdentity;
  backupId: string;
  object: IBackupArchiveObject;
}) {
  const backup = await this.getBackupForClusterRequest(requestArg.backupId, requestArg.identity);
  if (!backup.replication) {
    throw new plugins.typedrequest.TypedResponseError(`Backup ${backup.id} has not been replicated`);
  }
  const remotePath = this.getArchiveObjectTargetPath(backup, requestArg.object.path);
  const contents = await this.getBackupTargetWriter().readObject(remotePath);
  this.assertObjectMatches(requestArg.object, contents);
  const contentsBase64 = contents.toString('base64');
  return {
    object: requestArg.object,
    contentsBase64,
  };
}
|
||||
|
||||
private async fireCoreflowRequest(methodArg: string, payloadArg: Record<string, unknown>, clusterIdArg?: string) {
|
||||
const typedsocket = this.cloudlyRef.server.typedServer?.typedsocket;
|
||||
if (!typedsocket) {
|
||||
@@ -317,4 +581,11 @@ export class CloudlyBackupManager {
|
||||
this.cloudlyRef.authManager.adminIdentityGuard,
|
||||
]);
|
||||
}
|
||||
|
||||
/**
 * Guard for cluster-originated typed requests: validates the identity and
 * then requires its role to be 'cluster'.
 * @throws TypedResponseError when the identity is not a cluster identity.
 */
private async passClusterIdentity(requestData: { identity: plugins.servezoneInterfaces.data.IIdentity }) {
  await this.passValidIdentity(requestData);
  const role = requestData.identity.role;
  if (role !== 'cluster') {
    throw new plugins.typedrequest.TypedResponseError('Cluster identity required');
  }
}
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user