/**
 * Backup Manager for Onebox
 *
 * Handles service backup and restore operations using @serve.zone/containerarchive
 * for content-addressed, deduplicated, optionally encrypted backup storage.
 *
 * Features:
 * - Service configuration export/import
 * - MongoDB database dumps
 * - MinIO bucket contents
 * - ClickHouse database dumps
 * - MariaDB database dumps
 * - Redis data export
 * - Docker image export/import
 * - Content-addressed deduplication via containerarchive
 * - Legacy .tar.enc backup support for backward compatibility
 */
|
|
|
|
import * as plugins from '../plugins.ts';
|
|
import type {
|
|
IService,
|
|
IBackup,
|
|
IBackupManifest,
|
|
IBackupServiceConfig,
|
|
IBackupPlatformResource,
|
|
IBackupResult,
|
|
IRestoreOptions,
|
|
IRestoreResult,
|
|
TPlatformServiceType,
|
|
IPlatformResource,
|
|
IBackupCreateOptions,
|
|
} from '../types.ts';
|
|
import { logger } from '../logging.ts';
|
|
import { getErrorMessage } from '../utils/error.ts';
|
|
import { credentialEncryption } from './encryption.ts';
|
|
import type { Onebox } from './onebox.ts';
|
|
import { projectInfo } from '../info.ts';
|
|
|
|
// Legacy encryption parameters (for old .tar.enc backups and download encryption)
const ENCRYPTION_ALGORITHM = 'AES-GCM'; // WebCrypto algorithm identifier
const KEY_LENGTH = 256; // AES key size in bits
const IV_LENGTH = 12; // bytes — 96-bit IV, the recommended size for AES-GCM
const SALT_LENGTH = 32; // bytes of random salt for the key derivation
const PBKDF2_ITERATIONS = 100000; // PBKDF2 iteration count; presumably matched by decrypt side — keep in sync
|
|
|
|
export class BackupManager {
|
|
  // Back-reference to the owning Onebox instance (database, docker, services, ...)
  private oneboxRef: Onebox;
  // ContainerArchive repository handle; null until init() succeeds or after close()
  public archive: plugins.ContainerArchive | null = null;

  constructor(oneboxRef: Onebox) {
    this.oneboxRef = oneboxRef;
  }
|
|
|
|
/**
|
|
* Initialize the containerarchive repository.
|
|
* Opens an existing repo or creates a new one.
|
|
*/
|
|
async init(): Promise<void> {
|
|
const repoPath = this.getArchiveRepoPath();
|
|
const passphrase = this.getBackupPassword() || undefined;
|
|
|
|
try {
|
|
// Try to open existing repo
|
|
this.archive = await plugins.ContainerArchive.open(repoPath, { passphrase });
|
|
logger.info('ContainerArchive backup repository opened');
|
|
} catch {
|
|
// Initialize new repo
|
|
try {
|
|
this.archive = await plugins.ContainerArchive.init(repoPath, { passphrase });
|
|
logger.info('ContainerArchive backup repository initialized');
|
|
} catch (initError) {
|
|
logger.warn(`ContainerArchive initialization failed: ${getErrorMessage(initError)}`);
|
|
this.archive = null;
|
|
}
|
|
}
|
|
}
|
|
|
|
/**
|
|
* Close the containerarchive repository
|
|
*/
|
|
async close(): Promise<void> {
|
|
if (this.archive) {
|
|
await this.archive.close();
|
|
this.archive = null;
|
|
}
|
|
}
|
|
|
|
  /**
   * Create a backup for a service.
   *
   * Exports the service configuration, any platform-resource data (MongoDB,
   * MinIO, ClickHouse, MariaDB, Redis) and optionally the Docker image into a
   * temp directory, then ingests everything as one multi-item snapshot into
   * the containerarchive repository and records an IBackup row in the database.
   *
   * @param serviceName - Name of the service to backup
   * @param options - Optional backup creation options (scheduleId)
   * @returns the created backup record plus the containerarchive snapshot id
   * @throws if the service does not exist or the archive cannot be initialized
   */
  async createBackup(serviceName: string, options?: IBackupCreateOptions): Promise<IBackupResult> {
    const service = this.oneboxRef.database.getServiceByName(serviceName);
    if (!service) {
      throw new Error(`Service not found: ${serviceName}`);
    }

    // Ensure archive is available (lazy init on first use)
    if (!this.archive) {
      await this.init();
    }
    if (!this.archive) {
      throw new Error('Backup archive not available. Check backup manager initialization.');
    }

    logger.info(`Creating backup for service: ${serviceName}`);

    // Create temp directory for backup contents; timestamp doubles as createdAt
    const timestamp = Date.now();
    const tempDir = `/tmp/onebox-backup-${serviceName}-${timestamp}`;
    await Deno.mkdir(tempDir, { recursive: true });

    try {
      // 1. Export service configuration
      const serviceConfig = await this.exportServiceConfig(service);
      await Deno.writeTextFile(
        `${tempDir}/service.json`,
        JSON.stringify(serviceConfig, null, 2)
      );

      // 2. Export platform resources metadata and data
      const platformResources: IBackupPlatformResource[] = [];
      const resourceTypes: TPlatformServiceType[] = [];

      if (service.platformRequirements) {
        const resources = await this.oneboxRef.platformServices.getResourcesForService(service.id!);

        for (const { resource, platformService, credentials } of resources) {
          // Store resource metadata (credentials included so restore can reconnect)
          platformResources.push({
            resourceType: resource.resourceType,
            resourceName: resource.resourceName,
            platformServiceType: platformService.type,
            credentials,
          });

          // Track distinct resource types (drives data-file collection below)
          if (!resourceTypes.includes(platformService.type)) {
            resourceTypes.push(platformService.type);
          }

          // Create data directory per platform-service type
          const dataDir = `${tempDir}/data/${platformService.type}`;
          await Deno.mkdir(dataDir, { recursive: true });

          // Export data based on type
          switch (platformService.type) {
            case 'mongodb':
              await this.exportMongoDatabase(dataDir, resource, credentials);
              break;
            case 'minio':
              await this.exportMinioBucket(dataDir, resource, credentials);
              break;
            case 'clickhouse':
              await this.exportClickHouseDatabase(dataDir, resource, credentials);
              break;
            case 'mariadb':
              await this.exportMariaDBDatabase(dataDir, resource, credentials);
              break;
            case 'redis':
              await this.exportRedisData(dataDir, resource, credentials);
              break;
          }
        }
      }

      await Deno.writeTextFile(
        `${tempDir}/platform-resources.json`,
        JSON.stringify(platformResources, null, 2)
      );

      // 3. Export Docker image if configured
      const includeImage = service.includeImageInBackup !== false; // Default true
      if (includeImage && service.image) {
        await Deno.mkdir(`${tempDir}/data/image`, { recursive: true });
        await this.exportDockerImage(service.image, `${tempDir}/data/image/image.tar`);
      }

      // 4. Build ingest items from temp directory files
      const items: Array<{ stream: NodeJS.ReadableStream; name: string; type?: string }> = [];

      // Service config
      items.push({
        stream: plugins.nodeFs.createReadStream(`${tempDir}/service.json`),
        name: 'service-config',
        type: 'json',
      });

      // Platform resources metadata
      items.push({
        stream: plugins.nodeFs.createReadStream(`${tempDir}/platform-resources.json`),
        name: 'platform-resources-meta',
        type: 'json',
      });

      // Platform data files (only top-level files per type dir are collected;
      // NOTE(review): nested dirs such as MinIO bucket subfolders are not
      // walked recursively here — confirm exports only produce flat files)
      for (const resourceType of resourceTypes) {
        const dataDir = `${tempDir}/data/${resourceType}`;
        try {
          for await (const entry of Deno.readDir(dataDir)) {
            if (entry.isFile) {
              items.push({
                stream: plugins.nodeFs.createReadStream(`${dataDir}/${entry.name}`),
                name: `data/${resourceType}/${entry.name}`,
                type: 'data',
              });
            }
          }
        } catch {
          // Directory may not exist if export produced no files
        }
      }

      // Docker image
      if (includeImage && service.image) {
        const imagePath = `${tempDir}/data/image/image.tar`;
        try {
          await Deno.stat(imagePath);
          items.push({
            stream: plugins.nodeFs.createReadStream(imagePath),
            name: 'docker-image',
            type: 'image',
          });
        } catch {
          // Image export may have failed silently
        }
      }

      // 5. Build snapshot tags (searchable metadata on the snapshot)
      const tags: Record<string, string> = {
        serviceName: service.name,
        serviceId: String(service.id),
        oneboxVersion: projectInfo.version,
        includesImage: String(includeImage),
        platformResources: JSON.stringify(resourceTypes),
      };
      if (options?.scheduleId) {
        tags.scheduleId = String(options.scheduleId);
      }

      // 6. Ingest multi-item snapshot into containerarchive
      const snapshot = await this.archive.ingestMulti(items, { tags });

      // 7. Store backup record in database
      const backup: IBackup = {
        serviceId: service.id!,
        serviceName: service.name,
        filename: '', // No longer file-based
        snapshotId: snapshot.id,
        sizeBytes: snapshot.originalSize,
        storedSizeBytes: snapshot.storedSize,
        createdAt: timestamp,
        includesImage: includeImage,
        platformResources: resourceTypes,
        checksum: `sha256:${snapshot.id}`,
        scheduleId: options?.scheduleId,
      };

      const createdBackup = this.oneboxRef.database.createBackup(backup);

      // 8. Cleanup temp files
      await Deno.remove(tempDir, { recursive: true });

      const dedup = snapshot.reusedChunks > 0
        ? ` (${snapshot.reusedChunks} chunks reused, ${Math.round((1 - snapshot.storedSize / snapshot.originalSize) * 100)}% space saved)`
        : '';
      logger.success(`Backup created for service ${serviceName}: snapshot ${snapshot.id}${dedup}`);

      return {
        backup: createdBackup,
        snapshotId: snapshot.id,
      };
    } catch (error) {
      // Cleanup on error
      try {
        await Deno.remove(tempDir, { recursive: true });
      } catch {
        // Ignore cleanup errors
      }
      logger.error(`Failed to create backup for ${serviceName}: ${getErrorMessage(error)}`);
      throw error;
    }
  }
|
|
|
|
  /**
   * Restore a backup.
   *
   * Dispatches to one of three paths:
   * - string argument: legacy .tar.enc file restore
   * - numeric id with no snapshotId: legacy file-based record
   * - numeric id with snapshotId: containerarchive snapshot restore
   *
   * For the snapshot path: extracts config, platform metadata, data items and
   * (optionally) the docker image into a temp dir, then delegates to
   * performRestore() which also cleans up the temp dir on success.
   *
   * @param backupIdOrPath - Backup ID (number) or legacy file path (string)
   * @param options - Restore options (mode, newServiceName, etc.)
   */
  async restoreBackup(backupIdOrPath: number | string, options: IRestoreOptions): Promise<IRestoreResult> {
    // Handle legacy file path restore
    if (typeof backupIdOrPath === 'string') {
      return this.restoreLegacyBackup(backupIdOrPath, options);
    }

    // Look up backup by ID
    const backup = this.oneboxRef.database.getBackupById(backupIdOrPath);
    if (!backup) {
      throw new Error(`Backup not found: ${backupIdOrPath}`);
    }

    // Legacy file-based backup (no snapshotId)
    if (!backup.snapshotId) {
      const filePath = this.getBackupFilePath(backupIdOrPath);
      if (!filePath) {
        throw new Error('Backup file not found');
      }
      return this.restoreLegacyBackup(filePath, options);
    }

    // ContainerArchive-based restore (lazy init on first use)
    if (!this.archive) {
      await this.init();
    }
    if (!this.archive) {
      throw new Error('Backup archive not available');
    }

    logger.info(`Restoring backup: snapshot ${backup.snapshotId}`);

    const timestamp = Date.now();
    const tempDir = `/tmp/onebox-restore-${timestamp}`;
    await Deno.mkdir(tempDir, { recursive: true });

    // Non-fatal problems are collected here and returned to the caller
    const warnings: string[] = [];

    try {
      // 1. Get snapshot metadata
      const snapshot = await this.archive.getSnapshot(backup.snapshotId);

      // 2. Restore service config (required item; failure aborts the restore)
      const serviceConfigStream = await this.archive.restore(backup.snapshotId, {
        item: 'service-config',
      });
      const serviceConfigJson = await this.streamToString(serviceConfigStream);
      const serviceConfig: IBackupServiceConfig = JSON.parse(serviceConfigJson);

      // 3. Restore platform resources metadata (optional item)
      let platformResources: IBackupPlatformResource[] = [];
      try {
        const platformMetaStream = await this.archive.restore(backup.snapshotId, {
          item: 'platform-resources-meta',
        });
        platformResources = JSON.parse(await this.streamToString(platformMetaStream));
      } catch {
        // No platform resources in this backup
      }

      // 4. Restore data items to temp dir, preserving their 'data/...' layout
      for (const item of snapshot.items) {
        if (item.name.startsWith('data/')) {
          const itemStream = await this.archive.restore(backup.snapshotId, {
            item: item.name,
          });
          const fullPath = `${tempDir}/${item.name}`;
          const parentDir = plugins.path.dirname(fullPath);
          await Deno.mkdir(parentDir, { recursive: true });
          await this.streamToFile(itemStream, fullPath);
        }
      }

      // 5. Synthesize a manifest compatible with the legacy restore path
      const manifest: IBackupManifest = {
        version: '1.0',
        createdAt: backup.createdAt,
        oneboxVersion: snapshot.tags.oneboxVersion || 'unknown',
        serviceName: backup.serviceName,
        includesImage: backup.includesImage,
        platformResources: backup.platformResources,
        checksum: backup.checksum,
      };

      // Restore docker image if present (failure is a warning, not fatal)
      if (manifest.includesImage) {
        try {
          const imageStream = await this.archive.restore(backup.snapshotId, {
            item: 'docker-image',
          });
          const imagePath = `${tempDir}/data/image/image.tar`;
          await Deno.mkdir(plugins.path.dirname(imagePath), { recursive: true });
          await this.streamToFile(imageStream, imagePath);
        } catch (error) {
          warnings.push(`Docker image restore failed: ${getErrorMessage(error)}`);
        }
      }

      // 6. From here, the same logic as legacy restore applies
      return await this.performRestore(
        serviceConfig,
        platformResources,
        manifest,
        tempDir,
        options,
        warnings,
      );
    } catch (error) {
      // performRestore also removes tempDir; the double-remove is harmless
      // because this attempt swallows its own failure.
      try {
        await Deno.remove(tempDir, { recursive: true });
      } catch {
        // Ignore cleanup errors
      }
      logger.error(`Failed to restore backup: ${getErrorMessage(error)}`);
      throw error;
    }
  }
|
|
|
|
/**
|
|
* List all backups
|
|
*/
|
|
listBackups(serviceName?: string): IBackup[] {
|
|
if (serviceName) {
|
|
const service = this.oneboxRef.database.getServiceByName(serviceName);
|
|
if (!service) {
|
|
return [];
|
|
}
|
|
return this.oneboxRef.database.getBackupsByService(service.id!);
|
|
}
|
|
return this.oneboxRef.database.getAllBackups();
|
|
}
|
|
|
|
/**
|
|
* Delete a backup
|
|
*/
|
|
async deleteBackup(backupId: number): Promise<void> {
|
|
const backup = this.oneboxRef.database.getBackupById(backupId);
|
|
if (!backup) {
|
|
throw new Error(`Backup not found: ${backupId}`);
|
|
}
|
|
|
|
if (backup.snapshotId) {
|
|
// ContainerArchive backup: delete DB record only.
|
|
// Actual storage reclaimed on next prune().
|
|
this.oneboxRef.database.deleteBackup(backupId);
|
|
logger.info(`Backup record deleted: snapshot ${backup.snapshotId} (storage reclaimed on next prune)`);
|
|
} else {
|
|
// Legacy file-based backup
|
|
const backupsDir = this.getBackupsDirectory();
|
|
const filePath = `${backupsDir}/${backup.filename}`;
|
|
try {
|
|
await Deno.remove(filePath);
|
|
} catch {
|
|
logger.warn(`Could not delete backup file: ${filePath}`);
|
|
}
|
|
this.oneboxRef.database.deleteBackup(backupId);
|
|
logger.info(`Backup deleted: ${backup.filename}`);
|
|
}
|
|
}
|
|
|
|
/**
|
|
* Get backup file path for download (legacy backups only)
|
|
*/
|
|
getBackupFilePath(backupId: number): string | null {
|
|
const backup = this.oneboxRef.database.getBackupById(backupId);
|
|
if (!backup) {
|
|
return null;
|
|
}
|
|
if (backup.snapshotId) {
|
|
// ContainerArchive backup - no direct file path
|
|
return null;
|
|
}
|
|
const backupsDir = this.getBackupsDirectory();
|
|
return `${backupsDir}/${backup.filename}`;
|
|
}
|
|
|
|
  /**
   * Export a containerarchive backup as a downloadable encrypted tar stream.
   * Returns the path to a temporary encrypted file (caller must clean up).
   *
   * Restores every snapshot item into a temp dir, tars the dir, then — when a
   * backup password is configured — encrypts the tar for transport and deletes
   * the plaintext. Returns null for legacy backups, missing ids, an
   * uninitialized archive, or on any export error.
   */
  async getBackupExportPath(backupId: number): Promise<string | null> {
    const backup = this.oneboxRef.database.getBackupById(backupId);
    // Requires a snapshot-backed record and an open archive
    if (!backup?.snapshotId || !this.archive) {
      return null;
    }

    const timestamp = Date.now();
    const tempDir = `/tmp/onebox-export-${timestamp}`;
    await Deno.mkdir(tempDir, { recursive: true });

    try {
      const snapshot = await this.archive.getSnapshot(backup.snapshotId);

      // Restore all items to temp dir, recreating item-name subdirectories
      for (const item of snapshot.items) {
        const itemStream = await this.archive.restore(backup.snapshotId, {
          item: item.name,
        });
        const itemPath = `${tempDir}/${item.name}`;
        await Deno.mkdir(plugins.path.dirname(itemPath), { recursive: true });
        await this.streamToFile(itemStream, itemPath);
      }

      // Create tar archive, then drop the now-redundant temp dir
      const tarPath = `/tmp/onebox-export-${timestamp}.tar`;
      await this.createTarArchive(tempDir, tarPath);
      await Deno.remove(tempDir, { recursive: true });

      // Encrypt for transport when a backup password is configured;
      // otherwise the plaintext tar is returned as-is.
      const password = this.getBackupPassword();
      if (password) {
        const encPath = `${tarPath}.enc`;
        await this.encryptFile(tarPath, encPath, password);
        await Deno.remove(tarPath);
        return encPath;
      }

      return tarPath;
    } catch (error) {
      // NOTE(review): a failure after createTarArchive leaves the tar file in
      // /tmp; only tempDir is cleaned here — confirm whether that is intended.
      try {
        await Deno.remove(tempDir, { recursive: true });
      } catch { /* ignore */ }
      logger.error(`Failed to export backup: ${getErrorMessage(error)}`);
      return null;
    }
  }
|
|
|
|
// ========== Private Methods ==========
|
|
|
|
  /**
   * Get the backup encryption password from settings.
   * Returns null when none is configured; callers treat null as
   * "no encryption" (archive passphrase, export encryption).
   */
  private getBackupPassword(): string | null {
    return this.oneboxRef.database.getSetting('backup_encryption_password');
  }
|
|
|
|
/**
|
|
* Get containerarchive repository path
|
|
*/
|
|
private getArchiveRepoPath(): string {
|
|
const dataDir = this.oneboxRef.database.getSetting('dataDir') || './.nogit';
|
|
return `${dataDir}/backup-archive`;
|
|
}
|
|
|
|
/**
|
|
* Get legacy backups directory
|
|
*/
|
|
private getBackupsDirectory(): string {
|
|
const dataDir = this.oneboxRef.database.getSetting('dataDir') || './.nogit';
|
|
return `${dataDir}/backups`;
|
|
}
|
|
|
|
  /**
   * Restore from a legacy .tar.enc file.
   *
   * Decrypts the archive with the configured backup password, extracts it,
   * reads manifest/service-config/platform-resources JSON files, then hands
   * off to the shared performRestore() logic (which owns tempDir cleanup on
   * success).
   *
   * @param backupPath - path to the encrypted legacy archive
   * @param options - restore options (mode, newServiceName, etc.)
   * @throws when no backup password is configured or decryption/parsing fails
   */
  private async restoreLegacyBackup(backupPath: string, options: IRestoreOptions): Promise<IRestoreResult> {
    const backupPassword = this.getBackupPassword();
    if (!backupPassword) {
      throw new Error('Backup password not configured.');
    }

    logger.info(`Restoring legacy backup from: ${backupPath}`);

    const timestamp = Date.now();
    const tempDir = `/tmp/onebox-restore-${timestamp}`;
    await Deno.mkdir(tempDir, { recursive: true });

    // Non-fatal problems are collected here and returned to the caller
    const warnings: string[] = [];

    try {
      // 1. Decrypt the archive
      const tarPath = `${tempDir}/backup.tar`;
      await this.decryptFile(backupPath, tarPath, backupPassword);

      // 2. Extract tar archive (into the same tempDir that holds backup.tar)
      await this.extractTarArchive(tarPath, tempDir);

      // 3. Read and validate manifest
      const manifestPath = `${tempDir}/manifest.json`;
      const manifestData = await Deno.readTextFile(manifestPath);
      const manifest: IBackupManifest = JSON.parse(manifestData);

      // 4. Read service config
      const serviceConfigPath = `${tempDir}/service.json`;
      const serviceConfigData = await Deno.readTextFile(serviceConfigPath);
      const serviceConfig: IBackupServiceConfig = JSON.parse(serviceConfigData);

      // 5. Read platform resources (optional in older backups)
      let platformResources: IBackupPlatformResource[] = [];
      try {
        const resourcesData = await Deno.readTextFile(`${tempDir}/platform-resources.json`);
        platformResources = JSON.parse(resourcesData);
      } catch {
        // No platform resources in backup
      }

      return await this.performRestore(
        serviceConfig,
        platformResources,
        manifest,
        tempDir,
        options,
        warnings,
      );
    } catch (error) {
      try {
        await Deno.remove(tempDir, { recursive: true });
      } catch {
        // Ignore cleanup errors
      }
      logger.error(`Failed to restore legacy backup: ${getErrorMessage(error)}`);
      throw error;
    }
  }
|
|
|
|
  /**
   * Shared restore logic: takes parsed backup data and performs the actual restore.
   *
   * Modes:
   * - 'restore': overwrite an existing service of the same name (requires
   *   overwriteExisting=true)
   * - 'import' / 'clone': create a brand-new service under newServiceName;
   *   'clone' additionally drops the domain to avoid a conflict
   *
   * Owns tempDir cleanup: removes it on success AND in its own catch block
   * (callers' catch-side removal then becomes a harmless no-op).
   *
   * @param serviceConfig - service definition parsed from the backup
   * @param platformResources - platform resource metadata + credentials
   * @param manifest - backup manifest (serviceName, includesImage, ...)
   * @param tempDir - directory containing extracted data files
   * @param options - restore mode and related flags
   * @param warnings - accumulator for non-fatal problems, returned to caller
   */
  private async performRestore(
    serviceConfig: IBackupServiceConfig,
    platformResources: IBackupPlatformResource[],
    manifest: IBackupManifest,
    tempDir: string,
    options: IRestoreOptions,
    warnings: string[],
  ): Promise<IRestoreResult> {
    try {
      // Determine service name based on mode
      let serviceName: string;
      let existingService: IService | null = null;

      switch (options.mode) {
        case 'restore':
          serviceName = manifest.serviceName;
          existingService = this.oneboxRef.database.getServiceByName(serviceName);
          if (!existingService) {
            throw new Error(`Service '${serviceName}' not found. Use 'import' mode to create a new service.`);
          }
          if (!options.overwriteExisting) {
            throw new Error(`Service '${serviceName}' exists. Set overwriteExisting=true to proceed.`);
          }
          break;

        case 'import':
        case 'clone':
          if (!options.newServiceName) {
            throw new Error(`New service name required for '${options.mode}' mode.`);
          }
          serviceName = options.newServiceName;
          existingService = this.oneboxRef.database.getServiceByName(serviceName);
          if (existingService) {
            throw new Error(`Service '${serviceName}' already exists. Choose a different name.`);
          }
          break;

        default:
          throw new Error(`Invalid restore mode: ${options.mode}`);
      }

      // Import Docker image if present; on success the config's image ref is
      // rewritten to the imported tag. Failure is a warning, not fatal.
      if (manifest.includesImage) {
        const imagePath = `${tempDir}/data/image/image.tar`;
        try {
          await Deno.stat(imagePath);
          const newImageTag = await this.importDockerImage(imagePath);
          serviceConfig.image = newImageTag;
          logger.info(`Docker image imported: ${newImageTag}`);
        } catch (error) {
          warnings.push(`Docker image import failed: ${getErrorMessage(error)}`);
        }
      }

      // Create or update service
      let service: IService;
      let platformResourcesRestored = 0;

      if (options.mode === 'restore' && existingService) {
        // Update existing service in place (id and name are preserved)
        this.oneboxRef.database.updateService(existingService.id!, {
          image: serviceConfig.image,
          registry: serviceConfig.registry,
          port: serviceConfig.port,
          domain: serviceConfig.domain,
          useOneboxRegistry: serviceConfig.useOneboxRegistry,
          registryRepository: serviceConfig.registryRepository,
          registryImageTag: serviceConfig.registryImageTag,
          autoUpdateOnPush: serviceConfig.autoUpdateOnPush,
          platformRequirements: serviceConfig.platformRequirements,
          updatedAt: Date.now(),
        });

        // NOTE(review): updatedEnvVars is a plain copy of the backup's env —
        // nothing merges restored platform credentials into it before the
        // update below; confirm that restorePlatformResources reuses the
        // backed-up credentials so these env values stay valid.
        const updatedEnvVars = { ...serviceConfig.envVars };

        if (!options.skipPlatformData && platformResources.length > 0) {
          platformResourcesRestored = await this.restorePlatformResources(
            existingService.id!,
            platformResources,
            tempDir,
            warnings
          );
        }

        this.oneboxRef.database.updateService(existingService.id!, { envVars: updatedEnvVars });
        service = this.oneboxRef.database.getServiceByName(serviceName)!;
      } else {
        // Create new service via the normal deployment path
        const deployOptions = {
          name: serviceName,
          image: serviceConfig.image,
          registry: serviceConfig.registry,
          port: serviceConfig.port,
          // 'clone' drops the domain to avoid conflicting with the original
          domain: options.mode === 'clone' ? undefined : serviceConfig.domain,
          envVars: serviceConfig.envVars,
          useOneboxRegistry: serviceConfig.useOneboxRegistry,
          registryImageTag: serviceConfig.registryImageTag,
          autoUpdateOnPush: serviceConfig.autoUpdateOnPush,
          enableMongoDB: serviceConfig.platformRequirements?.mongodb,
          enableS3: serviceConfig.platformRequirements?.s3,
          enableClickHouse: serviceConfig.platformRequirements?.clickhouse,
          enableRedis: serviceConfig.platformRequirements?.redis,
          enableMariaDB: serviceConfig.platformRequirements?.mariadb,
        };

        service = await this.oneboxRef.services.deployService(deployOptions);

        if (!options.skipPlatformData && platformResources.length > 0) {
          // Fixed 2s grace period for platform services to come up;
          // NOTE(review): a readiness poll would be more robust than a sleep.
          await new Promise((resolve) => setTimeout(resolve, 2000));

          platformResourcesRestored = await this.restorePlatformResources(
            service.id!,
            platformResources,
            tempDir,
            warnings
          );
        }
      }

      // Cleanup
      await Deno.remove(tempDir, { recursive: true });

      logger.success(`Backup restored successfully as service '${serviceName}'`);

      return {
        service,
        platformResourcesRestored,
        warnings,
      };
    } catch (error) {
      try {
        await Deno.remove(tempDir, { recursive: true });
      } catch {
        // Ignore cleanup errors
      }
      throw error;
    }
  }
|
|
|
|
// ========== Stream Helpers ==========
|
|
|
|
/**
|
|
* Read a Node.js ReadableStream into a string
|
|
*/
|
|
private async streamToString(stream: NodeJS.ReadableStream): Promise<string> {
|
|
const chunks: Uint8Array[] = [];
|
|
for await (const chunk of stream as AsyncIterable<Uint8Array>) {
|
|
chunks.push(chunk instanceof Uint8Array ? chunk : new TextEncoder().encode(String(chunk)));
|
|
}
|
|
const combined = new Uint8Array(chunks.reduce((sum, c) => sum + c.length, 0));
|
|
let offset = 0;
|
|
for (const chunk of chunks) {
|
|
combined.set(chunk, offset);
|
|
offset += chunk.length;
|
|
}
|
|
return new TextDecoder().decode(combined);
|
|
}
|
|
|
|
/**
|
|
* Write a Node.js ReadableStream to a file
|
|
*/
|
|
private async streamToFile(stream: NodeJS.ReadableStream, filePath: string): Promise<void> {
|
|
const writeStream = plugins.nodeFs.createWriteStream(filePath);
|
|
await new Promise<void>((resolve, reject) => {
|
|
(stream as any).pipe(writeStream);
|
|
writeStream.on('finish', resolve);
|
|
writeStream.on('error', reject);
|
|
});
|
|
}
|
|
|
|
// ========== Platform Export/Import Methods ==========
|
|
|
|
/**
|
|
* Export service configuration
|
|
*/
|
|
private async exportServiceConfig(service: IService): Promise<IBackupServiceConfig> {
|
|
return {
|
|
name: service.name,
|
|
image: service.image,
|
|
registry: service.registry,
|
|
envVars: service.envVars,
|
|
port: service.port,
|
|
domain: service.domain,
|
|
useOneboxRegistry: service.useOneboxRegistry,
|
|
registryRepository: service.registryRepository,
|
|
registryImageTag: service.registryImageTag,
|
|
autoUpdateOnPush: service.autoUpdateOnPush,
|
|
platformRequirements: service.platformRequirements,
|
|
includeImageInBackup: service.includeImageInBackup,
|
|
};
|
|
}
|
|
|
|
/**
|
|
* Export MongoDB database
|
|
*/
|
|
private async exportMongoDatabase(
|
|
dataDir: string,
|
|
resource: IPlatformResource,
|
|
credentials: Record<string, string>
|
|
): Promise<void> {
|
|
logger.info(`Exporting MongoDB database: ${resource.resourceName}`);
|
|
|
|
const mongoService = this.oneboxRef.database.getPlatformServiceById(resource.platformServiceId);
|
|
if (!mongoService || !mongoService.containerId) {
|
|
throw new Error('MongoDB service not running');
|
|
}
|
|
|
|
const connectionUri = credentials.connectionUri || credentials.MONGODB_URI;
|
|
if (!connectionUri) {
|
|
throw new Error('MongoDB connection URI not found in credentials');
|
|
}
|
|
|
|
const archivePath = `/tmp/${resource.resourceName}.archive`;
|
|
const result = await this.oneboxRef.docker.execInContainer(mongoService.containerId, [
|
|
'mongodump',
|
|
`--uri=${connectionUri}`,
|
|
`--archive=${archivePath}`,
|
|
'--gzip',
|
|
]);
|
|
|
|
if (result.exitCode !== 0) {
|
|
throw new Error(`mongodump failed: ${result.stderr}`);
|
|
}
|
|
|
|
const container = await this.oneboxRef.docker.getContainerById(mongoService.containerId);
|
|
if (!container) {
|
|
throw new Error('MongoDB container not found');
|
|
}
|
|
|
|
const copyResult = await this.oneboxRef.docker.execInContainer(mongoService.containerId, [
|
|
'cat',
|
|
archivePath,
|
|
]);
|
|
|
|
const localPath = `${dataDir}/${resource.resourceName}.archive`;
|
|
const encoder = new TextEncoder();
|
|
await Deno.writeFile(localPath, encoder.encode(copyResult.stdout));
|
|
|
|
await this.oneboxRef.docker.execInContainer(mongoService.containerId, ['rm', archivePath]);
|
|
|
|
logger.success(`MongoDB database exported: ${resource.resourceName}`);
|
|
}
|
|
|
|
  /**
   * Export all objects of a MinIO bucket to `${dataDir}/${resourceName}/`,
   * preserving key paths as subdirectories, plus a `_metadata.json` with the
   * bucket name and object count.
   *
   * @throws when required S3 credentials are missing
   */
  private async exportMinioBucket(
    dataDir: string,
    resource: IPlatformResource,
    credentials: Record<string, string>
  ): Promise<void> {
    logger.info(`Exporting MinIO bucket: ${resource.resourceName}`);

    const bucketDir = `${dataDir}/${resource.resourceName}`;
    await Deno.mkdir(bucketDir, { recursive: true });

    // Credentials may use either the internal or the env-style key names
    const endpoint = credentials.endpoint || credentials.S3_ENDPOINT;
    const accessKey = credentials.accessKey || credentials.S3_ACCESS_KEY;
    const secretKey = credentials.secretKey || credentials.S3_SECRET_KEY;
    const bucket = credentials.bucket || credentials.S3_BUCKET;

    if (!endpoint || !accessKey || !secretKey || !bucket) {
      throw new Error('MinIO credentials incomplete');
    }

    const s3Client = new plugins.smartstorage.SmartStorage({
      endpoint,
      accessKey,
      secretKey,
      bucket,
    });

    await s3Client.start();

    const objects = await s3Client.listObjects();

    // Download each object sequentially and mirror its key as a file path.
    // NOTE(review): objectKey is joined into the path unchecked — a key
    // containing '..' would escape bucketDir; confirm keys are trusted or
    // sanitize here.
    for (const obj of objects) {
      const objectKey = obj.Key;
      if (!objectKey) continue;

      const objectData = await s3Client.getObject(objectKey);
      if (objectData) {
        const objectPath = `${bucketDir}/${objectKey}`;
        const parentDir = plugins.path.dirname(objectPath);
        await Deno.mkdir(parentDir, { recursive: true });
        await Deno.writeFile(objectPath, objectData);
      }
    }

    await s3Client.stop();

    // Record bucket name and object count alongside the data
    await Deno.writeTextFile(
      `${bucketDir}/_metadata.json`,
      JSON.stringify({ bucket, objectCount: objects.length }, null, 2)
    );

    logger.success(`MinIO bucket exported: ${resource.resourceName} (${objects.length} objects)`);
  }
|
|
|
|
  /**
   * Export a ClickHouse database via clickhouse-client inside the container.
   *
   * Produces `${resourceName}.sql` with CREATE DATABASE/TABLE statements and,
   * per table with rows, a `${resourceName}_${table}.tsv` data file
   * (TabSeparatedWithNames format).
   *
   * NOTE(review): dbName/table are interpolated directly into query strings
   * and --password is visible on the container's command line; values come
   * from onebox-managed credentials, but confirm they are never user-supplied.
   */
  private async exportClickHouseDatabase(
    dataDir: string,
    resource: IPlatformResource,
    credentials: Record<string, string>
  ): Promise<void> {
    logger.info(`Exporting ClickHouse database: ${resource.resourceName}`);

    const clickhouseService = this.oneboxRef.database.getPlatformServiceByType('clickhouse');
    if (!clickhouseService || !clickhouseService.containerId) {
      throw new Error('ClickHouse service not running');
    }

    // Credentials may use either the internal or the env-style key names
    const dbName = credentials.database || credentials.CLICKHOUSE_DB;
    const user = credentials.username || credentials.CLICKHOUSE_USER || 'default';
    const password = credentials.password || credentials.CLICKHOUSE_PASSWORD || '';

    if (!dbName) {
      throw new Error('ClickHouse database name not found in credentials');
    }

    // Enumerate the database's tables
    const tablesResult = await this.oneboxRef.docker.execInContainer(clickhouseService.containerId, [
      'clickhouse-client',
      `--user=${user}`,
      `--password=${password}`,
      '--query',
      `SELECT name FROM system.tables WHERE database = '${dbName}'`,
    ]);

    if (tablesResult.exitCode !== 0) {
      throw new Error(`Failed to list ClickHouse tables: ${tablesResult.stderr}`);
    }

    const tables = tablesResult.stdout.trim().split('\n').filter(Boolean);
    const dumpPath = `${dataDir}/${resource.resourceName}.sql`;
    let dumpContent = `-- ClickHouse backup for database: ${dbName}\n`;
    dumpContent += `-- Created: ${new Date().toISOString()}\n\n`;
    dumpContent += `CREATE DATABASE IF NOT EXISTS ${dbName};\n\n`;

    for (const table of tables) {
      // Schema: SHOW CREATE TABLE appended to the .sql dump
      const createResult = await this.oneboxRef.docker.execInContainer(clickhouseService.containerId, [
        'clickhouse-client',
        `--user=${user}`,
        `--password=${password}`,
        '--query',
        `SHOW CREATE TABLE ${dbName}.${table}`,
      ]);

      if (createResult.exitCode === 0) {
        dumpContent += `-- Table: ${table}\n`;
        dumpContent += createResult.stdout + ';\n\n';
      }

      // Data: full table scan to TSV; empty tables produce no file.
      // NOTE(review): whole table is buffered in memory via stdout — may be
      // heavy for large tables.
      const dataResult = await this.oneboxRef.docker.execInContainer(clickhouseService.containerId, [
        'clickhouse-client',
        `--user=${user}`,
        `--password=${password}`,
        '--query',
        `SELECT * FROM ${dbName}.${table} FORMAT TabSeparatedWithNames`,
      ]);

      if (dataResult.exitCode === 0 && dataResult.stdout.trim()) {
        const tableDataPath = `${dataDir}/${resource.resourceName}_${table}.tsv`;
        await Deno.writeTextFile(tableDataPath, dataResult.stdout);
      }
    }

    await Deno.writeTextFile(dumpPath, dumpContent);

    logger.success(`ClickHouse database exported: ${resource.resourceName} (${tables.length} tables)`);
  }
|
|
|
|
/**
|
|
* Export MariaDB database
|
|
*/
|
|
private async exportMariaDBDatabase(
|
|
dataDir: string,
|
|
resource: IPlatformResource,
|
|
credentials: Record<string, string>,
|
|
): Promise<void> {
|
|
logger.info(`Exporting MariaDB database: ${resource.resourceName}`);
|
|
|
|
const mariadbService = this.oneboxRef.database.getPlatformServiceByType('mariadb');
|
|
if (!mariadbService || !mariadbService.containerId) {
|
|
throw new Error('MariaDB service not running');
|
|
}
|
|
|
|
const dbName = credentials.database || resource.resourceName;
|
|
const user = credentials.username || 'root';
|
|
const password = credentials.password || '';
|
|
|
|
if (!dbName) {
|
|
throw new Error('MariaDB database name not found in credentials');
|
|
}
|
|
|
|
const result = await this.oneboxRef.docker.execInContainer(mariadbService.containerId, [
|
|
'mariadb-dump',
|
|
'-u', user,
|
|
`-p${password}`,
|
|
'--single-transaction',
|
|
'--routines',
|
|
'--triggers',
|
|
dbName,
|
|
]);
|
|
|
|
if (result.exitCode !== 0) {
|
|
throw new Error(`MariaDB dump failed: ${result.stderr.substring(0, 500)}`);
|
|
}
|
|
|
|
await Deno.writeTextFile(`${dataDir}/${resource.resourceName}.sql`, result.stdout);
|
|
logger.success(`MariaDB database exported: ${resource.resourceName}`);
|
|
}
|
|
|
|
/**
|
|
* Export Redis data
|
|
*/
|
|
private async exportRedisData(
|
|
dataDir: string,
|
|
resource: IPlatformResource,
|
|
credentials: Record<string, string>,
|
|
): Promise<void> {
|
|
logger.info(`Exporting Redis data: ${resource.resourceName}`);
|
|
|
|
const redisService = this.oneboxRef.database.getPlatformServiceByType('redis');
|
|
if (!redisService || !redisService.containerId) {
|
|
throw new Error('Redis service not running');
|
|
}
|
|
|
|
const password = credentials.password || '';
|
|
const dbIndex = credentials.db || '0';
|
|
|
|
await this.oneboxRef.docker.execInContainer(redisService.containerId, [
|
|
'redis-cli', '-a', password, 'BGSAVE',
|
|
]);
|
|
|
|
await new Promise((resolve) => setTimeout(resolve, 2000));
|
|
|
|
const keysResult = await this.oneboxRef.docker.execInContainer(redisService.containerId, [
|
|
'redis-cli', '-a', password, '-n', dbIndex, 'KEYS', '*',
|
|
]);
|
|
|
|
if (keysResult.exitCode !== 0) {
|
|
throw new Error(`Redis KEYS failed: ${keysResult.stderr.substring(0, 200)}`);
|
|
}
|
|
|
|
const keys = keysResult.stdout.trim().split('\n').filter(k => k.length > 0);
|
|
const exportData: Record<string, { type: string; value: string; ttl: number }> = {};
|
|
|
|
for (const key of keys) {
|
|
const typeResult = await this.oneboxRef.docker.execInContainer(redisService.containerId, [
|
|
'redis-cli', '-a', password, '-n', dbIndex, 'TYPE', key,
|
|
]);
|
|
const keyType = typeResult.stdout.trim();
|
|
|
|
const ttlResult = await this.oneboxRef.docker.execInContainer(redisService.containerId, [
|
|
'redis-cli', '-a', password, '-n', dbIndex, 'TTL', key,
|
|
]);
|
|
const ttl = parseInt(ttlResult.stdout.trim(), 10);
|
|
|
|
const dumpResult = await this.oneboxRef.docker.execInContainer(redisService.containerId, [
|
|
'redis-cli', '-a', password, '-n', dbIndex, '--no-auth-warning', 'DUMP', key,
|
|
]);
|
|
|
|
exportData[key] = {
|
|
type: keyType,
|
|
value: dumpResult.stdout,
|
|
ttl: ttl > 0 ? ttl : 0,
|
|
};
|
|
}
|
|
|
|
await Deno.writeTextFile(
|
|
`${dataDir}/${resource.resourceName}.json`,
|
|
JSON.stringify({ dbIndex, keys: exportData }, null, 2)
|
|
);
|
|
|
|
logger.success(`Redis data exported: ${resource.resourceName} (${keys.length} keys)`);
|
|
}
|
|
|
|
/**
|
|
* Export Docker image
|
|
*/
|
|
private async exportDockerImage(imageName: string, outputPath: string): Promise<void> {
|
|
logger.info(`Exporting Docker image: ${imageName}`);
|
|
|
|
const command = new Deno.Command('docker', {
|
|
args: ['save', '-o', outputPath, imageName],
|
|
});
|
|
|
|
const result = await command.output();
|
|
|
|
if (!result.success) {
|
|
const stderr = new TextDecoder().decode(result.stderr);
|
|
throw new Error(`docker save failed: ${stderr}`);
|
|
}
|
|
|
|
logger.success(`Docker image exported: ${imageName}`);
|
|
}
|
|
|
|
/**
|
|
* Import Docker image
|
|
*/
|
|
private async importDockerImage(imagePath: string): Promise<string> {
|
|
logger.info(`Importing Docker image from: ${imagePath}`);
|
|
|
|
const command = new Deno.Command('docker', {
|
|
args: ['load', '-i', imagePath],
|
|
});
|
|
|
|
const result = await command.output();
|
|
|
|
if (!result.success) {
|
|
const stderr = new TextDecoder().decode(result.stderr);
|
|
throw new Error(`docker load failed: ${stderr}`);
|
|
}
|
|
|
|
const stdout = new TextDecoder().decode(result.stdout);
|
|
const match = stdout.match(/Loaded image: (.+)/);
|
|
const imageName = match ? match[1].trim() : 'unknown';
|
|
|
|
logger.success(`Docker image imported: ${imageName}`);
|
|
return imageName;
|
|
}
|
|
|
|
/**
|
|
* Restore platform resources for a service
|
|
*/
|
|
private async restorePlatformResources(
|
|
serviceId: number,
|
|
backupResources: IBackupPlatformResource[],
|
|
tempDir: string,
|
|
warnings: string[]
|
|
): Promise<number> {
|
|
let restoredCount = 0;
|
|
|
|
const existingResources = await this.oneboxRef.platformServices.getResourcesForService(serviceId);
|
|
|
|
for (const backupResource of backupResources) {
|
|
try {
|
|
const existing = existingResources.find(
|
|
(e) =>
|
|
e.platformService.type === backupResource.platformServiceType &&
|
|
e.resource.resourceType === backupResource.resourceType
|
|
);
|
|
|
|
if (!existing) {
|
|
warnings.push(
|
|
`Platform resource ${backupResource.platformServiceType}/${backupResource.resourceName} not provisioned. Skipping data import.`
|
|
);
|
|
continue;
|
|
}
|
|
|
|
const dataDir = `${tempDir}/data/${backupResource.platformServiceType}`;
|
|
|
|
switch (backupResource.platformServiceType) {
|
|
case 'mongodb':
|
|
await this.importMongoDatabase(
|
|
dataDir,
|
|
existing.resource,
|
|
existing.credentials,
|
|
backupResource.resourceName
|
|
);
|
|
restoredCount++;
|
|
break;
|
|
case 'minio':
|
|
await this.importMinioBucket(
|
|
dataDir,
|
|
existing.resource,
|
|
existing.credentials,
|
|
backupResource.resourceName
|
|
);
|
|
restoredCount++;
|
|
break;
|
|
case 'clickhouse':
|
|
await this.importClickHouseDatabase(
|
|
dataDir,
|
|
existing.resource,
|
|
existing.credentials,
|
|
backupResource.resourceName
|
|
);
|
|
restoredCount++;
|
|
break;
|
|
case 'mariadb':
|
|
await this.importMariaDBDatabase(
|
|
dataDir,
|
|
existing.resource,
|
|
existing.credentials,
|
|
backupResource.resourceName
|
|
);
|
|
restoredCount++;
|
|
break;
|
|
case 'redis':
|
|
await this.importRedisData(
|
|
dataDir,
|
|
existing.resource,
|
|
existing.credentials,
|
|
backupResource.resourceName
|
|
);
|
|
restoredCount++;
|
|
break;
|
|
}
|
|
} catch (error) {
|
|
warnings.push(
|
|
`Failed to restore ${backupResource.platformServiceType} resource: ${getErrorMessage(error)}`
|
|
);
|
|
}
|
|
}
|
|
|
|
return restoredCount;
|
|
}
|
|
|
|
/**
|
|
* Import MongoDB database
|
|
*/
|
|
private async importMongoDatabase(
|
|
dataDir: string,
|
|
resource: IPlatformResource,
|
|
credentials: Record<string, string>,
|
|
backupResourceName: string
|
|
): Promise<void> {
|
|
logger.info(`Importing MongoDB database: ${resource.resourceName}`);
|
|
|
|
const mongoService = this.oneboxRef.database.getPlatformServiceById(resource.platformServiceId);
|
|
if (!mongoService || !mongoService.containerId) {
|
|
throw new Error('MongoDB service not running');
|
|
}
|
|
|
|
const archivePath = `${dataDir}/${backupResourceName}.archive`;
|
|
const connectionUri = credentials.connectionUri || credentials.MONGODB_URI;
|
|
|
|
if (!connectionUri) {
|
|
throw new Error('MongoDB connection URI not found');
|
|
}
|
|
|
|
const archiveData = await Deno.readFile(archivePath);
|
|
const containerArchivePath = `/tmp/${resource.resourceName}.archive`;
|
|
|
|
const base64Data = btoa(String.fromCharCode(...archiveData));
|
|
|
|
await this.oneboxRef.docker.execInContainer(mongoService.containerId, [
|
|
'bash',
|
|
'-c',
|
|
`echo '${base64Data}' | base64 -d > ${containerArchivePath}`,
|
|
]);
|
|
|
|
const result = await this.oneboxRef.docker.execInContainer(mongoService.containerId, [
|
|
'mongorestore',
|
|
`--uri=${connectionUri}`,
|
|
`--archive=${containerArchivePath}`,
|
|
'--gzip',
|
|
'--drop',
|
|
]);
|
|
|
|
if (result.exitCode !== 0) {
|
|
throw new Error(`mongorestore failed: ${result.stderr}`);
|
|
}
|
|
|
|
await this.oneboxRef.docker.execInContainer(mongoService.containerId, ['rm', containerArchivePath]);
|
|
|
|
logger.success(`MongoDB database imported: ${resource.resourceName}`);
|
|
}
|
|
|
|
/**
|
|
* Import MinIO bucket
|
|
*/
|
|
private async importMinioBucket(
|
|
dataDir: string,
|
|
resource: IPlatformResource,
|
|
credentials: Record<string, string>,
|
|
backupResourceName: string
|
|
): Promise<void> {
|
|
logger.info(`Importing MinIO bucket: ${resource.resourceName}`);
|
|
|
|
const bucketDir = `${dataDir}/${backupResourceName}`;
|
|
|
|
const endpoint = credentials.endpoint || credentials.S3_ENDPOINT;
|
|
const accessKey = credentials.accessKey || credentials.S3_ACCESS_KEY;
|
|
const secretKey = credentials.secretKey || credentials.S3_SECRET_KEY;
|
|
const bucket = credentials.bucket || credentials.S3_BUCKET;
|
|
|
|
if (!endpoint || !accessKey || !secretKey || !bucket) {
|
|
throw new Error('MinIO credentials incomplete');
|
|
}
|
|
|
|
const s3Client = new plugins.smartstorage.SmartStorage({
|
|
endpoint,
|
|
accessKey,
|
|
secretKey,
|
|
bucket,
|
|
});
|
|
|
|
await s3Client.start();
|
|
|
|
let uploadedCount = 0;
|
|
|
|
for await (const entry of Deno.readDir(bucketDir)) {
|
|
if (entry.name === '_metadata.json') continue;
|
|
|
|
const filePath = `${bucketDir}/${entry.name}`;
|
|
|
|
if (entry.isFile) {
|
|
const fileData = await Deno.readFile(filePath);
|
|
await s3Client.putObject(entry.name, fileData);
|
|
uploadedCount++;
|
|
}
|
|
}
|
|
|
|
await s3Client.stop();
|
|
|
|
logger.success(`MinIO bucket imported: ${resource.resourceName} (${uploadedCount} objects)`);
|
|
}
|
|
|
|
/**
|
|
* Import ClickHouse database
|
|
*/
|
|
private async importClickHouseDatabase(
|
|
dataDir: string,
|
|
resource: IPlatformResource,
|
|
credentials: Record<string, string>,
|
|
backupResourceName: string
|
|
): Promise<void> {
|
|
logger.info(`Importing ClickHouse database: ${resource.resourceName}`);
|
|
|
|
const clickhouseService = this.oneboxRef.database.getPlatformServiceByType('clickhouse');
|
|
if (!clickhouseService || !clickhouseService.containerId) {
|
|
throw new Error('ClickHouse service not running');
|
|
}
|
|
|
|
const dbName = credentials.database || credentials.CLICKHOUSE_DB;
|
|
const user = credentials.username || credentials.CLICKHOUSE_USER || 'default';
|
|
const password = credentials.password || credentials.CLICKHOUSE_PASSWORD || '';
|
|
|
|
if (!dbName) {
|
|
throw new Error('ClickHouse database name not found');
|
|
}
|
|
|
|
const sqlPath = `${dataDir}/${backupResourceName}.sql`;
|
|
const sqlContent = await Deno.readTextFile(sqlPath);
|
|
|
|
const statements = sqlContent.split(';').filter((s) => s.trim());
|
|
|
|
for (const statement of statements) {
|
|
if (statement.trim().startsWith('--')) continue;
|
|
|
|
const result = await this.oneboxRef.docker.execInContainer(clickhouseService.containerId, [
|
|
'clickhouse-client',
|
|
`--user=${user}`,
|
|
`--password=${password}`,
|
|
'--query',
|
|
statement.trim(),
|
|
]);
|
|
|
|
if (result.exitCode !== 0) {
|
|
logger.warn(`ClickHouse statement failed: ${result.stderr}`);
|
|
}
|
|
}
|
|
|
|
try {
|
|
for await (const entry of Deno.readDir(dataDir)) {
|
|
if (entry.name.endsWith('.tsv') && entry.name.startsWith(`${backupResourceName}_`)) {
|
|
const tableName = entry.name.replace(`${backupResourceName}_`, '').replace('.tsv', '');
|
|
const tsvPath = `${dataDir}/${entry.name}`;
|
|
const tsvContent = await Deno.readTextFile(tsvPath);
|
|
|
|
const lines = tsvContent.split('\n');
|
|
if (lines.length > 1) {
|
|
const result = await this.oneboxRef.docker.execInContainer(clickhouseService.containerId, [
|
|
'clickhouse-client',
|
|
`--user=${user}`,
|
|
`--password=${password}`,
|
|
'--query',
|
|
`INSERT INTO ${dbName}.${tableName} FORMAT TabSeparated`,
|
|
]);
|
|
|
|
if (result.exitCode !== 0) {
|
|
logger.warn(`ClickHouse data import failed for ${tableName}: ${result.stderr}`);
|
|
}
|
|
}
|
|
}
|
|
}
|
|
} catch {
|
|
// No TSV files found
|
|
}
|
|
|
|
logger.success(`ClickHouse database imported: ${resource.resourceName}`);
|
|
}
|
|
|
|
/**
|
|
* Import MariaDB database
|
|
*/
|
|
private async importMariaDBDatabase(
|
|
dataDir: string,
|
|
resource: IPlatformResource,
|
|
credentials: Record<string, string>,
|
|
backupResourceName: string,
|
|
): Promise<void> {
|
|
logger.info(`Importing MariaDB database: ${resource.resourceName}`);
|
|
|
|
const mariadbService = this.oneboxRef.database.getPlatformServiceByType('mariadb');
|
|
if (!mariadbService || !mariadbService.containerId) {
|
|
throw new Error('MariaDB service not running');
|
|
}
|
|
|
|
const dbName = credentials.database || resource.resourceName;
|
|
const user = credentials.username || 'root';
|
|
const password = credentials.password || '';
|
|
|
|
if (!dbName) {
|
|
throw new Error('MariaDB database name not found');
|
|
}
|
|
|
|
const sqlPath = `${dataDir}/${backupResourceName}.sql`;
|
|
let sqlContent: string;
|
|
try {
|
|
sqlContent = await Deno.readTextFile(sqlPath);
|
|
} catch {
|
|
logger.warn(`MariaDB dump file not found: ${sqlPath}`);
|
|
return;
|
|
}
|
|
|
|
if (!sqlContent.trim()) {
|
|
logger.warn(`MariaDB dump file is empty: ${sqlPath}`);
|
|
return;
|
|
}
|
|
|
|
const statements = sqlContent
|
|
.split(';\n')
|
|
.map(s => s.trim())
|
|
.filter(s => s.length > 0 && !s.startsWith('--') && !s.startsWith('/*'));
|
|
|
|
for (const statement of statements) {
|
|
const result = await this.oneboxRef.docker.execInContainer(mariadbService.containerId, [
|
|
'mariadb',
|
|
'-u', user,
|
|
`-p${password}`,
|
|
dbName,
|
|
'-e', statement + ';',
|
|
]);
|
|
|
|
if (result.exitCode !== 0) {
|
|
logger.warn(`MariaDB statement failed: ${result.stderr.substring(0, 200)}`);
|
|
}
|
|
}
|
|
|
|
logger.success(`MariaDB database imported: ${resource.resourceName}`);
|
|
}
|
|
|
|
/**
|
|
* Import Redis data
|
|
*/
|
|
private async importRedisData(
|
|
dataDir: string,
|
|
resource: IPlatformResource,
|
|
credentials: Record<string, string>,
|
|
backupResourceName: string,
|
|
): Promise<void> {
|
|
logger.info(`Importing Redis data: ${resource.resourceName}`);
|
|
|
|
const redisService = this.oneboxRef.database.getPlatformServiceByType('redis');
|
|
if (!redisService || !redisService.containerId) {
|
|
throw new Error('Redis service not running');
|
|
}
|
|
|
|
const password = credentials.password || '';
|
|
const dbIndex = credentials.db || '0';
|
|
|
|
const jsonPath = `${dataDir}/${backupResourceName}.json`;
|
|
let exportContent: string;
|
|
try {
|
|
exportContent = await Deno.readTextFile(jsonPath);
|
|
} catch {
|
|
logger.warn(`Redis export file not found: ${jsonPath}`);
|
|
return;
|
|
}
|
|
|
|
const exportData = JSON.parse(exportContent);
|
|
const keys = exportData.keys || {};
|
|
let importedCount = 0;
|
|
|
|
for (const [key, data] of Object.entries(keys) as Array<[string, { type: string; value: string; ttl: number }]>) {
|
|
const args = ['redis-cli', '-a', password, '-n', dbIndex, 'RESTORE', key, String(data.ttl * 1000), data.value, 'REPLACE'];
|
|
|
|
const result = await this.oneboxRef.docker.execInContainer(redisService.containerId, args);
|
|
|
|
if (result.exitCode !== 0) {
|
|
logger.warn(`Redis RESTORE failed for key '${key}': ${result.stderr.substring(0, 200)}`);
|
|
} else {
|
|
importedCount++;
|
|
}
|
|
}
|
|
|
|
logger.success(`Redis data imported: ${resource.resourceName} (${importedCount} keys)`);
|
|
}
|
|
|
|
// ========== Legacy Archive/Encryption Methods ==========
|
|
|
|
/**
|
|
* Create tar archive from directory
|
|
*/
|
|
private async createTarArchive(sourceDir: string, outputPath: string): Promise<void> {
|
|
const command = new Deno.Command('tar', {
|
|
args: ['-cf', outputPath, '-C', sourceDir, '.'],
|
|
});
|
|
|
|
const result = await command.output();
|
|
|
|
if (!result.success) {
|
|
const stderr = new TextDecoder().decode(result.stderr);
|
|
throw new Error(`tar create failed: ${stderr}`);
|
|
}
|
|
}
|
|
|
|
/**
|
|
* Extract tar archive to directory
|
|
*/
|
|
private async extractTarArchive(archivePath: string, outputDir: string): Promise<void> {
|
|
const command = new Deno.Command('tar', {
|
|
args: ['-xf', archivePath, '-C', outputDir],
|
|
});
|
|
|
|
const result = await command.output();
|
|
|
|
if (!result.success) {
|
|
const stderr = new TextDecoder().decode(result.stderr);
|
|
throw new Error(`tar extract failed: ${stderr}`);
|
|
}
|
|
}
|
|
|
|
/**
|
|
* Encrypt a file using AES-256-GCM
|
|
*/
|
|
private async encryptFile(inputPath: string, outputPath: string, password: string): Promise<void> {
|
|
const data = await Deno.readFile(inputPath);
|
|
|
|
const salt = crypto.getRandomValues(new Uint8Array(SALT_LENGTH));
|
|
const key = await this.deriveKey(password, salt);
|
|
|
|
const iv = crypto.getRandomValues(new Uint8Array(IV_LENGTH));
|
|
|
|
const ciphertext = await crypto.subtle.encrypt({ name: ENCRYPTION_ALGORITHM, iv }, key, data);
|
|
|
|
const combined = new Uint8Array(salt.length + iv.length + ciphertext.byteLength);
|
|
combined.set(salt, 0);
|
|
combined.set(iv, salt.length);
|
|
combined.set(new Uint8Array(ciphertext), salt.length + iv.length);
|
|
|
|
await Deno.writeFile(outputPath, combined);
|
|
}
|
|
|
|
/**
|
|
* Decrypt a file using AES-256-GCM
|
|
*/
|
|
private async decryptFile(inputPath: string, outputPath: string, password: string): Promise<void> {
|
|
const combined = await Deno.readFile(inputPath);
|
|
|
|
const salt = combined.slice(0, SALT_LENGTH);
|
|
const iv = combined.slice(SALT_LENGTH, SALT_LENGTH + IV_LENGTH);
|
|
const ciphertext = combined.slice(SALT_LENGTH + IV_LENGTH);
|
|
|
|
const key = await this.deriveKey(password, salt);
|
|
|
|
try {
|
|
const decrypted = await crypto.subtle.decrypt({ name: ENCRYPTION_ALGORITHM, iv }, key, ciphertext);
|
|
await Deno.writeFile(outputPath, new Uint8Array(decrypted));
|
|
} catch {
|
|
throw new Error('Decryption failed. Invalid backup password or corrupted file.');
|
|
}
|
|
}
|
|
|
|
/**
|
|
* Derive encryption key from password using PBKDF2
|
|
*/
|
|
private async deriveKey(password: string, salt: Uint8Array): Promise<CryptoKey> {
|
|
const encoder = new TextEncoder();
|
|
const passwordBytes = encoder.encode(password);
|
|
|
|
const baseKey = await crypto.subtle.importKey('raw', passwordBytes, 'PBKDF2', false, ['deriveKey']);
|
|
|
|
return await crypto.subtle.deriveKey(
|
|
{
|
|
name: 'PBKDF2',
|
|
salt,
|
|
iterations: PBKDF2_ITERATIONS,
|
|
hash: 'SHA-256',
|
|
},
|
|
baseKey,
|
|
{ name: ENCRYPTION_ALGORITHM, length: KEY_LENGTH },
|
|
false,
|
|
['encrypt', 'decrypt']
|
|
);
|
|
}
|
|
|
|
/**
|
|
* Compute SHA-256 checksum
|
|
*/
|
|
private async computeChecksum(data: Uint8Array): Promise<string> {
|
|
const hashBuffer = await crypto.subtle.digest('SHA-256', data);
|
|
const hashArray = new Uint8Array(hashBuffer);
|
|
return 'sha256:' + Array.from(hashArray).map((b) => b.toString(16).padStart(2, '0')).join('');
|
|
}
|
|
}
|