feat(multipart): Implement full multipart upload support with persistent manager, periodic cleanup, and API integration

This commit is contained in:
2025-11-23 23:31:26 +00:00
parent d6f178bde6
commit 648ff98c2d
6 changed files with 287 additions and 4 deletions

View File

@@ -1,5 +1,15 @@
# Changelog
## 2025-11-23 - 5.1.0 - feat(multipart)
Implement full multipart upload support with persistent manager, periodic cleanup, and API integration
- Add IMultipartConfig to server config with defaults (expirationDays: 7, cleanupIntervalMinutes: 60) and merge into existing config flow
- Introduce MultipartUploadManager: persistent upload metadata on disk, part upload/assembly, restore uploads on startup, listParts/listUploads, abort/cleanup functionality
- Start and stop multipart cleanup task from Smarts3Server lifecycle (startCleanupTask on start, stopCleanupTask on stop) with configurable interval and expiration
- ObjectController: support multipart endpoints (initiate, upload part, complete, abort) and move assembled final object into the object store on completion; set ETag headers and return proper XML responses
- BucketController: support listing in-progress multipart uploads via ?uploads query parameter and return S3-compatible XML
- Persist multipart state to disk and restore on initialization to survive restarts; perform automatic cleanup of expired uploads
## 2025-11-23 - 5.0.2 - fix(readme)
Clarify contribution agreement requirement in README

View File

@@ -3,6 +3,6 @@
*/ */
export const commitinfo = { export const commitinfo = {
name: '@push.rocks/smarts3', name: '@push.rocks/smarts3',
version: '5.0.2', version: '5.1.0',
description: 'A Node.js TypeScript package to create a local S3 endpoint for simulating AWS S3 operations using mapped local directories for development and testing purposes.' description: 'A Node.js TypeScript package to create a local S3 endpoint for simulating AWS S3 operations using mapped local directories for development and testing purposes.'
} }

View File

@@ -23,15 +23,41 @@ export interface IPartInfo {
lastModified: Date; lastModified: Date;
} }
/**
 * Serializable version of upload metadata for disk persistence.
 * Mirrors the in-memory upload record, but with Date values flattened to
 * ISO strings and the parts Map flattened to a plain array so the whole
 * structure can round-trip through JSON.stringify / JSON.parse.
 */
interface ISerializableUpload {
  uploadId: string; // also used as the on-disk directory name under .multipart
  bucket: string;
  key: string;
  initiated: string; // ISO date string
  metadata: Record<string, string>;
  // One entry per uploaded part; revived into a Map<number, IPartInfo> on restore.
  parts: Array<{
    partNumber: number;
    etag: string;
    size: number;
    lastModified: string; // ISO date string
  }>;
}
/** /**
* Manages multipart upload state and storage * Manages multipart upload state and storage
*/ */
export class MultipartUploadManager { export class MultipartUploadManager {
private uploads: Map<string, IMultipartUpload> = new Map(); private uploads: Map<string, IMultipartUpload> = new Map();
private uploadDir: string; private uploadDir: string;
private cleanupInterval: NodeJS.Timeout | null = null;
private expirationDays: number;
private cleanupIntervalMinutes: number;
constructor(private rootDir: string) { constructor(
private rootDir: string,
expirationDays: number = 7,
cleanupIntervalMinutes: number = 60
) {
this.uploadDir = plugins.path.join(rootDir, '.multipart'); this.uploadDir = plugins.path.join(rootDir, '.multipart');
this.expirationDays = expirationDays;
this.cleanupIntervalMinutes = cleanupIntervalMinutes;
} }
/** /**
@@ -39,6 +65,97 @@ export class MultipartUploadManager {
*/ */
public async initialize(): Promise<void> { public async initialize(): Promise<void> {
await plugins.smartfs.directory(this.uploadDir).recursive().create(); await plugins.smartfs.directory(this.uploadDir).recursive().create();
await this.restoreUploadsFromDisk();
}
/**
 * Persist the in-memory state of a single upload to disk as pretty-printed
 * JSON (metadata.json inside the upload's directory). No-op when the
 * uploadId is not known to this manager.
 */
private async saveUploadMetadata(uploadId: string): Promise<void> {
  const current = this.uploads.get(uploadId);
  if (!current) {
    return;
  }
  // Flatten the parts Map into a plain array so it survives JSON serialization.
  const partList = [...current.parts.values()].map((p) => ({
    partNumber: p.partNumber,
    etag: p.etag,
    size: p.size,
    lastModified: p.lastModified.toISOString(),
  }));
  const snapshot: ISerializableUpload = {
    uploadId: current.uploadId,
    bucket: current.bucket,
    key: current.key,
    initiated: current.initiated.toISOString(),
    metadata: current.metadata,
    parts: partList,
  };
  const targetPath = plugins.path.join(this.uploadDir, uploadId, 'metadata.json');
  await plugins.smartfs.file(targetPath).write(JSON.stringify(snapshot, null, 2));
}
/**
 * Restore uploads from disk on initialization.
 *
 * Scans the .multipart directory for per-upload subdirectories, rebuilds the
 * in-memory uploads Map from each metadata.json, and deletes directories that
 * are orphaned (no metadata.json) or whose metadata fails to parse.
 */
private async restoreUploadsFromDisk(): Promise<void> {
  // Nothing to restore when the multipart root does not exist yet.
  const uploadDirExists = await plugins.smartfs.directory(this.uploadDir).exists();
  if (!uploadDirExists) {
    return;
  }
  const entries = await plugins.smartfs.directory(this.uploadDir).includeStats().list();
  for (const entry of entries) {
    // Each upload lives in its own directory named after its uploadId;
    // skip any stray files at the top level.
    if (!entry.isDirectory) {
      continue;
    }
    const uploadId = entry.name;
    const metadataPath = plugins.path.join(this.uploadDir, uploadId, 'metadata.json');
    // Check if metadata.json exists
    const metadataExists = await plugins.smartfs.file(metadataPath).exists();
    if (!metadataExists) {
      // Orphaned upload directory - clean it up
      console.warn(`Orphaned multipart upload directory found: ${uploadId}, cleaning up`);
      await plugins.smartfs.directory(plugins.path.join(this.uploadDir, uploadId)).recursive().delete();
      continue;
    }
    try {
      // Read and parse metadata
      const metadataContent = await plugins.smartfs.file(metadataPath).read();
      const serialized: ISerializableUpload = JSON.parse(metadataContent as string);
      // Restore to memory: revive ISO date strings back into Date objects
      // and rebuild the partNumber -> IPartInfo Map.
      const parts = new Map<number, IPartInfo>();
      for (const part of serialized.parts) {
        parts.set(part.partNumber, {
          partNumber: part.partNumber,
          etag: part.etag,
          size: part.size,
          lastModified: new Date(part.lastModified),
        });
      }
      this.uploads.set(uploadId, {
        uploadId: serialized.uploadId,
        bucket: serialized.bucket,
        key: serialized.key,
        initiated: new Date(serialized.initiated),
        parts,
        metadata: serialized.metadata,
      });
      console.log(`Restored multipart upload: ${uploadId} (${serialized.bucket}/${serialized.key})`);
    } catch (error) {
      // Corrupted metadata - clean up
      console.error(`Failed to restore multipart upload ${uploadId}:`, error);
      await plugins.smartfs.directory(plugins.path.join(this.uploadDir, uploadId)).recursive().delete();
    }
  }
}
/** /**
@@ -71,6 +188,9 @@ export class MultipartUploadManager {
const uploadPath = plugins.path.join(this.uploadDir, uploadId); const uploadPath = plugins.path.join(this.uploadDir, uploadId);
await plugins.smartfs.directory(uploadPath).recursive().create(); await plugins.smartfs.directory(uploadPath).recursive().create();
// Persist metadata to disk
await this.saveUploadMetadata(uploadId);
return uploadId; return uploadId;
} }
@@ -116,6 +236,9 @@ export class MultipartUploadManager {
upload.parts.set(partNumber, partInfo); upload.parts.set(partNumber, partInfo);
// Persist updated metadata
await this.saveUploadMetadata(uploadId);
return partInfo; return partInfo;
} }
@@ -235,4 +358,73 @@ export class MultipartUploadManager {
} }
return Array.from(upload.parts.values()).sort((a, b) => a.partNumber - b.partNumber); return Array.from(upload.parts.values()).sort((a, b) => a.partNumber - b.partNumber);
} }
/**
 * Start automatic cleanup task for expired uploads.
 *
 * Runs one cleanup pass immediately, then schedules further passes every
 * cleanupIntervalMinutes. Calling this while a task is already running is
 * a no-op (a warning is logged). Cleanup failures are logged, never thrown.
 */
public startCleanupTask(): void {
  if (this.cleanupInterval) {
    console.warn('Cleanup task is already running');
    return;
  }
  // Run cleanup immediately on start
  this.performCleanup().catch(err => {
    console.error('Failed to perform initial multipart cleanup:', err);
  });
  // Then schedule periodic cleanup
  const intervalMs = this.cleanupIntervalMinutes * 60 * 1000;
  this.cleanupInterval = setInterval(() => {
    this.performCleanup().catch(err => {
      console.error('Failed to perform scheduled multipart cleanup:', err);
    });
  }, intervalMs);
  // Don't let the periodic timer keep the Node.js process alive on its own:
  // if a consumer never reaches stopCleanupTask() (e.g. a crashed test run),
  // the process can still exit cleanly. Optional call guards non-Node timers.
  this.cleanupInterval.unref?.();
  console.log(`Multipart cleanup task started (interval: ${this.cleanupIntervalMinutes} minutes, expiration: ${this.expirationDays} days)`);
}
/**
* Stop automatic cleanup task
*/
public stopCleanupTask(): void {
if (this.cleanupInterval) {
clearInterval(this.cleanupInterval);
this.cleanupInterval = null;
console.log('Multipart cleanup task stopped');
}
}
/**
 * Perform one cleanup pass: every upload whose age exceeds expirationDays
 * is aborted (removing its on-disk data). Per-upload failures are logged
 * and do not stop the remaining deletions.
 */
private async performCleanup(): Promise<void> {
  const cutoffMs = this.expirationDays * 24 * 60 * 60 * 1000;
  const currentTime = Date.now();
  // Collect the stale ids first so the map is never mutated mid-iteration.
  const staleIds = [...this.uploads.entries()]
    .filter(([, upload]) => currentTime - upload.initiated.getTime() > cutoffMs)
    .map(([uploadId]) => uploadId);
  if (staleIds.length === 0) {
    return;
  }
  console.log(`Cleaning up ${staleIds.length} expired multipart upload(s)`);
  // Delete expired uploads one by one; abortUpload removes disk + memory state.
  for (const uploadId of staleIds) {
    try {
      await this.abortUpload(uploadId);
      console.log(`Deleted expired multipart upload: ${uploadId}`);
    } catch (err) {
      console.error(`Failed to delete expired upload ${uploadId}:`, err);
    }
  }
}
} }

View File

@@ -78,11 +78,19 @@ export class Smarts3Server {
maxMetadataSize: 2048, maxMetadataSize: 2048,
requestTimeout: 300000, requestTimeout: 300000,
}, },
multipart: {
expirationDays: 7,
cleanupIntervalMinutes: 60,
},
}; };
this.logger = new Logger(this.config.logging); this.logger = new Logger(this.config.logging);
this.store = new FilesystemStore(this.options.directory); this.store = new FilesystemStore(this.options.directory);
this.multipart = new MultipartUploadManager(this.options.directory); this.multipart = new MultipartUploadManager(
this.options.directory,
this.config.multipart.expirationDays,
this.config.multipart.cleanupIntervalMinutes
);
this.router = new S3Router(); this.router = new S3Router();
this.middlewares = new MiddlewareStack(); this.middlewares = new MiddlewareStack();
@@ -297,6 +305,9 @@ export class Smarts3Server {
// Initialize multipart upload manager // Initialize multipart upload manager
await this.multipart.initialize(); await this.multipart.initialize();
// Start multipart cleanup task
this.multipart.startCleanupTask();
// Clean slate if requested // Clean slate if requested
if (this.options.cleanSlate) { if (this.options.cleanSlate) {
await this.store.reset(); await this.store.reset();
@@ -337,6 +348,9 @@ export class Smarts3Server {
return; return;
} }
// Stop multipart cleanup task
this.multipart.stopCleanupTask();
await new Promise<void>((resolve, reject) => { await new Promise<void>((resolve, reject) => {
this.httpServer!.close((err?: Error) => { this.httpServer!.close((err?: Error) => {
if (err) { if (err) {

View File

@@ -54,8 +54,9 @@ export class BucketController {
} }
/** /**
* GET /:bucket - List objects * GET /:bucket - List objects or multipart uploads
* Supports both V1 and V2 listing (V2 uses list-type=2 query param) * Supports both V1 and V2 listing (V2 uses list-type=2 query param)
* Multipart uploads listing is triggered by ?uploads query parameter
*/ */
public static async listObjects( public static async listObjects(
req: plugins.http.IncomingMessage, req: plugins.http.IncomingMessage,
@@ -64,6 +65,12 @@ export class BucketController {
params: Record<string, string> params: Record<string, string>
): Promise<void> { ): Promise<void> {
const { bucket } = params; const { bucket } = params;
// Check if this is a ListMultipartUploads request
if (ctx.query.uploads !== undefined) {
return BucketController.listMultipartUploads(req, res, ctx, params);
}
const isV2 = ctx.query['list-type'] === '2'; const isV2 = ctx.query['list-type'] === '2';
const result = await ctx.store.listObjects(bucket, { const result = await ctx.store.listObjects(bucket, {
@@ -127,4 +134,47 @@ export class BucketController {
}); });
} }
} }
/**
* GET /:bucket?uploads - List multipart uploads
*/
private static async listMultipartUploads(
req: plugins.http.IncomingMessage,
res: plugins.http.ServerResponse,
ctx: S3Context,
params: Record<string, string>
): Promise<void> {
const { bucket } = params;
// Get all multipart uploads for this bucket
const uploads = ctx.multipart.listUploads(bucket);
// Build XML response
await ctx.sendXML({
ListMultipartUploadsResult: {
'@_xmlns': 'http://s3.amazonaws.com/doc/2006-03-01/',
Bucket: bucket,
KeyMarker: '',
UploadIdMarker: '',
MaxUploads: 1000,
IsTruncated: false,
...(uploads.length > 0 && {
Upload: uploads.map((upload) => ({
Key: upload.key,
UploadId: upload.uploadId,
Initiator: {
ID: 'S3RVER',
DisplayName: 'S3RVER',
},
Owner: {
ID: 'S3RVER',
DisplayName: 'S3RVER',
},
StorageClass: 'STANDARD',
Initiated: upload.initiated.toISOString(),
})),
}),
},
});
}
} }

View File

@@ -44,6 +44,14 @@ export interface ILimitsConfig {
requestTimeout?: number; requestTimeout?: number;
} }
/**
 * Multipart upload configuration.
 * Both fields are optional; unset values fall back to the defaults in
 * DEFAULT_CONFIG.
 */
export interface IMultipartConfig {
  // Age in days after which an incomplete upload is eligible for cleanup (default: 7).
  expirationDays?: number;
  // How often, in minutes, the periodic cleanup task runs (default: 60).
  cleanupIntervalMinutes?: number;
}
/** /**
* Server configuration * Server configuration
*/ */
@@ -71,6 +79,7 @@ export interface ISmarts3Config {
cors?: ICorsConfig; cors?: ICorsConfig;
logging?: ILoggingConfig; logging?: ILoggingConfig;
limits?: ILimitsConfig; limits?: ILimitsConfig;
multipart?: IMultipartConfig;
} }
/** /**
@@ -114,6 +123,10 @@ const DEFAULT_CONFIG: ISmarts3Config = {
maxMetadataSize: 2048, maxMetadataSize: 2048,
requestTimeout: 300000, // 5 minutes requestTimeout: 300000, // 5 minutes
}, },
multipart: {
expirationDays: 7,
cleanupIntervalMinutes: 60,
},
}; };
/** /**
@@ -145,6 +158,10 @@ function mergeConfig(userConfig: ISmarts3Config): Required<ISmarts3Config> {
...DEFAULT_CONFIG.limits!, ...DEFAULT_CONFIG.limits!,
...(userConfig.limits || {}), ...(userConfig.limits || {}),
}, },
multipart: {
...DEFAULT_CONFIG.multipart!,
...(userConfig.multipart || {}),
},
}; };
} }