4 Commits

263e7a58b9  v3.2.0  (2025-11-23 22:41:46 +00:00)
    Some checks failed: Default (tags) / security (push) successful in 25s; test (push) failing after 35s; release (push) skipped; metadata (push) skipped
74b81d7ba8  feat(multipart): Add multipart upload support with MultipartUploadManager and controller integration  (2025-11-23 22:41:46 +00:00)
0d4837184f  v3.1.0  (2025-11-23 22:37:32 +00:00)
    Some checks failed: Default (tags) / security (push) successful in 38s; test (push) failing after 36s; release (push) skipped; metadata (push) skipped
7f3de92961  feat(logging): Add structured Logger and integrate into Smarts3Server; pass full config to server  (2025-11-23 22:37:32 +00:00)
9 changed files with 748 additions and 36 deletions


@@ -1,5 +1,22 @@
# Changelog
## 2025-11-23 - 3.2.0 - feat(multipart)
Add multipart upload support with MultipartUploadManager and controller integration
- Introduce MultipartUploadManager (ts/classes/multipart-manager.ts) to manage multipart upload lifecycle and store parts on disk
- Wire multipart manager into server and request context (S3Context, Smarts3Server) and initialize multipart storage on server start
- Add multipart-related routes and handlers in ObjectController: initiate (POST ?uploads), upload part (PUT ?partNumber&uploadId), complete (POST ?uploadId), and abort (DELETE ?uploadId)
- On complete, combine parts into final object and store via existing FilesystemStore workflow
- Expose multipart manager on Smarts3Server for controller access
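To illustrate the four routes listed above, here is a minimal client-side sketch using the AWS SDK v3 against a locally running instance. It is an assumption-laden example, not part of this release: the endpoint, port and S3RVER credentials mirror the server defaults visible further down in this diff, and real S3 additionally enforces a 5 MiB minimum part size that this local simulator does not appear to check.

import {
  S3Client,
  CreateMultipartUploadCommand,
  UploadPartCommand,
  CompleteMultipartUploadCommand,
  AbortMultipartUploadCommand,
} from '@aws-sdk/client-s3';

// Assumed endpoint and credentials (see the Smarts3Server defaults below); adjust as needed.
const s3 = new S3Client({
  endpoint: 'http://localhost:3000',
  region: 'us-east-1',
  forcePathStyle: true,
  credentials: { accessKeyId: 'S3RVER', secretAccessKey: 'S3RVER' },
});

async function uploadInTwoParts(bucket: string, key: string, partA: Uint8Array, partB: Uint8Array) {
  // POST /:bucket/:key?uploads -> InitiateMultipartUploadResult
  const { UploadId } = await s3.send(new CreateMultipartUploadCommand({ Bucket: bucket, Key: key }));
  try {
    // PUT /:bucket/:key?partNumber=N&uploadId=... -> one ETag per part
    const p1 = await s3.send(new UploadPartCommand({ Bucket: bucket, Key: key, UploadId, PartNumber: 1, Body: partA }));
    const p2 = await s3.send(new UploadPartCommand({ Bucket: bucket, Key: key, UploadId, PartNumber: 2, Body: partB }));
    // POST /:bucket/:key?uploadId=... with the part list -> CompleteMultipartUploadResult
    await s3.send(new CompleteMultipartUploadCommand({
      Bucket: bucket,
      Key: key,
      UploadId,
      MultipartUpload: {
        Parts: [
          { PartNumber: 1, ETag: p1.ETag },
          { PartNumber: 2, ETag: p2.ETag },
        ],
      },
    }));
  } catch (err) {
    // DELETE /:bucket/:key?uploadId=... -> abort and delete the staged parts
    await s3.send(new AbortMultipartUploadCommand({ Bucket: bucket, Key: key, UploadId }));
    throw err;
  }
}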
## 2025-11-23 - 3.1.0 - feat(logging)
Add structured Logger and integrate into Smarts3Server; pass full config to server
- Introduce a new Logger class (ts/classes/logger.ts) providing leveled logging (error, warn, info, debug), text/json formats and an enable flag.
- Integrate Logger into Smarts3Server: use structured logging for server lifecycle events, HTTP request/response logging and S3 errors instead of direct console usage.
- Smarts3 now passes the full merged configuration into Smarts3Server (config.logging can control logging behavior).
- Server start/stop messages and internal request/error logs are emitted via the Logger and respect the configured logging level/format and silent option.
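As a rough usage sketch (assuming Smarts3 is exported from the package entry point and that the instance exposes a matching stop method, which is not shown in this diff), the new logging behaviour can be driven entirely through the config object:

import { Smarts3 } from '@push.rocks/smarts3';

// The field names mirror the Required<ISmarts3Config> defaults in the Smarts3Server diff below.
const smarts3 = await Smarts3.createAndStart({
  server: { port: 3000, silent: false },
  storage: { cleanSlate: true },
  logging: { level: 'debug', format: 'json', enabled: true },
});

// ... exercise the local endpoint here ...

await smarts3.stop(); // assumed teardown method; adjust to the actual API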
## 2025-11-23 - 3.0.4 - fix(smarts3)
Use filesystem store for bucket creation and remove smartbucket runtime dependency

package.json

@@ -1,6 +1,6 @@
{
"name": "@push.rocks/smarts3",
"version": "3.0.4",
"version": "3.2.0",
"private": false,
"description": "A Node.js TypeScript package to create a local S3 endpoint for simulating AWS S3 operations using mapped local directories for development and testing purposes.",
"main": "dist_ts/index.js",


@@ -3,6 +3,6 @@
*/
export const commitinfo = {
name: '@push.rocks/smarts3',
version: '3.0.4',
version: '3.2.0',
description: 'A Node.js TypeScript package to create a local S3 endpoint for simulating AWS S3 operations using mapped local directories for development and testing purposes.'
}

ts/classes/context.ts

@@ -2,6 +2,7 @@ import * as plugins from '../plugins.js';
import { S3Error } from './s3-error.js';
import { createXml } from '../utils/xml.utils.js';
import type { FilesystemStore } from './filesystem-store.js';
import type { MultipartUploadManager } from './multipart-manager.js';
import type { Readable } from 'stream';
/**
@@ -14,6 +15,7 @@ export class S3Context {
public params: Record<string, string> = {};
public query: Record<string, string> = {};
public store: FilesystemStore;
public multipart: MultipartUploadManager;
private req: plugins.http.IncomingMessage;
private res: plugins.http.ServerResponse;
@@ -23,11 +25,13 @@ export class S3Context {
constructor(
req: plugins.http.IncomingMessage,
res: plugins.http.ServerResponse,
store: FilesystemStore
store: FilesystemStore,
multipart: MultipartUploadManager
) {
this.req = req;
this.res = res;
this.store = store;
this.multipart = multipart;
this.method = req.method || 'GET';
this.headers = req.headers;

ts/classes/logger.ts (new file, 130 lines)

@@ -0,0 +1,130 @@
import type { ILoggingConfig } from '../index.js';
/**
* Log levels in order of severity
*/
const LOG_LEVELS = {
error: 0,
warn: 1,
info: 2,
debug: 3,
} as const;
type LogLevel = keyof typeof LOG_LEVELS;
/**
* Structured logger with configurable levels and formats
*/
export class Logger {
private config: Required<ILoggingConfig>;
private minLevel: number;
constructor(config: ILoggingConfig) {
// Apply defaults for any missing config
this.config = {
level: config.level ?? 'info',
format: config.format ?? 'text',
enabled: config.enabled ?? true,
};
this.minLevel = LOG_LEVELS[this.config.level];
}
/**
* Check if a log level should be output
*/
private shouldLog(level: LogLevel): boolean {
if (!this.config.enabled) {
return false;
}
return LOG_LEVELS[level] <= this.minLevel;
}
/**
* Format a log message
*/
private format(level: LogLevel, message: string, meta?: Record<string, any>): string {
const timestamp = new Date().toISOString();
if (this.config.format === 'json') {
return JSON.stringify({
timestamp,
level,
message,
...(meta || {}),
});
}
// Text format
const metaStr = meta ? ` ${JSON.stringify(meta)}` : '';
return `[${timestamp}] ${level.toUpperCase()}: ${message}${metaStr}`;
}
/**
* Log at error level
*/
public error(message: string, meta?: Record<string, any>): void {
if (this.shouldLog('error')) {
console.error(this.format('error', message, meta));
}
}
/**
* Log at warn level
*/
public warn(message: string, meta?: Record<string, any>): void {
if (this.shouldLog('warn')) {
console.warn(this.format('warn', message, meta));
}
}
/**
* Log at info level
*/
public info(message: string, meta?: Record<string, any>): void {
if (this.shouldLog('info')) {
console.log(this.format('info', message, meta));
}
}
/**
* Log at debug level
*/
public debug(message: string, meta?: Record<string, any>): void {
if (this.shouldLog('debug')) {
console.log(this.format('debug', message, meta));
}
}
/**
* Log HTTP request
*/
public request(method: string, url: string, meta?: Record<string, any>): void {
this.info(`${method} ${url}`, meta);
}
/**
* Log HTTP response
*/
public response(method: string, url: string, statusCode: number, duration: number): void {
const level: LogLevel = statusCode >= 500 ? 'error' : statusCode >= 400 ? 'warn' : 'info';
if (this.shouldLog(level)) {
const message = `${method} ${url} - ${statusCode} (${duration}ms)`;
if (level === 'error') {
this.error(message, { statusCode, duration });
} else if (level === 'warn') {
this.warn(message, { statusCode, duration });
} else {
this.info(message, { statusCode, duration });
}
}
}
/**
* Log S3 error
*/
public s3Error(code: string, message: string, status: number): void {
this.error(`[S3Error] ${code}: ${message}`, { code, status });
}
}
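A minimal usage sketch for the class above (the import path assumes a sibling module in ts/classes, and the printed timestamps are illustrative):

import { Logger } from './logger.js';

// At 'info', debug() calls are filtered out while info/warn/error pass through.
const textLogger = new Logger({ level: 'info', format: 'text', enabled: true });
textLogger.info('S3 server listening', { port: 3000 });
// roughly: [2025-11-23T22:41:46.000Z] INFO: S3 server listening {"port":3000}
textLogger.debug('suppressed: below the configured level');

// response() picks the level from the status code (>=500 error, >=400 warn, otherwise info).
const jsonLogger = new Logger({ level: 'debug', format: 'json' });
jsonLogger.response('PUT', '/bucket/key', 404, 12);
// roughly: {"timestamp":"...","level":"warn","message":"PUT /bucket/key - 404 (12ms)","statusCode":404,"duration":12}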

ts/classes/multipart-manager.ts (new file, 238 lines)

@@ -0,0 +1,238 @@
import * as plugins from '../plugins.js';
import { Readable } from 'stream';
/**
* Multipart upload metadata
*/
export interface IMultipartUpload {
uploadId: string;
bucket: string;
key: string;
initiated: Date;
parts: Map<number, IPartInfo>;
metadata: Record<string, string>;
}
/**
* Part information
*/
export interface IPartInfo {
partNumber: number;
etag: string;
size: number;
lastModified: Date;
}
/**
* Manages multipart upload state and storage
*/
export class MultipartUploadManager {
private uploads: Map<string, IMultipartUpload> = new Map();
private uploadDir: string;
constructor(private rootDir: string) {
this.uploadDir = plugins.path.join(rootDir, '.multipart');
}
/**
* Initialize multipart uploads directory
*/
public async initialize(): Promise<void> {
await plugins.smartfs.directory(this.uploadDir).recursive().create();
}
/**
* Generate a unique upload ID
*/
private generateUploadId(): string {
return plugins.crypto.randomBytes(16).toString('hex');
}
/**
* Initiate a new multipart upload
*/
public async initiateUpload(
bucket: string,
key: string,
metadata: Record<string, string>
): Promise<string> {
const uploadId = this.generateUploadId();
this.uploads.set(uploadId, {
uploadId,
bucket,
key,
initiated: new Date(),
parts: new Map(),
metadata,
});
// Create directory for this upload's parts
const uploadPath = plugins.path.join(this.uploadDir, uploadId);
await plugins.smartfs.directory(uploadPath).recursive().create();
return uploadId;
}
/**
* Upload a part
*/
public async uploadPart(
uploadId: string,
partNumber: number,
stream: Readable
): Promise<IPartInfo> {
const upload = this.uploads.get(uploadId);
if (!upload) {
throw new Error('No such upload');
}
const partPath = plugins.path.join(this.uploadDir, uploadId, `part-${partNumber}`);
// Write part to disk
const webWriteStream = await plugins.smartfs.file(partPath).writeStream();
const writer = webWriteStream.getWriter();
let size = 0;
const hash = plugins.crypto.createHash('md5');
for await (const chunk of stream) {
const buffer = Buffer.isBuffer(chunk) ? chunk : Buffer.from(chunk);
await writer.write(new Uint8Array(buffer));
hash.update(buffer);
size += buffer.length;
}
await writer.close();
const etag = hash.digest('hex');
const partInfo: IPartInfo = {
partNumber,
etag,
size,
lastModified: new Date(),
};
upload.parts.set(partNumber, partInfo);
return partInfo;
}
/**
* Complete multipart upload - combine all parts
*/
public async completeUpload(
uploadId: string,
parts: Array<{ PartNumber: number; ETag: string }>
): Promise<{ etag: string; size: number }> {
const upload = this.uploads.get(uploadId);
if (!upload) {
throw new Error('No such upload');
}
// Verify all parts are uploaded
for (const part of parts) {
const uploadedPart = upload.parts.get(part.PartNumber);
if (!uploadedPart) {
throw new Error(`Part ${part.PartNumber} not uploaded`);
}
// Normalize ETag format (remove quotes if present)
const normalizedETag = part.ETag.replace(/"/g, '');
if (uploadedPart.etag !== normalizedETag) {
throw new Error(`Part ${part.PartNumber} ETag mismatch`);
}
}
// Sort parts by part number
const sortedParts = parts.sort((a, b) => a.PartNumber - b.PartNumber);
// Combine parts into final object
const finalPath = plugins.path.join(this.uploadDir, uploadId, 'final');
const webWriteStream = await plugins.smartfs.file(finalPath).writeStream();
const writer = webWriteStream.getWriter();
const hash = plugins.crypto.createHash('md5');
let totalSize = 0;
for (const part of sortedParts) {
const partPath = plugins.path.join(this.uploadDir, uploadId, `part-${part.PartNumber}`);
// Read part and write to final file
const partContent = await plugins.smartfs.file(partPath).read();
const buffer = Buffer.isBuffer(partContent) ? partContent : Buffer.from(partContent as string);
await writer.write(new Uint8Array(buffer));
hash.update(buffer);
totalSize += buffer.length;
}
await writer.close();
const etag = hash.digest('hex');
return { etag, size: totalSize };
}
/**
* Get the final combined file path
*/
public getFinalPath(uploadId: string): string {
return plugins.path.join(this.uploadDir, uploadId, 'final');
}
/**
* Get upload metadata
*/
public getUpload(uploadId: string): IMultipartUpload | undefined {
return this.uploads.get(uploadId);
}
/**
* Abort multipart upload - clean up parts
*/
public async abortUpload(uploadId: string): Promise<void> {
const upload = this.uploads.get(uploadId);
if (!upload) {
throw new Error('No such upload');
}
// Delete upload directory
const uploadPath = plugins.path.join(this.uploadDir, uploadId);
await plugins.smartfs.directory(uploadPath).recursive().delete();
// Remove from memory
this.uploads.delete(uploadId);
}
/**
* Clean up upload after completion
*/
public async cleanupUpload(uploadId: string): Promise<void> {
const uploadPath = plugins.path.join(this.uploadDir, uploadId);
await plugins.smartfs.directory(uploadPath).recursive().delete();
this.uploads.delete(uploadId);
}
/**
* List all in-progress uploads for a bucket
*/
public listUploads(bucket?: string): IMultipartUpload[] {
const uploads = Array.from(this.uploads.values());
if (bucket) {
return uploads.filter((u) => u.bucket === bucket);
}
return uploads;
}
/**
* List parts for an upload
*/
public listParts(uploadId: string): IPartInfo[] {
const upload = this.uploads.get(uploadId);
if (!upload) {
throw new Error('No such upload');
}
return Array.from(upload.parts.values()).sort((a, b) => a.partNumber - b.partNumber);
}
}
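A sketch of the lifecycle the ObjectController drives further down, using only the methods defined above; the root directory and payloads are illustrative:

import { Readable } from 'stream';
import { MultipartUploadManager } from './multipart-manager.js';

// Parts are staged under <rootDir>/.multipart/<uploadId>/part-N and concatenated on complete.
const manager = new MultipartUploadManager('.nogit/bucketsDir');
await manager.initialize();

const uploadId = await manager.initiateUpload('my-bucket', 'big/object.bin', {
  'content-type': 'application/octet-stream',
});

const part1 = await manager.uploadPart(uploadId, 1, Readable.from([Buffer.from('hello ')]));
const part2 = await manager.uploadPart(uploadId, 2, Readable.from([Buffer.from('world')]));

// The caller hands back the ETags it received for each part, as an S3 client would.
const { etag, size } = await manager.completeUpload(uploadId, [
  { PartNumber: 1, ETag: part1.etag },
  { PartNumber: 2, ETag: part2.etag },
]);
console.log(etag, size); // MD5 of the concatenated bytes, 11 bytes in this example

// The combined file sits at manager.getFinalPath(uploadId); the controller streams it into
// FilesystemStore.putObject and then removes the staging directory.
await manager.cleanupUpload(uploadId);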


@@ -4,9 +4,12 @@ import { MiddlewareStack } from './middleware-stack.js';
import { S3Context } from './context.js';
import { FilesystemStore } from './filesystem-store.js';
import { S3Error } from './s3-error.js';
import { Logger } from './logger.js';
import { MultipartUploadManager } from './multipart-manager.js';
import { ServiceController } from '../controllers/service.controller.js';
import { BucketController } from '../controllers/bucket.controller.js';
import { ObjectController } from '../controllers/object.controller.js';
import type { ISmarts3Config } from '../index.js';
export interface ISmarts3ServerOptions {
port?: number;
@@ -14,6 +17,7 @@ export interface ISmarts3ServerOptions {
directory?: string;
cleanSlate?: boolean;
silent?: boolean;
config?: Required<ISmarts3Config>;
}
/**
@@ -25,19 +29,60 @@ export class Smarts3Server {
private router: S3Router;
private middlewares: MiddlewareStack;
public store: FilesystemStore; // Made public for direct access from Smarts3 class
private options: Required<ISmarts3ServerOptions>;
public multipart: MultipartUploadManager; // Made public for controller access
private options: Required<Omit<ISmarts3ServerOptions, 'config'>>;
private config: Required<ISmarts3Config>;
private logger: Logger;
constructor(options: ISmarts3ServerOptions = {}) {
this.options = {
port: 3000,
address: '0.0.0.0',
directory: plugins.path.join(process.cwd(), '.nogit/bucketsDir'),
cleanSlate: false,
silent: false,
...options,
port: options.port ?? 3000,
address: options.address ?? '0.0.0.0',
directory: options.directory ?? plugins.path.join(process.cwd(), '.nogit/bucketsDir'),
cleanSlate: options.cleanSlate ?? false,
silent: options.silent ?? false,
};
// Store config for middleware and feature configuration
// If no config provided, create minimal default (for backward compatibility)
this.config = options.config ?? {
server: {
port: this.options.port,
address: this.options.address,
silent: this.options.silent,
},
storage: {
directory: this.options.directory,
cleanSlate: this.options.cleanSlate,
},
auth: {
enabled: false,
credentials: [{ accessKeyId: 'S3RVER', secretAccessKey: 'S3RVER' }],
},
cors: {
enabled: false,
allowedOrigins: ['*'],
allowedMethods: ['GET', 'POST', 'PUT', 'DELETE', 'HEAD', 'OPTIONS'],
allowedHeaders: ['*'],
exposedHeaders: ['ETag', 'x-amz-request-id', 'x-amz-version-id'],
maxAge: 86400,
allowCredentials: false,
},
logging: {
level: 'info',
format: 'text',
enabled: true,
},
limits: {
maxObjectSize: 5 * 1024 * 1024 * 1024,
maxMetadataSize: 2048,
requestTimeout: 300000,
},
};
this.logger = new Logger(this.config.logging);
this.store = new FilesystemStore(this.options.directory);
this.multipart = new MultipartUploadManager(this.options.directory);
this.router = new S3Router();
this.middlewares = new MiddlewareStack();
@@ -49,20 +94,118 @@ export class Smarts3Server {
* Setup middleware stack
*/
private setupMiddlewares(): void {
// Logger middleware
if (!this.options.silent) {
// CORS middleware (must be first to handle preflight requests)
if (this.config.cors.enabled) {
this.middlewares.use(async (req, res, ctx, next) => {
const start = Date.now();
console.log(`${req.method} ${req.url}`);
console.log(` Headers:`, JSON.stringify(req.headers, null, 2).slice(0, 200));
const origin = req.headers.origin || req.headers.referer;
// Check if origin is allowed
const allowedOrigins = this.config.cors.allowedOrigins || ['*'];
const isOriginAllowed =
allowedOrigins.includes('*') ||
(origin && allowedOrigins.includes(origin));
if (isOriginAllowed) {
// Set CORS headers
res.setHeader(
'Access-Control-Allow-Origin',
allowedOrigins.includes('*') ? '*' : origin || '*'
);
if (this.config.cors.allowCredentials) {
res.setHeader('Access-Control-Allow-Credentials', 'true');
}
// Handle preflight OPTIONS request
if (req.method === 'OPTIONS') {
res.setHeader(
'Access-Control-Allow-Methods',
(this.config.cors.allowedMethods || []).join(', ')
);
res.setHeader(
'Access-Control-Allow-Headers',
(this.config.cors.allowedHeaders || []).join(', ')
);
if (this.config.cors.maxAge) {
res.setHeader(
'Access-Control-Max-Age',
String(this.config.cors.maxAge)
);
}
res.writeHead(204);
res.end();
return; // Don't call next() for OPTIONS
}
// Set exposed headers for actual requests
if (this.config.cors.exposedHeaders && this.config.cors.exposedHeaders.length > 0) {
res.setHeader(
'Access-Control-Expose-Headers',
this.config.cors.exposedHeaders.join(', ')
);
}
}
await next();
const duration = Date.now() - start;
console.log(`${req.method} ${req.url} - ${res.statusCode} (${duration}ms)`);
});
}
// TODO: Add authentication middleware
// TODO: Add CORS middleware
// Authentication middleware (simple static credentials)
if (this.config.auth.enabled) {
this.middlewares.use(async (req, res, ctx, next) => {
const authHeader = req.headers.authorization;
// Extract access key from Authorization header
let accessKeyId: string | undefined;
if (authHeader) {
// Support multiple auth formats:
// 1. AWS accessKeyId:signature
// 2. AWS4-HMAC-SHA256 Credential=accessKeyId/date/region/service/aws4_request, ...
if (authHeader.startsWith('AWS ')) {
accessKeyId = authHeader.substring(4).split(':')[0];
} else if (authHeader.startsWith('AWS4-HMAC-SHA256')) {
const credentialMatch = authHeader.match(/Credential=([^/]+)\//);
accessKeyId = credentialMatch ? credentialMatch[1] : undefined;
}
}
// Check if access key is valid
const isValid = this.config.auth.credentials.some(
(cred) => cred.accessKeyId === accessKeyId
);
if (!isValid) {
ctx.throw('AccessDenied', 'Access Denied');
return;
}
await next();
});
}
// Logger middleware
if (!this.options.silent && this.config.logging.enabled) {
this.middlewares.use(async (req, res, ctx, next) => {
const start = Date.now();
// Log request
this.logger.request(req.method || 'UNKNOWN', req.url || '/', {
headers: req.headers,
});
await next();
// Log response
const duration = Date.now() - start;
this.logger.response(
req.method || 'UNKNOWN',
req.url || '/',
res.statusCode || 500,
duration
);
});
}
}
/**
@@ -80,6 +223,7 @@ export class Smarts3Server {
// Object level (/:bucket/:key*)
this.router.put('/:bucket/:key*', ObjectController.putObject);
this.router.post('/:bucket/:key*', ObjectController.postObject); // For multipart operations
this.router.get('/:bucket/:key*', ObjectController.getObject);
this.router.head('/:bucket/:key*', ObjectController.headObject);
this.router.delete('/:bucket/:key*', ObjectController.deleteObject);
@@ -92,7 +236,7 @@ export class Smarts3Server {
req: plugins.http.IncomingMessage,
res: plugins.http.ServerResponse
): Promise<void> {
const context = new S3Context(req, res, this.store);
const context = new S3Context(req, res, this.store, this.multipart);
try {
// Execute middleware stack
@@ -122,11 +266,14 @@ export class Smarts3Server {
): Promise<void> {
const s3Error = err instanceof S3Error ? err : S3Error.fromError(err);
if (!this.options.silent) {
console.error(`[S3Error] ${s3Error.code}: ${s3Error.message}`);
if (s3Error.status >= 500) {
console.error(err.stack || err);
}
// Log the error
this.logger.s3Error(s3Error.code, s3Error.message, s3Error.status);
// Log stack trace for server errors
if (s3Error.status >= 500) {
this.logger.debug('Error stack trace', {
stack: err.stack || err.toString(),
});
}
// Send error response
@@ -147,6 +294,9 @@ export class Smarts3Server {
// Initialize store
await this.store.initialize();
// Initialize multipart upload manager
await this.multipart.initialize();
// Clean slate if requested
if (this.options.cleanSlate) {
await this.store.reset();
@@ -155,7 +305,10 @@ export class Smarts3Server {
// Create HTTP server
this.httpServer = plugins.http.createServer((req, res) => {
this.handleRequest(req, res).catch((err) => {
console.error('Fatal error in request handler:', err);
this.logger.error('Fatal error in request handler', {
error: err.message,
stack: err.stack,
});
if (!res.headersSent) {
res.writeHead(500, { 'Content-Type': 'text/plain' });
res.end('Internal Server Error');
@@ -169,9 +322,7 @@ export class Smarts3Server {
if (err) {
reject(err);
} else {
if (!this.options.silent) {
console.log(`S3 server listening on ${this.options.address}:${this.options.port}`);
}
this.logger.info(`S3 server listening on ${this.options.address}:${this.options.port}`);
resolve();
}
});
@@ -191,9 +342,7 @@ export class Smarts3Server {
if (err) {
reject(err);
} else {
if (!this.options.silent) {
console.log('S3 server stopped');
}
this.logger.info('S3 server stopped');
resolve();
}
});
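To see the new CORS and auth middleware from the outside, here is a hedged test-style sketch; it assumes Smarts3 is exported from the package entry point, that mergeConfig fills in the remaining CORS defaults, and it uses node:http directly so the Origin header is sent verbatim:

import * as http from 'node:http';
import { Smarts3 } from '@push.rocks/smarts3';

const smarts3 = await Smarts3.createAndStart({
  server: { port: 3000 },
  cors: { enabled: true, allowedOrigins: ['http://localhost:8080'], allowCredentials: true },
  auth: { enabled: true, credentials: [{ accessKeyId: 'S3RVER', secretAccessKey: 'S3RVER' }] },
});

function request(method: string, path: string, headers: http.OutgoingHttpHeaders) {
  return new Promise<http.IncomingMessage>((resolve, reject) => {
    const req = http.request({ host: 'localhost', port: 3000, method, path, headers }, resolve);
    req.on('error', reject);
    req.end();
  });
}

// Preflight: the CORS middleware answers OPTIONS itself with 204 plus the
// Access-Control-* headers and never calls the rest of the stack.
const preflight = await request('OPTIONS', '/my-bucket/key.txt', { origin: 'http://localhost:8080' });
console.log(preflight.statusCode); // 204
console.log(preflight.headers['access-control-allow-origin']); // http://localhost:8080
preflight.resume();

// Actual request: with auth enabled, a recognizable Authorization header is required;
// the middleware only checks the access key id, not the signature.
const res = await request('GET', '/my-bucket', {
  origin: 'http://localhost:8080',
  authorization: 'AWS S3RVER:signature-not-checked',
});
res.resume();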

ts/controllers/object.controller.ts

@@ -6,7 +6,7 @@ import type { S3Context } from '../classes/context.js';
*/
export class ObjectController {
/**
* PUT /:bucket/:key* - Upload object or copy object
* PUT /:bucket/:key* - Upload object, copy object, or upload part
*/
public static async putObject(
req: plugins.http.IncomingMessage,
@@ -16,6 +16,11 @@ export class ObjectController {
): Promise<void> {
const { bucket, key } = params;
// Check if this is a multipart upload part
if (ctx.query.partNumber && ctx.query.uploadId) {
return ObjectController.uploadPart(req, res, ctx, params);
}
// Check if this is a COPY operation
const copySource = ctx.headers['x-amz-copy-source'] as string | undefined;
if (copySource) {
@@ -133,7 +138,7 @@ export class ObjectController {
}
/**
* DELETE /:bucket/:key* - Delete object
* DELETE /:bucket/:key* - Delete object or abort multipart upload
*/
public static async deleteObject(
req: plugins.http.IncomingMessage,
@@ -143,6 +148,11 @@ export class ObjectController {
): Promise<void> {
const { bucket, key } = params;
// Check if this is an abort multipart upload
if (ctx.query.uploadId) {
return ObjectController.abortMultipartUpload(req, res, ctx, params);
}
await ctx.store.deleteObject(bucket, key);
ctx.status(204).send('');
}
@@ -201,4 +211,168 @@ export class ObjectController {
},
});
}
/**
* POST /:bucket/:key* - Initiate or complete multipart upload
*/
public static async postObject(
req: plugins.http.IncomingMessage,
res: plugins.http.ServerResponse,
ctx: S3Context,
params: Record<string, string>
): Promise<void> {
// Check if this is initiate multipart upload
if (ctx.query.uploads !== undefined) {
return ObjectController.initiateMultipartUpload(req, res, ctx, params);
}
// Check if this is complete multipart upload
if (ctx.query.uploadId) {
return ObjectController.completeMultipartUpload(req, res, ctx, params);
}
ctx.throw('InvalidRequest', 'Invalid POST request');
}
/**
* Initiate Multipart Upload (POST with ?uploads)
*/
private static async initiateMultipartUpload(
req: plugins.http.IncomingMessage,
res: plugins.http.ServerResponse,
ctx: S3Context,
params: Record<string, string>
): Promise<void> {
const { bucket, key } = params;
// Extract metadata from headers
const metadata: Record<string, string> = {};
for (const [header, value] of Object.entries(ctx.headers)) {
if (header.startsWith('x-amz-meta-')) {
metadata[header] = value as string;
}
if (header === 'content-type' && value) {
metadata['content-type'] = value as string;
}
}
// Initiate upload
const uploadId = await ctx.multipart.initiateUpload(bucket, key, metadata);
// Send XML response
await ctx.sendXML({
InitiateMultipartUploadResult: {
Bucket: bucket,
Key: key,
UploadId: uploadId,
},
});
}
/**
* Upload Part (PUT with ?partNumber&uploadId)
*/
private static async uploadPart(
req: plugins.http.IncomingMessage,
res: plugins.http.ServerResponse,
ctx: S3Context,
params: Record<string, string>
): Promise<void> {
const uploadId = ctx.query.uploadId!;
const partNumber = parseInt(ctx.query.partNumber!);
if (isNaN(partNumber) || partNumber < 1 || partNumber > 10000) {
ctx.throw('InvalidPartNumber', 'Part number must be between 1 and 10000');
}
// Upload the part
const partInfo = await ctx.multipart.uploadPart(
uploadId,
partNumber,
ctx.getRequestStream() as any as import('stream').Readable
);
// Set ETag header (part ETag)
ctx.setHeader('ETag', `"${partInfo.etag}"`);
ctx.status(200).send('');
}
/**
* Complete Multipart Upload (POST with ?uploadId)
*/
private static async completeMultipartUpload(
req: plugins.http.IncomingMessage,
res: plugins.http.ServerResponse,
ctx: S3Context,
params: Record<string, string>
): Promise<void> {
const { bucket, key } = params;
const uploadId = ctx.query.uploadId!;
// Read and parse request body (XML with part list)
const body = await ctx.readBody();
// Parse XML to extract parts
// Expected format: <CompleteMultipartUpload><Part><PartNumber>1</PartNumber><ETag>"etag"</ETag></Part>...</CompleteMultipartUpload>
const partMatches = body.matchAll(/<Part>.*?<PartNumber>(\d+)<\/PartNumber>.*?<ETag>(.*?)<\/ETag>.*?<\/Part>/gs);
const parts: Array<{ PartNumber: number; ETag: string }> = [];
for (const match of partMatches) {
parts.push({
PartNumber: parseInt(match[1]),
ETag: match[2],
});
}
// Complete the upload
const result = await ctx.multipart.completeUpload(uploadId, parts);
// Get upload metadata
const upload = ctx.multipart.getUpload(uploadId);
if (!upload) {
ctx.throw('NoSuchUpload', 'The specified upload does not exist');
}
// Move final file to object store
const finalPath = ctx.multipart.getFinalPath(uploadId);
const finalContent = await plugins.smartfs.file(finalPath).read();
// Create a readable stream from the buffer
const { Readable } = await import('stream');
const finalReadableStream = Readable.from([finalContent]);
// Store the final object
await ctx.store.putObject(bucket, key, finalReadableStream, upload.metadata);
// Clean up multipart upload data
await ctx.multipart.cleanupUpload(uploadId);
// Send XML response
await ctx.sendXML({
CompleteMultipartUploadResult: {
Location: `/${bucket}/${key}`,
Bucket: bucket,
Key: key,
ETag: `"${result.etag}"`,
},
});
}
/**
* Abort Multipart Upload (DELETE with ?uploadId)
*/
private static async abortMultipartUpload(
req: plugins.http.IncomingMessage,
res: plugins.http.ServerResponse,
ctx: S3Context,
params: Record<string, string>
): Promise<void> {
const uploadId = ctx.query.uploadId!;
// Abort and cleanup
await ctx.multipart.abortUpload(uploadId);
ctx.status(204).send('');
}
}
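The completion handler above parses the client's part list with a regular expression rather than a full XML parser. A self-contained illustration of the body it expects and what the pattern extracts (the ETag values are made up):

// Illustrative only: a CompleteMultipartUpload body as a client would send it.
const body = `<CompleteMultipartUpload>
  <Part><PartNumber>1</PartNumber><ETag>"9b2cf535f27731c974343645a3985328"</ETag></Part>
  <Part><PartNumber>2</PartNumber><ETag>"e1c06d85ae7b8b032bef47e42e4c08f9"</ETag></Part>
</CompleteMultipartUpload>`;

// Same pattern as in completeMultipartUpload above.
const partMatches = body.matchAll(/<Part>.*?<PartNumber>(\d+)<\/PartNumber>.*?<ETag>(.*?)<\/ETag>.*?<\/Part>/gs);
const parts = Array.from(partMatches, (m) => ({ PartNumber: parseInt(m[1], 10), ETag: m[2] }));
// roughly: [ { PartNumber: 1, ETag: '"9b2c..."' }, { PartNumber: 2, ETag: '"e1c0..."' } ]
// MultipartUploadManager.completeUpload strips the surrounding quotes before comparing each
// ETag against the MD5 it recorded when the part was uploaded.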

ts/index.ts

@@ -153,7 +153,7 @@ function mergeConfig(userConfig: ISmarts3Config): Required<ISmarts3Config> {
*/
export class Smarts3 {
// STATIC
public static async createAndStart(configArg: ISmarts3Config = {}) {
public static async createAndStart(configArg: ISmarts3Config | ILegacySmarts3Config = {}) {
const smartS3Instance = new Smarts3(configArg);
await smartS3Instance.start();
return smartS3Instance;
@@ -163,7 +163,7 @@ export class Smarts3 {
public config: Required<ISmarts3Config>;
public s3Instance: Smarts3Server;
constructor(configArg: ISmarts3Config = {}) {
constructor(configArg: ISmarts3Config | ILegacySmarts3Config = {}) {
this.config = mergeConfig(configArg);
}