fix(smarts3): replace TypeScript server with Rust-powered core and IPC bridge
@@ -3,6 +3,6 @@
  */
 export const commitinfo = {
   name: '@push.rocks/smarts3',
-  version: '5.1.0',
+  version: '5.1.1',
   description: 'A Node.js TypeScript package to create a local S3 endpoint for simulating AWS S3 operations using mapped local directories for development and testing purposes.'
 }
@@ -1,118 +0,0 @@
import * as plugins from '../plugins.js';
import { S3Error } from './s3-error.js';
import { createXml } from '../utils/xml.utils.js';
import type { FilesystemStore } from './filesystem-store.js';
import type { MultipartUploadManager } from './multipart-manager.js';
import type { Readable } from 'stream';

/**
 * S3 request context with helper methods
 */
export class S3Context {
  public method: string;
  public url: URL;
  public headers: plugins.http.IncomingHttpHeaders;
  public params: Record<string, string> = {};
  public query: Record<string, string> = {};
  public store: FilesystemStore;
  public multipart: MultipartUploadManager;

  private req: plugins.http.IncomingMessage;
  private res: plugins.http.ServerResponse;
  private statusCode: number = 200;
  private responseHeaders: Record<string, string> = {};

  constructor(
    req: plugins.http.IncomingMessage,
    res: plugins.http.ServerResponse,
    store: FilesystemStore,
    multipart: MultipartUploadManager
  ) {
    this.req = req;
    this.res = res;
    this.store = store;
    this.multipart = multipart;
    this.method = req.method || 'GET';
    this.headers = req.headers;

    // Parse URL and query string
    const fullUrl = `http://${req.headers.host || 'localhost'}${req.url || '/'}`;
    this.url = new URL(fullUrl);

    // Parse query string into object
    this.url.searchParams.forEach((value, key) => {
      this.query[key] = value;
    });
  }

  /**
   * Set response status code
   */
  public status(code: number): this {
    this.statusCode = code;
    return this;
  }

  /**
   * Set response header
   */
  public setHeader(name: string, value: string | number): this {
    this.responseHeaders[name] = value.toString();
    return this;
  }

  /**
   * Send response body (string, Buffer, or Stream)
   */
  public async send(body: string | Buffer | Readable | NodeJS.ReadableStream): Promise<void> {
    // Write status and headers
    this.res.writeHead(this.statusCode, this.responseHeaders);

    // Handle different body types
    if (typeof body === 'string' || body instanceof Buffer) {
      this.res.end(body);
    } else if (body && typeof (body as any).pipe === 'function') {
      // It's a stream
      (body as Readable).pipe(this.res);
    } else {
      this.res.end();
    }
  }

  /**
   * Send XML response
   */
  public async sendXML(obj: any): Promise<void> {
    const xml = createXml(obj, { format: true });
    this.setHeader('Content-Type', 'application/xml');
    this.setHeader('Content-Length', Buffer.byteLength(xml));
    await this.send(xml);
  }

  /**
   * Throw an S3 error
   */
  public throw(code: string, message: string, detail?: Record<string, any>): never {
    throw new S3Error(code, message, detail);
  }

  /**
   * Read and parse request body as string
   */
  public async readBody(): Promise<string> {
    return new Promise((resolve, reject) => {
      const chunks: Buffer[] = [];

      this.req.on('data', (chunk) => chunks.push(chunk));
      this.req.on('end', () => resolve(Buffer.concat(chunks).toString('utf8')));
      this.req.on('error', reject);
    });
  }

  /**
   * Get the request stream (for streaming uploads)
   */
  public getRequestStream(): NodeJS.ReadableStream {
    return this.req;
  }
}
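For reference, a route handler built on this context would have looked roughly like the sketch below (hypothetical handler, not part of the deleted code; the signature follows the RouteHandler type defined in the router file further down):

// Hypothetical HEAD-bucket handler using the S3Context helpers (sketch only).
const headBucket: RouteHandler = async (req, res, ctx, params) => {
  if (!(await ctx.store.bucketExists(params.bucket))) {
    ctx.throw('NoSuchBucket', 'The specified bucket does not exist');
  }
  // Chainable helpers: set status and a header, then send an empty body.
  ctx.status(200).setHeader('x-amz-bucket-region', 'us-east-1');
  await ctx.send('');
};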
@@ -1,562 +0,0 @@
import * as plugins from '../plugins.js';
import { S3Error } from './s3-error.js';
import { Readable, Transform } from 'stream';

export interface IS3Bucket {
  name: string;
  creationDate: Date;
}

export interface IS3Object {
  key: string;
  size: number;
  lastModified: Date;
  md5: string;
  metadata: Record<string, string>;
  content?: Readable;
}

export interface IListObjectsOptions {
  prefix?: string;
  delimiter?: string;
  maxKeys?: number;
  continuationToken?: string;
}

export interface IListObjectsResult {
  contents: IS3Object[];
  commonPrefixes: string[];
  isTruncated: boolean;
  nextContinuationToken?: string;
  prefix: string;
  delimiter: string;
  maxKeys: number;
}

export interface IRangeOptions {
  start: number;
  end: number;
}

/**
 * Filesystem-backed storage for S3 objects using smartfs
 */
export class FilesystemStore {
  constructor(private rootDir: string) {}

  /**
   * Initialize store (ensure root directory exists)
   */
  public async initialize(): Promise<void> {
    await plugins.smartfs.directory(this.rootDir).recursive().create();
  }

  /**
   * Reset store (delete all buckets)
   */
  public async reset(): Promise<void> {
    // Delete directory and recreate it
    const exists = await plugins.smartfs.directory(this.rootDir).exists();
    if (exists) {
      await plugins.smartfs.directory(this.rootDir).recursive().delete();
    }
    await plugins.smartfs.directory(this.rootDir).recursive().create();
  }

  // ============================
  // BUCKET OPERATIONS
  // ============================

  /**
   * List all buckets
   */
  public async listBuckets(): Promise<IS3Bucket[]> {
    const entries = await plugins.smartfs.directory(this.rootDir).includeStats().list();
    const buckets: IS3Bucket[] = [];

    for (const entry of entries) {
      if (entry.isDirectory && entry.stats) {
        buckets.push({
          name: entry.name,
          creationDate: entry.stats.birthtime,
        });
      }
    }

    return buckets.sort((a, b) => a.name.localeCompare(b.name));
  }

  /**
   * Check if bucket exists
   */
  public async bucketExists(bucket: string): Promise<boolean> {
    const bucketPath = this.getBucketPath(bucket);
    return plugins.smartfs.directory(bucketPath).exists();
  }

  /**
   * Create bucket
   */
  public async createBucket(bucket: string): Promise<void> {
    const bucketPath = this.getBucketPath(bucket);
    await plugins.smartfs.directory(bucketPath).recursive().create();
  }

  /**
   * Delete bucket (must be empty)
   */
  public async deleteBucket(bucket: string): Promise<void> {
    const bucketPath = this.getBucketPath(bucket);

    // Check if bucket exists
    if (!(await this.bucketExists(bucket))) {
      throw new S3Error('NoSuchBucket', 'The specified bucket does not exist');
    }

    // Check if bucket is empty
    const files = await plugins.smartfs.directory(bucketPath).recursive().list();
    if (files.length > 0) {
      throw new S3Error('BucketNotEmpty', 'The bucket you tried to delete is not empty');
    }

    await plugins.smartfs.directory(bucketPath).recursive().delete();
  }

  // ============================
  // OBJECT OPERATIONS
  // ============================

  /**
   * List objects in bucket
   */
  public async listObjects(
    bucket: string,
    options: IListObjectsOptions = {}
  ): Promise<IListObjectsResult> {
    const bucketPath = this.getBucketPath(bucket);

    if (!(await this.bucketExists(bucket))) {
      throw new S3Error('NoSuchBucket', 'The specified bucket does not exist');
    }

    const {
      prefix = '',
      delimiter = '',
      maxKeys = 1000,
      continuationToken,
    } = options;

    // List all object files recursively with filter
    const entries = await plugins.smartfs
      .directory(bucketPath)
      .recursive()
      .filter((entry) => entry.name.endsWith('._S3_object'))
      .list();

    // Convert file paths to keys
    let keys = entries.map((entry) => {
      const relativePath = plugins.path.relative(bucketPath, entry.path);
      const key = this.decodeKey(relativePath.replace(/\._S3_object$/, ''));
      return key;
    });

    // Apply prefix filter
    if (prefix) {
      keys = keys.filter((key) => key.startsWith(prefix));
    }

    // Sort keys
    keys = keys.sort();

    // Handle continuation token (simple implementation using key name)
    if (continuationToken) {
      const startIndex = keys.findIndex((key) => key > continuationToken);
      // findIndex returns -1 when no key sorts after the token,
      // which means the listing is already exhausted
      keys = startIndex === -1 ? [] : keys.slice(startIndex);
    }

    // Handle delimiter (common prefixes)
    const commonPrefixes: Set<string> = new Set();
    const contents: IS3Object[] = [];

    for (const key of keys) {
      if (delimiter) {
        // Find first delimiter after prefix
        const remainingKey = key.slice(prefix.length);
        const delimiterIndex = remainingKey.indexOf(delimiter);

        if (delimiterIndex !== -1) {
          // This key has a delimiter, add to common prefixes
          const commonPrefix = prefix + remainingKey.slice(0, delimiterIndex + delimiter.length);
          commonPrefixes.add(commonPrefix);
          continue;
        }
      }

      // Add to contents (limited by maxKeys)
      if (contents.length >= maxKeys) {
        break;
      }

      try {
        const objectInfo = await this.getObjectInfo(bucket, key);
        contents.push(objectInfo);
      } catch (err) {
        // Skip if object no longer exists
        continue;
      }
    }

    const isTruncated = keys.length > contents.length + commonPrefixes.size;
    const nextContinuationToken = isTruncated
      ? contents[contents.length - 1]?.key
      : undefined;

    return {
      contents,
      commonPrefixes: Array.from(commonPrefixes).sort(),
      isTruncated,
      nextContinuationToken,
      prefix,
      delimiter,
      maxKeys,
    };
  }

  /**
   * Get object info (without content)
   */
  private async getObjectInfo(bucket: string, key: string): Promise<IS3Object> {
    const objectPath = this.getObjectPath(bucket, key);
    const metadataPath = `${objectPath}.metadata.json`;
    const md5Path = `${objectPath}.md5`;

    const [stats, metadata, md5] = await Promise.all([
      plugins.smartfs.file(objectPath).stat(),
      this.readMetadata(metadataPath),
      this.readMD5(objectPath, md5Path),
    ]);

    return {
      key,
      size: stats.size,
      lastModified: stats.mtime,
      md5,
      metadata,
    };
  }

  /**
   * Check if object exists
   */
  public async objectExists(bucket: string, key: string): Promise<boolean> {
    const objectPath = this.getObjectPath(bucket, key);
    return plugins.smartfs.file(objectPath).exists();
  }

  /**
   * Put object (upload with streaming)
   */
  public async putObject(
    bucket: string,
    key: string,
    stream: NodeJS.ReadableStream,
    metadata: Record<string, string> = {}
  ): Promise<{ size: number; md5: string }> {
    const objectPath = this.getObjectPath(bucket, key);

    // Ensure bucket exists
    if (!(await this.bucketExists(bucket))) {
      throw new S3Error('NoSuchBucket', 'The specified bucket does not exist');
    }

    // Ensure parent directory exists
    const parentDir = plugins.path.dirname(objectPath);
    await plugins.smartfs.directory(parentDir).recursive().create();

    // Write with MD5 calculation
    const result = await this.writeStreamWithMD5(stream, objectPath);

    // Save metadata
    const metadataPath = `${objectPath}.metadata.json`;
    await plugins.smartfs.file(metadataPath).write(JSON.stringify(metadata, null, 2));

    return result;
  }

  /**
   * Get object (download with streaming)
   */
  public async getObject(
    bucket: string,
    key: string,
    range?: IRangeOptions
  ): Promise<IS3Object> {
    const objectPath = this.getObjectPath(bucket, key);

    if (!(await this.objectExists(bucket, key))) {
      throw new S3Error('NoSuchKey', 'The specified key does not exist');
    }

    const info = await this.getObjectInfo(bucket, key);

    // Get Web ReadableStream from smartfs
    const webStream = await plugins.smartfs.file(objectPath).readStream();

    // Convert Web Stream to Node.js Readable stream
    let nodeStream = Readable.fromWeb(webStream as any);

    // Handle range requests if needed
    if (range) {
      // For range requests, we need to skip bytes and limit output
      let bytesRead = 0;
      const rangeStart = range.start;
      const rangeEnd = range.end;

      nodeStream = nodeStream.pipe(new Transform({
        transform(chunk: Buffer, encoding, callback) {
          const chunkStart = bytesRead;
          const chunkEnd = bytesRead + chunk.length - 1;
          bytesRead += chunk.length;

          // Skip chunks before range
          if (chunkEnd < rangeStart) {
            callback();
            return;
          }

          // Stop after range
          if (chunkStart > rangeEnd) {
            this.end();
            callback();
            return;
          }

          // Slice chunk to fit range
          const sliceStart = Math.max(0, rangeStart - chunkStart);
          const sliceEnd = Math.min(chunk.length, rangeEnd - chunkStart + 1);

          callback(null, chunk.slice(sliceStart, sliceEnd));
        }
      }));
    }

    return {
      ...info,
      content: nodeStream,
    };
  }

  /**
   * Delete object
   */
  public async deleteObject(bucket: string, key: string): Promise<void> {
    const objectPath = this.getObjectPath(bucket, key);
    const metadataPath = `${objectPath}.metadata.json`;
    const md5Path = `${objectPath}.md5`;

    // S3 doesn't throw an error if the object doesn't exist
    await Promise.all([
      plugins.smartfs.file(objectPath).delete().catch(() => {}),
      plugins.smartfs.file(metadataPath).delete().catch(() => {}),
      plugins.smartfs.file(md5Path).delete().catch(() => {}),
    ]);
  }

  /**
   * Copy object
   */
  public async copyObject(
    srcBucket: string,
    srcKey: string,
    destBucket: string,
    destKey: string,
    metadataDirective: 'COPY' | 'REPLACE' = 'COPY',
    newMetadata?: Record<string, string>
  ): Promise<{ size: number; md5: string }> {
    const srcObjectPath = this.getObjectPath(srcBucket, srcKey);
    const destObjectPath = this.getObjectPath(destBucket, destKey);

    // Check source exists
    if (!(await this.objectExists(srcBucket, srcKey))) {
      throw new S3Error('NoSuchKey', 'The specified key does not exist');
    }

    // Ensure dest bucket exists
    if (!(await this.bucketExists(destBucket))) {
      throw new S3Error('NoSuchBucket', 'The specified bucket does not exist');
    }

    // Ensure parent directory exists
    const parentDir = plugins.path.dirname(destObjectPath);
    await plugins.smartfs.directory(parentDir).recursive().create();

    // Copy object file
    await plugins.smartfs.file(srcObjectPath).copy(destObjectPath);

    // Handle metadata
    if (metadataDirective === 'COPY') {
      // Copy metadata
      const srcMetadataPath = `${srcObjectPath}.metadata.json`;
      const destMetadataPath = `${destObjectPath}.metadata.json`;
      await plugins.smartfs.file(srcMetadataPath).copy(destMetadataPath).catch(() => {});
    } else if (newMetadata) {
      // Replace with new metadata
      const destMetadataPath = `${destObjectPath}.metadata.json`;
      await plugins.smartfs.file(destMetadataPath).write(JSON.stringify(newMetadata, null, 2));
    }

    // Copy MD5
    const srcMD5Path = `${srcObjectPath}.md5`;
    const destMD5Path = `${destObjectPath}.md5`;
    await plugins.smartfs.file(srcMD5Path).copy(destMD5Path).catch(() => {});

    // Get result info
    const stats = await plugins.smartfs.file(destObjectPath).stat();
    const md5 = await this.readMD5(destObjectPath, destMD5Path);

    return { size: stats.size, md5 };
  }

  // ============================
  // HELPER METHODS
  // ============================

  /**
   * Get bucket directory path
   */
  private getBucketPath(bucket: string): string {
    return plugins.path.join(this.rootDir, bucket);
  }

  /**
   * Get object file path
   */
  private getObjectPath(bucket: string, key: string): string {
    return plugins.path.join(
      this.rootDir,
      bucket,
      this.encodeKey(key) + '._S3_object'
    );
  }

  /**
   * Encode key for Windows compatibility
   */
  private encodeKey(key: string): string {
    if (process.platform === 'win32') {
      // Replace invalid Windows filename chars with hex encoding
      return key.replace(/[<>:"\\|?*]/g, (ch) =>
        '&' + Buffer.from(ch, 'utf8').toString('hex')
      );
    }
    return key;
  }

  /**
   * Decode key from filesystem path
   */
  private decodeKey(encodedKey: string): string {
    if (process.platform === 'win32') {
      // Decode hex-encoded chars
      return encodedKey.replace(/&([0-9a-f]{2})/gi, (_, hex) =>
        Buffer.from(hex, 'hex').toString('utf8')
      );
    }
    return encodedKey;
  }

  /**
   * Write stream to file with MD5 calculation
   */
  private async writeStreamWithMD5(
    input: NodeJS.ReadableStream,
    destPath: string
  ): Promise<{ size: number; md5: string }> {
    const hash = plugins.crypto.createHash('md5');
    let totalSize = 0;

    // Get Web WritableStream from smartfs
    const webWriteStream = await plugins.smartfs.file(destPath).writeStream();
    const writer = webWriteStream.getWriter();

    try {
      // Iterate the Node.js stream; awaiting each write applies backpressure
      for await (const chunk of input) {
        const buffer = Buffer.isBuffer(chunk) ? chunk : Buffer.from(chunk);
        hash.update(buffer);
        totalSize += buffer.length;
        await writer.write(new Uint8Array(buffer));
      }
      await writer.close();
    } catch (err) {
      await writer.abort(err).catch(() => {});
      throw err;
    }

    const md5 = hash.digest('hex');

    // Save MD5 to separate file
    const md5Path = `${destPath}.md5`;
    await plugins.smartfs.file(md5Path).write(md5);

    return { size: totalSize, md5 };
  }

  /**
   * Read MD5 hash (calculate if missing)
   */
  private async readMD5(objectPath: string, md5Path: string): Promise<string> {
    try {
      // Try to read cached MD5
      const md5 = await plugins.smartfs.file(md5Path).encoding('utf8').read() as string;
      return md5.trim();
    } catch (err) {
      // Calculate MD5 if not cached
      const hash = plugins.crypto.createHash('md5');
      const webStream = await plugins.smartfs.file(objectPath).readStream();
      const nodeStream = Readable.fromWeb(webStream as any);

      for await (const chunk of nodeStream) {
        hash.update(chunk as Buffer);
      }

      const md5 = hash.digest('hex');
      // Cache it
      await plugins.smartfs.file(md5Path).write(md5);
      return md5;
    }
  }

  /**
   * Read metadata from JSON file
   */
  private async readMetadata(metadataPath: string): Promise<Record<string, string>> {
    try {
      const content = await plugins.smartfs.file(metadataPath).encoding('utf8').read() as string;
      return JSON.parse(content);
    } catch (err) {
      return {};
    }
  }
}
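A minimal round trip against the store, sketched under the layout above (bucket directory, `._S3_object` payload, `.metadata.json` and `.md5` sidecar files); paths are examples and the calls run inside an async context:

import { Readable } from 'stream';

const store = new FilesystemStore('.nogit/bucketsDir');
await store.initialize();
await store.createBucket('demo');

// putObject streams the body to disk and returns size plus content MD5.
const { size, md5 } = await store.putObject(
  'demo',
  'folder/hello.txt',
  Readable.from([Buffer.from('hello world')]),
  { 'content-type': 'text/plain' }
);

// getObject returns the same MD5 plus a Node.js Readable in `content`.
const obj = await store.getObject('demo', 'folder/hello.txt');
// obj.md5 === md5; obj.size === size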
@@ -1,130 +0,0 @@
import type { ILoggingConfig } from '../index.js';

/**
 * Log levels in order of severity
 */
const LOG_LEVELS = {
  error: 0,
  warn: 1,
  info: 2,
  debug: 3,
} as const;

type LogLevel = keyof typeof LOG_LEVELS;

/**
 * Structured logger with configurable levels and formats
 */
export class Logger {
  private config: Required<ILoggingConfig>;
  private minLevel: number;

  constructor(config: ILoggingConfig) {
    // Apply defaults for any missing config
    this.config = {
      level: config.level ?? 'info',
      format: config.format ?? 'text',
      enabled: config.enabled ?? true,
    };
    this.minLevel = LOG_LEVELS[this.config.level];
  }

  /**
   * Check if a log level should be output
   */
  private shouldLog(level: LogLevel): boolean {
    if (!this.config.enabled) {
      return false;
    }
    return LOG_LEVELS[level] <= this.minLevel;
  }

  /**
   * Format a log message
   */
  private format(level: LogLevel, message: string, meta?: Record<string, any>): string {
    const timestamp = new Date().toISOString();

    if (this.config.format === 'json') {
      return JSON.stringify({
        timestamp,
        level,
        message,
        ...(meta || {}),
      });
    }

    // Text format
    const metaStr = meta ? ` ${JSON.stringify(meta)}` : '';
    return `[${timestamp}] ${level.toUpperCase()}: ${message}${metaStr}`;
  }

  /**
   * Log at error level
   */
  public error(message: string, meta?: Record<string, any>): void {
    if (this.shouldLog('error')) {
      console.error(this.format('error', message, meta));
    }
  }

  /**
   * Log at warn level
   */
  public warn(message: string, meta?: Record<string, any>): void {
    if (this.shouldLog('warn')) {
      console.warn(this.format('warn', message, meta));
    }
  }

  /**
   * Log at info level
   */
  public info(message: string, meta?: Record<string, any>): void {
    if (this.shouldLog('info')) {
      console.log(this.format('info', message, meta));
    }
  }

  /**
   * Log at debug level
   */
  public debug(message: string, meta?: Record<string, any>): void {
    if (this.shouldLog('debug')) {
      console.log(this.format('debug', message, meta));
    }
  }

  /**
   * Log HTTP request
   */
  public request(method: string, url: string, meta?: Record<string, any>): void {
    this.info(`→ ${method} ${url}`, meta);
  }

  /**
   * Log HTTP response
   */
  public response(method: string, url: string, statusCode: number, duration: number): void {
    const level: LogLevel = statusCode >= 500 ? 'error' : statusCode >= 400 ? 'warn' : 'info';

    if (this.shouldLog(level)) {
      const message = `← ${method} ${url} - ${statusCode} (${duration}ms)`;

      if (level === 'error') {
        this.error(message, { statusCode, duration });
      } else if (level === 'warn') {
        this.warn(message, { statusCode, duration });
      } else {
        this.info(message, { statusCode, duration });
      }
    }
  }

  /**
   * Log S3 error
   */
  public s3Error(code: string, message: string, status: number): void {
    this.error(`[S3Error] ${code}: ${message}`, { code, status });
  }
}
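Sample output for the text format (timestamps illustrative), showing the warn-level threshold logic above for a 404 response:

const logger = new Logger({ level: 'debug', format: 'text', enabled: true });

logger.info('server started', { port: 3000 });
// [2024-01-01T00:00:00.000Z] INFO: server started {"port":3000}

// 400-499 responses are logged at warn level per the threshold in response():
logger.response('GET', '/demo/missing.txt', 404, 12);
// [2024-01-01T00:00:00.000Z] WARN: ← GET /demo/missing.txt - 404 (12ms) {"statusCode":404,"duration":12}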
@@ -1,43 +0,0 @@
import * as plugins from '../plugins.js';
import type { S3Context } from './context.js';

export type Middleware = (
  req: plugins.http.IncomingMessage,
  res: plugins.http.ServerResponse,
  ctx: S3Context,
  next: () => Promise<void>
) => Promise<void>;

/**
 * Middleware stack for composing request handlers
 */
export class MiddlewareStack {
  private middlewares: Middleware[] = [];

  /**
   * Add middleware to the stack
   */
  public use(middleware: Middleware): void {
    this.middlewares.push(middleware);
  }

  /**
   * Execute all middlewares in order
   */
  public async execute(
    req: plugins.http.IncomingMessage,
    res: plugins.http.ServerResponse,
    ctx: S3Context
  ): Promise<void> {
    let index = 0;

    const next = async (): Promise<void> => {
      if (index < this.middlewares.length) {
        const middleware = this.middlewares[index++];
        await middleware(req, res, ctx, next);
      }
    };

    await next();
  }
}
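Middlewares run in registration order; each decides whether the rest of the chain runs by calling (or skipping) next(). A sketch:

const stack = new MiddlewareStack();

// Timing middleware: code after next() runs once downstream middlewares finish.
stack.use(async (req, res, ctx, next) => {
  const start = Date.now();
  await next();
  console.log(`middleware chain took ${Date.now() - start}ms`);
});

// Guard middleware: throwing short-circuits the chain, so next() never runs.
stack.use(async (req, res, ctx, next) => {
  if (!req.headers.authorization) {
    ctx.throw('AccessDenied', 'Access Denied');
  }
  await next();
});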
@@ -1,430 +0,0 @@
import * as plugins from '../plugins.js';
import { Readable } from 'stream';

/**
 * Multipart upload metadata
 */
export interface IMultipartUpload {
  uploadId: string;
  bucket: string;
  key: string;
  initiated: Date;
  parts: Map<number, IPartInfo>;
  metadata: Record<string, string>;
}

/**
 * Part information
 */
export interface IPartInfo {
  partNumber: number;
  etag: string;
  size: number;
  lastModified: Date;
}

/**
 * Serializable version of upload metadata for disk persistence
 */
interface ISerializableUpload {
  uploadId: string;
  bucket: string;
  key: string;
  initiated: string; // ISO date string
  metadata: Record<string, string>;
  parts: Array<{
    partNumber: number;
    etag: string;
    size: number;
    lastModified: string; // ISO date string
  }>;
}

/**
 * Manages multipart upload state and storage
 */
export class MultipartUploadManager {
  private uploads: Map<string, IMultipartUpload> = new Map();
  private uploadDir: string;
  private cleanupInterval: NodeJS.Timeout | null = null;
  private expirationDays: number;
  private cleanupIntervalMinutes: number;

  constructor(
    private rootDir: string,
    expirationDays: number = 7,
    cleanupIntervalMinutes: number = 60
  ) {
    this.uploadDir = plugins.path.join(rootDir, '.multipart');
    this.expirationDays = expirationDays;
    this.cleanupIntervalMinutes = cleanupIntervalMinutes;
  }

  /**
   * Initialize multipart uploads directory
   */
  public async initialize(): Promise<void> {
    await plugins.smartfs.directory(this.uploadDir).recursive().create();
    await this.restoreUploadsFromDisk();
  }

  /**
   * Save upload metadata to disk for persistence
   */
  private async saveUploadMetadata(uploadId: string): Promise<void> {
    const upload = this.uploads.get(uploadId);
    if (!upload) {
      return;
    }

    const metadataPath = plugins.path.join(this.uploadDir, uploadId, 'metadata.json');

    const serializable: ISerializableUpload = {
      uploadId: upload.uploadId,
      bucket: upload.bucket,
      key: upload.key,
      initiated: upload.initiated.toISOString(),
      metadata: upload.metadata,
      parts: Array.from(upload.parts.values()).map(part => ({
        partNumber: part.partNumber,
        etag: part.etag,
        size: part.size,
        lastModified: part.lastModified.toISOString(),
      })),
    };

    await plugins.smartfs.file(metadataPath).write(JSON.stringify(serializable, null, 2));
  }

  /**
   * Restore uploads from disk on initialization
   */
  private async restoreUploadsFromDisk(): Promise<void> {
    const uploadDirExists = await plugins.smartfs.directory(this.uploadDir).exists();
    if (!uploadDirExists) {
      return;
    }

    const entries = await plugins.smartfs.directory(this.uploadDir).includeStats().list();

    for (const entry of entries) {
      if (!entry.isDirectory) {
        continue;
      }

      const uploadId = entry.name;
      const metadataPath = plugins.path.join(this.uploadDir, uploadId, 'metadata.json');

      // Check if metadata.json exists
      const metadataExists = await plugins.smartfs.file(metadataPath).exists();
      if (!metadataExists) {
        // Orphaned upload directory - clean it up
        console.warn(`Orphaned multipart upload directory found: ${uploadId}, cleaning up`);
        await plugins.smartfs.directory(plugins.path.join(this.uploadDir, uploadId)).recursive().delete();
        continue;
      }

      try {
        // Read and parse metadata
        const metadataContent = await plugins.smartfs.file(metadataPath).encoding('utf8').read() as string;
        const serialized: ISerializableUpload = JSON.parse(metadataContent);

        // Restore to memory
        const parts = new Map<number, IPartInfo>();
        for (const part of serialized.parts) {
          parts.set(part.partNumber, {
            partNumber: part.partNumber,
            etag: part.etag,
            size: part.size,
            lastModified: new Date(part.lastModified),
          });
        }

        this.uploads.set(uploadId, {
          uploadId: serialized.uploadId,
          bucket: serialized.bucket,
          key: serialized.key,
          initiated: new Date(serialized.initiated),
          parts,
          metadata: serialized.metadata,
        });

        console.log(`Restored multipart upload: ${uploadId} (${serialized.bucket}/${serialized.key})`);
      } catch (error) {
        // Corrupted metadata - clean up
        console.error(`Failed to restore multipart upload ${uploadId}:`, error);
        await plugins.smartfs.directory(plugins.path.join(this.uploadDir, uploadId)).recursive().delete();
      }
    }
  }

  /**
   * Generate a unique upload ID
   */
  private generateUploadId(): string {
    return plugins.crypto.randomBytes(16).toString('hex');
  }

  /**
   * Initiate a new multipart upload
   */
  public async initiateUpload(
    bucket: string,
    key: string,
    metadata: Record<string, string>
  ): Promise<string> {
    const uploadId = this.generateUploadId();

    this.uploads.set(uploadId, {
      uploadId,
      bucket,
      key,
      initiated: new Date(),
      parts: new Map(),
      metadata,
    });

    // Create directory for this upload's parts
    const uploadPath = plugins.path.join(this.uploadDir, uploadId);
    await plugins.smartfs.directory(uploadPath).recursive().create();

    // Persist metadata to disk
    await this.saveUploadMetadata(uploadId);

    return uploadId;
  }

  /**
   * Upload a part
   */
  public async uploadPart(
    uploadId: string,
    partNumber: number,
    stream: Readable
  ): Promise<IPartInfo> {
    const upload = this.uploads.get(uploadId);
    if (!upload) {
      throw new Error('No such upload');
    }

    const partPath = plugins.path.join(this.uploadDir, uploadId, `part-${partNumber}`);

    // Write part to disk
    const webWriteStream = await plugins.smartfs.file(partPath).writeStream();
    const writer = webWriteStream.getWriter();

    let size = 0;
    const hash = plugins.crypto.createHash('md5');

    for await (const chunk of stream) {
      const buffer = Buffer.isBuffer(chunk) ? chunk : Buffer.from(chunk);
      await writer.write(new Uint8Array(buffer));
      hash.update(buffer);
      size += buffer.length;
    }

    await writer.close();

    const etag = hash.digest('hex');

    const partInfo: IPartInfo = {
      partNumber,
      etag,
      size,
      lastModified: new Date(),
    };

    upload.parts.set(partNumber, partInfo);

    // Persist updated metadata
    await this.saveUploadMetadata(uploadId);

    return partInfo;
  }

  /**
   * Complete multipart upload - combine all parts
   */
  public async completeUpload(
    uploadId: string,
    parts: Array<{ PartNumber: number; ETag: string }>
  ): Promise<{ etag: string; size: number }> {
    const upload = this.uploads.get(uploadId);
    if (!upload) {
      throw new Error('No such upload');
    }

    // Verify all parts are uploaded
    for (const part of parts) {
      const uploadedPart = upload.parts.get(part.PartNumber);
      if (!uploadedPart) {
        throw new Error(`Part ${part.PartNumber} not uploaded`);
      }
      // Normalize ETag format (remove quotes if present)
      const normalizedETag = part.ETag.replace(/"/g, '');
      if (uploadedPart.etag !== normalizedETag) {
        throw new Error(`Part ${part.PartNumber} ETag mismatch`);
      }
    }

    // Sort parts by part number (copy first to avoid mutating the caller's array)
    const sortedParts = [...parts].sort((a, b) => a.PartNumber - b.PartNumber);

    // Combine parts into final object
    const finalPath = plugins.path.join(this.uploadDir, uploadId, 'final');
    const webWriteStream = await plugins.smartfs.file(finalPath).writeStream();
    const writer = webWriteStream.getWriter();

    const hash = plugins.crypto.createHash('md5');
    let totalSize = 0;

    for (const part of sortedParts) {
      const partPath = plugins.path.join(this.uploadDir, uploadId, `part-${part.PartNumber}`);

      // Read part and write to final file
      const partContent = await plugins.smartfs.file(partPath).read();
      const buffer = Buffer.isBuffer(partContent) ? partContent : Buffer.from(partContent as string);

      await writer.write(new Uint8Array(buffer));
      hash.update(buffer);
      totalSize += buffer.length;
    }

    await writer.close();

    const etag = hash.digest('hex');

    return { etag, size: totalSize };
  }

  /**
   * Get the final combined file path
   */
  public getFinalPath(uploadId: string): string {
    return plugins.path.join(this.uploadDir, uploadId, 'final');
  }

  /**
   * Get upload metadata
   */
  public getUpload(uploadId: string): IMultipartUpload | undefined {
    return this.uploads.get(uploadId);
  }

  /**
   * Abort multipart upload - clean up parts
   */
  public async abortUpload(uploadId: string): Promise<void> {
    const upload = this.uploads.get(uploadId);
    if (!upload) {
      throw new Error('No such upload');
    }

    // Delete upload directory
    const uploadPath = plugins.path.join(this.uploadDir, uploadId);
    await plugins.smartfs.directory(uploadPath).recursive().delete();

    // Remove from memory
    this.uploads.delete(uploadId);
  }

  /**
   * Clean up upload after completion
   */
  public async cleanupUpload(uploadId: string): Promise<void> {
    const uploadPath = plugins.path.join(this.uploadDir, uploadId);
    await plugins.smartfs.directory(uploadPath).recursive().delete();
    this.uploads.delete(uploadId);
  }

  /**
   * List all in-progress uploads for a bucket
   */
  public listUploads(bucket?: string): IMultipartUpload[] {
    const uploads = Array.from(this.uploads.values());
    if (bucket) {
      return uploads.filter((u) => u.bucket === bucket);
    }
    return uploads;
  }

  /**
   * List parts for an upload
   */
  public listParts(uploadId: string): IPartInfo[] {
    const upload = this.uploads.get(uploadId);
    if (!upload) {
      throw new Error('No such upload');
    }
    return Array.from(upload.parts.values()).sort((a, b) => a.partNumber - b.partNumber);
  }

  /**
   * Start automatic cleanup task for expired uploads
   */
  public startCleanupTask(): void {
    if (this.cleanupInterval) {
      console.warn('Cleanup task is already running');
      return;
    }

    // Run cleanup immediately on start
    this.performCleanup().catch(err => {
      console.error('Failed to perform initial multipart cleanup:', err);
    });

    // Then schedule periodic cleanup
    const intervalMs = this.cleanupIntervalMinutes * 60 * 1000;
    this.cleanupInterval = setInterval(() => {
      this.performCleanup().catch(err => {
        console.error('Failed to perform scheduled multipart cleanup:', err);
      });
    }, intervalMs);

    console.log(`Multipart cleanup task started (interval: ${this.cleanupIntervalMinutes} minutes, expiration: ${this.expirationDays} days)`);
  }

  /**
   * Stop automatic cleanup task
   */
  public stopCleanupTask(): void {
    if (this.cleanupInterval) {
      clearInterval(this.cleanupInterval);
      this.cleanupInterval = null;
      console.log('Multipart cleanup task stopped');
    }
  }

  /**
   * Perform cleanup of expired uploads
   */
  private async performCleanup(): Promise<void> {
    const now = Date.now();
    const expirationMs = this.expirationDays * 24 * 60 * 60 * 1000;
    const expiredUploads: string[] = [];

    // Find expired uploads
    for (const [uploadId, upload] of this.uploads.entries()) {
      const age = now - upload.initiated.getTime();
      if (age > expirationMs) {
        expiredUploads.push(uploadId);
      }
    }

    if (expiredUploads.length === 0) {
      return;
    }

    console.log(`Cleaning up ${expiredUploads.length} expired multipart upload(s)`);

    // Delete expired uploads
    for (const uploadId of expiredUploads) {
      try {
        await this.abortUpload(uploadId);
        console.log(`Deleted expired multipart upload: ${uploadId}`);
      } catch (err) {
        console.error(`Failed to delete expired upload ${uploadId}:`, err);
      }
    }
  }
}
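The intended lifecycle, sketched with illustrative part sizes. Note this implementation computes the final ETag as a plain MD5 of the concatenated parts, unlike AWS's md5-of-part-md5s convention with a "-N" suffix:

import { Readable } from 'stream';

const manager = new MultipartUploadManager('.nogit/bucketsDir');
await manager.initialize();

const uploadId = await manager.initiateUpload('demo', 'big.bin', {});

// Upload two parts; each returns an ETag the client echoes back on complete.
const partA = Buffer.alloc(5 * 1024 * 1024, 1);
const partB = Buffer.alloc(1024, 2);
const p1 = await manager.uploadPart(uploadId, 1, Readable.from([partA]));
const p2 = await manager.uploadPart(uploadId, 2, Readable.from([partB]));

const { etag, size } = await manager.completeUpload(uploadId, [
  { PartNumber: 1, ETag: p1.etag },
  { PartNumber: 2, ETag: p2.etag },
]);
// The combined file now sits at manager.getFinalPath(uploadId); the caller
// moves it into the bucket and then calls manager.cleanupUpload(uploadId).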
@@ -1,129 +0,0 @@
import * as plugins from '../plugins.js';
import type { S3Context } from './context.js';

export type RouteHandler = (
  req: plugins.http.IncomingMessage,
  res: plugins.http.ServerResponse,
  ctx: S3Context,
  params: Record<string, string>
) => Promise<void>;

export interface IRouteMatch {
  handler: RouteHandler;
  params: Record<string, string>;
}

interface IRoute {
  method: string;
  pattern: RegExp;
  paramNames: string[];
  handler: RouteHandler;
}

/**
 * Simple HTTP router with pattern matching for S3 routes
 */
export class S3Router {
  private routes: IRoute[] = [];

  /**
   * Add a route with pattern matching
   * Supports patterns like:
   * - "/" (exact match)
   * - "/:bucket" (single param)
   * - "/:bucket/:key*" (param with wildcard - captures everything after)
   */
  public add(method: string, pattern: string, handler: RouteHandler): void {
    const { regex, paramNames } = this.convertPatternToRegex(pattern);

    this.routes.push({
      method: method.toUpperCase(),
      pattern: regex,
      paramNames,
      handler,
    });
  }

  /**
   * Match a request to a route
   */
  public match(method: string, pathname: string): IRouteMatch | null {
    // Normalize pathname: remove trailing slash unless it's root
    const normalizedPath = pathname === '/' ? pathname : pathname.replace(/\/$/, '');

    for (const route of this.routes) {
      if (route.method !== method.toUpperCase()) {
        continue;
      }

      const match = normalizedPath.match(route.pattern);
      if (match) {
        // Extract params from captured groups
        const params: Record<string, string> = {};
        for (let i = 0; i < route.paramNames.length; i++) {
          params[route.paramNames[i]] = decodeURIComponent(match[i + 1] || '');
        }

        return {
          handler: route.handler,
          params,
        };
      }
    }

    return null;
  }

  /**
   * Convert path pattern to RegExp
   * Examples:
   * - "/" → /^\/$/
   * - "/:bucket" → /^\/([^/]+)$/
   * - "/:bucket/:key*" → /^\/([^/]+)\/(.+)$/
   */
  private convertPatternToRegex(pattern: string): { regex: RegExp; paramNames: string[] } {
    const paramNames: string[] = [];
    let regexStr = pattern;

    // Process all params in a single pass to maintain order
    regexStr = regexStr.replace(/:(\w+)(\*)?/g, (match, paramName, isWildcard) => {
      paramNames.push(paramName);
      // :param* captures rest of path, :param captures single segment
      return isWildcard ? '(.+)' : '([^/]+)';
    });

    // Escape forward slashes for the regex source
    regexStr = regexStr.replace(/\//g, '\\/');

    // Add anchors
    regexStr = `^${regexStr}$`;

    return {
      regex: new RegExp(regexStr),
      paramNames,
    };
  }

  /**
   * Convenience methods for common HTTP methods
   */
  public get(pattern: string, handler: RouteHandler): void {
    this.add('GET', pattern, handler);
  }

  public put(pattern: string, handler: RouteHandler): void {
    this.add('PUT', pattern, handler);
  }

  public post(pattern: string, handler: RouteHandler): void {
    this.add('POST', pattern, handler);
  }

  public delete(pattern: string, handler: RouteHandler): void {
    this.add('DELETE', pattern, handler);
  }

  public head(pattern: string, handler: RouteHandler): void {
    this.add('HEAD', pattern, handler);
  }
}
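How the compiled patterns resolve an S3-style path, per the conversion rules documented above:

const router = new S3Router();
router.get('/:bucket/:key*', async (req, res, ctx, params) => {
  // params.bucket and params.key are filled in by match()
});

const match = router.match('GET', '/my-bucket/photos/2024/cat.jpg');
// match.params → { bucket: 'my-bucket', key: 'photos/2024/cat.jpg' }
// ':bucket' compiles to '([^/]+)' (one path segment) and ':key*' to '(.+)'
// (the rest of the path, slashes included).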
@@ -1,145 +0,0 @@
import * as plugins from '../plugins.js';

/**
 * S3 error codes mapped to HTTP status codes
 */
const S3_ERROR_CODES: Record<string, number> = {
  'AccessDenied': 403,
  'BadDigest': 400,
  'BadRequest': 400,
  'BucketAlreadyExists': 409,
  'BucketAlreadyOwnedByYou': 409,
  'BucketNotEmpty': 409,
  'CredentialsNotSupported': 400,
  'EntityTooSmall': 400,
  'EntityTooLarge': 400,
  'ExpiredToken': 400,
  'IncompleteBody': 400,
  'IncorrectNumberOfFilesInPostRequest': 400,
  'InlineDataTooLarge': 400,
  'InternalError': 500,
  'InvalidArgument': 400,
  'InvalidBucketName': 400,
  'InvalidDigest': 400,
  'InvalidLocationConstraint': 400,
  'InvalidPart': 400,
  'InvalidPartOrder': 400,
  'InvalidRange': 416,
  'InvalidRequest': 400,
  'InvalidSecurity': 403,
  'InvalidSOAPRequest': 400,
  'InvalidStorageClass': 400,
  'InvalidTargetBucketForLogging': 400,
  'InvalidToken': 400,
  'InvalidURI': 400,
  'KeyTooLongError': 400,
  'MalformedACLError': 400,
  'MalformedPOSTRequest': 400,
  'MalformedXML': 400,
  'MaxMessageLengthExceeded': 400,
  'MaxPostPreDataLengthExceededError': 400,
  'MetadataTooLarge': 400,
  'MethodNotAllowed': 405,
  'MissingContentLength': 411,
  'MissingRequestBodyError': 400,
  'MissingSecurityElement': 400,
  'MissingSecurityHeader': 400,
  'NoLoggingStatusForKey': 400,
  'NoSuchBucket': 404,
  'NoSuchKey': 404,
  'NoSuchLifecycleConfiguration': 404,
  'NoSuchUpload': 404,
  'NoSuchVersion': 404,
  'NotImplemented': 501,
  'NotSignedUp': 403,
  'OperationAborted': 409,
  'PermanentRedirect': 301,
  'PreconditionFailed': 412,
  'Redirect': 307,
  'RequestIsNotMultiPartContent': 400,
  'RequestTimeout': 400,
  'RequestTimeTooSkewed': 403,
  'RequestTorrentOfBucketError': 400,
  'SignatureDoesNotMatch': 403,
  'ServiceUnavailable': 503,
  'SlowDown': 503,
  'TemporaryRedirect': 307,
  'TokenRefreshRequired': 400,
  'TooManyBuckets': 400,
  'UnexpectedContent': 400,
  'UnresolvableGrantByEmailAddress': 400,
  'UserKeyMustBeSpecified': 400,
};

/**
 * S3-compatible error class that formats errors as XML responses
 */
export class S3Error extends Error {
  public status: number;
  public code: string;
  public detail: Record<string, any>;

  constructor(
    code: string,
    message: string,
    detail: Record<string, any> = {}
  ) {
    super(message);
    this.name = 'S3Error';
    this.code = code;
    this.status = S3_ERROR_CODES[code] || 500;
    this.detail = detail;

    // Maintain proper stack trace
    if (Error.captureStackTrace) {
      Error.captureStackTrace(this, S3Error);
    }
  }

  /**
   * Convert error to S3-compatible XML format
   */
  public toXML(): string {
    const smartXmlInstance = new plugins.SmartXml();
    const errorObj: any = {
      Error: {
        Code: this.code,
        Message: this.message,
        ...this.detail,
      },
    };

    const xml = smartXmlInstance.createXmlFromObject(errorObj);

    // Ensure XML declaration
    if (!xml.startsWith('<?xml')) {
      return `<?xml version="1.0" encoding="UTF-8"?>\n${xml}`;
    }

    return xml;
  }

  /**
   * Create S3Error from a generic Error
   */
  public static fromError(err: any): S3Error {
    if (err instanceof S3Error) {
      return err;
    }

    // Map common errors
    if (err.code === 'ENOENT') {
      return new S3Error('NoSuchKey', 'The specified key does not exist.');
    }
    if (err.code === 'EACCES') {
      return new S3Error('AccessDenied', 'Access Denied');
    }

    // Default to internal error
    return new S3Error(
      'InternalError',
      'We encountered an internal error. Please try again.',
      { OriginalError: err.message }
    );
  }
}
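The XML body a client receives, sketched for a NoSuchKey error (exact whitespace and element order depend on SmartXml's output):

const err = new S3Error('NoSuchKey', 'The specified key does not exist.', {
  Key: 'missing.txt',
});
// err.status === 404 (looked up in S3_ERROR_CODES)
// err.toXML() produces roughly:
// <?xml version="1.0" encoding="UTF-8"?>
// <Error>
//   <Code>NoSuchKey</Code>
//   <Message>The specified key does not exist.</Message>
//   <Key>missing.txt</Key>
// </Error>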
@@ -1,402 +0,0 @@
|
||||
import * as plugins from '../plugins.js';
|
||||
import { S3Router } from './router.js';
|
||||
import { MiddlewareStack } from './middleware-stack.js';
|
||||
import { S3Context } from './context.js';
|
||||
import { FilesystemStore } from './filesystem-store.js';
|
||||
import { S3Error } from './s3-error.js';
|
||||
import { Logger } from './logger.js';
|
||||
import { MultipartUploadManager } from './multipart-manager.js';
|
||||
import { ServiceController } from '../controllers/service.controller.js';
|
||||
import { BucketController } from '../controllers/bucket.controller.js';
|
||||
import { ObjectController } from '../controllers/object.controller.js';
|
||||
import type { ISmarts3Config } from '../index.js';
|
||||
|
||||
export interface ISmarts3ServerOptions {
|
||||
port?: number;
|
||||
address?: string;
|
||||
directory?: string;
|
||||
cleanSlate?: boolean;
|
||||
silent?: boolean;
|
||||
config?: Required<ISmarts3Config>;
|
||||
}
|
||||
|
||||
/**
|
||||
* Custom S3-compatible server implementation
|
||||
* Built on native Node.js http module with zero framework dependencies
|
||||
*/
|
||||
export class Smarts3Server {
|
||||
private httpServer?: plugins.http.Server;
|
||||
private router: S3Router;
|
||||
private middlewares: MiddlewareStack;
|
||||
public store: FilesystemStore; // Made public for direct access from Smarts3 class
|
||||
public multipart: MultipartUploadManager; // Made public for controller access
|
||||
private options: Required<Omit<ISmarts3ServerOptions, 'config'>>;
|
||||
private config: Required<ISmarts3Config>;
|
||||
private logger: Logger;
|
||||
|
||||
constructor(options: ISmarts3ServerOptions = {}) {
|
||||
this.options = {
|
||||
port: options.port ?? 3000,
|
||||
address: options.address ?? '0.0.0.0',
|
||||
directory: options.directory ?? plugins.path.join(process.cwd(), '.nogit/bucketsDir'),
|
||||
cleanSlate: options.cleanSlate ?? false,
|
||||
silent: options.silent ?? false,
|
||||
};
|
||||
|
||||
// Store config for middleware and feature configuration
|
||||
// If no config provided, create minimal default (for backward compatibility)
|
||||
this.config = options.config ?? {
|
||||
server: {
|
||||
port: this.options.port,
|
||||
address: this.options.address,
|
||||
silent: this.options.silent,
|
||||
},
|
||||
storage: {
|
||||
directory: this.options.directory,
|
||||
cleanSlate: this.options.cleanSlate,
|
||||
},
|
||||
auth: {
|
||||
enabled: false,
|
||||
credentials: [{ accessKeyId: 'S3RVER', secretAccessKey: 'S3RVER' }],
|
||||
},
|
||||
cors: {
|
||||
enabled: false,
|
||||
allowedOrigins: ['*'],
|
||||
allowedMethods: ['GET', 'POST', 'PUT', 'DELETE', 'HEAD', 'OPTIONS'],
|
||||
allowedHeaders: ['*'],
|
||||
exposedHeaders: ['ETag', 'x-amz-request-id', 'x-amz-version-id'],
|
||||
maxAge: 86400,
|
||||
allowCredentials: false,
|
||||
},
|
||||
logging: {
|
||||
level: 'info',
|
||||
format: 'text',
|
||||
enabled: true,
|
||||
},
|
||||
limits: {
|
||||
maxObjectSize: 5 * 1024 * 1024 * 1024,
|
||||
maxMetadataSize: 2048,
|
||||
requestTimeout: 300000,
|
||||
},
|
||||
multipart: {
|
||||
expirationDays: 7,
|
||||
cleanupIntervalMinutes: 60,
|
||||
},
|
||||
};
|
||||
|
||||
this.logger = new Logger(this.config.logging);
|
||||
this.store = new FilesystemStore(this.options.directory);
|
||||
this.multipart = new MultipartUploadManager(
|
||||
this.options.directory,
|
||||
this.config.multipart.expirationDays,
|
||||
this.config.multipart.cleanupIntervalMinutes
|
||||
);
|
||||
this.router = new S3Router();
|
||||
this.middlewares = new MiddlewareStack();
|
||||
|
||||
this.setupMiddlewares();
|
||||
this.setupRoutes();
|
||||
}
|
||||
|
||||
/**
|
||||
* Setup middleware stack
|
||||
*/
|
||||
private setupMiddlewares(): void {
|
||||
// CORS middleware (must be first to handle preflight requests)
|
||||
if (this.config.cors.enabled) {
|
||||
this.middlewares.use(async (req, res, ctx, next) => {
|
||||
const origin = req.headers.origin || req.headers.referer;
|
||||
|
||||
// Check if origin is allowed
|
||||
const allowedOrigins = this.config.cors.allowedOrigins || ['*'];
|
||||
const isOriginAllowed =
|
||||
allowedOrigins.includes('*') ||
|
||||
(origin && allowedOrigins.includes(origin));
|
||||
|
||||
if (isOriginAllowed) {
|
||||
// Set CORS headers
|
||||
res.setHeader(
|
||||
'Access-Control-Allow-Origin',
|
||||
allowedOrigins.includes('*') ? '*' : origin || '*'
|
||||
);
|
||||
|
||||
if (this.config.cors.allowCredentials) {
|
||||
res.setHeader('Access-Control-Allow-Credentials', 'true');
|
||||
}
|
||||
|
||||
// Handle preflight OPTIONS request
|
||||
if (req.method === 'OPTIONS') {
|
||||
res.setHeader(
|
||||
'Access-Control-Allow-Methods',
|
||||
(this.config.cors.allowedMethods || []).join(', ')
|
||||
);
|
||||
res.setHeader(
|
||||
'Access-Control-Allow-Headers',
|
||||
(this.config.cors.allowedHeaders || []).join(', ')
|
||||
);
|
||||
if (this.config.cors.maxAge) {
|
||||
res.setHeader(
|
||||
'Access-Control-Max-Age',
|
||||
String(this.config.cors.maxAge)
|
||||
);
|
||||
}
|
||||
res.writeHead(204);
|
||||
res.end();
|
||||
return; // Don't call next() for OPTIONS
|
||||
}
|
||||
|
||||
// Set exposed headers for actual requests
|
||||
if (this.config.cors.exposedHeaders && this.config.cors.exposedHeaders.length > 0) {
|
||||
res.setHeader(
|
||||
'Access-Control-Expose-Headers',
|
||||
this.config.cors.exposedHeaders.join(', ')
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
await next();
|
||||
});
|
||||
}
|
||||
|
||||
// Authentication middleware (simple static credentials)
|
||||
if (this.config.auth.enabled) {
|
||||
this.middlewares.use(async (req, res, ctx, next) => {
|
||||
const authHeader = req.headers.authorization;
|
||||
|
||||
// Extract access key from Authorization header
|
||||
let accessKeyId: string | undefined;
|
||||
|
||||
if (authHeader) {
|
||||
// Support multiple auth formats:
|
||||
// 1. AWS accessKeyId:signature
|
||||
// 2. AWS4-HMAC-SHA256 Credential=accessKeyId/date/region/service/aws4_request, ...
|
||||
if (authHeader.startsWith('AWS ')) {
|
||||
accessKeyId = authHeader.substring(4).split(':')[0];
|
||||
} else if (authHeader.startsWith('AWS4-HMAC-SHA256')) {
|
||||
const credentialMatch = authHeader.match(/Credential=([^/]+)\//);
|
||||
accessKeyId = credentialMatch ? credentialMatch[1] : undefined;
|
||||
}
|
||||
}
|
||||
|
||||
// Check if access key is valid
|
||||
const isValid = this.config.auth.credentials.some(
|
||||
(cred) => cred.accessKeyId === accessKeyId
|
||||
);
|
||||
|
||||
if (!isValid) {
|
||||
ctx.throw('AccessDenied', 'Access Denied');
|
||||
return;
|
||||
}
|
||||
|
||||
await next();
|
||||
});
|
||||
}
|
||||
|
||||
// Logger middleware
|
||||
if (!this.options.silent && this.config.logging.enabled) {
|
||||
this.middlewares.use(async (req, res, ctx, next) => {
|
||||
const start = Date.now();
|
||||
|
||||
// Log request
|
||||
this.logger.request(req.method || 'UNKNOWN', req.url || '/', {
|
||||
headers: req.headers,
|
||||
});
|
||||
|
||||
await next();
|
||||
|
||||
// Log response
|
||||
const duration = Date.now() - start;
|
||||
this.logger.response(
|
||||
req.method || 'UNKNOWN',
|
||||
req.url || '/',
|
||||
res.statusCode || 500,
|
||||
duration
|
||||
);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Setup routes
|
||||
*/
|
||||
private setupRoutes(): void {
|
||||
// Service level (/)
|
||||
this.router.get('/', ServiceController.listBuckets);
|
||||
|
||||
// Bucket level (/:bucket)
|
||||
this.router.put('/:bucket', BucketController.createBucket);
|
||||
this.router.delete('/:bucket', BucketController.deleteBucket);
|
||||
this.router.get('/:bucket', BucketController.listObjects);
|
||||
this.router.head('/:bucket', BucketController.headBucket);
|
||||
|
||||
// Object level (/:bucket/:key*)
|
||||
this.router.put('/:bucket/:key*', ObjectController.putObject);
|
||||
this.router.post('/:bucket/:key*', ObjectController.postObject); // For multipart operations
|
||||
this.router.get('/:bucket/:key*', ObjectController.getObject);
|
||||
this.router.head('/:bucket/:key*', ObjectController.headObject);
|
||||
this.router.delete('/:bucket/:key*', ObjectController.deleteObject);
|
||||
}

  /**
   * Handle incoming HTTP request
   */
  private async handleRequest(
    req: plugins.http.IncomingMessage,
    res: plugins.http.ServerResponse
  ): Promise<void> {
    const context = new S3Context(req, res, this.store, this.multipart);

    try {
      // Execute middleware stack
      await this.middlewares.execute(req, res, context);

      // Route to handler
      const match = this.router.match(context.method, context.url.pathname);

      if (match) {
        context.params = match.params;
        await match.handler(req, res, context, match.params);
      } else {
        context.throw('NoSuchKey', 'The specified resource does not exist');
      }
    } catch (err) {
      await this.handleError(err, context, res);
    }
  }

  /**
   * Handle errors and send S3-compatible error responses
   */
  private async handleError(
    err: any,
    context: S3Context,
    res: plugins.http.ServerResponse
  ): Promise<void> {
    const s3Error = err instanceof S3Error ? err : S3Error.fromError(err);

    // Log the error
    this.logger.s3Error(s3Error.code, s3Error.message, s3Error.status);

    // Log stack trace for server errors
    if (s3Error.status >= 500) {
      this.logger.debug('Error stack trace', {
        stack: err.stack || err.toString(),
      });
    }

    // Send error response
    const errorXml = s3Error.toXML();

    res.writeHead(s3Error.status, {
      'Content-Type': 'application/xml',
      'Content-Length': Buffer.byteLength(errorXml),
    });

    res.end(errorXml);
  }
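
  // The body produced by s3Error.toXML() is assumed to follow the standard
  // S3 error envelope, roughly:
  //   <?xml version="1.0" encoding="UTF-8"?>
  //   <Error><Code>NoSuchKey</Code><Message>...</Message></Error>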

  /**
   * Start the server
   */
  public async start(): Promise<void> {
    // Initialize store
    await this.store.initialize();

    // Initialize multipart upload manager
    await this.multipart.initialize();

    // Start multipart cleanup task
    this.multipart.startCleanupTask();

    // Clean slate if requested
    if (this.options.cleanSlate) {
      await this.store.reset();
    }

    // Create HTTP server
    this.httpServer = plugins.http.createServer((req, res) => {
      this.handleRequest(req, res).catch((err) => {
        this.logger.error('Fatal error in request handler', {
          error: err.message,
          stack: err.stack,
        });
        if (!res.headersSent) {
          res.writeHead(500, { 'Content-Type': 'text/plain' });
          res.end('Internal Server Error');
        }
      });
    });

    // Start listening
    await new Promise<void>((resolve, reject) => {
      this.httpServer!.listen(this.options.port, this.options.address, (err?: Error) => {
        if (err) {
          reject(err);
        } else {
          this.logger.info(`S3 server listening on ${this.options.address}:${this.options.port}`);
          resolve();
        }
      });
    });
  }

  /**
   * Stop the server
   */
  public async stop(): Promise<void> {
    if (!this.httpServer) {
      return;
    }

    // Stop multipart cleanup task
    this.multipart.stopCleanupTask();

    await new Promise<void>((resolve, reject) => {
      this.httpServer!.close((err?: Error) => {
        if (err) {
          reject(err);
        } else {
          this.logger.info('S3 server stopped');
          resolve();
        }
      });
    });

    this.httpServer = undefined;
  }

  /**
   * Get server port (useful for testing with random ports)
   */
  public getPort(): number {
    if (!this.httpServer) {
      throw new Error('Server not started');
    }

    const address = this.httpServer.address();
    if (typeof address === 'string') {
      throw new Error('Unix socket not supported');
    }

    return address?.port || this.options.port;
  }
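
  // A minimal test sketch (options besides port mirror those used elsewhere
  // in this diff; port 0 asks the OS for an ephemeral port):
  //   const server = new Smarts3Server({ port: 0, address: '127.0.0.1', ... });
  //   await server.start();
  //   const actualPort = server.getPort();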

  /**
   * Get S3 descriptor for client configuration
   */
  public getS3Descriptor(): {
    accessKey: string;
    accessSecret: string;
    endpoint: string;
    port: number;
    useSsl: boolean;
  } {
    return {
      accessKey: 'S3RVER',
      accessSecret: 'S3RVER',
      endpoint: this.options.address === '0.0.0.0' ? '127.0.0.1' : this.options.address,
      port: this.getPort(),
      useSsl: false,
    };
  }
}
@@ -1,180 +0,0 @@
import * as plugins from '../plugins.js';
import type { S3Context } from '../classes/context.js';

/**
 * Bucket-level operations
 */
export class BucketController {
  /**
   * HEAD /:bucket - Check if bucket exists
   */
  public static async headBucket(
    req: plugins.http.IncomingMessage,
    res: plugins.http.ServerResponse,
    ctx: S3Context,
    params: Record<string, string>
  ): Promise<void> {
    const { bucket } = params;

    if (await ctx.store.bucketExists(bucket)) {
      ctx.status(200).send('');
    } else {
      ctx.throw('NoSuchBucket', 'The specified bucket does not exist');
    }
  }

  /**
   * PUT /:bucket - Create bucket
   */
  public static async createBucket(
    req: plugins.http.IncomingMessage,
    res: plugins.http.ServerResponse,
    ctx: S3Context,
    params: Record<string, string>
  ): Promise<void> {
    const { bucket } = params;

    await ctx.store.createBucket(bucket);
    ctx.status(200).send('');
  }

  /**
   * DELETE /:bucket - Delete bucket
   */
  public static async deleteBucket(
    req: plugins.http.IncomingMessage,
    res: plugins.http.ServerResponse,
    ctx: S3Context,
    params: Record<string, string>
  ): Promise<void> {
    const { bucket } = params;

    await ctx.store.deleteBucket(bucket);
    ctx.status(204).send('');
  }

  /**
   * GET /:bucket - List objects or multipart uploads
   * Supports both V1 and V2 listing (V2 uses list-type=2 query param)
   * Multipart uploads listing is triggered by ?uploads query parameter
   */
  public static async listObjects(
    req: plugins.http.IncomingMessage,
    res: plugins.http.ServerResponse,
    ctx: S3Context,
    params: Record<string, string>
  ): Promise<void> {
    const { bucket } = params;

    // Check if this is a ListMultipartUploads request
    if (ctx.query.uploads !== undefined) {
      return BucketController.listMultipartUploads(req, res, ctx, params);
    }

    const isV2 = ctx.query['list-type'] === '2';

    const result = await ctx.store.listObjects(bucket, {
      prefix: ctx.query.prefix,
      delimiter: ctx.query.delimiter,
      maxKeys: ctx.query['max-keys'] ? parseInt(ctx.query['max-keys']) : 1000,
      continuationToken: ctx.query['continuation-token'],
    });

    if (isV2) {
      // List Objects V2 response
      await ctx.sendXML({
        ListBucketResult: {
          '@_xmlns': 'http://s3.amazonaws.com/doc/2006-03-01/',
          Name: bucket,
          Prefix: result.prefix || '',
          MaxKeys: result.maxKeys,
          KeyCount: result.contents.length,
          IsTruncated: result.isTruncated,
          ...(result.delimiter && { Delimiter: result.delimiter }),
          ...(result.nextContinuationToken && {
            NextContinuationToken: result.nextContinuationToken,
          }),
          ...(result.commonPrefixes.length > 0 && {
            CommonPrefixes: result.commonPrefixes.map((prefix) => ({
              Prefix: prefix,
            })),
          }),
          Contents: result.contents.map((obj) => ({
            Key: obj.key,
            LastModified: obj.lastModified.toISOString(),
            ETag: `"${obj.md5}"`,
            Size: obj.size,
            StorageClass: 'STANDARD',
          })),
        },
      });
    } else {
      // List Objects V1 response
      await ctx.sendXML({
        ListBucketResult: {
          '@_xmlns': 'http://s3.amazonaws.com/doc/2006-03-01/',
          Name: bucket,
          Prefix: result.prefix || '',
          MaxKeys: result.maxKeys,
          IsTruncated: result.isTruncated,
          ...(result.delimiter && { Delimiter: result.delimiter }),
          ...(result.commonPrefixes.length > 0 && {
            CommonPrefixes: result.commonPrefixes.map((prefix) => ({
              Prefix: prefix,
            })),
          }),
          Contents: result.contents.map((obj) => ({
            Key: obj.key,
            LastModified: obj.lastModified.toISOString(),
            ETag: `"${obj.md5}"`,
            Size: obj.size,
            StorageClass: 'STANDARD',
          })),
        },
      });
    }
  }
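
  // Illustrative requests dispatched by the method above (bucket name hypothetical):
  //   GET /my-bucket?list-type=2&prefix=photos/&delimiter=/  (V2 listing)
  //   GET /my-bucket?prefix=photos/                          (V1 listing)
  //   GET /my-bucket?uploads                                 (multipart listing)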

  /**
   * GET /:bucket?uploads - List multipart uploads
   */
  private static async listMultipartUploads(
    req: plugins.http.IncomingMessage,
    res: plugins.http.ServerResponse,
    ctx: S3Context,
    params: Record<string, string>
  ): Promise<void> {
    const { bucket } = params;

    // Get all multipart uploads for this bucket
    const uploads = ctx.multipart.listUploads(bucket);

    // Build XML response
    await ctx.sendXML({
      ListMultipartUploadsResult: {
        '@_xmlns': 'http://s3.amazonaws.com/doc/2006-03-01/',
        Bucket: bucket,
        KeyMarker: '',
        UploadIdMarker: '',
        MaxUploads: 1000,
        IsTruncated: false,
        ...(uploads.length > 0 && {
          Upload: uploads.map((upload) => ({
            Key: upload.key,
            UploadId: upload.uploadId,
            Initiator: {
              ID: 'S3RVER',
              DisplayName: 'S3RVER',
            },
            Owner: {
              ID: 'S3RVER',
              DisplayName: 'S3RVER',
            },
            StorageClass: 'STANDARD',
            Initiated: upload.initiated.toISOString(),
          })),
        }),
      },
    });
  }
}
@@ -1,378 +0,0 @@
import * as plugins from '../plugins.js';
import type { S3Context } from '../classes/context.js';

/**
 * Object-level operations
 */
export class ObjectController {
  /**
   * PUT /:bucket/:key* - Upload object, copy object, or upload part
   */
  public static async putObject(
    req: plugins.http.IncomingMessage,
    res: plugins.http.ServerResponse,
    ctx: S3Context,
    params: Record<string, string>
  ): Promise<void> {
    const { bucket, key } = params;

    // Check if this is a multipart upload part
    if (ctx.query.partNumber && ctx.query.uploadId) {
      return ObjectController.uploadPart(req, res, ctx, params);
    }

    // Check if this is a COPY operation
    const copySource = ctx.headers['x-amz-copy-source'] as string | undefined;
    if (copySource) {
      return ObjectController.copyObject(req, res, ctx, params);
    }

    // Extract metadata from headers
    const metadata: Record<string, string> = {};
    for (const [header, value] of Object.entries(ctx.headers)) {
      if (header.startsWith('x-amz-meta-')) {
        metadata[header] = value as string;
      }
      if (header === 'content-type' && value) {
        metadata['content-type'] = value as string;
      }
      if (header === 'cache-control' && value) {
        metadata['cache-control'] = value as string;
      }
    }

    // If no content-type, default to binary/octet-stream
    if (!metadata['content-type']) {
      metadata['content-type'] = 'binary/octet-stream';
    }

    // Stream upload
    const result = await ctx.store.putObject(bucket, key, ctx.getRequestStream(), metadata);

    ctx.setHeader('ETag', `"${result.md5}"`);
    ctx.status(200).send('');
  }

  /**
   * GET /:bucket/:key* - Download object
   */
  public static async getObject(
    req: plugins.http.IncomingMessage,
    res: plugins.http.ServerResponse,
    ctx: S3Context,
    params: Record<string, string>
  ): Promise<void> {
    const { bucket, key } = params;

    // Parse Range header if present
    const rangeHeader = ctx.headers.range as string | undefined;
    let range: { start: number; end: number } | undefined;

    if (rangeHeader) {
      const matches = rangeHeader.match(/bytes=(\d+)-(\d*)/);
      if (matches) {
        const start = parseInt(matches[1]);
        const end = matches[2] ? parseInt(matches[2]) : undefined;
        range = { start, end: end || start + 1024 * 1024 }; // Default to 1MB if no end
      }
    }
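
    // How the regex above resolves standard Range headers:
    //   "Range: bytes=0-1023" -> { start: 0, end: 1023 }
    //   "Range: bytes=1024-"  -> open-ended; this implementation caps the
    //   window at start + 1 MiB instead of reading to the end of the object.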

    // Get object
    const object = await ctx.store.getObject(bucket, key, range);

    // Set response headers
    ctx.setHeader('ETag', `"${object.md5}"`);
    ctx.setHeader('Last-Modified', object.lastModified.toUTCString());
    ctx.setHeader('Content-Type', object.metadata['content-type'] || 'binary/octet-stream');
    ctx.setHeader('Accept-Ranges', 'bytes');

    // Handle custom metadata headers
    for (const [key, value] of Object.entries(object.metadata)) {
      if (key.startsWith('x-amz-meta-')) {
        ctx.setHeader(key, value);
      }
    }

    if (range) {
      ctx.status(206);
      ctx.setHeader('Content-Length', (range.end - range.start + 1).toString());
      ctx.setHeader('Content-Range', `bytes ${range.start}-${range.end}/${object.size}`);
    } else {
      ctx.status(200);
      ctx.setHeader('Content-Length', object.size.toString());
    }

    // Stream response
    await ctx.send(object.content!);
  }

  /**
   * HEAD /:bucket/:key* - Get object metadata
   */
  public static async headObject(
    req: plugins.http.IncomingMessage,
    res: plugins.http.ServerResponse,
    ctx: S3Context,
    params: Record<string, string>
  ): Promise<void> {
    const { bucket, key } = params;

    // Get object (without content)
    const object = await ctx.store.getObject(bucket, key);

    // Set response headers (same as GET but no body)
    ctx.setHeader('ETag', `"${object.md5}"`);
    ctx.setHeader('Last-Modified', object.lastModified.toUTCString());
    ctx.setHeader('Content-Type', object.metadata['content-type'] || 'binary/octet-stream');
    ctx.setHeader('Content-Length', object.size.toString());
    ctx.setHeader('Accept-Ranges', 'bytes');

    // Handle custom metadata headers
    for (const [key, value] of Object.entries(object.metadata)) {
      if (key.startsWith('x-amz-meta-')) {
        ctx.setHeader(key, value);
      }
    }

    ctx.status(200).send('');
  }

  /**
   * DELETE /:bucket/:key* - Delete object or abort multipart upload
   */
  public static async deleteObject(
    req: plugins.http.IncomingMessage,
    res: plugins.http.ServerResponse,
    ctx: S3Context,
    params: Record<string, string>
  ): Promise<void> {
    const { bucket, key } = params;

    // Check if this is an abort multipart upload
    if (ctx.query.uploadId) {
      return ObjectController.abortMultipartUpload(req, res, ctx, params);
    }

    await ctx.store.deleteObject(bucket, key);
    ctx.status(204).send('');
  }

  /**
   * COPY operation (PUT with x-amz-copy-source header)
   */
  private static async copyObject(
    req: plugins.http.IncomingMessage,
    res: plugins.http.ServerResponse,
    ctx: S3Context,
    params: Record<string, string>
  ): Promise<void> {
    const { bucket: destBucket, key: destKey } = params;
    const copySource = ctx.headers['x-amz-copy-source'] as string;

    // Parse source bucket and key from copy source
    // Format: /bucket/key or bucket/key
    const sourcePath = copySource.startsWith('/') ? copySource.slice(1) : copySource;
    const firstSlash = sourcePath.indexOf('/');
    const srcBucket = decodeURIComponent(sourcePath.slice(0, firstSlash));
    const srcKey = decodeURIComponent(sourcePath.slice(firstSlash + 1));
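
    // e.g. "x-amz-copy-source: /source-bucket/some%2Fkey" yields
    // srcBucket = "source-bucket" and srcKey = "some/key" (names hypothetical).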

    // Get metadata directive (COPY or REPLACE)
    const metadataDirective = (ctx.headers['x-amz-metadata-directive'] as string)?.toUpperCase() || 'COPY';

    // Extract new metadata if REPLACE
    let newMetadata: Record<string, string> | undefined;
    if (metadataDirective === 'REPLACE') {
      newMetadata = {};
      for (const [header, value] of Object.entries(ctx.headers)) {
        if (header.startsWith('x-amz-meta-')) {
          newMetadata[header] = value as string;
        }
        if (header === 'content-type' && value) {
          newMetadata['content-type'] = value as string;
        }
      }
    }

    // Perform copy
    const result = await ctx.store.copyObject(
      srcBucket,
      srcKey,
      destBucket,
      destKey,
      metadataDirective as 'COPY' | 'REPLACE',
      newMetadata
    );

    // Send XML response
    await ctx.sendXML({
      CopyObjectResult: {
        LastModified: new Date().toISOString(),
        ETag: `"${result.md5}"`,
      },
    });
  }

  /**
   * POST /:bucket/:key* - Initiate or complete multipart upload
   */
  public static async postObject(
    req: plugins.http.IncomingMessage,
    res: plugins.http.ServerResponse,
    ctx: S3Context,
    params: Record<string, string>
  ): Promise<void> {
    // Check if this is initiate multipart upload
    if (ctx.query.uploads !== undefined) {
      return ObjectController.initiateMultipartUpload(req, res, ctx, params);
    }

    // Check if this is complete multipart upload
    if (ctx.query.uploadId) {
      return ObjectController.completeMultipartUpload(req, res, ctx, params);
    }

    ctx.throw('InvalidRequest', 'Invalid POST request');
  }
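
  // Dispatch summary for the checks above (bucket/key/uploadId hypothetical):
  //   POST /my-bucket/my-key?uploads       -> initiateMultipartUpload
  //   POST /my-bucket/my-key?uploadId=abc  -> completeMultipartUpload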

  /**
   * Initiate Multipart Upload (POST with ?uploads)
   */
  private static async initiateMultipartUpload(
    req: plugins.http.IncomingMessage,
    res: plugins.http.ServerResponse,
    ctx: S3Context,
    params: Record<string, string>
  ): Promise<void> {
    const { bucket, key } = params;

    // Extract metadata from headers
    const metadata: Record<string, string> = {};
    for (const [header, value] of Object.entries(ctx.headers)) {
      if (header.startsWith('x-amz-meta-')) {
        metadata[header] = value as string;
      }
      if (header === 'content-type' && value) {
        metadata['content-type'] = value as string;
      }
    }

    // Initiate upload
    const uploadId = await ctx.multipart.initiateUpload(bucket, key, metadata);

    // Send XML response
    await ctx.sendXML({
      InitiateMultipartUploadResult: {
        Bucket: bucket,
        Key: key,
        UploadId: uploadId,
      },
    });
  }

  /**
   * Upload Part (PUT with ?partNumber&uploadId)
   */
  private static async uploadPart(
    req: plugins.http.IncomingMessage,
    res: plugins.http.ServerResponse,
    ctx: S3Context,
    params: Record<string, string>
  ): Promise<void> {
    const uploadId = ctx.query.uploadId!;
    const partNumber = parseInt(ctx.query.partNumber!);

    if (isNaN(partNumber) || partNumber < 1 || partNumber > 10000) {
      ctx.throw('InvalidPartNumber', 'Part number must be between 1 and 10000');
    }

    // Upload the part
    const partInfo = await ctx.multipart.uploadPart(
      uploadId,
      partNumber,
      ctx.getRequestStream() as any as import('stream').Readable
    );

    // Set ETag header (part ETag)
    ctx.setHeader('ETag', `"${partInfo.etag}"`);
    ctx.status(200).send('');
  }

  /**
   * Complete Multipart Upload (POST with ?uploadId)
   */
  private static async completeMultipartUpload(
    req: plugins.http.IncomingMessage,
    res: plugins.http.ServerResponse,
    ctx: S3Context,
    params: Record<string, string>
  ): Promise<void> {
    const { bucket, key } = params;
    const uploadId = ctx.query.uploadId!;

    // Read and parse request body (XML with part list)
    const body = await ctx.readBody();

    // Parse XML to extract parts
    // Expected format: <CompleteMultipartUpload><Part><PartNumber>1</PartNumber><ETag>"etag"</ETag></Part>...</CompleteMultipartUpload>
    const partMatches = body.matchAll(/<Part>.*?<PartNumber>(\d+)<\/PartNumber>.*?<ETag>(.*?)<\/ETag>.*?<\/Part>/gs);
    const parts: Array<{ PartNumber: number; ETag: string }> = [];

    for (const match of partMatches) {
      parts.push({
        PartNumber: parseInt(match[1]),
        ETag: match[2],
      });
    }

    // Complete the upload
    const result = await ctx.multipart.completeUpload(uploadId, parts);

    // Get upload metadata
    const upload = ctx.multipart.getUpload(uploadId);
    if (!upload) {
      ctx.throw('NoSuchUpload', 'The specified upload does not exist');
    }

    // Move final file to object store
    const finalPath = ctx.multipart.getFinalPath(uploadId);
    const finalContent = await plugins.smartfs.file(finalPath).read();

    // Create a readable stream from the buffer
    const { Readable } = await import('stream');
    const finalReadableStream = Readable.from([finalContent]);

    // Store the final object
    await ctx.store.putObject(bucket, key, finalReadableStream, upload.metadata);

    // Clean up multipart upload data
    await ctx.multipart.cleanupUpload(uploadId);

    // Send XML response
    await ctx.sendXML({
      CompleteMultipartUploadResult: {
        Location: `/${bucket}/${key}`,
        Bucket: bucket,
        Key: key,
        ETag: `"${result.etag}"`,
      },
    });
  }

  /**
   * Abort Multipart Upload (DELETE with ?uploadId)
   */
  private static async abortMultipartUpload(
    req: plugins.http.IncomingMessage,
    res: plugins.http.ServerResponse,
    ctx: S3Context,
    params: Record<string, string>
  ): Promise<void> {
    const uploadId = ctx.query.uploadId!;

    // Abort and cleanup
    await ctx.multipart.abortUpload(uploadId);

    ctx.status(204).send('');
  }
}
@@ -1,35 +0,0 @@
import * as plugins from '../plugins.js';
import type { S3Context } from '../classes/context.js';

/**
 * Service-level operations (root /)
 */
export class ServiceController {
  /**
   * GET / - List all buckets
   */
  public static async listBuckets(
    req: plugins.http.IncomingMessage,
    res: plugins.http.ServerResponse,
    ctx: S3Context,
    params: Record<string, string>
  ): Promise<void> {
    const buckets = await ctx.store.listBuckets();

    await ctx.sendXML({
      ListAllMyBucketsResult: {
        '@_xmlns': 'http://s3.amazonaws.com/doc/2006-03-01/',
        Owner: {
          ID: '123456789000',
          DisplayName: 'S3rver',
        },
        Buckets: {
          Bucket: buckets.map((bucket) => ({
            Name: bucket.name,
            CreationDate: bucket.creationDate.toISOString(),
          })),
        },
      },
    });
  }
}
ts/index.ts
@@ -1,6 +1,5 @@
import * as plugins from './plugins.js';
import * as paths from './paths.js';
import { Smarts3Server } from './classes/smarts3-server.js';

/**
 * Authentication configuration
@@ -165,6 +164,15 @@ function mergeConfig(userConfig: ISmarts3Config): Required<ISmarts3Config> {
  };
}

/**
 * IPC command type map for RustBridge
 */
type TRustS3Commands = {
  start: { params: { config: Required<ISmarts3Config> }; result: {} };
  stop: { params: {}; result: {} };
  createBucket: { params: { name: string }; result: {} };
};
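
// Illustrative: with this map, a call such as
//   bridge.sendCommand('createBucket', { name: 'test-bucket' })
// type-checks its payload against TRustS3Commands['createBucket']['params']
// (assuming RustBridge constrains sendCommand by this type parameter).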

/**
 * Main Smarts3 class - production-ready S3-compatible server
 */
@@ -178,22 +186,28 @@ export class Smarts3 {

  // INSTANCE
  public config: Required<ISmarts3Config>;
  public s3Instance: Smarts3Server;
  private bridge: InstanceType<typeof plugins.RustBridge<TRustS3Commands>>;

  constructor(configArg: ISmarts3Config = {}) {
    this.config = mergeConfig(configArg);
    this.bridge = new plugins.RustBridge<TRustS3Commands>({
      binaryName: 'rusts3',
      localPaths: [
        plugins.path.join(paths.packageDir, 'dist_rust', 'rusts3'),
        plugins.path.join(paths.packageDir, 'rust', 'target', 'release', 'rusts3'),
        plugins.path.join(paths.packageDir, 'rust', 'target', 'debug', 'rusts3'),
      ],
      readyTimeoutMs: 30000,
      requestTimeoutMs: 300000,
    });
  }

  public async start() {
    this.s3Instance = new Smarts3Server({
      port: this.config.server.port,
      address: this.config.server.address,
      directory: this.config.storage.directory,
      cleanSlate: this.config.storage.cleanSlate,
      silent: this.config.server.silent,
      config: this.config, // Pass full config to server
    });
    await this.s3Instance.start();
    const spawned = await this.bridge.spawn();
    if (!spawned) {
      throw new Error('Failed to spawn rusts3 binary. Make sure it is compiled (pnpm build).');
    }
    await this.bridge.sendCommand('start', { config: this.config });

    if (!this.config.server.silent) {
      console.log('s3 server is running');
@@ -203,7 +217,20 @@ export class Smarts3 {

  public async getS3Descriptor(
    optionsArg?: Partial<plugins.tsclass.storage.IS3Descriptor>,
  ): Promise<plugins.tsclass.storage.IS3Descriptor> {
    const descriptor = this.s3Instance.getS3Descriptor();
    const cred = this.config.auth.credentials[0] || {
      accessKeyId: 'S3RVER',
      secretAccessKey: 'S3RVER',
    };

    const descriptor: plugins.tsclass.storage.IS3Descriptor = {
      endpoint: this.config.server.address === '0.0.0.0' ? 'localhost' : this.config.server.address!,
      port: this.config.server.port!,
      useSsl: false,
      accessKey: cred.accessKeyId,
      accessSecret: cred.secretAccessKey,
      bucketName: '',
    };

    return {
      ...descriptor,
      ...(optionsArg ? optionsArg : {}),
@@ -211,15 +238,12 @@
  }

  public async createBucket(bucketNameArg: string) {
    // Call the filesystem store directly instead of using the client library
    await this.s3Instance.store.createBucket(bucketNameArg);
    await this.bridge.sendCommand('createBucket', { name: bucketNameArg });
    return { name: bucketNameArg };
  }

  public async stop() {
    await this.s3Instance.stop();
    await this.bridge.sendCommand('stop', {});
    this.bridge.kill();
  }
}

// Export the custom server class for direct use
export { Smarts3Server } from './classes/smarts3-server.js';
@@ -1,20 +1,13 @@
// node native
import * as path from 'path';
import * as http from 'http';
import * as crypto from 'crypto';
import * as url from 'url';

export { path, http, crypto, url };
export { path };

// @push.rocks scope
import { SmartFs, SmartFsProviderNode } from '@push.rocks/smartfs';
import * as smartpath from '@push.rocks/smartpath';
import { SmartXml } from '@push.rocks/smartxml';
import { RustBridge } from '@push.rocks/smartrust';

// Create SmartFs instance with Node.js provider
export const smartfs = new SmartFs(new SmartFsProviderNode());

export { smartpath, SmartXml };
export { smartpath, RustBridge };

// @tsclass scope
import * as tsclass from '@tsclass/tsclass';
@@ -1,39 +0,0 @@
import * as plugins from '../plugins.js';

// Create a singleton instance of SmartXml
const smartXmlInstance = new plugins.SmartXml();

/**
 * Parse XML string to JavaScript object
 */
export function parseXml(xmlString: string): any {
  return smartXmlInstance.parseXmlToObject(xmlString);
}

/**
 * Convert JavaScript object to XML string with XML declaration
 */
export function createXml(obj: any, options: { format?: boolean } = {}): string {
  const xml = smartXmlInstance.createXmlFromObject(obj);

  // Ensure XML declaration is present
  if (!xml.startsWith('<?xml')) {
    return `<?xml version="1.0" encoding="UTF-8"?>\n${xml}`;
  }

  return xml;
}
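
// e.g. createXml({ Foo: { Bar: 1 } }) would yield something like
//   <?xml version="1.0" encoding="UTF-8"?>
//   <Foo><Bar>1</Bar></Foo>
// (exact formatting depends on SmartXml's serializer).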

/**
 * Helper to create S3-compatible XML responses with proper namespace
 */
export function createS3Xml(rootElement: string, content: any, namespace = 'http://s3.amazonaws.com/doc/2006-03-01/'): string {
  const obj: any = {
    [rootElement]: {
      '@_xmlns': namespace,
      ...content,
    },
  };

  return createXml(obj, { format: true });
}