fix(smarts3): replace TypeScript server with Rust-powered core and IPC bridge

This commit is contained in:
2026-02-13 13:59:44 +00:00
parent 54a0c2fb65
commit 65eb266983
32 changed files with 4083 additions and 3182 deletions

3
.gitignore vendored
View File

@@ -20,4 +20,5 @@ dist_*/
.claude/
.serena/
#------# custom
#------# custom
rust/target

View File

@@ -1,68 +0,0 @@
# language of the project (csharp, python, rust, java, typescript, go, cpp, or ruby)
# * For C, use cpp
# * For JavaScript, use typescript
# Special requirements:
# * csharp: Requires the presence of a .sln file in the project folder.
language: typescript
# whether to use the project's gitignore file to ignore files
# Added on 2025-04-07
ignore_all_files_in_gitignore: true
# list of additional paths to ignore
# same syntax as gitignore, so you can use * and **
# Was previously called `ignored_dirs`, please update your config if you are using that.
# Added (renamed) on 2025-04-07
ignored_paths: []
# whether the project is in read-only mode
# If set to true, all editing tools will be disabled and attempts to use them will result in an error
# Added on 2025-04-18
read_only: false
# list of tool names to exclude. We recommend not excluding any tools, see the readme for more details.
# Below is the complete list of tools for convenience.
# To make sure you have the latest list of tools, and to view their descriptions,
# execute `uv run scripts/print_tool_overview.py`.
#
# * `activate_project`: Activates a project by name.
# * `check_onboarding_performed`: Checks whether project onboarding was already performed.
# * `create_text_file`: Creates/overwrites a file in the project directory.
# * `delete_lines`: Deletes a range of lines within a file.
# * `delete_memory`: Deletes a memory from Serena's project-specific memory store.
# * `execute_shell_command`: Executes a shell command.
# * `find_referencing_code_snippets`: Finds code snippets in which the symbol at the given location is referenced.
# * `find_referencing_symbols`: Finds symbols that reference the symbol at the given location (optionally filtered by type).
# * `find_symbol`: Performs a global (or local) search for symbols with/containing a given name/substring (optionally filtered by type).
# * `get_current_config`: Prints the current configuration of the agent, including the active and available projects, tools, contexts, and modes.
# * `get_symbols_overview`: Gets an overview of the top-level symbols defined in a given file.
# * `initial_instructions`: Gets the initial instructions for the current project.
# Should only be used in settings where the system prompt cannot be set,
# e.g. in clients you have no control over, like Claude Desktop.
# * `insert_after_symbol`: Inserts content after the end of the definition of a given symbol.
# * `insert_at_line`: Inserts content at a given line in a file.
# * `insert_before_symbol`: Inserts content before the beginning of the definition of a given symbol.
# * `list_dir`: Lists files and directories in the given directory (optionally with recursion).
# * `list_memories`: Lists memories in Serena's project-specific memory store.
# * `onboarding`: Performs onboarding (identifying the project structure and essential tasks, e.g. for testing or building).
# * `prepare_for_new_conversation`: Provides instructions for preparing for a new conversation (in order to continue with the necessary context).
# * `read_file`: Reads a file within the project directory.
# * `read_memory`: Reads the memory with the given name from Serena's project-specific memory store.
# * `remove_project`: Removes a project from the Serena configuration.
# * `replace_lines`: Replaces a range of lines within a file with new content.
# * `replace_symbol_body`: Replaces the full definition of a symbol.
# * `restart_language_server`: Restarts the language server, may be necessary when edits not through Serena happen.
# * `search_for_pattern`: Performs a search for a pattern in the project.
# * `summarize_changes`: Provides instructions for summarizing the changes made to the codebase.
# * `switch_modes`: Activates modes by providing a list of their names
# * `think_about_collected_information`: Thinking tool for pondering the completeness of collected information.
# * `think_about_task_adherence`: Thinking tool for determining whether the agent is still on track with the current task.
# * `think_about_whether_you_are_done`: Thinking tool for determining whether the task is truly completed.
# * `write_memory`: Writes a named memory (for future reference) to Serena's project-specific memory store.
excluded_tools: []
# initial prompt for the project. It will always be given to the LLM upon activating the project
# (contrary to the memories, which are loaded on demand).
initial_prompt: ""
project_name: "smarts3"

View File

@@ -1,5 +1,16 @@
# Changelog
## 2026-02-13 - 5.1.1 - fix(smarts3)
replace TypeScript server with Rust-powered core and IPC bridge
- Major refactor: Node.js/TypeScript in-process server replaced by a Rust crate ('rusts3') with a TypeScript IPC wrapper (RustBridge).
- Removed many TypeScript server modules (smarts3-server, filesystem-store, multipart-manager, controllers, router, context, logger, xml utils, etc.); Smarts3Server export removed — public API now proxies to the Rust binary.
- Smarts3 now spawns and communicates with the rusts3 binary via RustBridge IPC (commands include start, stop, createBucket).
- Build & packaging changes: build script now runs `tsrust` before `tsbuild`; added `@git.zone/tsrust` devDependency; added `dist_rust` artifacts and new cross-compile targets in npmextra.json; .gitignore updated for rust/target.
- Dependency changes: added `@push.rocks/smartrust` (RustBridge) and simplified plugins surface; previous smartfs/smartxml usage removed from TS code and replaced by the Rust implementation + IPC.
- Added Rust project files (rust/Cargo.toml, rust/src/*) implementing server, IPC management loop, storage, XML responses, errors, and config.
- Documentation updated (README and hints) to describe the Rust core, supported prebuilt targets (linux_amd64, linux_arm64), IPC commands, and developer build notes.
## 2025-11-23 - 5.1.0 - feat(multipart)
Implement full multipart upload support with persistent manager, periodic cleanup, and API integration

View File

@@ -1,5 +1,11 @@
{
"gitzone": {
"@git.zone/tsrust": {
"targets": [
"linux_amd64",
"linux_arm64"
]
},
"@git.zone/cli": {
"projectType": "npm",
"module": {
"githost": "code.foss.global",
@@ -27,13 +33,19 @@
"CI/CD Integration",
"Developer Onboarding"
]
},
"release": {
"registries": [
"https://verdaccio.lossless.digital",
"https://registry.npmjs.org"
],
"accessLevel": "public"
}
},
"npmci": {
"npmGlobalTools": [],
"npmAccessLevel": "public"
},
"tsdoc": {
"@git.zone/tsdoc": {
"legal": "\n## License and Legal Information\n\nThis repository contains open-source code that is licensed under the MIT License. A copy of the MIT License can be found in the [license](license) file within this repository. \n\n**Please note:** The MIT License does not grant permission to use the trade names, trademarks, service marks, or product names of the project, except as required for reasonable and customary use in describing the origin of the work and reproducing the content of the NOTICE file.\n\n### Trademarks\n\nThis project is owned and maintained by Task Venture Capital GmbH. The names and logos associated with Task Venture Capital GmbH and any related products or services are trademarks of Task Venture Capital GmbH and are not included within the scope of the MIT license granted herein. Use of these trademarks must comply with Task Venture Capital GmbH's Trademark Guidelines, and any usage must be approved in writing by Task Venture Capital GmbH.\n\n### Company Information\n\nTask Venture Capital GmbH \nRegistered at District court Bremen HRB 35230 HB, Germany\n\nFor any legal inquiries or if you require further information, please contact us via email at hello@task.vc.\n\nBy using this repository, you acknowledge that you have read this section, agree to comply with its terms, and understand that the licensing of the code does not imply endorsement by Task Venture Capital GmbH of any derivative works.\n"
},
"@ship.zone/szci": {
"npmGlobalTools": []
}
}
}

View File

@@ -10,7 +10,7 @@
"license": "MIT",
"scripts": {
"test": "(tstest test/ --web --verbose --logfile --timeout 60)",
"build": "(tsbuild --web --allowimplicitany)",
"build": "(tsrust && tsbuild --web --allowimplicitany)",
"buildDocs": "tsdoc"
},
"devDependencies": {
@@ -20,6 +20,7 @@
"@git.zone/tsrun": "^2.0.0",
"@git.zone/tstest": "^3.1.0",
"@push.rocks/smartbucket": "^4.3.0",
"@git.zone/tsrust": "^1.3.0",
"@types/node": "^22.9.0"
},
"browserslist": [
@@ -31,6 +32,7 @@
"dist/**/*",
"dist_*/**/*",
"dist_ts/**/*",
"dist_rust/**/*",
"dist_ts_web/**/*",
"assets/**/*",
"cli.js",
@@ -38,9 +40,8 @@
"readme.md"
],
"dependencies": {
"@push.rocks/smartfs": "^1.1.0",
"@push.rocks/smartpath": "^6.0.0",
"@push.rocks/smartxml": "^2.0.0",
"@push.rocks/smartrust": "^1.0.0",
"@tsclass/tsclass": "^9.3.0"
},
"keywords": [

View File

@@ -1,74 +1,60 @@
# Project Hints for smarts3
## Current State (v3.0.0)
## Current State (v6.0.0-dev)
- Native custom S3 server implementation (Smarts3Server)
- No longer uses legacy s3rver backend (removed in v3.0.0)
- Core S3 operations working: PUT, GET, HEAD, DELETE for objects and buckets
- Multipart upload NOT yet implemented (critical gap for production)
- Authentication is hardcoded ('S3RVER'/'S3RVER') - not production-ready
- No CORS support yet
- No SSL/TLS support yet
- **Rust-powered S3 server** via `@push.rocks/smartrust` IPC bridge
- High-performance: streaming I/O, zero-copy, backpressure, range seek
- TypeScript is a thin IPC wrapper; all HTTP/storage/routing lives in the Rust binary `rusts3`
- Full S3 compatibility: PUT, GET, HEAD, DELETE for objects and buckets
- Multipart upload support (streaming, no OOM)
- Authentication (AWS v2/v4 signature key extraction)
- CORS support
- ListBuckets, ListObjects (v1/v2), CopyObject
## Production Readiness
## Architecture
See `production-readiness.md` for the complete gap analysis and implementation plan.
### Rust Binary (`rust/src/`)
- `main.rs` - Clap CLI, management mode entry
- `config.rs` - Serde config structs matching TS interfaces
- `management.rs` - IPC loop (newline-delimited JSON over stdin/stdout)
- `server.rs` - hyper 1.x HTTP server, routing, CORS, auth, all S3 handlers
- `storage.rs` - FileStore: filesystem-backed storage, multipart manager
- `xml_response.rs` - S3 XML response builders
- `s3_error.rs` - S3 error codes with HTTP status mapping
**Key Missing Features for Production:**
1. Multipart upload support (HIGHEST PRIORITY)
2. Configurable authentication
3. CORS middleware
4. SSL/TLS support
5. Production configuration system
6. Production logging
### TypeScript Bridge (`ts/`)
- `ts/index.ts` - Smarts3 class with RustBridge<TRustS3Commands>
- `ts/plugins.ts` - path, smartpath, RustBridge, tsclass
- `ts/paths.ts` - packageDir, bucketsDir defaults
## Architecture Notes
### IPC Commands
| Command | Params | Action |
|---------|--------|--------|
| `start` | `{ config: ISmarts3Config }` | Init storage + HTTP server |
| `stop` | `{}` | Graceful shutdown |
| `createBucket` | `{ name: string }` | Create bucket directory |
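Wire format sketch (one JSON object per line over stdin/stdout). The shapes below mirror the `IpcRequest` / `IpcResponse` / `IpcEvent` structs in `rust/src/management.rs`; the TypeScript names are illustrative, not RustBridge's actual exports.

```typescript
// Illustrative message shapes for the newline-delimited JSON IPC.
// Mirrors rust/src/management.rs; names here are for documentation only.
interface IpcRequest {
  id: string;                                  // correlates request and response
  method: 'start' | 'stop' | 'createBucket';
  params: unknown;                             // e.g. { config: ISmarts3Config } for 'start'
}

interface IpcResponse {
  id: string;
  success: boolean;
  result?: unknown;                            // present on success
  error?: string;                              // present on failure
}

interface IpcEvent {
  event: string;                               // e.g. 'ready' once the binary is up
  data: unknown;
}

// Example line sent to the binary's stdin:
// {"id":"1","method":"createBucket","params":{"name":"my-bucket"}}
```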
### File Structure
- `ts/classes/smarts3-server.ts` - Main server class
- `ts/classes/filesystem-store.ts` - Storage layer (filesystem-backed)
- `ts/classes/router.ts` - URL routing with pattern matching
- `ts/classes/middleware-stack.ts` - Middleware execution
- `ts/classes/context.ts` - Request/response context
- `ts/classes/s3-error.ts` - S3-compatible error handling
- `ts/controllers/` - Service, bucket, and object controllers
- `ts/index.ts` - Main export (Smarts3 class)
### Storage Layout (backward-compatible)
- Objects: `{root}/{bucket}/{key}._S3_object`
- Metadata: `{root}/{bucket}/{key}._S3_object.metadata.json`
- MD5: `{root}/{bucket}/{key}._S3_object.md5`
- Multipart: `{root}/.multipart/{upload_id}/part-{N}`
### Storage Layout
- Objects stored as: `{bucket}/{encodedKey}._S3_object`
- Metadata stored as: `{bucket}/{encodedKey}._S3_object.metadata.json`
- MD5 stored as: `{bucket}/{encodedKey}._S3_object.md5`
- Keys are encoded for Windows compatibility (hex encoding for invalid chars)
## Build
### Current Limitations
- Max file size limited by available memory (no streaming multipart)
- Single server instance only (no clustering)
- No versioning support
- No access control beyond basic auth
## Testing
- Main test: `test/test.aws-sdk.node.ts` - Tests AWS SDK v3 compatibility
- Run with: `pnpm test`
- Tests run with cleanSlate mode enabled
- `pnpm build` runs `tsrust && tsbuild --web --allowimplicitany`
- `tsrust` compiles Rust to `dist_rust/rusts3`
- Targets: linux_amd64, linux_arm64 (configured in npmextra.json)
## Dependencies
- `@push.rocks/smartbucket` - S3 abstraction layer
- `@push.rocks/smartfs` - Modern filesystem operations with Web Streams API (replaced smartfile)
- `@push.rocks/smartxml` - XML generation/parsing
- `@push.rocks/smartrust` - RustBridge IPC bridge
- `@push.rocks/smartpath` - Path utilities
- `@tsclass/tsclass` - TypeScript utilities
- `@tsclass/tsclass` - IS3Descriptor type
- `@git.zone/tsrust` (devDep) - Rust cross-compilation
## Migration Notes (2025-11-23)
## Testing
Successfully migrated from `@push.rocks/smartfile` + native `fs` to `@push.rocks/smartfs`:
- All file/directory operations now use smartfs fluent API
- Web Streams → Node.js Streams conversion for HTTP compatibility
- All tests passing ✅
- Build successful ✅
## Next Steps
Waiting for approval to proceed with production-readiness implementation.
Priority 1 is implementing multipart uploads.
- `test/test.aws-sdk.node.ts` - AWS SDK v3 compatibility (10 tests)
- `test/test.ts` - SmartBucket integration (3 tests)
- Run: `pnpm test` or `tstest test/test.aws-sdk.node.ts --verbose`

641
readme.md
View File

@@ -1,300 +1,239 @@
# @push.rocks/smarts3 🚀
**Production-ready S3-compatible server** - A powerful, lightweight Node.js TypeScript package that brings full S3 API compatibility to your local filesystem. Perfect for development, testing, and scenarios where running MinIO is out of scope!
## 🌟 Features
- 🏃 **Lightning-fast local S3 simulation** - No more waiting for cloud operations during development
- ⚡ **Production-ready architecture** - Built on Node.js http module with zero framework dependencies
- 🔄 **Full S3 API compatibility** - Works seamlessly with AWS SDK v3 and any other S3 client
- 📂 **Local directory mapping** - Your buckets live right on your filesystem
- 🔐 **Simple authentication** - Static credential-based auth for secure access
- 🌐 **CORS support** - Configurable cross-origin resource sharing
- 📊 **Structured logging** - Multiple levels (error/warn/info/debug) and formats (text/JSON)
- 📤 **Multipart uploads** - Full support for large file uploads (>5MB)
- 🧪 **Perfect for testing** - Reliable, repeatable tests without cloud dependencies
- 🎯 **TypeScript-first** - Built with TypeScript for excellent type safety and IDE support
- 🔧 **Flexible configuration** - Comprehensive config system with sensible defaults
- 🧹 **Clean slate mode** - Start fresh on every test run
A high-performance, S3-compatible local server powered by a **Rust core** with a clean TypeScript API. Drop-in replacement for AWS S3 during development and testing — no cloud, no Docker, no MinIO. Just `npm install` and go.
## Issue Reporting and Security
For reporting bugs, issues, or security vulnerabilities, please visit [community.foss.global/](https://community.foss.global/). This is the central community hub for all issue reporting. Developers who sign and comply with our contribution agreement and go through identification can also get a [code.foss.global/](https://code.foss.global/) account to submit Pull Requests directly.
## 🌟 Why smarts3?
| Feature | smarts3 | MinIO | s3rver |
|---------|---------|-------|--------|
| Install | `pnpm add` | Docker / binary | `npm install` |
| Startup time | ~20ms | seconds | ~200ms |
| Large file uploads | ✅ Streaming, zero-copy | ✅ | ❌ OOM risk |
| Range requests | ✅ Seek-based | ✅ | ❌ Full read |
| Language | Rust + TypeScript | Go | JavaScript |
| Multipart uploads | ✅ Full support | ✅ | ❌ |
| Auth | AWS v2/v4 key extraction | Full IAM | Basic |
### Core Features
- ⚡ **Rust-powered HTTP server** — hyper 1.x with streaming I/O, zero-copy, backpressure
- 🔄 **Full S3 API compatibility** — works with AWS SDK v3, SmartBucket, any S3 client
- 📂 **Filesystem-backed storage** — buckets map to directories, objects to files
- 📤 **Streaming multipart uploads** — large files without memory pressure
- 🎯 **Byte-range requests** — `seek()` directly to the requested byte offset
- 🔐 **Authentication** — AWS v2/v4 signature key extraction
- 🌐 **CORS middleware** — configurable cross-origin support
- 📊 **Structured logging** — tracing-based, error through debug levels
- 🧹 **Clean slate mode** — wipe storage on startup for test isolation
- 🧪 **Test-first design** — start/stop in milliseconds, no port conflicts
## 📦 Installation
Install using your favorite package manager:
```bash
# Using npm
npm install @push.rocks/smarts3 --save-dev
# Using pnpm (recommended)
pnpm add @push.rocks/smarts3 -D
# Using yarn
yarn add @push.rocks/smarts3 --dev
```
## 🚀 Quick Start
> **Note:** The package ships with precompiled Rust binaries for `linux_amd64` and `linux_arm64`. No Rust toolchain needed on your machine.
Get up and running in seconds:
## 🚀 Quick Start
```typescript
import { Smarts3 } from '@push.rocks/smarts3';
// Start your local S3 server with minimal config
const s3Server = await Smarts3.createAndStart({
server: {
port: 3000,
silent: false,
},
storage: {
cleanSlate: true, // Start with empty buckets
},
// Start a local S3 server
const s3 = await Smarts3.createAndStart({
server: { port: 3000 },
storage: { cleanSlate: true },
});
// Create a bucket
const bucket = await s3Server.createBucket('my-awesome-bucket');
await s3.createBucket('my-bucket');
// Get S3 connection details for use with AWS SDK or other S3 clients
const s3Config = await s3Server.getS3Descriptor();
// Get connection details for any S3 client
const descriptor = await s3.getS3Descriptor();
// → { endpoint: 'localhost', port: 3000, accessKey: 'S3RVER', accessSecret: 'S3RVER', useSsl: false }
// When you're done
await s3Server.stop();
// When done
await s3.stop();
```
## 📖 Configuration Guide
## 📖 Configuration
### Complete Configuration Options
Smarts3 uses a comprehensive nested configuration structure:
All config fields are optional — sensible defaults are applied automatically.
```typescript
import { Smarts3, ISmarts3Config } from '@push.rocks/smarts3';
const config: ISmarts3Config = {
// Server configuration
server: {
port: 3000, // Port to listen on (default: 3000)
address: '0.0.0.0', // Bind address (default: '0.0.0.0')
silent: false, // Disable all console output (default: false)
port: 3000, // Default: 3000
address: '0.0.0.0', // Default: '0.0.0.0'
silent: false, // Default: false
},
// Storage configuration
storage: {
directory: './buckets', // Directory to store buckets (default: .nogit/bucketsDir)
cleanSlate: false, // Clear all data on start (default: false)
directory: './my-data', // Default: .nogit/bucketsDir
cleanSlate: false, // Default: false — set true to wipe on start
},
// Authentication configuration
auth: {
enabled: false, // Enable authentication (default: false)
credentials: [ // List of valid credentials
{
accessKeyId: 'YOUR_ACCESS_KEY',
secretAccessKey: 'YOUR_SECRET_KEY',
},
],
enabled: false, // Default: false
credentials: [{
accessKeyId: 'MY_KEY',
secretAccessKey: 'MY_SECRET',
}],
},
// CORS configuration
cors: {
enabled: false, // Enable CORS (default: false)
allowedOrigins: ['*'], // Allowed origins (default: ['*'])
allowedMethods: [ // Allowed HTTP methods
'GET', 'POST', 'PUT', 'DELETE', 'HEAD', 'OPTIONS'
],
allowedHeaders: ['*'], // Allowed headers (default: ['*'])
exposedHeaders: [ // Headers exposed to client
'ETag', 'x-amz-request-id', 'x-amz-version-id'
],
maxAge: 86400, // Preflight cache duration in seconds
allowCredentials: false, // Allow credentials (default: false)
enabled: false, // Default: false
allowedOrigins: ['*'],
allowedMethods: ['GET', 'POST', 'PUT', 'DELETE', 'HEAD', 'OPTIONS'],
allowedHeaders: ['*'],
exposedHeaders: ['ETag', 'x-amz-request-id', 'x-amz-version-id'],
maxAge: 86400,
allowCredentials: false,
},
// Logging configuration
logging: {
level: 'info', // Log level: 'error' | 'warn' | 'info' | 'debug'
format: 'text', // Log format: 'text' | 'json'
enabled: true, // Enable logging (default: true)
level: 'info', // 'error' | 'warn' | 'info' | 'debug'
format: 'text', // 'text' | 'json'
enabled: true,
},
// Request limits
limits: {
maxObjectSize: 5 * 1024 * 1024 * 1024, // 5GB max object size
maxMetadataSize: 2048, // 2KB max metadata size
requestTimeout: 300000, // 5 minutes request timeout
maxObjectSize: 5 * 1024 * 1024 * 1024, // 5 GB
maxMetadataSize: 2048,
requestTimeout: 300000, // 5 minutes
},
multipart: {
expirationDays: 7,
cleanupIntervalMinutes: 60,
},
};
const s3Server = await Smarts3.createAndStart(config);
const s3 = await Smarts3.createAndStart(config);
```
### Simple Configuration Examples
### Common Configurations
**Development Mode (Default)**
**CI/CD testing** — silent, clean, fast:
```typescript
const s3Server = await Smarts3.createAndStart({
server: { port: 3000 },
const s3 = await Smarts3.createAndStart({
server: { port: 9999, silent: true },
storage: { cleanSlate: true },
});
```
**Production Mode with Auth**
**Auth enabled:**
```typescript
const s3Server = await Smarts3.createAndStart({
server: { port: 3000 },
const s3 = await Smarts3.createAndStart({
auth: {
enabled: true,
credentials: [
{
accessKeyId: process.env.S3_ACCESS_KEY,
secretAccessKey: process.env.S3_SECRET_KEY,
},
],
},
logging: {
level: 'warn',
format: 'json',
credentials: [{ accessKeyId: 'test', secretAccessKey: 'test123' }],
},
});
```
**CORS-Enabled for Web Apps**
**CORS for local web dev:**
```typescript
const s3Server = await Smarts3.createAndStart({
server: { port: 3000 },
const s3 = await Smarts3.createAndStart({
cors: {
enabled: true,
allowedOrigins: ['http://localhost:8080', 'https://app.example.com'],
allowedOrigins: ['http://localhost:5173'],
allowCredentials: true,
},
});
```
## 🪣 Working with Buckets
### Creating Buckets
## 📤 Usage with AWS SDK v3
```typescript
// Create a new bucket
const bucket = await s3Server.createBucket('my-bucket');
console.log(`Created bucket: ${bucket.name}`);
```
import { S3Client, PutObjectCommand, GetObjectCommand, DeleteObjectCommand } from '@aws-sdk/client-s3';
## 📤 File Operations
const descriptor = await s3.getS3Descriptor();
### Using AWS SDK v3
```typescript
import { S3Client, PutObjectCommand, GetObjectCommand } from '@aws-sdk/client-s3';
// Get connection config
const config = await s3Server.getS3Descriptor();
// Configure AWS SDK client
const s3Client = new S3Client({
endpoint: `http://${config.endpoint}:${config.port}`,
const client = new S3Client({
endpoint: `http://${descriptor.endpoint}:${descriptor.port}`,
region: 'us-east-1',
credentials: {
accessKeyId: config.accessKey,
secretAccessKey: config.accessSecret,
accessKeyId: descriptor.accessKey,
secretAccessKey: descriptor.accessSecret,
},
forcePathStyle: true,
forcePathStyle: true, // Required for path-style S3
});
// Upload a file
await s3Client.send(new PutObjectCommand({
// Upload
await client.send(new PutObjectCommand({
Bucket: 'my-bucket',
Key: 'test-file.txt',
Body: 'Hello from AWS SDK!',
Key: 'hello.txt',
Body: 'Hello, S3!',
ContentType: 'text/plain',
}));
// Download a file
const response = await s3Client.send(new GetObjectCommand({
// Download
const { Body } = await client.send(new GetObjectCommand({
Bucket: 'my-bucket',
Key: 'test-file.txt',
Key: 'hello.txt',
}));
const content = await Body.transformToString(); // "Hello, S3!"
const content = await response.Body.transformToString();
console.log(content); // "Hello from AWS SDK!"
// Delete
await client.send(new DeleteObjectCommand({
Bucket: 'my-bucket',
Key: 'hello.txt',
}));
```
### Using SmartBucket
## 🪣 Usage with SmartBucket
```typescript
import { SmartBucket } from '@push.rocks/smartbucket';
// Get connection configuration
const s3Config = await s3Server.getS3Descriptor();
const smartbucket = new SmartBucket(await s3.getS3Descriptor());
const bucket = await smartbucket.createBucket('my-bucket');
const dir = await bucket.getBaseDirectory();
// Create a SmartBucket instance
const smartbucket = new SmartBucket(s3Config);
const bucket = await smartbucket.getBucket('my-bucket');
const baseDir = await bucket.getBaseDirectory();
// Upload
await dir.fastPut({ path: 'docs/readme.txt', contents: 'Hello!' });
// Upload files
await baseDir.fastStore('path/to/file.txt', 'Hello, S3! 🎉');
await baseDir.fastPut({
path: 'documents/important.pdf',
contents: Buffer.from(yourPdfData),
});
// Download
const content = await dir.fastGet('docs/readme.txt');
// Download files
const content = await baseDir.fastGet('path/to/file.txt');
const buffer = await baseDir.fastGetBuffer('documents/important.pdf');
// List files
const files = await baseDir.listFiles();
files.forEach((file) => {
console.log(`📄 ${file.name} (${file.size} bytes)`);
});
// Delete files
await baseDir.fastDelete('old-file.txt');
// List
const files = await dir.listFiles();
```
## 📤 Multipart Uploads
Smarts3 supports multipart uploads for large files (>5MB):
For files larger than 5 MB, use multipart uploads. smarts3 handles them with **streaming I/O** — parts are written directly to disk, never buffered in memory.
```typescript
import {
S3Client,
CreateMultipartUploadCommand,
UploadPartCommand,
CompleteMultipartUploadCommand
CompleteMultipartUploadCommand,
} from '@aws-sdk/client-s3';
const s3Client = new S3Client(/* ... */);
// 1. Initiate multipart upload
const { UploadId } = await s3Client.send(new CreateMultipartUploadCommand({
// 1. Initiate
const { UploadId } = await client.send(new CreateMultipartUploadCommand({
Bucket: 'my-bucket',
Key: 'large-file.bin',
}));
// 2. Upload parts (in parallel if desired)
// 2. Upload parts
const parts = [];
for (let i = 0; i < numParts; i++) {
const part = await s3Client.send(new UploadPartCommand({
for (let i = 0; i < chunks.length; i++) {
const { ETag } = await client.send(new UploadPartCommand({
Bucket: 'my-bucket',
Key: 'large-file.bin',
UploadId,
PartNumber: i + 1,
Body: partData[i],
Body: chunks[i],
}));
parts.push({
PartNumber: i + 1,
ETag: part.ETag,
});
parts.push({ PartNumber: i + 1, ETag });
}
// 3. Complete the upload
await s3Client.send(new CompleteMultipartUploadCommand({
// 3. Complete
await client.send(new CompleteMultipartUploadCommand({
Bucket: 'my-bucket',
Key: 'large-file.bin',
UploadId,
@@ -304,298 +243,150 @@ await s3Client.send(new CompleteMultipartUploadCommand({
## 🧪 Testing Integration
### Using with Jest
```typescript
import { Smarts3 } from '@push.rocks/smarts3';
import { tap, expect } from '@git.zone/tstest/tapbundle';
describe('S3 Operations', () => {
let s3Server: Smarts3;
let s3: Smarts3;
beforeAll(async () => {
s3Server = await Smarts3.createAndStart({
server: { port: 9999, silent: true },
storage: { cleanSlate: true },
});
});
afterAll(async () => {
await s3Server.stop();
});
test('should upload and retrieve a file', async () => {
const bucket = await s3Server.createBucket('test-bucket');
// Your test logic here
});
});
```
### Using with Mocha
```typescript
import { Smarts3 } from '@push.rocks/smarts3';
import { expect } from 'chai';
describe('S3 Operations', () => {
let s3Server: Smarts3;
before(async () => {
s3Server = await Smarts3.createAndStart({
server: { port: 9999, silent: true },
storage: { cleanSlate: true },
});
});
after(async () => {
await s3Server.stop();
});
it('should upload and retrieve a file', async () => {
const bucket = await s3Server.createBucket('test-bucket');
// Your test logic here
});
});
```
## 🎯 Real-World Use Cases
### CI/CD Pipeline Testing
```typescript
// ci-test.ts
import { Smarts3 } from '@push.rocks/smarts3';
export async function setupTestEnvironment() {
const s3 = await Smarts3.createAndStart({
server: {
port: process.env.S3_PORT || 3000,
silent: true,
},
storage: { cleanSlate: true },
logging: { level: 'error' }, // Only log errors in CI
});
// Create test buckets
await s3.createBucket('uploads');
await s3.createBucket('processed');
await s3.createBucket('archive');
return s3;
}
```
### Microservice Development
```typescript
// dev-server.ts
import { Smarts3 } from '@push.rocks/smarts3';
import express from 'express';
async function startDevelopmentServer() {
// Start local S3 with CORS for local development
const s3 = await Smarts3.createAndStart({
server: { port: 3000 },
cors: {
enabled: true,
allowedOrigins: ['http://localhost:8080'],
},
});
await s3.createBucket('user-uploads');
// Start your API server
const app = express();
app.post('/upload', async (req, res) => {
// Your upload logic using local S3
});
app.listen(8080, () => {
console.log('🚀 Dev server running with local S3!');
});
}
```
### Data Migration Testing
```typescript
import { Smarts3 } from '@push.rocks/smarts3';
import { SmartBucket } from '@push.rocks/smartbucket';
async function testDataMigration() {
const s3 = await Smarts3.createAndStart({
tap.test('setup', async () => {
s3 = await Smarts3.createAndStart({
server: { port: 4567, silent: true },
storage: { cleanSlate: true },
});
});
// Create source and destination buckets
await s3.createBucket('legacy-data');
await s3.createBucket('new-data');
tap.test('should store and retrieve objects', async () => {
await s3.createBucket('test');
// ... your test logic using AWS SDK or SmartBucket
});
// Populate source with test data
const config = await s3.getS3Descriptor();
const smartbucket = new SmartBucket(config);
const source = await smartbucket.getBucket('legacy-data');
const sourceDir = await source.getBaseDirectory();
tap.test('teardown', async () => {
await s3.stop();
});
await sourceDir.fastStore('user-1.json', JSON.stringify({ id: 1, name: 'Alice' }));
await sourceDir.fastStore('user-2.json', JSON.stringify({ id: 2, name: 'Bob' }));
// Run your migration logic
await runMigration(config);
// Verify migration results
const dest = await smartbucket.getBucket('new-data');
const destDir = await dest.getBaseDirectory();
const migratedFiles = await destDir.listFiles();
console.log(`✅ Migrated ${migratedFiles.length} files successfully!`);
}
export default tap.start();
```
## 🔧 API Reference
### Smarts3 Class
### `Smarts3` Class
#### Static Methods
#### `static createAndStart(config?: ISmarts3Config): Promise<Smarts3>`
##### `createAndStart(config?: ISmarts3Config): Promise<Smarts3>`
Create and start a server in one call.
Create and start a Smarts3 instance in one call.
#### `start(): Promise<void>`
**Parameters:**
- `config` - Optional configuration object (see Configuration Guide above)
Spawn the Rust binary and start the HTTP server.
**Returns:** Promise that resolves to a running Smarts3 instance
#### `stop(): Promise<void>`
#### Instance Methods
Gracefully stop the server and kill the Rust process.
##### `start(): Promise<void>`
#### `createBucket(name: string): Promise<{ name: string }>`
Start the S3 server.
Create an S3 bucket.
##### `stop(): Promise<void>`
#### `getS3Descriptor(options?): Promise<IS3Descriptor>`
Stop the S3 server and release resources.
Get connection details for S3 clients. Returns:
##### `createBucket(name: string): Promise<{ name: string }>`
| Field | Type | Description |
|-------|------|-------------|
| `endpoint` | `string` | Server hostname (`localhost` by default) |
| `port` | `number` | Server port |
| `accessKey` | `string` | Access key from first configured credential |
| `accessSecret` | `string` | Secret key from first configured credential |
| `useSsl` | `boolean` | Always `false` (plain HTTP) |
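The `options` argument is a partial descriptor merged over the defaults. A minimal sketch, assuming the merge behavior described here:

```typescript
// Sketch only: override individual fields; everything else falls back to
// the running server's defaults (port, credentials, useSsl).
const descriptor = await s3.getS3Descriptor({ endpoint: '127.0.0.1' });
```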
Create a new S3 bucket.
## 🏗️ Architecture
**Parameters:**
- `name` - Bucket name
smarts3 uses a **hybrid Rust + TypeScript** architecture:
**Returns:** Promise that resolves to bucket information
```
┌─────────────────────────────────┐
│ Your Code (AWS SDK, etc.) │
│ ↕ HTTP (localhost:3000) │
├─────────────────────────────────┤
│ rusts3 binary (Rust) │
│ ├─ hyper 1.x HTTP server │
│ ├─ S3 path-style routing │
│ ├─ Streaming storage layer │
│ ├─ Multipart manager │
│ ├─ CORS / Auth middleware │
│ └─ S3 XML response builder │
├─────────────────────────────────┤
│ TypeScript (thin IPC wrapper) │
│ ├─ Smarts3 class │
│ ├─ RustBridge (stdin/stdout) │
│ └─ Config & S3 descriptor │
└─────────────────────────────────┘
```
##### `getS3Descriptor(options?): Promise<IS3Descriptor>`
**Why Rust?** The TypeScript implementation had critical perf issues: OOM on multipart uploads (parts buffered in memory), double stream copying, file descriptor leaks on HEAD requests, full-file reads for range requests, and no backpressure. The Rust binary solves all of these with streaming I/O, zero-copy, and direct `seek()` for range requests.
Get S3 connection configuration for use with S3 clients.
**IPC Protocol:** TypeScript spawns the `rusts3` binary with `--management` and communicates via newline-delimited JSON over stdin/stdout. Commands: `start`, `stop`, `createBucket`.
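The snippet below hand-rolls that protocol with Node's `child_process`, purely to illustrate the wire format; in the published package `RustBridge` spawns and supervises the binary for you. The binary path is an assumption for the sketch.

```typescript
import { spawn } from 'node:child_process';
import { createInterface } from 'node:readline';
import type { ISmarts3Config } from '@push.rocks/smarts3';

// Fully populated config; the TypeScript wrapper normally fills in defaults
// before handing it to the binary.
declare const fullConfig: ISmarts3Config;

// stdout is reserved for IPC; the binary logs to stderr.
const child = spawn('./dist_rust/rusts3', ['--management'], {
  stdio: ['pipe', 'pipe', 'inherit'],
});

const lines = createInterface({ input: child.stdout! });
lines.on('line', (raw) => {
  const msg = JSON.parse(raw);
  if (msg.event === 'ready') {
    // One JSON object per line on stdin.
    const request = { id: '1', method: 'start', params: { config: fullConfig } };
    child.stdin!.write(JSON.stringify(request) + '\n');
  } else if (msg.id === '1') {
    console.log(msg.success ? 'rusts3 started' : `start failed: ${msg.error}`);
  }
});
```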
**Parameters:**
- `options` - Optional partial descriptor to merge with defaults
### S3 Operations Supported
**Returns:** Promise that resolves to S3 descriptor with:
- `accessKey` - Access key for authentication
- `accessSecret` - Secret key for authentication
- `endpoint` - Server endpoint (hostname/IP)
- `port` - Server port
- `useSsl` - Whether to use SSL (always false for local server)
| Operation | Method | Path |
|-----------|--------|------|
| ListBuckets | `GET /` | |
| CreateBucket | `PUT /{bucket}` | |
| DeleteBucket | `DELETE /{bucket}` | |
| HeadBucket | `HEAD /{bucket}` | |
| ListObjects (v1/v2) | `GET /{bucket}` | `?list-type=2` for v2 |
| PutObject | `PUT /{bucket}/{key}` | |
| GetObject | `GET /{bucket}/{key}` | Supports `Range` header |
| HeadObject | `HEAD /{bucket}/{key}` | |
| DeleteObject | `DELETE /{bucket}/{key}` | |
| CopyObject | `PUT /{bucket}/{key}` | `x-amz-copy-source` header |
| InitiateMultipartUpload | `POST /{bucket}/{key}?uploads` | |
| UploadPart | `PUT /{bucket}/{key}?partNumber&uploadId` | |
| CompleteMultipartUpload | `POST /{bucket}/{key}?uploadId` | |
| AbortMultipartUpload | `DELETE /{bucket}/{key}?uploadId` | |
| ListMultipartUploads | `GET /{bucket}?uploads` | |
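Byte-range reads are served by seeking directly to the requested offset. A quick sketch using the `client` from the AWS SDK section above (Range syntax per the GetObject row in the table):

```typescript
import { GetObjectCommand } from '@aws-sdk/client-s3';

// Fetch only the first 100 bytes of an object; the Rust core seeks to the
// offset instead of reading the whole file.
const { Body } = await client.send(new GetObjectCommand({
  Bucket: 'my-bucket',
  Key: 'large-file.bin',
  Range: 'bytes=0-99',
}));
const firstChunk = await Body!.transformToByteArray(); // Uint8Array of 100 bytes
```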
## 💡 Production Considerations
### On-Disk Format
### When to Use Smarts3 vs MinIO
**Use Smarts3 when:**
- 🎯 You need a lightweight, zero-dependency S3 server
- 🧪 Running in CI/CD pipelines or containerized test environments
- 🏗️ Local development where MinIO setup is overkill
- 📦 Your application needs to bundle an S3-compatible server
- 🚀 Quick prototyping without infrastructure setup
**Use MinIO when:**
- 🏢 Production workloads requiring high availability
- 📊 Advanced features like versioning, replication, encryption at rest
- 🔐 Complex IAM policies and bucket policies
- 📈 High-performance requirements with multiple nodes
- 🌐 Multi-tenant environments
### Security Notes
- Smarts3's authentication is intentionally simple (static credentials)
- It does **not** implement AWS Signature V4 verification
- Perfect for development/testing, but not for production internet-facing deployments
- For production use, place behind a reverse proxy with proper authentication
## 🐛 Debugging Tips
1. **Enable debug logging**
```typescript
const s3 = await Smarts3.createAndStart({
logging: { level: 'debug', format: 'json' },
});
```
2. **Check the buckets directory** - Find your data in `.nogit/bucketsDir/` by default
3. **Use the correct endpoint** - Remember to use `127.0.0.1` or `localhost`
4. **Force path style** - Always use `forcePathStyle: true` with local S3
5. **Inspect requests** - All requests are logged when `silent: false`
## 📈 Performance
Smarts3 is optimized for development and testing scenarios:
- ⚡ **Instant operations** - No network latency
- 💾 **Low memory footprint** - Efficient filesystem operations with streams
- 🔄 **Fast cleanup** - Clean slate mode for quick test resets
- 🚀 **Parallel operations** - Handle multiple concurrent requests
- 📤 **Streaming uploads/downloads** - Low memory usage for large files
```
{storage.directory}/
{bucket}/
{key}._S3_object # Object data
{key}._S3_object.metadata.json # Metadata (content-type, x-amz-meta-*, etc.)
{key}._S3_object.md5 # Cached MD5 hash
.multipart/
{upload-id}/
metadata.json # Upload metadata (bucket, key, parts)
part-1 # Part data files
part-2
...
```
## 🔗 Related Packages
- [`@push.rocks/smartbucket`](https://www.npmjs.com/package/@push.rocks/smartbucket) - Powerful S3 abstraction layer
- [`@push.rocks/smartfs`](https://www.npmjs.com/package/@push.rocks/smartfs) - Modern filesystem with Web Streams support
- [`@tsclass/tsclass`](https://www.npmjs.com/package/@tsclass/tsclass) - TypeScript class helpers
## 📝 Changelog
### v4.0.0 - Production Ready 🚀
**Breaking Changes:**
- Configuration format changed from flat to nested structure
- Old format: `{ port: 3000, cleanSlate: true }`
- New format: `{ server: { port: 3000 }, storage: { cleanSlate: true } }`
**New Features:**
- ✨ Production configuration system with comprehensive options
- 📊 Structured logging with multiple levels and formats
- 🌐 Full CORS middleware support
- 🔐 Simple static credentials authentication
- 📤 Complete multipart upload support for large files
- 🔧 Flexible configuration with sensible defaults
**Improvements:**
- Removed smartbucket from production dependencies (dev-only)
- Migrated to @push.rocks/smartfs for modern filesystem operations
- Enhanced error handling and logging throughout
- Better TypeScript types and documentation
- [`@push.rocks/smartbucket`](https://code.foss.global/push.rocks/smartbucket) — High-level S3 abstraction layer
- [`@push.rocks/smartrust`](https://code.foss.global/push.rocks/smartrust) — TypeScript ↔ Rust IPC bridge
- [`@git.zone/tsrust`](https://code.foss.global/git.zone/tsrust) — Rust cross-compilation for npm packages
## License and Legal Information
This repository contains open-source code that is licensed under the MIT License. A copy of the MIT License can be found in the [license](license) file within this repository.
This repository contains open-source code licensed under the MIT License. A copy of the license can be found in the [LICENSE](./LICENSE) file.
**Please note:** The MIT License does not grant permission to use the trade names, trademarks, service marks, or product names of the project, except as required for reasonable and customary use in describing the origin of the work and reproducing the content of the NOTICE file.
### Trademarks
This project is owned and maintained by Task Venture Capital GmbH. The names and logos associated with Task Venture Capital GmbH and any related products or services are trademarks of Task Venture Capital GmbH and are not included within the scope of the MIT license granted herein. Use of these trademarks must comply with Task Venture Capital GmbH's Trademark Guidelines, and any usage must be approved in writing by Task Venture Capital GmbH.
This project is owned and maintained by Task Venture Capital GmbH. The names and logos associated with Task Venture Capital GmbH and any related products or services are trademarks of Task Venture Capital GmbH or third parties, and are not included within the scope of the MIT license granted herein.
Use of these trademarks must comply with Task Venture Capital GmbH's Trademark Guidelines or the guidelines of the respective third-party owners, and any usage must be approved in writing. Third-party trademarks used herein are the property of their respective owners and used only in a descriptive manner, e.g. for an implementation of an API or similar.
### Company Information
Task Venture Capital GmbH
Registered at District court Bremen HRB 35230 HB, Germany
Registered at District Court Bremen HRB 35230 HB, Germany
For any legal inquiries or if you require further information, please contact us via email at hello@task.vc.
For any legal inquiries or further information, please contact us via email at hello@task.vc.
By using this repository, you acknowledge that you have read this section, agree to comply with its terms, and understand that the licensing of the code does not imply endorsement by Task Venture Capital GmbH of any derivative works.

2
rust/.cargo/config.toml Normal file
View File

@@ -0,0 +1,2 @@
[target.aarch64-unknown-linux-gnu]
linker = "aarch64-linux-gnu-gcc"

1393
rust/Cargo.lock generated Normal file

File diff suppressed because it is too large Load Diff

30
rust/Cargo.toml Normal file
View File

@@ -0,0 +1,30 @@
[package]
name = "rusts3"
version = "0.1.0"
edition = "2021"
[[bin]]
name = "rusts3"
path = "src/main.rs"
[dependencies]
tokio = { version = "1", features = ["full"] }
hyper = { version = "1", features = ["http1", "server"] }
hyper-util = { version = "0.1", features = ["tokio", "http1"] }
http-body-util = "0.1"
serde = { version = "1", features = ["derive"] }
serde_json = "1"
quick-xml = { version = "0.37", features = ["serialize"] }
md-5 = "0.10"
tokio-util = { version = "0.7", features = ["io"] }
bytes = "1"
uuid = { version = "1", features = ["v4"] }
clap = { version = "4", features = ["derive"] }
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
thiserror = "2"
anyhow = "1"
percent-encoding = "2"
url = "2"
chrono = { version = "0.4", features = ["serde"] }
futures-core = "0.3"

78
rust/src/config.rs Normal file
View File

@@ -0,0 +1,78 @@
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct S3Config {
pub server: ServerConfig,
pub storage: StorageConfig,
pub auth: AuthConfig,
pub cors: CorsConfig,
pub logging: LoggingConfig,
pub limits: LimitsConfig,
pub multipart: MultipartConfig,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ServerConfig {
pub port: u16,
pub address: String,
pub silent: bool,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct StorageConfig {
pub directory: String,
pub clean_slate: bool,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct AuthConfig {
pub enabled: bool,
pub credentials: Vec<Credential>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Credential {
#[serde(rename = "accessKeyId")]
pub access_key_id: String,
#[serde(rename = "secretAccessKey")]
pub secret_access_key: String,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CorsConfig {
pub enabled: bool,
pub allowed_origins: Option<Vec<String>>,
pub allowed_methods: Option<Vec<String>>,
pub allowed_headers: Option<Vec<String>>,
pub exposed_headers: Option<Vec<String>>,
pub max_age: Option<u64>,
pub allow_credentials: Option<bool>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct LoggingConfig {
pub level: Option<String>,
pub format: Option<String>,
pub enabled: Option<bool>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct LimitsConfig {
pub max_object_size: Option<u64>,
pub max_metadata_size: Option<u64>,
pub request_timeout: Option<u64>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct MultipartConfig {
pub expiration_days: Option<u64>,
pub cleanup_interval_minutes: Option<u64>,
}

43
rust/src/main.rs Normal file
View File

@@ -0,0 +1,43 @@
mod config;
mod management;
mod s3_error;
mod server;
mod storage;
mod xml_response;
use clap::Parser;
#[derive(Parser)]
#[command(name = "rusts3", about = "High-performance S3-compatible server")]
struct Cli {
/// Run in management mode (IPC via stdin/stdout)
#[arg(long)]
management: bool,
/// Log level
#[arg(long, default_value = "info")]
log_level: String,
}
#[tokio::main]
async fn main() -> anyhow::Result<()> {
let cli = Cli::parse();
if cli.management {
// Init tracing to stderr only (stdout reserved for IPC)
tracing_subscriber::fmt()
.with_writer(std::io::stderr)
.with_env_filter(
tracing_subscriber::EnvFilter::try_new(&cli.log_level)
.unwrap_or_else(|_| tracing_subscriber::EnvFilter::new("info")),
)
.init();
management::management_loop().await?;
} else {
eprintln!("rusts3: use --management flag for IPC mode");
std::process::exit(1);
}
Ok(())
}

155
rust/src/management.rs Normal file
View File

@@ -0,0 +1,155 @@
use anyhow::Result;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::io::Write;
use tokio::io::{AsyncBufReadExt, BufReader};
use crate::config::S3Config;
use crate::server::S3Server;
#[derive(Deserialize)]
struct IpcRequest {
id: String,
method: String,
params: Value,
}
#[derive(Serialize)]
struct IpcResponse {
id: String,
success: bool,
#[serde(skip_serializing_if = "Option::is_none")]
result: Option<Value>,
#[serde(skip_serializing_if = "Option::is_none")]
error: Option<String>,
}
#[derive(Serialize)]
struct IpcEvent {
event: String,
data: Value,
}
fn send_line(value: &impl Serialize) {
let mut stdout = std::io::stdout().lock();
serde_json::to_writer(&mut stdout, value).ok();
stdout.write_all(b"\n").ok();
stdout.flush().ok();
}
fn send_response(id: String, result: Value) {
send_line(&IpcResponse {
id,
success: true,
result: Some(result),
error: None,
});
}
fn send_error(id: String, message: String) {
send_line(&IpcResponse {
id,
success: false,
result: None,
error: Some(message),
});
}
pub async fn management_loop() -> Result<()> {
// Emit ready event
send_line(&IpcEvent {
event: "ready".to_string(),
data: serde_json::json!({}),
});
let mut server: Option<S3Server> = None;
let stdin = BufReader::new(tokio::io::stdin());
let mut lines = stdin.lines();
while let Ok(Some(line)) = lines.next_line().await {
let line = line.trim().to_string();
if line.is_empty() {
continue;
}
let req: IpcRequest = match serde_json::from_str(&line) {
Ok(r) => r,
Err(e) => {
tracing::warn!("Invalid IPC request: {}", e);
continue;
}
};
let id = req.id.clone();
let method = req.method.as_str();
match method {
"start" => {
#[derive(Deserialize)]
struct StartParams {
config: S3Config,
}
match serde_json::from_value::<StartParams>(req.params) {
Ok(params) => {
match S3Server::start(params.config).await {
Ok(s) => {
server = Some(s);
send_response(id, serde_json::json!({}));
}
Err(e) => {
send_error(id, format!("Failed to start server: {}", e));
}
}
}
Err(e) => {
send_error(id, format!("Invalid start params: {}", e));
}
}
}
"stop" => {
if let Some(s) = server.take() {
s.stop().await;
}
send_response(id, serde_json::json!({}));
}
"createBucket" => {
#[derive(Deserialize)]
struct CreateBucketParams {
name: String,
}
match serde_json::from_value::<CreateBucketParams>(req.params) {
Ok(params) => {
if let Some(ref s) = server {
match s.store().create_bucket(&params.name).await {
Ok(()) => {
send_response(id, serde_json::json!({}));
}
Err(e) => {
send_error(
id,
format!("Failed to create bucket: {}", e),
);
}
}
} else {
send_error(id, "Server not started".to_string());
}
}
Err(e) => {
send_error(id, format!("Invalid createBucket params: {}", e));
}
}
}
_ => {
send_error(id, format!("Unknown method: {}", method));
}
}
}
// Clean shutdown
if let Some(s) = server.take() {
s.stop().await;
}
Ok(())
}

70
rust/src/s3_error.rs Normal file
View File

@@ -0,0 +1,70 @@
use hyper::{Response, StatusCode};
use http_body_util::Full;
use bytes::Bytes;
#[derive(Debug, thiserror::Error)]
#[error("S3Error({code}): {message}")]
pub struct S3Error {
pub code: String,
pub message: String,
pub status: StatusCode,
}
impl S3Error {
pub fn new(code: &str, message: &str, status: StatusCode) -> Self {
Self {
code: code.to_string(),
message: message.to_string(),
status,
}
}
pub fn no_such_key() -> Self {
Self::new("NoSuchKey", "The specified key does not exist.", StatusCode::NOT_FOUND)
}
pub fn no_such_bucket() -> Self {
Self::new("NoSuchBucket", "The specified bucket does not exist", StatusCode::NOT_FOUND)
}
pub fn bucket_not_empty() -> Self {
Self::new("BucketNotEmpty", "The bucket you tried to delete is not empty", StatusCode::CONFLICT)
}
pub fn access_denied() -> Self {
Self::new("AccessDenied", "Access Denied", StatusCode::FORBIDDEN)
}
pub fn no_such_upload() -> Self {
Self::new("NoSuchUpload", "The specified upload does not exist", StatusCode::NOT_FOUND)
}
pub fn invalid_part_number() -> Self {
Self::new("InvalidPartNumber", "Part number must be between 1 and 10000", StatusCode::BAD_REQUEST)
}
pub fn internal_error(msg: &str) -> Self {
Self::new("InternalError", msg, StatusCode::INTERNAL_SERVER_ERROR)
}
pub fn invalid_request(msg: &str) -> Self {
Self::new("InvalidRequest", msg, StatusCode::BAD_REQUEST)
}
pub fn to_xml(&self) -> String {
format!(
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<Error><Code>{}</Code><Message>{}</Message></Error>",
self.code, self.message
)
}
pub fn to_response(&self, request_id: &str) -> Response<Full<Bytes>> {
let xml = self.to_xml();
Response::builder()
.status(self.status)
.header("content-type", "application/xml")
.header("x-amz-request-id", request_id)
.body(Full::new(Bytes::from(xml)))
.unwrap()
}
}

865
rust/src/server.rs Normal file
View File

@@ -0,0 +1,865 @@
use anyhow::Result;
use bytes::Bytes;
use futures_core::Stream;
use http_body_util::BodyExt;
use hyper::body::Incoming;
use hyper::server::conn::http1;
use hyper::service::service_fn;
use hyper::{Method, Request, Response, StatusCode};
use hyper_util::rt::TokioIo;
use std::collections::HashMap;
use std::net::SocketAddr;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};
use tokio::io::AsyncReadExt;
use tokio::net::TcpListener;
use tokio::sync::watch;
use tokio_util::io::ReaderStream;
use uuid::Uuid;
use crate::config::S3Config;
use crate::s3_error::S3Error;
use crate::storage::FileStore;
use crate::xml_response;
pub struct S3Server {
store: Arc<FileStore>,
config: S3Config,
shutdown_tx: watch::Sender<bool>,
server_handle: tokio::task::JoinHandle<()>,
}
impl S3Server {
pub async fn start(config: S3Config) -> Result<Self> {
let store = Arc::new(FileStore::new(config.storage.directory.clone().into()));
// Initialize or reset storage
if config.storage.clean_slate {
store.reset().await?;
} else {
store.initialize().await?;
}
let addr: SocketAddr = format!("{}:{}", config.address(), config.server.port)
.parse()?;
let listener = TcpListener::bind(addr).await?;
let (shutdown_tx, shutdown_rx) = watch::channel(false);
let server_store = store.clone();
let server_config = config.clone();
let server_handle = tokio::spawn(async move {
loop {
let mut rx = shutdown_rx.clone();
tokio::select! {
result = listener.accept() => {
match result {
Ok((stream, _remote_addr)) => {
let io = TokioIo::new(stream);
let store = server_store.clone();
let cfg = server_config.clone();
tokio::spawn(async move {
let svc = service_fn(move |req: Request<Incoming>| {
let store = store.clone();
let cfg = cfg.clone();
async move {
handle_request(req, store, cfg).await
}
});
if let Err(e) = http1::Builder::new()
.keep_alive(true)
.serve_connection(io, svc)
.await
{
if !e.is_incomplete_message() {
tracing::error!("Connection error: {}", e);
}
}
});
}
Err(e) => {
tracing::error!("Accept error: {}", e);
}
}
}
_ = rx.changed() => {
break;
}
}
}
});
if !config.server.silent {
tracing::info!("S3 server listening on {}", addr);
}
Ok(Self {
store,
config,
shutdown_tx,
server_handle,
})
}
pub async fn stop(self) {
let _ = self.shutdown_tx.send(true);
let _ = self.server_handle.await;
}
pub fn store(&self) -> &FileStore {
&self.store
}
}
impl S3Config {
fn address(&self) -> &str {
&self.server.address
}
}
// ============================
// Request handling
// ============================
type BoxBody = http_body_util::combinators::BoxBody<Bytes, Box<dyn std::error::Error + Send + Sync>>;
fn full_body(data: impl Into<Bytes>) -> BoxBody {
http_body_util::Full::new(data.into())
.map_err(|never: std::convert::Infallible| -> Box<dyn std::error::Error + Send + Sync> { match never {} })
.boxed()
}
fn empty_body() -> BoxBody {
http_body_util::Empty::new()
.map_err(|never: std::convert::Infallible| -> Box<dyn std::error::Error + Send + Sync> { match never {} })
.boxed()
}
fn stream_body(reader: tokio::fs::File, content_length: u64) -> BoxBody {
let stream = ReaderStream::with_capacity(reader.take(content_length), 64 * 1024);
let mapped = FrameStream { inner: stream };
http_body_util::StreamBody::new(mapped).boxed()
}
/// Adapter that converts ReaderStream into a Stream of Frame<Bytes>
struct FrameStream {
inner: ReaderStream<tokio::io::Take<tokio::fs::File>>,
}
impl Stream for FrameStream {
type Item = Result<hyper::body::Frame<Bytes>, Box<dyn std::error::Error + Send + Sync>>;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let inner = unsafe { self.map_unchecked_mut(|s| &mut s.inner) };
match inner.poll_next(cx) {
Poll::Ready(Some(Ok(bytes))) => {
Poll::Ready(Some(Ok(hyper::body::Frame::data(bytes))))
}
Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(Box::new(e) as Box<dyn std::error::Error + Send + Sync>))),
Poll::Ready(None) => Poll::Ready(None),
Poll::Pending => Poll::Pending,
}
}
}
fn xml_response(status: StatusCode, xml: String, request_id: &str) -> Response<BoxBody> {
Response::builder()
.status(status)
.header("content-type", "application/xml")
.header("x-amz-request-id", request_id)
.body(full_body(xml))
.unwrap()
}
fn empty_response(status: StatusCode, request_id: &str) -> Response<BoxBody> {
Response::builder()
.status(status)
.header("x-amz-request-id", request_id)
.body(empty_body())
.unwrap()
}
fn s3_error_response(err: &S3Error, request_id: &str) -> Response<BoxBody> {
let xml = err.to_xml();
Response::builder()
.status(err.status)
.header("content-type", "application/xml")
.header("x-amz-request-id", request_id)
.body(full_body(xml))
.unwrap()
}
async fn handle_request(
req: Request<Incoming>,
store: Arc<FileStore>,
config: S3Config,
) -> Result<Response<BoxBody>, std::convert::Infallible> {
let request_id = Uuid::new_v4().to_string();
let method = req.method().clone();
let uri = req.uri().clone();
let start = std::time::Instant::now();
// Handle CORS preflight
if config.cors.enabled && method == Method::OPTIONS {
let resp = build_cors_preflight(&config, &request_id);
return Ok(resp);
}
// Auth check
if config.auth.enabled {
if let Err(e) = check_auth(&req, &config) {
tracing::warn!("Auth failed: {}", e.message);
return Ok(s3_error_response(&e, &request_id));
}
}
// Route and handle
let mut response = match route_request(req, store, &config, &request_id).await {
Ok(resp) => resp,
Err(err) => {
if let Some(s3err) = err.downcast_ref::<S3Error>() {
s3_error_response(s3err, &request_id)
} else {
tracing::error!("Internal error: {}", err);
let s3err = S3Error::internal_error(&err.to_string());
s3_error_response(&s3err, &request_id)
}
}
};
// Add CORS headers if enabled
if config.cors.enabled {
add_cors_headers(response.headers_mut(), &config);
}
let duration = start.elapsed();
tracing::info!(
method = %method,
path = %uri.path(),
status = %response.status().as_u16(),
duration_ms = %duration.as_millis(),
"request"
);
Ok(response)
}
// ============================
// Routing
// ============================
async fn route_request(
req: Request<Incoming>,
store: Arc<FileStore>,
_config: &S3Config,
request_id: &str,
) -> Result<Response<BoxBody>> {
let method = req.method().clone();
let path = req.uri().path().to_string();
let query_string = req.uri().query().unwrap_or("").to_string();
let query = parse_query(&query_string);
// Parse path: /, /{bucket}, /{bucket}/{key...}
let segments: Vec<&str> = path
.trim_start_matches('/')
.splitn(2, '/')
.filter(|s| !s.is_empty())
.collect();
match segments.len() {
0 => {
// Root: GET / -> ListBuckets
match method {
Method::GET => handle_list_buckets(store, request_id).await,
_ => Ok(empty_response(StatusCode::METHOD_NOT_ALLOWED, request_id)),
}
}
1 => {
// Bucket level: /{bucket}
let bucket = percent_decode(segments[0]);
match method {
Method::GET => {
if query.contains_key("uploads") {
handle_list_multipart_uploads(store, &bucket, request_id).await
} else {
handle_list_objects(store, &bucket, &query, request_id).await
}
}
Method::PUT => handle_create_bucket(store, &bucket, request_id).await,
Method::DELETE => handle_delete_bucket(store, &bucket, request_id).await,
Method::HEAD => handle_head_bucket(store, &bucket, request_id).await,
_ => Ok(empty_response(StatusCode::METHOD_NOT_ALLOWED, request_id)),
}
}
2 => {
// Object level: /{bucket}/{key...}
let bucket = percent_decode(segments[0]);
let key = percent_decode(segments[1]);
match method {
Method::PUT => {
if query.contains_key("partNumber") && query.contains_key("uploadId") {
handle_upload_part(req, store, &query, request_id).await
} else if req.headers().contains_key("x-amz-copy-source") {
handle_copy_object(req, store, &bucket, &key, request_id).await
} else {
handle_put_object(req, store, &bucket, &key, request_id).await
}
}
Method::GET => {
handle_get_object(req, store, &bucket, &key, request_id).await
}
Method::HEAD => {
handle_head_object(store, &bucket, &key, request_id).await
}
Method::DELETE => {
if query.contains_key("uploadId") {
let upload_id = query.get("uploadId").unwrap();
handle_abort_multipart(store, upload_id, request_id).await
} else {
handle_delete_object(store, &bucket, &key, request_id).await
}
}
Method::POST => {
if query.contains_key("uploads") {
handle_initiate_multipart(req, store, &bucket, &key, request_id).await
} else if query.contains_key("uploadId") {
let upload_id = query.get("uploadId").unwrap().clone();
handle_complete_multipart(req, store, &bucket, &key, &upload_id, request_id).await
} else {
let err = S3Error::invalid_request("Invalid POST request");
Ok(s3_error_response(&err, request_id))
}
}
_ => Ok(empty_response(StatusCode::METHOD_NOT_ALLOWED, request_id)),
}
}
_ => Ok(empty_response(StatusCode::BAD_REQUEST, request_id)),
}
}
// ============================
// Handlers
// ============================
async fn handle_list_buckets(
store: Arc<FileStore>,
request_id: &str,
) -> Result<Response<BoxBody>> {
let buckets = store.list_buckets().await?;
let xml = xml_response::list_buckets_xml(&buckets);
Ok(xml_response(StatusCode::OK, xml, request_id))
}
async fn handle_create_bucket(
store: Arc<FileStore>,
bucket: &str,
request_id: &str,
) -> Result<Response<BoxBody>> {
store.create_bucket(bucket).await?;
Ok(empty_response(StatusCode::OK, request_id))
}
async fn handle_delete_bucket(
store: Arc<FileStore>,
bucket: &str,
request_id: &str,
) -> Result<Response<BoxBody>> {
store.delete_bucket(bucket).await?;
Ok(empty_response(StatusCode::NO_CONTENT, request_id))
}
async fn handle_head_bucket(
store: Arc<FileStore>,
bucket: &str,
request_id: &str,
) -> Result<Response<BoxBody>> {
if store.bucket_exists(bucket).await {
Ok(empty_response(StatusCode::OK, request_id))
} else {
Err(S3Error::no_such_bucket().into())
}
}
async fn handle_list_objects(
store: Arc<FileStore>,
bucket: &str,
query: &HashMap<String, String>,
request_id: &str,
) -> Result<Response<BoxBody>> {
let prefix = query.get("prefix").map(|s| s.as_str()).unwrap_or("");
let delimiter = query.get("delimiter").map(|s| s.as_str()).unwrap_or("");
let max_keys = query
.get("max-keys")
.and_then(|s| s.parse().ok())
.unwrap_or(1000usize);
let continuation_token = query.get("continuation-token").map(|s| s.as_str());
let is_v2 = query.get("list-type").map(|s| s.as_str()) == Some("2");
let result = store
.list_objects(bucket, prefix, delimiter, max_keys, continuation_token)
.await?;
let xml = if is_v2 {
xml_response::list_objects_v2_xml(bucket, &result)
} else {
xml_response::list_objects_v1_xml(bucket, &result)
};
Ok(xml_response(StatusCode::OK, xml, request_id))
}
async fn handle_put_object(
req: Request<Incoming>,
store: Arc<FileStore>,
bucket: &str,
key: &str,
request_id: &str,
) -> Result<Response<BoxBody>> {
let metadata = extract_metadata(req.headers());
let body = req.into_body();
let result = store.put_object(bucket, key, body, metadata).await?;
let resp = Response::builder()
.status(StatusCode::OK)
.header("ETag", format!("\"{}\"", result.md5))
.header("x-amz-request-id", request_id)
.body(empty_body())
.unwrap();
Ok(resp)
}
async fn handle_get_object(
req: Request<Incoming>,
store: Arc<FileStore>,
bucket: &str,
key: &str,
request_id: &str,
) -> Result<Response<BoxBody>> {
// Parse Range header
let range = parse_range_header(req.headers());
let result = store.get_object(bucket, key, range).await?;
let content_type = result
.metadata
.get("content-type")
.cloned()
.unwrap_or_else(|| "binary/octet-stream".to_string());
let mut builder = Response::builder()
.header("ETag", format!("\"{}\"", result.md5))
.header("Last-Modified", result.last_modified.format("%a, %d %b %Y %H:%M:%S GMT").to_string())
.header("Content-Type", &content_type)
.header("Accept-Ranges", "bytes")
.header("x-amz-request-id", request_id);
// Add custom metadata headers
for (k, v) in &result.metadata {
if k.starts_with("x-amz-meta-") {
builder = builder.header(k.as_str(), v.as_str());
}
}
if let Some((start, end)) = range {
// Clamp open-ended ranges (bytes=N-) to the object size before computing lengths
let end = end.min(result.size.saturating_sub(1));
let content_length = end.saturating_sub(start) + 1;
let resp = builder
.status(StatusCode::PARTIAL_CONTENT)
.header("Content-Length", content_length.to_string())
.header(
"Content-Range",
format!("bytes {}-{}/{}", start, end, result.size),
)
.body(stream_body(result.body, content_length))
.unwrap();
Ok(resp)
} else {
let resp = builder
.status(StatusCode::OK)
.header("Content-Length", result.size.to_string())
.body(stream_body(result.body, result.content_length))
.unwrap();
Ok(resp)
}
}
async fn handle_head_object(
store: Arc<FileStore>,
bucket: &str,
key: &str,
request_id: &str,
) -> Result<Response<BoxBody>> {
let result = store.head_object(bucket, key).await?;
let content_type = result
.metadata
.get("content-type")
.cloned()
.unwrap_or_else(|| "binary/octet-stream".to_string());
let mut builder = Response::builder()
.status(StatusCode::OK)
.header("ETag", format!("\"{}\"", result.md5))
.header("Last-Modified", result.last_modified.format("%a, %d %b %Y %H:%M:%S GMT").to_string())
.header("Content-Type", &content_type)
.header("Content-Length", result.size.to_string())
.header("Accept-Ranges", "bytes")
.header("x-amz-request-id", request_id);
for (k, v) in &result.metadata {
if k.starts_with("x-amz-meta-") {
builder = builder.header(k.as_str(), v.as_str());
}
}
Ok(builder.body(empty_body()).unwrap())
}
async fn handle_delete_object(
store: Arc<FileStore>,
bucket: &str,
key: &str,
request_id: &str,
) -> Result<Response<BoxBody>> {
store.delete_object(bucket, key).await?;
Ok(empty_response(StatusCode::NO_CONTENT, request_id))
}
async fn handle_copy_object(
req: Request<Incoming>,
store: Arc<FileStore>,
dest_bucket: &str,
dest_key: &str,
request_id: &str,
) -> Result<Response<BoxBody>> {
let copy_source = req
.headers()
.get("x-amz-copy-source")
.and_then(|v| v.to_str().ok())
.unwrap_or("")
.to_string();
let metadata_directive = req
.headers()
.get("x-amz-metadata-directive")
.and_then(|v| v.to_str().ok())
.unwrap_or("COPY")
.to_uppercase();
// Parse source: /bucket/key or bucket/key
let source = copy_source.trim_start_matches('/');
let first_slash = source.find('/').unwrap_or(source.len());
let src_bucket = percent_decode(&source[..first_slash]);
let src_key = if first_slash < source.len() {
percent_decode(&source[first_slash + 1..])
} else {
String::new()
};
let new_metadata = if metadata_directive == "REPLACE" {
Some(extract_metadata(req.headers()))
} else {
None
};
let result = store
.copy_object(&src_bucket, &src_key, dest_bucket, dest_key, &metadata_directive, new_metadata)
.await?;
let xml = xml_response::copy_object_result_xml(&result.md5, &result.last_modified.to_rfc3339());
Ok(xml_response(StatusCode::OK, xml, request_id))
}
// ============================
// Multipart handlers
// ============================
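// Flow: POST ?uploads initiates an upload, PUT ?partNumber=N&uploadId=... stores
// each part, POST ?uploadId=... assembles the parts into the final object, and
// DELETE ?uploadId=... aborts and discards the parts.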
async fn handle_initiate_multipart(
req: Request<Incoming>,
store: Arc<FileStore>,
bucket: &str,
key: &str,
request_id: &str,
) -> Result<Response<BoxBody>> {
let metadata = extract_metadata(req.headers());
let upload_id = store.initiate_multipart(bucket, key, metadata).await?;
let xml = xml_response::initiate_multipart_xml(bucket, key, &upload_id);
Ok(xml_response(StatusCode::OK, xml, request_id))
}
async fn handle_upload_part(
req: Request<Incoming>,
store: Arc<FileStore>,
query: &HashMap<String, String>,
request_id: &str,
) -> Result<Response<BoxBody>> {
let upload_id = query.get("uploadId").unwrap();
let part_number: u32 = query
.get("partNumber")
.and_then(|s| s.parse().ok())
.unwrap_or(0);
if part_number < 1 || part_number > 10000 {
return Err(S3Error::invalid_part_number().into());
}
let body = req.into_body();
let (etag, _size) = store.upload_part(upload_id, part_number, body).await?;
let resp = Response::builder()
.status(StatusCode::OK)
.header("ETag", format!("\"{}\"", etag))
.header("x-amz-request-id", request_id)
.body(empty_body())
.unwrap();
Ok(resp)
}
async fn handle_complete_multipart(
req: Request<Incoming>,
store: Arc<FileStore>,
bucket: &str,
key: &str,
upload_id: &str,
request_id: &str,
) -> Result<Response<BoxBody>> {
// Read request body (XML)
let body_bytes = req.collect().await.map_err(|e| anyhow::anyhow!("Body error: {}", e))?.to_bytes();
let body_str = String::from_utf8_lossy(&body_bytes);
// Parse part numbers and ETags from the XML body with simple string scanning
let parts = parse_complete_multipart_xml(&body_str);
let result = store.complete_multipart(upload_id, &parts).await?;
let xml = xml_response::complete_multipart_xml(bucket, key, &result.etag);
Ok(xml_response(StatusCode::OK, xml, request_id))
}
async fn handle_abort_multipart(
store: Arc<FileStore>,
upload_id: &str,
request_id: &str,
) -> Result<Response<BoxBody>> {
store.abort_multipart(upload_id).await?;
Ok(empty_response(StatusCode::NO_CONTENT, request_id))
}
async fn handle_list_multipart_uploads(
store: Arc<FileStore>,
bucket: &str,
request_id: &str,
) -> Result<Response<BoxBody>> {
let uploads = store.list_multipart_uploads(bucket).await?;
let xml = xml_response::list_multipart_uploads_xml(bucket, &uploads);
Ok(xml_response(StatusCode::OK, xml, request_id))
}
// ============================
// Helpers
// ============================
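/// Parses a query string into a map, percent-decoding keys and values,
/// e.g. "list-type=2&prefix=foo%2F" -> {"list-type": "2", "prefix": "foo/"}.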
fn parse_query(query_string: &str) -> HashMap<String, String> {
let mut map = HashMap::new();
if query_string.is_empty() {
return map;
}
for pair in query_string.split('&') {
let mut parts = pair.splitn(2, '=');
let key = parts.next().unwrap_or("");
let value = parts.next().unwrap_or("");
let key = percent_decode(key);
let value = percent_decode(value);
map.insert(key, value);
}
map
}
fn percent_decode(s: &str) -> String {
percent_encoding::percent_decode_str(s)
.decode_utf8_lossy()
.to_string()
}
fn extract_metadata(headers: &hyper::HeaderMap) -> HashMap<String, String> {
let mut metadata = HashMap::new();
for (name, value) in headers {
let name_str = name.as_str().to_lowercase();
if let Ok(val) = value.to_str() {
match name_str.as_str() {
"content-type" | "cache-control" | "content-disposition"
| "content-encoding" | "content-language" | "expires" => {
metadata.insert(name_str, val.to_string());
}
_ if name_str.starts_with("x-amz-meta-") => {
metadata.insert(name_str, val.to_string());
}
_ => {}
}
}
}
// Default content-type
if !metadata.contains_key("content-type") {
metadata.insert("content-type".to_string(), "binary/octet-stream".to_string());
}
metadata
}
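/// Parses a single `Range: bytes=start-end` header,
/// e.g. "bytes=0-499" -> Some((0, 499)); "bytes=500-" -> Some((500, u64::MAX)).
/// Multi-range requests are not supported and are treated as no range at all.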
fn parse_range_header(headers: &hyper::HeaderMap) -> Option<(u64, u64)> {
let range_val = headers.get("range")?.to_str().ok()?;
let bytes_prefix = "bytes=";
if !range_val.starts_with(bytes_prefix) {
return None;
}
let range_spec = &range_val[bytes_prefix.len()..];
let mut parts = range_spec.splitn(2, '-');
let start: u64 = parts.next()?.parse().ok()?;
let end_str = parts.next()?;
let end: u64 = if end_str.is_empty() {
// Open-ended range (bytes=N-): use a sentinel; callers clamp it to the object size
u64::MAX
} else {
end_str.parse().ok()?
};
Some((start, end))
}
fn parse_complete_multipart_xml(xml: &str) -> Vec<(u32, String)> {
let mut parts = Vec::new();
// Simple XML parsing for <Part><PartNumber>N</PartNumber><ETag>...</ETag></Part>
let mut remaining = xml;
while let Some(part_start) = remaining.find("<Part>") {
let after_part = &remaining[part_start + 6..];
if let Some(part_end) = after_part.find("</Part>") {
let part_content = &after_part[..part_end];
let part_number = extract_xml_value(part_content, "PartNumber")
.and_then(|s| s.parse::<u32>().ok());
let etag = extract_xml_value(part_content, "ETag")
.map(|s| s.replace('"', ""));
if let (Some(pn), Some(et)) = (part_number, etag) {
parts.push((pn, et));
}
remaining = &after_part[part_end + 7..];
} else {
break;
}
}
parts.sort_by_key(|(pn, _)| *pn);
parts
}
fn extract_xml_value<'a>(xml: &'a str, tag: &str) -> Option<String> {
let open = format!("<{}>", tag);
let close = format!("</{}>", tag);
let start = xml.find(&open)? + open.len();
let end = xml.find(&close)?;
Some(xml[start..end].to_string())
}
// ============================
// CORS
// ============================
fn build_cors_preflight(config: &S3Config, request_id: &str) -> Response<BoxBody> {
let mut builder = Response::builder()
.status(StatusCode::NO_CONTENT)
.header("x-amz-request-id", request_id);
if let Some(ref origins) = config.cors.allowed_origins {
builder = builder.header("Access-Control-Allow-Origin", origins.join(", "));
}
if let Some(ref methods) = config.cors.allowed_methods {
builder = builder.header("Access-Control-Allow-Methods", methods.join(", "));
}
if let Some(ref headers) = config.cors.allowed_headers {
builder = builder.header("Access-Control-Allow-Headers", headers.join(", "));
}
if let Some(max_age) = config.cors.max_age {
builder = builder.header("Access-Control-Max-Age", max_age.to_string());
}
if config.cors.allow_credentials == Some(true) {
builder = builder.header("Access-Control-Allow-Credentials", "true");
}
builder.body(empty_body()).unwrap()
}
fn add_cors_headers(headers: &mut hyper::HeaderMap, config: &S3Config) {
if let Some(ref origins) = config.cors.allowed_origins {
headers.insert(
"access-control-allow-origin",
origins.join(", ").parse().unwrap(),
);
}
if let Some(ref exposed) = config.cors.exposed_headers {
headers.insert(
"access-control-expose-headers",
exposed.join(", ").parse().unwrap(),
);
}
if config.cors.allow_credentials == Some(true) {
headers.insert(
"access-control-allow-credentials",
"true".parse().unwrap(),
);
}
}
// ============================
// Auth
// ============================
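/// Lightweight auth: extracts the access key from an AWS SigV2 or SigV4
/// Authorization header and checks it against the configured credentials.
/// Signatures themselves are not verified, which is sufficient for a local
/// testing endpoint.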
fn check_auth(req: &Request<Incoming>, config: &S3Config) -> Result<(), S3Error> {
let auth_header = req
.headers()
.get("authorization")
.and_then(|v| v.to_str().ok())
.unwrap_or("");
if auth_header.is_empty() {
return Err(S3Error::access_denied());
}
// Extract access key from AWS v2 or v4 signature
let access_key = if auth_header.starts_with("AWS4-HMAC-SHA256") {
// v4: AWS4-HMAC-SHA256 Credential=KEY/date/region/s3/aws4_request, ...
auth_header
.split("Credential=")
.nth(1)
.and_then(|s| s.split('/').next())
} else if auth_header.starts_with("AWS ") {
// v2: AWS KEY:signature
auth_header
.strip_prefix("AWS ")
.and_then(|s| s.split(':').next())
} else {
None
};
let access_key = access_key.unwrap_or("");
// Check against configured credentials
for cred in &config.auth.credentials {
if cred.access_key_id == access_key {
return Ok(());
}
}
Err(S3Error::access_denied())
}

885
rust/src/storage.rs Normal file
View File

@@ -0,0 +1,885 @@
use anyhow::Result;
use chrono::{DateTime, Utc};
use http_body_util::BodyExt;
use hyper::body::Incoming;
use md5::{Digest, Md5};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use tokio::fs;
use tokio::io::{AsyncReadExt, AsyncSeekExt, AsyncWriteExt, BufWriter};
use uuid::Uuid;
use crate::s3_error::S3Error;
// ============================
// Result types
// ============================
pub struct PutResult {
pub size: u64,
pub md5: String,
}
pub struct GetResult {
pub key: String,
pub size: u64,
pub last_modified: DateTime<Utc>,
pub md5: String,
pub metadata: HashMap<String, String>,
pub body: tokio::fs::File,
pub content_length: u64,
}
pub struct HeadResult {
pub key: String,
pub size: u64,
pub last_modified: DateTime<Utc>,
pub md5: String,
pub metadata: HashMap<String, String>,
}
pub struct CopyResult {
pub size: u64,
pub md5: String,
pub last_modified: DateTime<Utc>,
}
pub struct ListObjectEntry {
pub key: String,
pub size: u64,
pub last_modified: DateTime<Utc>,
pub md5: String,
}
pub struct ListObjectsResult {
pub contents: Vec<ListObjectEntry>,
pub common_prefixes: Vec<String>,
pub is_truncated: bool,
pub next_continuation_token: Option<String>,
pub prefix: String,
pub delimiter: String,
pub max_keys: usize,
}
pub struct BucketInfo {
pub name: String,
pub creation_date: DateTime<Utc>,
}
pub struct MultipartUploadInfo {
pub upload_id: String,
pub bucket: String,
pub key: String,
pub initiated: DateTime<Utc>,
}
pub struct CompleteMultipartResult {
pub etag: String,
pub size: u64,
}
// ============================
// Multipart metadata (disk format, compatible with TS)
// ============================
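// Serialized as camelCase JSON, e.g.
// { "uploadId": "...", "bucket": "b", "key": "k", "initiated": "2026-01-01T00:00:00Z",
//   "metadata": { "content-type": "text/plain" },
//   "parts": [ { "partNumber": 1, "etag": "...", "size": 5, "lastModified": "..." } ] }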
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct MultipartMetadata {
upload_id: String,
bucket: String,
key: String,
initiated: String,
metadata: HashMap<String, String>,
parts: Vec<PartMetadata>,
}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct PartMetadata {
part_number: u32,
etag: String,
size: u64,
last_modified: String,
}
// ============================
// FileStore
// ============================
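/// Filesystem-backed object store. On-disk layout:
///   {root}/{bucket}/{encoded key}._S3_object                 object data
///   {root}/{bucket}/{encoded key}._S3_object.md5             cached MD5 sidecar
///   {root}/{bucket}/{encoded key}._S3_object.metadata.json   metadata sidecar
///   {root}/.multipart/{uploadId}/                            in-progress multipart uploads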
pub struct FileStore {
root_dir: PathBuf,
}
impl FileStore {
pub fn new(root_dir: PathBuf) -> Self {
Self { root_dir }
}
pub async fn initialize(&self) -> Result<()> {
fs::create_dir_all(&self.root_dir).await?;
Ok(())
}
pub async fn reset(&self) -> Result<()> {
if self.root_dir.exists() {
fs::remove_dir_all(&self.root_dir).await?;
}
fs::create_dir_all(&self.root_dir).await?;
Ok(())
}
// ============================
// Bucket operations
// ============================
pub async fn list_buckets(&self) -> Result<Vec<BucketInfo>> {
let mut buckets = Vec::new();
let mut entries = fs::read_dir(&self.root_dir).await?;
while let Some(entry) = entries.next_entry().await? {
let meta = entry.metadata().await?;
if meta.is_dir() {
let name = entry.file_name().to_string_lossy().to_string();
// Skip hidden dirs like .multipart
if name.starts_with('.') {
continue;
}
let creation_date: DateTime<Utc> = meta
.created()
.unwrap_or(meta.modified().unwrap_or(std::time::SystemTime::UNIX_EPOCH))
.into();
buckets.push(BucketInfo {
name,
creation_date,
});
}
}
buckets.sort_by(|a, b| a.name.cmp(&b.name));
Ok(buckets)
}
pub async fn bucket_exists(&self, bucket: &str) -> bool {
self.root_dir.join(bucket).is_dir()
}
pub async fn create_bucket(&self, bucket: &str) -> Result<()> {
let bucket_path = self.root_dir.join(bucket);
fs::create_dir_all(&bucket_path).await?;
Ok(())
}
pub async fn delete_bucket(&self, bucket: &str) -> Result<()> {
let bucket_path = self.root_dir.join(bucket);
if !bucket_path.is_dir() {
return Err(S3Error::no_such_bucket().into());
}
// Bucket must be empty before deletion; any entry (including sidecar files) blocks it
let mut entries = fs::read_dir(&bucket_path).await?;
if entries.next_entry().await?.is_some() {
return Err(S3Error::bucket_not_empty().into());
}
fs::remove_dir_all(&bucket_path).await?;
Ok(())
}
// ============================
// Object operations
// ============================
pub async fn put_object(
&self,
bucket: &str,
key: &str,
body: Incoming,
metadata: HashMap<String, String>,
) -> Result<PutResult> {
if !self.bucket_exists(bucket).await {
return Err(S3Error::no_such_bucket().into());
}
let object_path = self.object_path(bucket, key);
if let Some(parent) = object_path.parent() {
fs::create_dir_all(parent).await?;
}
let file = fs::File::create(&object_path).await?;
let mut writer = BufWriter::new(file);
let mut hasher = Md5::new();
let mut total_size: u64 = 0;
// Stream body frames directly to file
let mut body = body;
loop {
match body.frame().await {
Some(Ok(frame)) => {
if let Ok(data) = frame.into_data() {
hasher.update(&data);
total_size += data.len() as u64;
writer.write_all(&data).await?;
}
}
Some(Err(e)) => {
return Err(anyhow::anyhow!("Body read error: {}", e));
}
None => break,
}
}
writer.flush().await?;
drop(writer);
let md5_hex = format!("{:x}", hasher.finalize());
// Write MD5 sidecar
let md5_path = format!("{}.md5", object_path.display());
fs::write(&md5_path, &md5_hex).await?;
// Write metadata sidecar
let metadata_path = format!("{}.metadata.json", object_path.display());
let metadata_json = serde_json::to_string_pretty(&metadata)?;
fs::write(&metadata_path, metadata_json).await?;
Ok(PutResult {
size: total_size,
md5: md5_hex,
})
}
pub async fn put_object_bytes(
&self,
bucket: &str,
key: &str,
data: &[u8],
metadata: HashMap<String, String>,
) -> Result<PutResult> {
if !self.bucket_exists(bucket).await {
return Err(S3Error::no_such_bucket().into());
}
let object_path = self.object_path(bucket, key);
if let Some(parent) = object_path.parent() {
fs::create_dir_all(parent).await?;
}
let mut hasher = Md5::new();
hasher.update(data);
let md5_hex = format!("{:x}", hasher.finalize());
fs::write(&object_path, data).await?;
// Write MD5 sidecar
let md5_path = format!("{}.md5", object_path.display());
fs::write(&md5_path, &md5_hex).await?;
// Write metadata sidecar
let metadata_path = format!("{}.metadata.json", object_path.display());
let metadata_json = serde_json::to_string_pretty(&metadata)?;
fs::write(&metadata_path, metadata_json).await?;
Ok(PutResult {
size: data.len() as u64,
md5: md5_hex,
})
}
pub async fn get_object(
&self,
bucket: &str,
key: &str,
range: Option<(u64, u64)>,
) -> Result<GetResult> {
let object_path = self.object_path(bucket, key);
if !object_path.exists() {
return Err(S3Error::no_such_key().into());
}
let file_meta = fs::metadata(&object_path).await?;
let size = file_meta.len();
let last_modified: DateTime<Utc> = file_meta.modified()?.into();
let md5 = self.read_md5(&object_path).await;
let metadata = self.read_metadata(&object_path).await;
let mut file = fs::File::open(&object_path).await?;
let content_length = if let Some((start, end)) = range {
file.seek(std::io::SeekFrom::Start(start)).await?;
// Clamp the (possibly open-ended) range to the object size to avoid overflow
let end = end.min(size.saturating_sub(1));
end.saturating_sub(start) + 1
} else {
size
};
Ok(GetResult {
key: key.to_string(),
size,
last_modified,
md5,
metadata,
body: file,
content_length,
})
}
pub async fn head_object(&self, bucket: &str, key: &str) -> Result<HeadResult> {
let object_path = self.object_path(bucket, key);
if !object_path.exists() {
return Err(S3Error::no_such_key().into());
}
// Only stat the file, don't open it
let file_meta = fs::metadata(&object_path).await?;
let size = file_meta.len();
let last_modified: DateTime<Utc> = file_meta.modified()?.into();
let md5 = self.read_md5(&object_path).await;
let metadata = self.read_metadata(&object_path).await;
Ok(HeadResult {
key: key.to_string(),
size,
last_modified,
md5,
metadata,
})
}
pub async fn delete_object(&self, bucket: &str, key: &str) -> Result<()> {
let object_path = self.object_path(bucket, key);
let md5_path = format!("{}.md5", object_path.display());
let metadata_path = format!("{}.metadata.json", object_path.display());
// S3 doesn't error if object doesn't exist
let _ = fs::remove_file(&object_path).await;
let _ = fs::remove_file(&md5_path).await;
let _ = fs::remove_file(&metadata_path).await;
// Clean up empty parent directories up to bucket level
let bucket_path = self.root_dir.join(bucket);
let mut current = object_path.parent().map(|p| p.to_path_buf());
while let Some(dir) = current {
if dir == bucket_path {
break;
}
if let Ok(mut entries) = fs::read_dir(&dir).await {
if entries.next_entry().await?.is_none() {
let _ = fs::remove_dir(&dir).await;
} else {
break;
}
}
current = dir.parent().map(|p| p.to_path_buf());
}
Ok(())
}
pub async fn copy_object(
&self,
src_bucket: &str,
src_key: &str,
dest_bucket: &str,
dest_key: &str,
metadata_directive: &str,
new_metadata: Option<HashMap<String, String>>,
) -> Result<CopyResult> {
let src_path = self.object_path(src_bucket, src_key);
let dest_path = self.object_path(dest_bucket, dest_key);
if !src_path.exists() {
return Err(S3Error::no_such_key().into());
}
if !self.bucket_exists(dest_bucket).await {
return Err(S3Error::no_such_bucket().into());
}
if let Some(parent) = dest_path.parent() {
fs::create_dir_all(parent).await?;
}
// Copy object file
fs::copy(&src_path, &dest_path).await?;
// Handle metadata
if metadata_directive == "COPY" {
let src_meta_path = format!("{}.metadata.json", src_path.display());
let dest_meta_path = format!("{}.metadata.json", dest_path.display());
let _ = fs::copy(&src_meta_path, &dest_meta_path).await;
} else if let Some(meta) = new_metadata {
let dest_meta_path = format!("{}.metadata.json", dest_path.display());
let json = serde_json::to_string_pretty(&meta)?;
fs::write(&dest_meta_path, json).await?;
}
// Copy MD5
let src_md5_path = format!("{}.md5", src_path.display());
let dest_md5_path = format!("{}.md5", dest_path.display());
let _ = fs::copy(&src_md5_path, &dest_md5_path).await;
let file_meta = fs::metadata(&dest_path).await?;
let md5 = self.read_md5(&dest_path).await;
let last_modified: DateTime<Utc> = file_meta.modified()?.into();
Ok(CopyResult {
size: file_meta.len(),
md5,
last_modified,
})
}
pub async fn list_objects(
&self,
bucket: &str,
prefix: &str,
delimiter: &str,
max_keys: usize,
continuation_token: Option<&str>,
) -> Result<ListObjectsResult> {
let bucket_path = self.root_dir.join(bucket);
if !bucket_path.is_dir() {
return Err(S3Error::no_such_bucket().into());
}
// Collect all object keys recursively
let mut keys = Vec::new();
self.collect_keys(&bucket_path, &bucket_path, &mut keys)
.await?;
// Apply prefix filter
if !prefix.is_empty() {
keys.retain(|k| k.starts_with(prefix));
}
keys.sort();
// Handle continuation token
if let Some(token) = continuation_token {
if let Some(pos) = keys.iter().position(|k| k.as_str() > token) {
keys = keys[pos..].to_vec();
} else {
keys.clear();
}
}
// Handle delimiter and pagination
let mut common_prefixes: Vec<String> = Vec::new();
let mut common_prefix_set = std::collections::HashSet::new();
let mut contents: Vec<ListObjectEntry> = Vec::new();
let mut is_truncated = false;
for key in &keys {
if !delimiter.is_empty() {
let remaining = &key[prefix.len()..];
if let Some(delim_idx) = remaining.find(delimiter) {
let cp = format!(
"{}{}",
prefix,
&remaining[..delim_idx + delimiter.len()]
);
if common_prefix_set.insert(cp.clone()) {
common_prefixes.push(cp);
}
continue;
}
}
if contents.len() >= max_keys {
is_truncated = true;
break;
}
let object_path = self.object_path(bucket, key);
if let Ok(meta) = fs::metadata(&object_path).await {
let md5 = self.read_md5(&object_path).await;
let last_modified: DateTime<Utc> = meta.modified().unwrap_or(std::time::SystemTime::UNIX_EPOCH).into();
contents.push(ListObjectEntry {
key: key.clone(),
size: meta.len(),
last_modified,
md5,
});
}
}
let next_continuation_token = if is_truncated {
contents.last().map(|e| e.key.clone())
} else {
None
};
common_prefixes.sort();
Ok(ListObjectsResult {
contents,
common_prefixes,
is_truncated,
next_continuation_token,
prefix: prefix.to_string(),
delimiter: delimiter.to_string(),
max_keys,
})
}
// ============================
// Multipart operations
// ============================
fn multipart_dir(&self) -> PathBuf {
self.root_dir.join(".multipart")
}
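/// Starts a multipart upload by creating `.multipart/{uploadId}/metadata.json`.
/// Parts are later written as `part-{n}` files beside it and concatenated on completion.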
pub async fn initiate_multipart(
&self,
bucket: &str,
key: &str,
metadata: HashMap<String, String>,
) -> Result<String> {
let upload_id = Uuid::new_v4().to_string().replace('-', "");
let upload_dir = self.multipart_dir().join(&upload_id);
fs::create_dir_all(&upload_dir).await?;
let meta = MultipartMetadata {
upload_id: upload_id.clone(),
bucket: bucket.to_string(),
key: key.to_string(),
initiated: Utc::now().to_rfc3339(),
metadata,
parts: Vec::new(),
};
let meta_path = upload_dir.join("metadata.json");
let json = serde_json::to_string_pretty(&meta)?;
fs::write(&meta_path, json).await?;
Ok(upload_id)
}
pub async fn upload_part(
&self,
upload_id: &str,
part_number: u32,
body: Incoming,
) -> Result<(String, u64)> {
let upload_dir = self.multipart_dir().join(upload_id);
if !upload_dir.is_dir() {
return Err(S3Error::no_such_upload().into());
}
let part_path = upload_dir.join(format!("part-{}", part_number));
let file = fs::File::create(&part_path).await?;
let mut writer = BufWriter::new(file);
let mut hasher = Md5::new();
let mut size: u64 = 0;
let mut body = body;
loop {
match body.frame().await {
Some(Ok(frame)) => {
if let Ok(data) = frame.into_data() {
hasher.update(&data);
size += data.len() as u64;
writer.write_all(&data).await?;
}
}
Some(Err(e)) => {
return Err(anyhow::anyhow!("Body read error: {}", e));
}
None => break,
}
}
writer.flush().await?;
drop(writer);
let etag = format!("{:x}", hasher.finalize());
// Update metadata
self.update_multipart_metadata(upload_id, part_number, &etag, size)
.await?;
Ok((etag, size))
}
async fn update_multipart_metadata(
&self,
upload_id: &str,
part_number: u32,
etag: &str,
size: u64,
) -> Result<()> {
let meta_path = self.multipart_dir().join(upload_id).join("metadata.json");
let content = fs::read_to_string(&meta_path).await?;
let mut meta: MultipartMetadata = serde_json::from_str(&content)?;
// Remove existing part with same number
meta.parts.retain(|p| p.part_number != part_number);
meta.parts.push(PartMetadata {
part_number,
etag: etag.to_string(),
size,
last_modified: Utc::now().to_rfc3339(),
});
meta.parts.sort_by_key(|p| p.part_number);
let json = serde_json::to_string_pretty(&meta)?;
fs::write(&meta_path, json).await?;
Ok(())
}
pub async fn complete_multipart(
&self,
upload_id: &str,
parts: &[(u32, String)],
) -> Result<CompleteMultipartResult> {
let upload_dir = self.multipart_dir().join(upload_id);
if !upload_dir.is_dir() {
return Err(S3Error::no_such_upload().into());
}
// Read metadata to get bucket/key
let meta_path = upload_dir.join("metadata.json");
let content = fs::read_to_string(&meta_path).await?;
let meta: MultipartMetadata = serde_json::from_str(&content)?;
let object_path = self.object_path(&meta.bucket, &meta.key);
if let Some(parent) = object_path.parent() {
fs::create_dir_all(parent).await?;
}
// Concatenate parts into final object, stream each part
let dest_file = fs::File::create(&object_path).await?;
let mut writer = BufWriter::new(dest_file);
let mut hasher = Md5::new();
let mut total_size: u64 = 0;
for (part_number, _etag) in parts {
let part_path = upload_dir.join(format!("part-{}", part_number));
if !part_path.exists() {
return Err(anyhow::anyhow!("Part {} not found", part_number));
}
let mut part_file = fs::File::open(&part_path).await?;
let mut buf = vec![0u8; 64 * 1024]; // 64KB buffer
loop {
let n = part_file.read(&mut buf).await?;
if n == 0 {
break;
}
hasher.update(&buf[..n]);
writer.write_all(&buf[..n]).await?;
total_size += n as u64;
}
}
writer.flush().await?;
drop(writer);
let etag = format!("{:x}", hasher.finalize());
// Write MD5 sidecar
let md5_path = format!("{}.md5", object_path.display());
fs::write(&md5_path, &etag).await?;
// Write metadata sidecar
let metadata_path = format!("{}.metadata.json", object_path.display());
let metadata_json = serde_json::to_string_pretty(&meta.metadata)?;
fs::write(&metadata_path, metadata_json).await?;
// Clean up multipart directory
let _ = fs::remove_dir_all(&upload_dir).await;
Ok(CompleteMultipartResult {
etag,
size: total_size,
})
}
pub async fn abort_multipart(&self, upload_id: &str) -> Result<()> {
let upload_dir = self.multipart_dir().join(upload_id);
if !upload_dir.is_dir() {
return Err(S3Error::no_such_upload().into());
}
fs::remove_dir_all(&upload_dir).await?;
Ok(())
}
pub async fn list_multipart_uploads(
&self,
bucket: &str,
) -> Result<Vec<MultipartUploadInfo>> {
let multipart_dir = self.multipart_dir();
if !multipart_dir.is_dir() {
return Ok(Vec::new());
}
let mut uploads = Vec::new();
let mut entries = fs::read_dir(&multipart_dir).await?;
while let Some(entry) = entries.next_entry().await? {
if !entry.metadata().await?.is_dir() {
continue;
}
let meta_path = entry.path().join("metadata.json");
if let Ok(content) = fs::read_to_string(&meta_path).await {
if let Ok(meta) = serde_json::from_str::<MultipartMetadata>(&content) {
if meta.bucket == bucket {
let initiated = DateTime::parse_from_rfc3339(&meta.initiated)
.map(|dt| dt.with_timezone(&Utc))
.unwrap_or_else(|_| Utc::now());
uploads.push(MultipartUploadInfo {
upload_id: meta.upload_id,
bucket: meta.bucket,
key: meta.key,
initiated,
});
}
}
}
}
Ok(uploads)
}
// ============================
// Helpers
// ============================
fn object_path(&self, bucket: &str, key: &str) -> PathBuf {
let encoded = encode_key(key);
self.root_dir
.join(bucket)
.join(format!("{}._S3_object", encoded))
}
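/// Returns the cached MD5 from the `.md5` sidecar, recomputing (and re-caching)
/// it from the object data if the sidecar is missing.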
async fn read_md5(&self, object_path: &Path) -> String {
let md5_path = format!("{}.md5", object_path.display());
match fs::read_to_string(&md5_path).await {
Ok(s) => s.trim().to_string(),
Err(_) => {
// Calculate MD5 if sidecar missing
match self.calculate_md5(object_path).await {
Ok(hash) => {
let _ = fs::write(&md5_path, &hash).await;
hash
}
Err(_) => String::new(),
}
}
}
}
async fn calculate_md5(&self, path: &Path) -> Result<String> {
let mut file = fs::File::open(path).await?;
let mut hasher = Md5::new();
let mut buf = vec![0u8; 64 * 1024];
loop {
let n = file.read(&mut buf).await?;
if n == 0 {
break;
}
hasher.update(&buf[..n]);
}
Ok(format!("{:x}", hasher.finalize()))
}
async fn read_metadata(&self, object_path: &Path) -> HashMap<String, String> {
let meta_path = format!("{}.metadata.json", object_path.display());
match fs::read_to_string(&meta_path).await {
Ok(s) => serde_json::from_str(&s).unwrap_or_default(),
Err(_) => HashMap::new(),
}
}
fn collect_keys<'a>(
&'a self,
bucket_path: &'a Path,
dir: &'a Path,
keys: &'a mut Vec<String>,
) -> std::pin::Pin<Box<dyn std::future::Future<Output = Result<()>> + Send + 'a>> {
Box::pin(async move {
let mut entries = match fs::read_dir(dir).await {
Ok(e) => e,
Err(_) => return Ok(()),
};
while let Some(entry) = entries.next_entry().await? {
let meta = entry.metadata().await?;
let name = entry.file_name().to_string_lossy().to_string();
if meta.is_dir() {
self.collect_keys(bucket_path, &entry.path(), keys).await?;
} else if name.ends_with("._S3_object")
&& !name.ends_with(".metadata.json")
&& !name.ends_with(".md5")
{
let relative = entry
.path()
.strip_prefix(bucket_path)
.unwrap_or(Path::new(""))
.to_string_lossy()
.to_string();
let key = decode_key(relative.trim_end_matches("._S3_object"));
keys.push(key);
}
}
Ok(())
})
}
}
// ============================
// Key encoding (identity on non-Windows platforms)
// ============================
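/// On Windows, characters that are invalid in file names are escaped as
/// `&` followed by two hex digits (e.g. `:` -> `&3a`); elsewhere keys map
/// directly to filesystem paths.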
fn encode_key(key: &str) -> String {
if cfg!(windows) {
key.chars()
.map(|c| match c {
'<' | '>' | ':' | '"' | '\\' | '|' | '?' | '*' => {
format!("&{:02x}", c as u32)
}
_ => c.to_string(),
})
.collect()
} else {
key.to_string()
}
}
fn decode_key(encoded: &str) -> String {
if cfg!(windows) {
let mut result = String::new();
let mut chars = encoded.chars();
while let Some(c) = chars.next() {
if c == '&' {
let hex: String = chars.by_ref().take(2).collect();
if let Ok(byte) = u8::from_str_radix(&hex, 16) {
result.push(byte as char);
} else {
result.push('&');
result.push_str(&hex);
}
} else {
result.push(c);
}
}
result
} else {
encoded.to_string()
}
}

220
rust/src/xml_response.rs Normal file
View File

@@ -0,0 +1,220 @@
use crate::storage::{BucketInfo, ListObjectsResult, MultipartUploadInfo};
const XML_DECL: &str = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>";
const S3_NS: &str = "http://s3.amazonaws.com/doc/2006-03-01/";
fn xml_escape(s: &str) -> String {
s.replace('&', "&amp;")
.replace('<', "&lt;")
.replace('>', "&gt;")
.replace('"', "&quot;")
.replace('\'', "&apos;")
}
pub fn list_buckets_xml(buckets: &[BucketInfo]) -> String {
let mut xml = format!(
"{}\n<ListAllMyBucketsResult xmlns=\"{}\">\
<Owner><ID>123456789000</ID><DisplayName>S3rver</DisplayName></Owner>\
<Buckets>",
XML_DECL, S3_NS
);
for b in buckets {
xml.push_str(&format!(
"<Bucket><Name>{}</Name><CreationDate>{}</CreationDate></Bucket>",
xml_escape(&b.name),
b.creation_date.to_rfc3339()
));
}
xml.push_str("</Buckets></ListAllMyBucketsResult>");
xml
}
pub fn list_objects_v1_xml(bucket: &str, result: &ListObjectsResult) -> String {
let mut xml = format!(
"{}\n<ListBucketResult xmlns=\"{}\">\
<Name>{}</Name>\
<Prefix>{}</Prefix>\
<MaxKeys>{}</MaxKeys>\
<IsTruncated>{}</IsTruncated>",
XML_DECL,
S3_NS,
xml_escape(bucket),
xml_escape(&result.prefix),
result.max_keys,
result.is_truncated
);
if !result.delimiter.is_empty() {
xml.push_str(&format!("<Delimiter>{}</Delimiter>", xml_escape(&result.delimiter)));
}
for entry in &result.contents {
xml.push_str(&format!(
"<Contents>\
<Key>{}</Key>\
<LastModified>{}</LastModified>\
<ETag>\"{}\"</ETag>\
<Size>{}</Size>\
<StorageClass>STANDARD</StorageClass>\
</Contents>",
xml_escape(&entry.key),
entry.last_modified.to_rfc3339(),
xml_escape(&entry.md5),
entry.size
));
}
for cp in &result.common_prefixes {
xml.push_str(&format!(
"<CommonPrefixes><Prefix>{}</Prefix></CommonPrefixes>",
xml_escape(cp)
));
}
xml.push_str("</ListBucketResult>");
xml
}
pub fn list_objects_v2_xml(bucket: &str, result: &ListObjectsResult) -> String {
let mut xml = format!(
"{}\n<ListBucketResult xmlns=\"{}\">\
<Name>{}</Name>\
<Prefix>{}</Prefix>\
<MaxKeys>{}</MaxKeys>\
<KeyCount>{}</KeyCount>\
<IsTruncated>{}</IsTruncated>",
XML_DECL,
S3_NS,
xml_escape(bucket),
xml_escape(&result.prefix),
result.max_keys,
result.contents.len(),
result.is_truncated
);
if !result.delimiter.is_empty() {
xml.push_str(&format!("<Delimiter>{}</Delimiter>", xml_escape(&result.delimiter)));
}
if let Some(ref token) = result.next_continuation_token {
xml.push_str(&format!(
"<NextContinuationToken>{}</NextContinuationToken>",
xml_escape(token)
));
}
for entry in &result.contents {
xml.push_str(&format!(
"<Contents>\
<Key>{}</Key>\
<LastModified>{}</LastModified>\
<ETag>\"{}\"</ETag>\
<Size>{}</Size>\
<StorageClass>STANDARD</StorageClass>\
</Contents>",
xml_escape(&entry.key),
entry.last_modified.to_rfc3339(),
xml_escape(&entry.md5),
entry.size
));
}
for cp in &result.common_prefixes {
xml.push_str(&format!(
"<CommonPrefixes><Prefix>{}</Prefix></CommonPrefixes>",
xml_escape(cp)
));
}
xml.push_str("</ListBucketResult>");
xml
}
pub fn error_xml(code: &str, message: &str) -> String {
format!(
"{}\n<Error><Code>{}</Code><Message>{}</Message></Error>",
XML_DECL,
xml_escape(code),
xml_escape(message)
)
}
pub fn copy_object_result_xml(etag: &str, last_modified: &str) -> String {
format!(
"{}\n<CopyObjectResult>\
<LastModified>{}</LastModified>\
<ETag>\"{}\"</ETag>\
</CopyObjectResult>",
XML_DECL,
xml_escape(last_modified),
xml_escape(etag)
)
}
pub fn initiate_multipart_xml(bucket: &str, key: &str, upload_id: &str) -> String {
format!(
"{}\n<InitiateMultipartUploadResult xmlns=\"{}\">\
<Bucket>{}</Bucket>\
<Key>{}</Key>\
<UploadId>{}</UploadId>\
</InitiateMultipartUploadResult>",
XML_DECL,
S3_NS,
xml_escape(bucket),
xml_escape(key),
xml_escape(upload_id)
)
}
pub fn complete_multipart_xml(bucket: &str, key: &str, etag: &str) -> String {
format!(
"{}\n<CompleteMultipartUploadResult xmlns=\"{}\">\
<Location>/{}/{}</Location>\
<Bucket>{}</Bucket>\
<Key>{}</Key>\
<ETag>\"{}\"</ETag>\
</CompleteMultipartUploadResult>",
XML_DECL,
S3_NS,
xml_escape(bucket),
xml_escape(key),
xml_escape(bucket),
xml_escape(key),
xml_escape(etag)
)
}
pub fn list_multipart_uploads_xml(bucket: &str, uploads: &[MultipartUploadInfo]) -> String {
let mut xml = format!(
"{}\n<ListMultipartUploadsResult xmlns=\"{}\">\
<Bucket>{}</Bucket>\
<KeyMarker></KeyMarker>\
<UploadIdMarker></UploadIdMarker>\
<MaxUploads>1000</MaxUploads>\
<IsTruncated>false</IsTruncated>",
XML_DECL,
S3_NS,
xml_escape(bucket)
);
for u in uploads {
xml.push_str(&format!(
"<Upload>\
<Key>{}</Key>\
<UploadId>{}</UploadId>\
<Initiator><ID>S3RVER</ID><DisplayName>S3RVER</DisplayName></Initiator>\
<Owner><ID>S3RVER</ID><DisplayName>S3RVER</DisplayName></Owner>\
<StorageClass>STANDARD</StorageClass>\
<Initiated>{}</Initiated>\
</Upload>",
xml_escape(&u.key),
xml_escape(&u.upload_id),
u.initiated.to_rfc3339()
));
}
xml.push_str("</ListMultipartUploadsResult>");
xml
}

View File

@@ -3,6 +3,6 @@
*/
export const commitinfo = {
name: '@push.rocks/smarts3',
version: '5.1.0',
version: '5.1.1',
description: 'A Node.js TypeScript package to create a local S3 endpoint for simulating AWS S3 operations using mapped local directories for development and testing purposes.'
}

View File

@@ -1,118 +0,0 @@
import * as plugins from '../plugins.js';
import { S3Error } from './s3-error.js';
import { createXml } from '../utils/xml.utils.js';
import type { FilesystemStore } from './filesystem-store.js';
import type { MultipartUploadManager } from './multipart-manager.js';
import type { Readable } from 'stream';
/**
* S3 request context with helper methods
*/
export class S3Context {
public method: string;
public url: URL;
public headers: plugins.http.IncomingHttpHeaders;
public params: Record<string, string> = {};
public query: Record<string, string> = {};
public store: FilesystemStore;
public multipart: MultipartUploadManager;
private req: plugins.http.IncomingMessage;
private res: plugins.http.ServerResponse;
private statusCode: number = 200;
private responseHeaders: Record<string, string> = {};
constructor(
req: plugins.http.IncomingMessage,
res: plugins.http.ServerResponse,
store: FilesystemStore,
multipart: MultipartUploadManager
) {
this.req = req;
this.res = res;
this.store = store;
this.multipart = multipart;
this.method = req.method || 'GET';
this.headers = req.headers;
// Parse URL and query string
const fullUrl = `http://${req.headers.host || 'localhost'}${req.url || '/'}`;
this.url = new URL(fullUrl);
// Parse query string into object
this.url.searchParams.forEach((value, key) => {
this.query[key] = value;
});
}
/**
* Set response status code
*/
public status(code: number): this {
this.statusCode = code;
return this;
}
/**
* Set response header
*/
public setHeader(name: string, value: string | number): this {
this.responseHeaders[name] = value.toString();
return this;
}
/**
* Send response body (string, Buffer, or Stream)
*/
public async send(body: string | Buffer | Readable | NodeJS.ReadableStream): Promise<void> {
// Write status and headers
this.res.writeHead(this.statusCode, this.responseHeaders);
// Handle different body types
if (typeof body === 'string' || body instanceof Buffer) {
this.res.end(body);
} else if (body && typeof (body as any).pipe === 'function') {
// It's a stream
(body as Readable).pipe(this.res);
} else {
this.res.end();
}
}
/**
* Send XML response
*/
public async sendXML(obj: any): Promise<void> {
const xml = createXml(obj, { format: true });
this.setHeader('Content-Type', 'application/xml');
this.setHeader('Content-Length', Buffer.byteLength(xml));
await this.send(xml);
}
/**
* Throw an S3 error
*/
public throw(code: string, message: string, detail?: Record<string, any>): never {
throw new S3Error(code, message, detail);
}
/**
* Read and parse request body as string
*/
public async readBody(): Promise<string> {
return new Promise((resolve, reject) => {
const chunks: Buffer[] = [];
this.req.on('data', (chunk) => chunks.push(chunk));
this.req.on('end', () => resolve(Buffer.concat(chunks).toString('utf8')));
this.req.on('error', reject);
});
}
/**
* Get the request stream (for streaming uploads)
*/
public getRequestStream(): NodeJS.ReadableStream {
return this.req;
}
}

View File

@@ -1,562 +0,0 @@
import * as plugins from '../plugins.js';
import { S3Error } from './s3-error.js';
import { Readable } from 'stream';
export interface IS3Bucket {
name: string;
creationDate: Date;
}
export interface IS3Object {
key: string;
size: number;
lastModified: Date;
md5: string;
metadata: Record<string, string>;
content?: Readable;
}
export interface IListObjectsOptions {
prefix?: string;
delimiter?: string;
maxKeys?: number;
continuationToken?: string;
}
export interface IListObjectsResult {
contents: IS3Object[];
commonPrefixes: string[];
isTruncated: boolean;
nextContinuationToken?: string;
prefix: string;
delimiter: string;
maxKeys: number;
}
export interface IRangeOptions {
start: number;
end: number;
}
/**
* Filesystem-backed storage for S3 objects using smartfs
*/
export class FilesystemStore {
constructor(private rootDir: string) {}
/**
* Initialize store (ensure root directory exists)
*/
public async initialize(): Promise<void> {
await plugins.smartfs.directory(this.rootDir).recursive().create();
}
/**
* Reset store (delete all buckets)
*/
public async reset(): Promise<void> {
// Delete directory and recreate it
const exists = await plugins.smartfs.directory(this.rootDir).exists();
if (exists) {
await plugins.smartfs.directory(this.rootDir).recursive().delete();
}
await plugins.smartfs.directory(this.rootDir).recursive().create();
}
// ============================
// BUCKET OPERATIONS
// ============================
/**
* List all buckets
*/
public async listBuckets(): Promise<IS3Bucket[]> {
const entries = await plugins.smartfs.directory(this.rootDir).includeStats().list();
const buckets: IS3Bucket[] = [];
for (const entry of entries) {
if (entry.isDirectory && entry.stats) {
buckets.push({
name: entry.name,
creationDate: entry.stats.birthtime,
});
}
}
return buckets.sort((a, b) => a.name.localeCompare(b.name));
}
/**
* Check if bucket exists
*/
public async bucketExists(bucket: string): Promise<boolean> {
const bucketPath = this.getBucketPath(bucket);
return plugins.smartfs.directory(bucketPath).exists();
}
/**
* Create bucket
*/
public async createBucket(bucket: string): Promise<void> {
const bucketPath = this.getBucketPath(bucket);
await plugins.smartfs.directory(bucketPath).recursive().create();
}
/**
* Delete bucket (must be empty)
*/
public async deleteBucket(bucket: string): Promise<void> {
const bucketPath = this.getBucketPath(bucket);
// Check if bucket exists
if (!(await this.bucketExists(bucket))) {
throw new S3Error('NoSuchBucket', 'The specified bucket does not exist');
}
// Check if bucket is empty
const files = await plugins.smartfs.directory(bucketPath).recursive().list();
if (files.length > 0) {
throw new S3Error('BucketNotEmpty', 'The bucket you tried to delete is not empty');
}
await plugins.smartfs.directory(bucketPath).recursive().delete();
}
// ============================
// OBJECT OPERATIONS
// ============================
/**
* List objects in bucket
*/
public async listObjects(
bucket: string,
options: IListObjectsOptions = {}
): Promise<IListObjectsResult> {
const bucketPath = this.getBucketPath(bucket);
if (!(await this.bucketExists(bucket))) {
throw new S3Error('NoSuchBucket', 'The specified bucket does not exist');
}
const {
prefix = '',
delimiter = '',
maxKeys = 1000,
continuationToken,
} = options;
// List all object files recursively with filter
const entries = await plugins.smartfs
.directory(bucketPath)
.recursive()
.filter((entry) => entry.name.endsWith('._S3_object'))
.list();
// Convert file paths to keys
let keys = entries.map((entry) => {
const relativePath = plugins.path.relative(bucketPath, entry.path);
const key = this.decodeKey(relativePath.replace(/\._S3_object$/, ''));
return key;
});
// Apply prefix filter
if (prefix) {
keys = keys.filter((key) => key.startsWith(prefix));
}
// Sort keys
keys = keys.sort();
// Handle continuation token (simple implementation using key name)
if (continuationToken) {
const startIndex = keys.findIndex((key) => key > continuationToken);
if (startIndex > 0) {
keys = keys.slice(startIndex);
}
}
// Handle delimiter (common prefixes)
const commonPrefixes: Set<string> = new Set();
const contents: IS3Object[] = [];
for (const key of keys) {
if (delimiter) {
// Find first delimiter after prefix
const remainingKey = key.slice(prefix.length);
const delimiterIndex = remainingKey.indexOf(delimiter);
if (delimiterIndex !== -1) {
// This key has a delimiter, add to common prefixes
const commonPrefix = prefix + remainingKey.slice(0, delimiterIndex + delimiter.length);
commonPrefixes.add(commonPrefix);
continue;
}
}
// Add to contents (limited by maxKeys)
if (contents.length >= maxKeys) {
break;
}
try {
const objectInfo = await this.getObjectInfo(bucket, key);
contents.push(objectInfo);
} catch (err) {
// Skip if object no longer exists
continue;
}
}
const isTruncated = keys.length > contents.length + commonPrefixes.size;
const nextContinuationToken = isTruncated
? contents[contents.length - 1]?.key
: undefined;
return {
contents,
commonPrefixes: Array.from(commonPrefixes).sort(),
isTruncated,
nextContinuationToken,
prefix,
delimiter,
maxKeys,
};
}
/**
* Get object info (without content)
*/
private async getObjectInfo(bucket: string, key: string): Promise<IS3Object> {
const objectPath = this.getObjectPath(bucket, key);
const metadataPath = `${objectPath}.metadata.json`;
const md5Path = `${objectPath}.md5`;
const [stats, metadata, md5] = await Promise.all([
plugins.smartfs.file(objectPath).stat(),
this.readMetadata(metadataPath),
this.readMD5(objectPath, md5Path),
]);
return {
key,
size: stats.size,
lastModified: stats.mtime,
md5,
metadata,
};
}
/**
* Check if object exists
*/
public async objectExists(bucket: string, key: string): Promise<boolean> {
const objectPath = this.getObjectPath(bucket, key);
return plugins.smartfs.file(objectPath).exists();
}
/**
* Put object (upload with streaming)
*/
public async putObject(
bucket: string,
key: string,
stream: NodeJS.ReadableStream,
metadata: Record<string, string> = {}
): Promise<{ size: number; md5: string }> {
const objectPath = this.getObjectPath(bucket, key);
// Ensure bucket exists
if (!(await this.bucketExists(bucket))) {
throw new S3Error('NoSuchBucket', 'The specified bucket does not exist');
}
// Ensure parent directory exists
const parentDir = plugins.path.dirname(objectPath);
await plugins.smartfs.directory(parentDir).recursive().create();
// Write with MD5 calculation
const result = await this.writeStreamWithMD5(stream, objectPath);
// Save metadata
const metadataPath = `${objectPath}.metadata.json`;
await plugins.smartfs.file(metadataPath).write(JSON.stringify(metadata, null, 2));
return result;
}
/**
* Get object (download with streaming)
*/
public async getObject(
bucket: string,
key: string,
range?: IRangeOptions
): Promise<IS3Object> {
const objectPath = this.getObjectPath(bucket, key);
if (!(await this.objectExists(bucket, key))) {
throw new S3Error('NoSuchKey', 'The specified key does not exist');
}
const info = await this.getObjectInfo(bucket, key);
// Get Web ReadableStream from smartfs
const webStream = await plugins.smartfs.file(objectPath).readStream();
// Convert Web Stream to Node.js Readable stream
let nodeStream = Readable.fromWeb(webStream as any);
// Handle range requests if needed
if (range) {
// For range requests, we need to skip bytes and limit output
let bytesRead = 0;
const rangeStart = range.start;
const rangeEnd = range.end;
nodeStream = nodeStream.pipe(new (require('stream').Transform)({
transform(chunk: Buffer, encoding, callback) {
const chunkStart = bytesRead;
const chunkEnd = bytesRead + chunk.length - 1;
bytesRead += chunk.length;
// Skip chunks before range
if (chunkEnd < rangeStart) {
callback();
return;
}
// Stop after range
if (chunkStart > rangeEnd) {
this.end();
callback();
return;
}
// Slice chunk to fit range
const sliceStart = Math.max(0, rangeStart - chunkStart);
const sliceEnd = Math.min(chunk.length, rangeEnd - chunkStart + 1);
callback(null, chunk.slice(sliceStart, sliceEnd));
}
}));
}
return {
...info,
content: nodeStream,
};
}
/**
* Delete object
*/
public async deleteObject(bucket: string, key: string): Promise<void> {
const objectPath = this.getObjectPath(bucket, key);
const metadataPath = `${objectPath}.metadata.json`;
const md5Path = `${objectPath}.md5`;
// S3 doesn't throw error if object doesn't exist
await Promise.all([
plugins.smartfs.file(objectPath).delete().catch(() => {}),
plugins.smartfs.file(metadataPath).delete().catch(() => {}),
plugins.smartfs.file(md5Path).delete().catch(() => {}),
]);
}
/**
* Copy object
*/
public async copyObject(
srcBucket: string,
srcKey: string,
destBucket: string,
destKey: string,
metadataDirective: 'COPY' | 'REPLACE' = 'COPY',
newMetadata?: Record<string, string>
): Promise<{ size: number; md5: string }> {
const srcObjectPath = this.getObjectPath(srcBucket, srcKey);
const destObjectPath = this.getObjectPath(destBucket, destKey);
// Check source exists
if (!(await this.objectExists(srcBucket, srcKey))) {
throw new S3Error('NoSuchKey', 'The specified key does not exist');
}
// Ensure dest bucket exists
if (!(await this.bucketExists(destBucket))) {
throw new S3Error('NoSuchBucket', 'The specified bucket does not exist');
}
// Ensure parent directory exists
const parentDir = plugins.path.dirname(destObjectPath);
await plugins.smartfs.directory(parentDir).recursive().create();
// Copy object file
await plugins.smartfs.file(srcObjectPath).copy(destObjectPath);
// Handle metadata
if (metadataDirective === 'COPY') {
// Copy metadata
const srcMetadataPath = `${srcObjectPath}.metadata.json`;
const destMetadataPath = `${destObjectPath}.metadata.json`;
await plugins.smartfs.file(srcMetadataPath).copy(destMetadataPath).catch(() => {});
} else if (newMetadata) {
// Replace with new metadata
const destMetadataPath = `${destObjectPath}.metadata.json`;
await plugins.smartfs.file(destMetadataPath).write(JSON.stringify(newMetadata, null, 2));
}
// Copy MD5
const srcMD5Path = `${srcObjectPath}.md5`;
const destMD5Path = `${destObjectPath}.md5`;
await plugins.smartfs.file(srcMD5Path).copy(destMD5Path).catch(() => {});
// Get result info
const stats = await plugins.smartfs.file(destObjectPath).stat();
const md5 = await this.readMD5(destObjectPath, destMD5Path);
return { size: stats.size, md5 };
}
// ============================
// HELPER METHODS
// ============================
/**
* Get bucket directory path
*/
private getBucketPath(bucket: string): string {
return plugins.path.join(this.rootDir, bucket);
}
/**
* Get object file path
*/
private getObjectPath(bucket: string, key: string): string {
return plugins.path.join(
this.rootDir,
bucket,
this.encodeKey(key) + '._S3_object'
);
}
/**
* Encode key for Windows compatibility
*/
private encodeKey(key: string): string {
if (process.platform === 'win32') {
// Replace invalid Windows filename chars with hex encoding
return key.replace(/[<>:"\\|?*]/g, (ch) =>
'&' + Buffer.from(ch, 'utf8').toString('hex')
);
}
return key;
}
/**
* Decode key from filesystem path
*/
private decodeKey(encodedKey: string): string {
if (process.platform === 'win32') {
// Decode hex-encoded chars
return encodedKey.replace(/&([0-9a-f]{2})/gi, (_, hex) =>
Buffer.from(hex, 'hex').toString('utf8')
);
}
return encodedKey;
}
/**
* Write stream to file with MD5 calculation
*/
private async writeStreamWithMD5(
input: NodeJS.ReadableStream,
destPath: string
): Promise<{ size: number; md5: string }> {
const hash = plugins.crypto.createHash('md5');
let totalSize = 0;
return new Promise(async (resolve, reject) => {
// Get Web WritableStream from smartfs
const webWriteStream = await plugins.smartfs.file(destPath).writeStream();
const writer = webWriteStream.getWriter();
// Read from Node.js stream and write to Web stream
input.on('data', async (chunk: Buffer) => {
hash.update(chunk);
totalSize += chunk.length;
try {
await writer.write(new Uint8Array(chunk));
} catch (err) {
reject(err);
}
});
input.on('error', (err) => {
writer.abort(err);
reject(err);
});
input.on('end', async () => {
try {
await writer.close();
const md5 = hash.digest('hex');
// Save MD5 to separate file
const md5Path = `${destPath}.md5`;
await plugins.smartfs.file(md5Path).write(md5);
resolve({ size: totalSize, md5 });
} catch (err) {
reject(err);
}
});
});
}
/**
* Read MD5 hash (calculate if missing)
*/
private async readMD5(objectPath: string, md5Path: string): Promise<string> {
try {
// Try to read cached MD5
const md5 = await plugins.smartfs.file(md5Path).encoding('utf8').read() as string;
return md5.trim();
} catch (err) {
// Calculate MD5 if not cached
return new Promise(async (resolve, reject) => {
const hash = plugins.crypto.createHash('md5');
try {
const webStream = await plugins.smartfs.file(objectPath).readStream();
const nodeStream = Readable.fromWeb(webStream as any);
nodeStream.on('data', (chunk: Buffer) => hash.update(chunk));
nodeStream.on('end', async () => {
const md5 = hash.digest('hex');
// Cache it
await plugins.smartfs.file(md5Path).write(md5);
resolve(md5);
});
nodeStream.on('error', reject);
} catch (err) {
reject(err);
}
});
}
}
/**
* Read metadata from JSON file
*/
private async readMetadata(metadataPath: string): Promise<Record<string, string>> {
try {
const content = await plugins.smartfs.file(metadataPath).encoding('utf8').read() as string;
return JSON.parse(content);
} catch (err) {
return {};
}
}
}

View File

@@ -1,130 +0,0 @@
import type { ILoggingConfig } from '../index.js';
/**
* Log levels in order of severity
*/
const LOG_LEVELS = {
error: 0,
warn: 1,
info: 2,
debug: 3,
} as const;
type LogLevel = keyof typeof LOG_LEVELS;
/**
* Structured logger with configurable levels and formats
*/
export class Logger {
private config: Required<ILoggingConfig>;
private minLevel: number;
constructor(config: ILoggingConfig) {
// Apply defaults for any missing config
this.config = {
level: config.level ?? 'info',
format: config.format ?? 'text',
enabled: config.enabled ?? true,
};
this.minLevel = LOG_LEVELS[this.config.level];
}
/**
* Check if a log level should be output
*/
private shouldLog(level: LogLevel): boolean {
if (!this.config.enabled) {
return false;
}
return LOG_LEVELS[level] <= this.minLevel;
}
/**
* Format a log message
*/
private format(level: LogLevel, message: string, meta?: Record<string, any>): string {
const timestamp = new Date().toISOString();
if (this.config.format === 'json') {
return JSON.stringify({
timestamp,
level,
message,
...(meta || {}),
});
}
// Text format
const metaStr = meta ? ` ${JSON.stringify(meta)}` : '';
return `[${timestamp}] ${level.toUpperCase()}: ${message}${metaStr}`;
}
/**
* Log at error level
*/
public error(message: string, meta?: Record<string, any>): void {
if (this.shouldLog('error')) {
console.error(this.format('error', message, meta));
}
}
/**
* Log at warn level
*/
public warn(message: string, meta?: Record<string, any>): void {
if (this.shouldLog('warn')) {
console.warn(this.format('warn', message, meta));
}
}
/**
* Log at info level
*/
public info(message: string, meta?: Record<string, any>): void {
if (this.shouldLog('info')) {
console.log(this.format('info', message, meta));
}
}
/**
* Log at debug level
*/
public debug(message: string, meta?: Record<string, any>): void {
if (this.shouldLog('debug')) {
console.log(this.format('debug', message, meta));
}
}
/**
* Log HTTP request
*/
public request(method: string, url: string, meta?: Record<string, any>): void {
this.info(`${method} ${url}`, meta);
}
/**
* Log HTTP response
*/
public response(method: string, url: string, statusCode: number, duration: number): void {
const level: LogLevel = statusCode >= 500 ? 'error' : statusCode >= 400 ? 'warn' : 'info';
if (this.shouldLog(level)) {
const message = `${method} ${url} - ${statusCode} (${duration}ms)`;
if (level === 'error') {
this.error(message, { statusCode, duration });
} else if (level === 'warn') {
this.warn(message, { statusCode, duration });
} else {
this.info(message, { statusCode, duration });
}
}
}
/**
* Log S3 error
*/
public s3Error(code: string, message: string, status: number): void {
this.error(`[S3Error] ${code}: ${message}`, { code, status });
}
}
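A minimal usage sketch for the Logger above, assuming the class is in scope (configuration values and messages are illustrative):

const logger = new Logger({ level: 'debug', format: 'json', enabled: true });
logger.info('bucket created', { bucket: 'my-bucket' });
// {"timestamp":"2026-02-13T13:59:44.000Z","level":"info","message":"bucket created","bucket":"my-bucket"}
logger.response('GET', '/my-bucket/key.txt', 404, 12);
// emitted at warn level because the status code is in the 4xx range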

View File

@@ -1,43 +0,0 @@
import * as plugins from '../plugins.js';
import type { S3Context } from './context.js';
export type Middleware = (
req: plugins.http.IncomingMessage,
res: plugins.http.ServerResponse,
ctx: S3Context,
next: () => Promise<void>
) => Promise<void>;
/**
* Middleware stack for composing request handlers
*/
export class MiddlewareStack {
private middlewares: Middleware[] = [];
/**
* Add middleware to the stack
*/
public use(middleware: Middleware): void {
this.middlewares.push(middleware);
}
/**
* Execute all middlewares in order
*/
public async execute(
req: plugins.http.IncomingMessage,
res: plugins.http.ServerResponse,
ctx: S3Context
): Promise<void> {
let index = 0;
const next = async (): Promise<void> => {
if (index < this.middlewares.length) {
const middleware = this.middlewares[index++];
await middleware(req, res, ctx, next);
}
};
await next();
}
}
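A small composition sketch for the stack above; the timing middleware is illustrative, and req, res, ctx are assumed to come from the surrounding request handler:

const stack = new MiddlewareStack();
stack.use(async (req, res, ctx, next) => {
  const start = Date.now();
  await next(); // run the remaining middlewares and, ultimately, the route handler
  console.log(`${req.method} ${req.url} took ${Date.now() - start}ms`);
});
// A middleware can short-circuit (e.g. a CORS preflight response) simply by not calling next().
// Inside the server's request handler: await stack.execute(req, res, ctx);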

View File

@@ -1,430 +0,0 @@
import * as plugins from '../plugins.js';
import { Readable } from 'stream';
/**
* Multipart upload metadata
*/
export interface IMultipartUpload {
uploadId: string;
bucket: string;
key: string;
initiated: Date;
parts: Map<number, IPartInfo>;
metadata: Record<string, string>;
}
/**
* Part information
*/
export interface IPartInfo {
partNumber: number;
etag: string;
size: number;
lastModified: Date;
}
/**
* Serializable version of upload metadata for disk persistence
*/
interface ISerializableUpload {
uploadId: string;
bucket: string;
key: string;
initiated: string; // ISO date string
metadata: Record<string, string>;
parts: Array<{
partNumber: number;
etag: string;
size: number;
lastModified: string; // ISO date string
}>;
}
/**
* Manages multipart upload state and storage
*/
export class MultipartUploadManager {
private uploads: Map<string, IMultipartUpload> = new Map();
private uploadDir: string;
private cleanupInterval: NodeJS.Timeout | null = null;
private expirationDays: number;
private cleanupIntervalMinutes: number;
constructor(
private rootDir: string,
expirationDays: number = 7,
cleanupIntervalMinutes: number = 60
) {
this.uploadDir = plugins.path.join(rootDir, '.multipart');
this.expirationDays = expirationDays;
this.cleanupIntervalMinutes = cleanupIntervalMinutes;
}
/**
* Initialize multipart uploads directory
*/
public async initialize(): Promise<void> {
await plugins.smartfs.directory(this.uploadDir).recursive().create();
await this.restoreUploadsFromDisk();
}
/**
* Save upload metadata to disk for persistence
*/
private async saveUploadMetadata(uploadId: string): Promise<void> {
const upload = this.uploads.get(uploadId);
if (!upload) {
return;
}
const metadataPath = plugins.path.join(this.uploadDir, uploadId, 'metadata.json');
const serializable: ISerializableUpload = {
uploadId: upload.uploadId,
bucket: upload.bucket,
key: upload.key,
initiated: upload.initiated.toISOString(),
metadata: upload.metadata,
parts: Array.from(upload.parts.values()).map(part => ({
partNumber: part.partNumber,
etag: part.etag,
size: part.size,
lastModified: part.lastModified.toISOString(),
})),
};
await plugins.smartfs.file(metadataPath).write(JSON.stringify(serializable, null, 2));
}
/**
* Restore uploads from disk on initialization
*/
private async restoreUploadsFromDisk(): Promise<void> {
const uploadDirExists = await plugins.smartfs.directory(this.uploadDir).exists();
if (!uploadDirExists) {
return;
}
const entries = await plugins.smartfs.directory(this.uploadDir).includeStats().list();
for (const entry of entries) {
if (!entry.isDirectory) {
continue;
}
const uploadId = entry.name;
const metadataPath = plugins.path.join(this.uploadDir, uploadId, 'metadata.json');
// Check if metadata.json exists
const metadataExists = await plugins.smartfs.file(metadataPath).exists();
if (!metadataExists) {
// Orphaned upload directory - clean it up
console.warn(`Orphaned multipart upload directory found: ${uploadId}, cleaning up`);
await plugins.smartfs.directory(plugins.path.join(this.uploadDir, uploadId)).recursive().delete();
continue;
}
try {
// Read and parse metadata
const metadataContent = await plugins.smartfs.file(metadataPath).read();
const serialized: ISerializableUpload = JSON.parse(metadataContent as string);
// Restore to memory
const parts = new Map<number, IPartInfo>();
for (const part of serialized.parts) {
parts.set(part.partNumber, {
partNumber: part.partNumber,
etag: part.etag,
size: part.size,
lastModified: new Date(part.lastModified),
});
}
this.uploads.set(uploadId, {
uploadId: serialized.uploadId,
bucket: serialized.bucket,
key: serialized.key,
initiated: new Date(serialized.initiated),
parts,
metadata: serialized.metadata,
});
console.log(`Restored multipart upload: ${uploadId} (${serialized.bucket}/${serialized.key})`);
} catch (error) {
// Corrupted metadata - clean up
console.error(`Failed to restore multipart upload ${uploadId}:`, error);
await plugins.smartfs.directory(plugins.path.join(this.uploadDir, uploadId)).recursive().delete();
}
}
}
/**
* Generate a unique upload ID
*/
private generateUploadId(): string {
return plugins.crypto.randomBytes(16).toString('hex');
}
/**
* Initiate a new multipart upload
*/
public async initiateUpload(
bucket: string,
key: string,
metadata: Record<string, string>
): Promise<string> {
const uploadId = this.generateUploadId();
this.uploads.set(uploadId, {
uploadId,
bucket,
key,
initiated: new Date(),
parts: new Map(),
metadata,
});
// Create directory for this upload's parts
const uploadPath = plugins.path.join(this.uploadDir, uploadId);
await plugins.smartfs.directory(uploadPath).recursive().create();
// Persist metadata to disk
await this.saveUploadMetadata(uploadId);
return uploadId;
}
/**
* Upload a part
*/
public async uploadPart(
uploadId: string,
partNumber: number,
stream: Readable
): Promise<IPartInfo> {
const upload = this.uploads.get(uploadId);
if (!upload) {
throw new Error('No such upload');
}
const partPath = plugins.path.join(this.uploadDir, uploadId, `part-${partNumber}`);
// Write part to disk
const webWriteStream = await plugins.smartfs.file(partPath).writeStream();
const writer = webWriteStream.getWriter();
let size = 0;
const hash = plugins.crypto.createHash('md5');
for await (const chunk of stream) {
const buffer = Buffer.isBuffer(chunk) ? chunk : Buffer.from(chunk);
await writer.write(new Uint8Array(buffer));
hash.update(buffer);
size += buffer.length;
}
await writer.close();
const etag = hash.digest('hex');
const partInfo: IPartInfo = {
partNumber,
etag,
size,
lastModified: new Date(),
};
upload.parts.set(partNumber, partInfo);
// Persist updated metadata
await this.saveUploadMetadata(uploadId);
return partInfo;
}
/**
* Complete multipart upload - combine all parts
*/
public async completeUpload(
uploadId: string,
parts: Array<{ PartNumber: number; ETag: string }>
): Promise<{ etag: string; size: number }> {
const upload = this.uploads.get(uploadId);
if (!upload) {
throw new Error('No such upload');
}
// Verify all parts are uploaded
for (const part of parts) {
const uploadedPart = upload.parts.get(part.PartNumber);
if (!uploadedPart) {
throw new Error(`Part ${part.PartNumber} not uploaded`);
}
// Normalize ETag format (remove quotes if present)
const normalizedETag = part.ETag.replace(/"/g, '');
if (uploadedPart.etag !== normalizedETag) {
throw new Error(`Part ${part.PartNumber} ETag mismatch`);
}
}
// Sort parts by part number
const sortedParts = parts.sort((a, b) => a.PartNumber - b.PartNumber);
// Combine parts into final object
const finalPath = plugins.path.join(this.uploadDir, uploadId, 'final');
const webWriteStream = await plugins.smartfs.file(finalPath).writeStream();
const writer = webWriteStream.getWriter();
const hash = plugins.crypto.createHash('md5');
let totalSize = 0;
for (const part of sortedParts) {
const partPath = plugins.path.join(this.uploadDir, uploadId, `part-${part.PartNumber}`);
// Read part and write to final file
const partContent = await plugins.smartfs.file(partPath).read();
const buffer = Buffer.isBuffer(partContent) ? partContent : Buffer.from(partContent as string);
await writer.write(new Uint8Array(buffer));
hash.update(buffer);
totalSize += buffer.length;
}
await writer.close();
const etag = hash.digest('hex');
return { etag, size: totalSize };
}
/**
* Get the final combined file path
*/
public getFinalPath(uploadId: string): string {
return plugins.path.join(this.uploadDir, uploadId, 'final');
}
/**
* Get upload metadata
*/
public getUpload(uploadId: string): IMultipartUpload | undefined {
return this.uploads.get(uploadId);
}
/**
* Abort multipart upload - clean up parts
*/
public async abortUpload(uploadId: string): Promise<void> {
const upload = this.uploads.get(uploadId);
if (!upload) {
throw new Error('No such upload');
}
// Delete upload directory
const uploadPath = plugins.path.join(this.uploadDir, uploadId);
await plugins.smartfs.directory(uploadPath).recursive().delete();
// Remove from memory
this.uploads.delete(uploadId);
}
/**
* Clean up upload after completion
*/
public async cleanupUpload(uploadId: string): Promise<void> {
const uploadPath = plugins.path.join(this.uploadDir, uploadId);
await plugins.smartfs.directory(uploadPath).recursive().delete();
this.uploads.delete(uploadId);
}
/**
* List all in-progress uploads for a bucket
*/
public listUploads(bucket?: string): IMultipartUpload[] {
const uploads = Array.from(this.uploads.values());
if (bucket) {
return uploads.filter((u) => u.bucket === bucket);
}
return uploads;
}
/**
* List parts for an upload
*/
public listParts(uploadId: string): IPartInfo[] {
const upload = this.uploads.get(uploadId);
if (!upload) {
throw new Error('No such upload');
}
return Array.from(upload.parts.values()).sort((a, b) => a.partNumber - b.partNumber);
}
/**
* Start automatic cleanup task for expired uploads
*/
public startCleanupTask(): void {
if (this.cleanupInterval) {
console.warn('Cleanup task is already running');
return;
}
// Run cleanup immediately on start
this.performCleanup().catch(err => {
console.error('Failed to perform initial multipart cleanup:', err);
});
// Then schedule periodic cleanup
const intervalMs = this.cleanupIntervalMinutes * 60 * 1000;
this.cleanupInterval = setInterval(() => {
this.performCleanup().catch(err => {
console.error('Failed to perform scheduled multipart cleanup:', err);
});
}, intervalMs);
console.log(`Multipart cleanup task started (interval: ${this.cleanupIntervalMinutes} minutes, expiration: ${this.expirationDays} days)`);
}
/**
* Stop automatic cleanup task
*/
public stopCleanupTask(): void {
if (this.cleanupInterval) {
clearInterval(this.cleanupInterval);
this.cleanupInterval = null;
console.log('Multipart cleanup task stopped');
}
}
/**
* Perform cleanup of expired uploads
*/
private async performCleanup(): Promise<void> {
const now = Date.now();
const expirationMs = this.expirationDays * 24 * 60 * 60 * 1000;
const expiredUploads: string[] = [];
// Find expired uploads
for (const [uploadId, upload] of this.uploads.entries()) {
const age = now - upload.initiated.getTime();
if (age > expirationMs) {
expiredUploads.push(uploadId);
}
}
if (expiredUploads.length === 0) {
return;
}
console.log(`Cleaning up ${expiredUploads.length} expired multipart upload(s)`);
// Delete expired uploads
for (const uploadId of expiredUploads) {
try {
await this.abortUpload(uploadId);
console.log(`Deleted expired multipart upload: ${uploadId}`);
} catch (err) {
console.error(`Failed to delete expired upload ${uploadId}:`, err);
}
}
}
}
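A lifecycle sketch for the manager above, as wired together by the controllers later in this diff; the directory, bucket, key, and in-memory part payload are illustrative:

import { Readable } from 'stream';

const manager = new MultipartUploadManager('.nogit/bucketsDir', 7, 60);
await manager.initialize(); // creates .multipart/ and restores any persisted uploads
const uploadId = await manager.initiateUpload('my-bucket', 'big/file.bin', {});
const part = await manager.uploadPart(uploadId, 1, Readable.from([Buffer.from('hello')]));
const { etag, size } = await manager.completeUpload(uploadId, [
  { PartNumber: 1, ETag: part.etag },
]);
// The combined bytes now live at manager.getFinalPath(uploadId); the caller stores them
// as a regular object and then calls cleanupUpload(uploadId).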

View File

@@ -1,129 +0,0 @@
import * as plugins from '../plugins.js';
import type { S3Context } from './context.js';
export type RouteHandler = (
req: plugins.http.IncomingMessage,
res: plugins.http.ServerResponse,
ctx: S3Context,
params: Record<string, string>
) => Promise<void>;
export interface IRouteMatch {
handler: RouteHandler;
params: Record<string, string>;
}
interface IRoute {
method: string;
pattern: RegExp;
paramNames: string[];
handler: RouteHandler;
}
/**
* Simple HTTP router with pattern matching for S3 routes
*/
export class S3Router {
private routes: IRoute[] = [];
/**
* Add a route with pattern matching
* Supports patterns like:
* - "/" (exact match)
* - "/:bucket" (single param)
* - "/:bucket/:key*" (param with wildcard - captures everything after)
*/
public add(method: string, pattern: string, handler: RouteHandler): void {
const { regex, paramNames } = this.convertPatternToRegex(pattern);
this.routes.push({
method: method.toUpperCase(),
pattern: regex,
paramNames,
handler,
});
}
/**
* Match a request to a route
*/
public match(method: string, pathname: string): IRouteMatch | null {
// Normalize pathname: remove trailing slash unless it's root
const normalizedPath = pathname === '/' ? pathname : pathname.replace(/\/$/, '');
for (const route of this.routes) {
if (route.method !== method.toUpperCase()) {
continue;
}
const match = normalizedPath.match(route.pattern);
if (match) {
// Extract params from captured groups
const params: Record<string, string> = {};
for (let i = 0; i < route.paramNames.length; i++) {
params[route.paramNames[i]] = decodeURIComponent(match[i + 1] || '');
}
return {
handler: route.handler,
params,
};
}
}
return null;
}
/**
* Convert path pattern to RegExp
* Examples:
* - "/" → /^\/$/
* - "/:bucket" → /^\/([^/]+)$/
* - "/:bucket/:key*" → /^\/([^/]+)\/(.+)$/
*/
private convertPatternToRegex(pattern: string): { regex: RegExp; paramNames: string[] } {
const paramNames: string[] = [];
let regexStr = pattern;
// Process all params in a single pass to maintain order
regexStr = regexStr.replace(/:(\w+)(\*)?/g, (match, paramName, isWildcard) => {
paramNames.push(paramName);
// :param* captures rest of path, :param captures single segment
return isWildcard ? '(.+)' : '([^/]+)';
});
// Escape special regex characters
regexStr = regexStr.replace(/\//g, '\\/');
// Add anchors
regexStr = `^${regexStr}$`;
return {
regex: new RegExp(regexStr),
paramNames,
};
}
/**
* Convenience methods for common HTTP methods
*/
public get(pattern: string, handler: RouteHandler): void {
this.add('GET', pattern, handler);
}
public put(pattern: string, handler: RouteHandler): void {
this.add('PUT', pattern, handler);
}
public post(pattern: string, handler: RouteHandler): void {
this.add('POST', pattern, handler);
}
public delete(pattern: string, handler: RouteHandler): void {
this.add('DELETE', pattern, handler);
}
public head(pattern: string, handler: RouteHandler): void {
this.add('HEAD', pattern, handler);
}
}
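A matching sketch for the router above (the route and sample path are illustrative):

const router = new S3Router();
router.get('/:bucket/:key*', async (req, res, ctx, params) => {
  // handler body omitted
});
const match = router.match('GET', '/photos/2024/cat.jpg');
// match.params = { bucket: 'photos', key: '2024/cat.jpg' } because :key* captures the rest of the path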

View File

@@ -1,145 +0,0 @@
import * as plugins from '../plugins.js';
/**
* S3 error codes mapped to HTTP status codes
*/
const S3_ERROR_CODES: Record<string, number> = {
'AccessDenied': 403,
'BadDigest': 400,
'BadRequest': 400,
'BucketAlreadyExists': 409,
'BucketAlreadyOwnedByYou': 409,
'BucketNotEmpty': 409,
'CredentialsNotSupported': 400,
'EntityTooSmall': 400,
'EntityTooLarge': 400,
'ExpiredToken': 400,
'IncompleteBody': 400,
'IncorrectNumberOfFilesInPostRequest': 400,
'InlineDataTooLarge': 400,
'InternalError': 500,
'InvalidArgument': 400,
'InvalidBucketName': 400,
'InvalidDigest': 400,
'InvalidLocationConstraint': 400,
'InvalidPart': 400,
'InvalidPartOrder': 400,
'InvalidRange': 416,
'InvalidRequest': 400,
'InvalidSecurity': 403,
'InvalidSOAPRequest': 400,
'InvalidStorageClass': 400,
'InvalidTargetBucketForLogging': 400,
'InvalidToken': 400,
'InvalidURI': 400,
'KeyTooLongError': 400,
'MalformedACLError': 400,
'MalformedPOSTRequest': 400,
'MalformedXML': 400,
'MaxMessageLengthExceeded': 400,
'MaxPostPreDataLengthExceededError': 400,
'MetadataTooLarge': 400,
'MethodNotAllowed': 405,
'MissingContentLength': 411,
'MissingRequestBodyError': 400,
'MissingSecurityElement': 400,
'MissingSecurityHeader': 400,
'NoLoggingStatusForKey': 400,
'NoSuchBucket': 404,
'NoSuchKey': 404,
'NoSuchLifecycleConfiguration': 404,
'NoSuchUpload': 404,
'NoSuchVersion': 404,
'NotImplemented': 501,
'NotSignedUp': 403,
'OperationAborted': 409,
'PermanentRedirect': 301,
'PreconditionFailed': 412,
'Redirect': 307,
'RequestIsNotMultiPartContent': 400,
'RequestTimeout': 400,
'RequestTimeTooSkewed': 403,
'RequestTorrentOfBucketError': 400,
'SignatureDoesNotMatch': 403,
'ServiceUnavailable': 503,
'SlowDown': 503,
'TemporaryRedirect': 307,
'TokenRefreshRequired': 400,
'TooManyBuckets': 400,
'UnexpectedContent': 400,
'UnresolvableGrantByEmailAddress': 400,
'UserKeyMustBeSpecified': 400,
};
/**
* S3-compatible error class that formats errors as XML responses
*/
export class S3Error extends Error {
public status: number;
public code: string;
public detail: Record<string, any>;
constructor(
code: string,
message: string,
detail: Record<string, any> = {}
) {
super(message);
this.name = 'S3Error';
this.code = code;
this.status = S3_ERROR_CODES[code] || 500;
this.detail = detail;
// Maintain proper stack trace
if (Error.captureStackTrace) {
Error.captureStackTrace(this, S3Error);
}
}
/**
* Convert error to S3-compatible XML format
*/
public toXML(): string {
const smartXmlInstance = new plugins.SmartXml();
const errorObj: any = {
Error: {
Code: this.code,
Message: this.message,
...this.detail,
},
};
const xml = smartXmlInstance.createXmlFromObject(errorObj);
// Ensure XML declaration
if (!xml.startsWith('<?xml')) {
return `<?xml version="1.0" encoding="UTF-8"?>\n${xml}`;
}
return xml;
}
/**
* Create S3Error from a generic Error
*/
public static fromError(err: any): S3Error {
if (err instanceof S3Error) {
return err;
}
// Map common errors
if (err.code === 'ENOENT') {
return new S3Error('NoSuchKey', 'The specified key does not exist.');
}
if (err.code === 'EACCES') {
return new S3Error('AccessDenied', 'Access Denied');
}
// Default to internal error
return new S3Error(
'InternalError',
'We encountered an internal error. Please try again.',
{ OriginalError: err.message }
);
}
}
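A sketch of the error shape produced above (the BucketName detail is illustrative; exact whitespace depends on SmartXml):

const err = new S3Error('NoSuchBucket', 'The specified bucket does not exist', {
  BucketName: 'missing-bucket',
});
// err.status === 404 (looked up in S3_ERROR_CODES)
// err.toXML() yields roughly:
// <?xml version="1.0" encoding="UTF-8"?>
// <Error><Code>NoSuchBucket</Code><Message>The specified bucket does not exist</Message><BucketName>missing-bucket</BucketName></Error>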

View File

@@ -1,402 +0,0 @@
import * as plugins from '../plugins.js';
import { S3Router } from './router.js';
import { MiddlewareStack } from './middleware-stack.js';
import { S3Context } from './context.js';
import { FilesystemStore } from './filesystem-store.js';
import { S3Error } from './s3-error.js';
import { Logger } from './logger.js';
import { MultipartUploadManager } from './multipart-manager.js';
import { ServiceController } from '../controllers/service.controller.js';
import { BucketController } from '../controllers/bucket.controller.js';
import { ObjectController } from '../controllers/object.controller.js';
import type { ISmarts3Config } from '../index.js';
export interface ISmarts3ServerOptions {
port?: number;
address?: string;
directory?: string;
cleanSlate?: boolean;
silent?: boolean;
config?: Required<ISmarts3Config>;
}
/**
* Custom S3-compatible server implementation
* Built on native Node.js http module with zero framework dependencies
*/
export class Smarts3Server {
private httpServer?: plugins.http.Server;
private router: S3Router;
private middlewares: MiddlewareStack;
public store: FilesystemStore; // Made public for direct access from Smarts3 class
public multipart: MultipartUploadManager; // Made public for controller access
private options: Required<Omit<ISmarts3ServerOptions, 'config'>>;
private config: Required<ISmarts3Config>;
private logger: Logger;
constructor(options: ISmarts3ServerOptions = {}) {
this.options = {
port: options.port ?? 3000,
address: options.address ?? '0.0.0.0',
directory: options.directory ?? plugins.path.join(process.cwd(), '.nogit/bucketsDir'),
cleanSlate: options.cleanSlate ?? false,
silent: options.silent ?? false,
};
// Store config for middleware and feature configuration
// If no config provided, create minimal default (for backward compatibility)
this.config = options.config ?? {
server: {
port: this.options.port,
address: this.options.address,
silent: this.options.silent,
},
storage: {
directory: this.options.directory,
cleanSlate: this.options.cleanSlate,
},
auth: {
enabled: false,
credentials: [{ accessKeyId: 'S3RVER', secretAccessKey: 'S3RVER' }],
},
cors: {
enabled: false,
allowedOrigins: ['*'],
allowedMethods: ['GET', 'POST', 'PUT', 'DELETE', 'HEAD', 'OPTIONS'],
allowedHeaders: ['*'],
exposedHeaders: ['ETag', 'x-amz-request-id', 'x-amz-version-id'],
maxAge: 86400,
allowCredentials: false,
},
logging: {
level: 'info',
format: 'text',
enabled: true,
},
limits: {
maxObjectSize: 5 * 1024 * 1024 * 1024,
maxMetadataSize: 2048,
requestTimeout: 300000,
},
multipart: {
expirationDays: 7,
cleanupIntervalMinutes: 60,
},
};
this.logger = new Logger(this.config.logging);
this.store = new FilesystemStore(this.options.directory);
this.multipart = new MultipartUploadManager(
this.options.directory,
this.config.multipart.expirationDays,
this.config.multipart.cleanupIntervalMinutes
);
this.router = new S3Router();
this.middlewares = new MiddlewareStack();
this.setupMiddlewares();
this.setupRoutes();
}
/**
* Setup middleware stack
*/
private setupMiddlewares(): void {
// CORS middleware (must be first to handle preflight requests)
if (this.config.cors.enabled) {
this.middlewares.use(async (req, res, ctx, next) => {
const origin = req.headers.origin || req.headers.referer;
// Check if origin is allowed
const allowedOrigins = this.config.cors.allowedOrigins || ['*'];
const isOriginAllowed =
allowedOrigins.includes('*') ||
(origin && allowedOrigins.includes(origin));
if (isOriginAllowed) {
// Set CORS headers
res.setHeader(
'Access-Control-Allow-Origin',
allowedOrigins.includes('*') ? '*' : origin || '*'
);
if (this.config.cors.allowCredentials) {
res.setHeader('Access-Control-Allow-Credentials', 'true');
}
// Handle preflight OPTIONS request
if (req.method === 'OPTIONS') {
res.setHeader(
'Access-Control-Allow-Methods',
(this.config.cors.allowedMethods || []).join(', ')
);
res.setHeader(
'Access-Control-Allow-Headers',
(this.config.cors.allowedHeaders || []).join(', ')
);
if (this.config.cors.maxAge) {
res.setHeader(
'Access-Control-Max-Age',
String(this.config.cors.maxAge)
);
}
res.writeHead(204);
res.end();
return; // Don't call next() for OPTIONS
}
// Set exposed headers for actual requests
if (this.config.cors.exposedHeaders && this.config.cors.exposedHeaders.length > 0) {
res.setHeader(
'Access-Control-Expose-Headers',
this.config.cors.exposedHeaders.join(', ')
);
}
}
await next();
});
}
// Authentication middleware (simple static credentials)
if (this.config.auth.enabled) {
this.middlewares.use(async (req, res, ctx, next) => {
const authHeader = req.headers.authorization;
// Extract access key from Authorization header
let accessKeyId: string | undefined;
if (authHeader) {
// Support multiple auth formats:
// 1. AWS accessKeyId:signature
// 2. AWS4-HMAC-SHA256 Credential=accessKeyId/date/region/service/aws4_request, ...
if (authHeader.startsWith('AWS ')) {
accessKeyId = authHeader.substring(4).split(':')[0];
} else if (authHeader.startsWith('AWS4-HMAC-SHA256')) {
const credentialMatch = authHeader.match(/Credential=([^/]+)\//);
accessKeyId = credentialMatch ? credentialMatch[1] : undefined;
}
}
// Check if access key is valid
const isValid = this.config.auth.credentials.some(
(cred) => cred.accessKeyId === accessKeyId
);
if (!isValid) {
ctx.throw('AccessDenied', 'Access Denied');
return;
}
await next();
});
}
// Logger middleware
if (!this.options.silent && this.config.logging.enabled) {
this.middlewares.use(async (req, res, ctx, next) => {
const start = Date.now();
// Log request
this.logger.request(req.method || 'UNKNOWN', req.url || '/', {
headers: req.headers,
});
await next();
// Log response
const duration = Date.now() - start;
this.logger.response(
req.method || 'UNKNOWN',
req.url || '/',
res.statusCode || 500,
duration
);
});
}
}
/**
* Setup routes
*/
private setupRoutes(): void {
// Service level (/)
this.router.get('/', ServiceController.listBuckets);
// Bucket level (/:bucket)
this.router.put('/:bucket', BucketController.createBucket);
this.router.delete('/:bucket', BucketController.deleteBucket);
this.router.get('/:bucket', BucketController.listObjects);
this.router.head('/:bucket', BucketController.headBucket);
// Object level (/:bucket/:key*)
this.router.put('/:bucket/:key*', ObjectController.putObject);
this.router.post('/:bucket/:key*', ObjectController.postObject); // For multipart operations
this.router.get('/:bucket/:key*', ObjectController.getObject);
this.router.head('/:bucket/:key*', ObjectController.headObject);
this.router.delete('/:bucket/:key*', ObjectController.deleteObject);
}
/**
* Handle incoming HTTP request
*/
private async handleRequest(
req: plugins.http.IncomingMessage,
res: plugins.http.ServerResponse
): Promise<void> {
const context = new S3Context(req, res, this.store, this.multipart);
try {
// Execute middleware stack
await this.middlewares.execute(req, res, context);
// Route to handler
const match = this.router.match(context.method, context.url.pathname);
if (match) {
context.params = match.params;
await match.handler(req, res, context, match.params);
} else {
context.throw('NoSuchKey', 'The specified resource does not exist');
}
} catch (err) {
await this.handleError(err, context, res);
}
}
/**
* Handle errors and send S3-compatible error responses
*/
private async handleError(
err: any,
context: S3Context,
res: plugins.http.ServerResponse
): Promise<void> {
const s3Error = err instanceof S3Error ? err : S3Error.fromError(err);
// Log the error
this.logger.s3Error(s3Error.code, s3Error.message, s3Error.status);
// Log stack trace for server errors
if (s3Error.status >= 500) {
this.logger.debug('Error stack trace', {
stack: err.stack || err.toString(),
});
}
// Send error response
const errorXml = s3Error.toXML();
res.writeHead(s3Error.status, {
'Content-Type': 'application/xml',
'Content-Length': Buffer.byteLength(errorXml),
});
res.end(errorXml);
}
/**
* Start the server
*/
public async start(): Promise<void> {
// Initialize store
await this.store.initialize();
// Initialize multipart upload manager
await this.multipart.initialize();
// Start multipart cleanup task
this.multipart.startCleanupTask();
// Clean slate if requested
if (this.options.cleanSlate) {
await this.store.reset();
}
// Create HTTP server
this.httpServer = plugins.http.createServer((req, res) => {
this.handleRequest(req, res).catch((err) => {
this.logger.error('Fatal error in request handler', {
error: err.message,
stack: err.stack,
});
if (!res.headersSent) {
res.writeHead(500, { 'Content-Type': 'text/plain' });
res.end('Internal Server Error');
}
});
});
// Start listening
await new Promise<void>((resolve, reject) => {
this.httpServer!.listen(this.options.port, this.options.address, (err?: Error) => {
if (err) {
reject(err);
} else {
this.logger.info(`S3 server listening on ${this.options.address}:${this.options.port}`);
resolve();
}
});
});
}
/**
* Stop the server
*/
public async stop(): Promise<void> {
if (!this.httpServer) {
return;
}
// Stop multipart cleanup task
this.multipart.stopCleanupTask();
await new Promise<void>((resolve, reject) => {
this.httpServer!.close((err?: Error) => {
if (err) {
reject(err);
} else {
this.logger.info('S3 server stopped');
resolve();
}
});
});
this.httpServer = undefined;
}
/**
* Get server port (useful for testing with random ports)
*/
public getPort(): number {
if (!this.httpServer) {
throw new Error('Server not started');
}
const address = this.httpServer.address();
if (typeof address === 'string') {
throw new Error('Unix socket not supported');
}
return address?.port || this.options.port;
}
/**
* Get S3 descriptor for client configuration
*/
public getS3Descriptor(): {
accessKey: string;
accessSecret: string;
endpoint: string;
port: number;
useSsl: boolean;
} {
return {
accessKey: 'S3RVER',
accessSecret: 'S3RVER',
endpoint: this.options.address === '0.0.0.0' ? '127.0.0.1' : this.options.address,
port: this.getPort(),
useSsl: false,
};
}
}
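A minimal sketch of standing the server up directly (port and directory are illustrative):

const server = new Smarts3Server({ port: 3333, directory: '.nogit/test-buckets', cleanSlate: true });
await server.start();
const descriptor = server.getS3Descriptor();
// { accessKey: 'S3RVER', accessSecret: 'S3RVER', endpoint: '127.0.0.1', port: 3333, useSsl: false }
// point any S3-compatible client at the descriptor, then:
await server.stop();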

View File

@@ -1,180 +0,0 @@
import * as plugins from '../plugins.js';
import type { S3Context } from '../classes/context.js';
/**
* Bucket-level operations
*/
export class BucketController {
/**
* HEAD /:bucket - Check if bucket exists
*/
public static async headBucket(
req: plugins.http.IncomingMessage,
res: plugins.http.ServerResponse,
ctx: S3Context,
params: Record<string, string>
): Promise<void> {
const { bucket } = params;
if (await ctx.store.bucketExists(bucket)) {
ctx.status(200).send('');
} else {
ctx.throw('NoSuchBucket', 'The specified bucket does not exist');
}
}
/**
* PUT /:bucket - Create bucket
*/
public static async createBucket(
req: plugins.http.IncomingMessage,
res: plugins.http.ServerResponse,
ctx: S3Context,
params: Record<string, string>
): Promise<void> {
const { bucket } = params;
await ctx.store.createBucket(bucket);
ctx.status(200).send('');
}
/**
* DELETE /:bucket - Delete bucket
*/
public static async deleteBucket(
req: plugins.http.IncomingMessage,
res: plugins.http.ServerResponse,
ctx: S3Context,
params: Record<string, string>
): Promise<void> {
const { bucket } = params;
await ctx.store.deleteBucket(bucket);
ctx.status(204).send('');
}
/**
* GET /:bucket - List objects or multipart uploads
* Supports both V1 and V2 listing (V2 uses list-type=2 query param)
   * Multipart upload listing is triggered by the ?uploads query parameter
*/
public static async listObjects(
req: plugins.http.IncomingMessage,
res: plugins.http.ServerResponse,
ctx: S3Context,
params: Record<string, string>
): Promise<void> {
const { bucket } = params;
// Check if this is a ListMultipartUploads request
if (ctx.query.uploads !== undefined) {
return BucketController.listMultipartUploads(req, res, ctx, params);
}
const isV2 = ctx.query['list-type'] === '2';
const result = await ctx.store.listObjects(bucket, {
prefix: ctx.query.prefix,
delimiter: ctx.query.delimiter,
maxKeys: ctx.query['max-keys'] ? parseInt(ctx.query['max-keys']) : 1000,
continuationToken: ctx.query['continuation-token'],
});
if (isV2) {
// List Objects V2 response
await ctx.sendXML({
ListBucketResult: {
'@_xmlns': 'http://s3.amazonaws.com/doc/2006-03-01/',
Name: bucket,
Prefix: result.prefix || '',
MaxKeys: result.maxKeys,
KeyCount: result.contents.length,
IsTruncated: result.isTruncated,
...(result.delimiter && { Delimiter: result.delimiter }),
...(result.nextContinuationToken && {
NextContinuationToken: result.nextContinuationToken,
}),
...(result.commonPrefixes.length > 0 && {
CommonPrefixes: result.commonPrefixes.map((prefix) => ({
Prefix: prefix,
})),
}),
Contents: result.contents.map((obj) => ({
Key: obj.key,
LastModified: obj.lastModified.toISOString(),
ETag: `"${obj.md5}"`,
Size: obj.size,
StorageClass: 'STANDARD',
})),
},
});
} else {
// List Objects V1 response
await ctx.sendXML({
ListBucketResult: {
'@_xmlns': 'http://s3.amazonaws.com/doc/2006-03-01/',
Name: bucket,
Prefix: result.prefix || '',
MaxKeys: result.maxKeys,
IsTruncated: result.isTruncated,
...(result.delimiter && { Delimiter: result.delimiter }),
...(result.commonPrefixes.length > 0 && {
CommonPrefixes: result.commonPrefixes.map((prefix) => ({
Prefix: prefix,
})),
}),
Contents: result.contents.map((obj) => ({
Key: obj.key,
LastModified: obj.lastModified.toISOString(),
ETag: `"${obj.md5}"`,
Size: obj.size,
StorageClass: 'STANDARD',
})),
},
});
}
}
/**
* GET /:bucket?uploads - List multipart uploads
*/
private static async listMultipartUploads(
req: plugins.http.IncomingMessage,
res: plugins.http.ServerResponse,
ctx: S3Context,
params: Record<string, string>
): Promise<void> {
const { bucket } = params;
// Get all multipart uploads for this bucket
const uploads = ctx.multipart.listUploads(bucket);
// Build XML response
await ctx.sendXML({
ListMultipartUploadsResult: {
'@_xmlns': 'http://s3.amazonaws.com/doc/2006-03-01/',
Bucket: bucket,
KeyMarker: '',
UploadIdMarker: '',
MaxUploads: 1000,
IsTruncated: false,
...(uploads.length > 0 && {
Upload: uploads.map((upload) => ({
Key: upload.key,
UploadId: upload.uploadId,
Initiator: {
ID: 'S3RVER',
DisplayName: 'S3RVER',
},
Owner: {
ID: 'S3RVER',
DisplayName: 'S3RVER',
},
StorageClass: 'STANDARD',
Initiated: upload.initiated.toISOString(),
})),
}),
},
});
}
}
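For orientation, the V2 listing handled above is selected by the list-type=2 query parameter; a raw path-style request sketch (the bucket name, prefix, and use of the global fetch API are assumptions):

const res = await fetch('http://127.0.0.1:3000/my-bucket?list-type=2&prefix=photos/&delimiter=/');
const xml = await res.text();
// <ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
//   ...KeyCount, one Contents entry per object, one CommonPrefixes entry per "folder"...
// </ListBucketResult>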

View File

@@ -1,378 +0,0 @@
import * as plugins from '../plugins.js';
import type { S3Context } from '../classes/context.js';
/**
* Object-level operations
*/
export class ObjectController {
/**
* PUT /:bucket/:key* - Upload object, copy object, or upload part
*/
public static async putObject(
req: plugins.http.IncomingMessage,
res: plugins.http.ServerResponse,
ctx: S3Context,
params: Record<string, string>
): Promise<void> {
const { bucket, key } = params;
// Check if this is a multipart upload part
if (ctx.query.partNumber && ctx.query.uploadId) {
return ObjectController.uploadPart(req, res, ctx, params);
}
// Check if this is a COPY operation
const copySource = ctx.headers['x-amz-copy-source'] as string | undefined;
if (copySource) {
return ObjectController.copyObject(req, res, ctx, params);
}
// Extract metadata from headers
const metadata: Record<string, string> = {};
for (const [header, value] of Object.entries(ctx.headers)) {
if (header.startsWith('x-amz-meta-')) {
metadata[header] = value as string;
}
if (header === 'content-type' && value) {
metadata['content-type'] = value as string;
}
if (header === 'cache-control' && value) {
metadata['cache-control'] = value as string;
}
}
// If no content-type, default to binary/octet-stream
if (!metadata['content-type']) {
metadata['content-type'] = 'binary/octet-stream';
}
// Stream upload
const result = await ctx.store.putObject(bucket, key, ctx.getRequestStream(), metadata);
ctx.setHeader('ETag', `"${result.md5}"`);
ctx.status(200).send('');
}
/**
* GET /:bucket/:key* - Download object
*/
public static async getObject(
req: plugins.http.IncomingMessage,
res: plugins.http.ServerResponse,
ctx: S3Context,
params: Record<string, string>
): Promise<void> {
const { bucket, key } = params;
// Parse Range header if present
const rangeHeader = ctx.headers.range as string | undefined;
let range: { start: number; end: number } | undefined;
if (rangeHeader) {
const matches = rangeHeader.match(/bytes=(\d+)-(\d*)/);
if (matches) {
const start = parseInt(matches[1]);
const end = matches[2] ? parseInt(matches[2]) : undefined;
range = { start, end: end || start + 1024 * 1024 }; // Default to 1MB if no end
}
}
// Get object
const object = await ctx.store.getObject(bucket, key, range);
// Set response headers
ctx.setHeader('ETag', `"${object.md5}"`);
ctx.setHeader('Last-Modified', object.lastModified.toUTCString());
ctx.setHeader('Content-Type', object.metadata['content-type'] || 'binary/octet-stream');
ctx.setHeader('Accept-Ranges', 'bytes');
// Handle custom metadata headers
for (const [key, value] of Object.entries(object.metadata)) {
if (key.startsWith('x-amz-meta-')) {
ctx.setHeader(key, value);
}
}
if (range) {
ctx.status(206);
ctx.setHeader('Content-Length', (range.end - range.start + 1).toString());
ctx.setHeader('Content-Range', `bytes ${range.start}-${range.end}/${object.size}`);
} else {
ctx.status(200);
ctx.setHeader('Content-Length', object.size.toString());
}
// Stream response
await ctx.send(object.content!);
}
/**
* HEAD /:bucket/:key* - Get object metadata
*/
public static async headObject(
req: plugins.http.IncomingMessage,
res: plugins.http.ServerResponse,
ctx: S3Context,
params: Record<string, string>
): Promise<void> {
const { bucket, key } = params;
// Get object (without content)
const object = await ctx.store.getObject(bucket, key);
// Set response headers (same as GET but no body)
ctx.setHeader('ETag', `"${object.md5}"`);
ctx.setHeader('Last-Modified', object.lastModified.toUTCString());
ctx.setHeader('Content-Type', object.metadata['content-type'] || 'binary/octet-stream');
ctx.setHeader('Content-Length', object.size.toString());
ctx.setHeader('Accept-Ranges', 'bytes');
// Handle custom metadata headers
for (const [key, value] of Object.entries(object.metadata)) {
if (key.startsWith('x-amz-meta-')) {
ctx.setHeader(key, value);
}
}
ctx.status(200).send('');
}
/**
* DELETE /:bucket/:key* - Delete object or abort multipart upload
*/
public static async deleteObject(
req: plugins.http.IncomingMessage,
res: plugins.http.ServerResponse,
ctx: S3Context,
params: Record<string, string>
): Promise<void> {
const { bucket, key } = params;
// Check if this is an abort multipart upload
if (ctx.query.uploadId) {
return ObjectController.abortMultipartUpload(req, res, ctx, params);
}
await ctx.store.deleteObject(bucket, key);
ctx.status(204).send('');
}
/**
* COPY operation (PUT with x-amz-copy-source header)
*/
private static async copyObject(
req: plugins.http.IncomingMessage,
res: plugins.http.ServerResponse,
ctx: S3Context,
params: Record<string, string>
): Promise<void> {
const { bucket: destBucket, key: destKey } = params;
const copySource = ctx.headers['x-amz-copy-source'] as string;
// Parse source bucket and key from copy source
// Format: /bucket/key or bucket/key
const sourcePath = copySource.startsWith('/') ? copySource.slice(1) : copySource;
const firstSlash = sourcePath.indexOf('/');
const srcBucket = decodeURIComponent(sourcePath.slice(0, firstSlash));
const srcKey = decodeURIComponent(sourcePath.slice(firstSlash + 1));
// Get metadata directive (COPY or REPLACE)
const metadataDirective = (ctx.headers['x-amz-metadata-directive'] as string)?.toUpperCase() || 'COPY';
// Extract new metadata if REPLACE
let newMetadata: Record<string, string> | undefined;
if (metadataDirective === 'REPLACE') {
newMetadata = {};
for (const [header, value] of Object.entries(ctx.headers)) {
if (header.startsWith('x-amz-meta-')) {
newMetadata[header] = value as string;
}
if (header === 'content-type' && value) {
newMetadata['content-type'] = value as string;
}
}
}
// Perform copy
const result = await ctx.store.copyObject(
srcBucket,
srcKey,
destBucket,
destKey,
metadataDirective as 'COPY' | 'REPLACE',
newMetadata
);
// Send XML response
await ctx.sendXML({
CopyObjectResult: {
LastModified: new Date().toISOString(),
ETag: `"${result.md5}"`,
},
});
}
/**
* POST /:bucket/:key* - Initiate or complete multipart upload
*/
public static async postObject(
req: plugins.http.IncomingMessage,
res: plugins.http.ServerResponse,
ctx: S3Context,
params: Record<string, string>
): Promise<void> {
// Check if this is initiate multipart upload
if (ctx.query.uploads !== undefined) {
return ObjectController.initiateMultipartUpload(req, res, ctx, params);
}
// Check if this is complete multipart upload
if (ctx.query.uploadId) {
return ObjectController.completeMultipartUpload(req, res, ctx, params);
}
ctx.throw('InvalidRequest', 'Invalid POST request');
}
/**
* Initiate Multipart Upload (POST with ?uploads)
*/
private static async initiateMultipartUpload(
req: plugins.http.IncomingMessage,
res: plugins.http.ServerResponse,
ctx: S3Context,
params: Record<string, string>
): Promise<void> {
const { bucket, key } = params;
// Extract metadata from headers
const metadata: Record<string, string> = {};
for (const [header, value] of Object.entries(ctx.headers)) {
if (header.startsWith('x-amz-meta-')) {
metadata[header] = value as string;
}
if (header === 'content-type' && value) {
metadata['content-type'] = value as string;
}
}
// Initiate upload
const uploadId = await ctx.multipart.initiateUpload(bucket, key, metadata);
// Send XML response
await ctx.sendXML({
InitiateMultipartUploadResult: {
Bucket: bucket,
Key: key,
UploadId: uploadId,
},
});
}
/**
* Upload Part (PUT with ?partNumber&uploadId)
*/
private static async uploadPart(
req: plugins.http.IncomingMessage,
res: plugins.http.ServerResponse,
ctx: S3Context,
params: Record<string, string>
): Promise<void> {
const uploadId = ctx.query.uploadId!;
const partNumber = parseInt(ctx.query.partNumber!);
if (isNaN(partNumber) || partNumber < 1 || partNumber > 10000) {
ctx.throw('InvalidArgument', 'Part number must be an integer between 1 and 10000, inclusive');
}
// Upload the part
const partInfo = await ctx.multipart.uploadPart(
uploadId,
partNumber,
ctx.getRequestStream() as any as import('stream').Readable
);
// Set ETag header (part ETag)
ctx.setHeader('ETag', `"${partInfo.etag}"`);
ctx.status(200).send('');
}
/**
* Complete Multipart Upload (POST with ?uploadId)
*/
private static async completeMultipartUpload(
req: plugins.http.IncomingMessage,
res: plugins.http.ServerResponse,
ctx: S3Context,
params: Record<string, string>
): Promise<void> {
const { bucket, key } = params;
const uploadId = ctx.query.uploadId!;
// Read and parse request body (XML with part list)
const body = await ctx.readBody();
// Parse XML to extract parts
// Expected format: <CompleteMultipartUpload><Part><PartNumber>1</PartNumber><ETag>"etag"</ETag></Part>...</CompleteMultipartUpload>
const partMatches = body.matchAll(/<Part>.*?<PartNumber>(\d+)<\/PartNumber>.*?<ETag>(.*?)<\/ETag>.*?<\/Part>/gs);
const parts: Array<{ PartNumber: number; ETag: string }> = [];
for (const match of partMatches) {
parts.push({
PartNumber: parseInt(match[1]),
ETag: match[2],
});
}
// Complete the upload
const result = await ctx.multipart.completeUpload(uploadId, parts);
// Get upload metadata
const upload = ctx.multipart.getUpload(uploadId);
if (!upload) {
ctx.throw('NoSuchUpload', 'The specified upload does not exist');
}
// Move final file to object store
const finalPath = ctx.multipart.getFinalPath(uploadId);
const finalContent = await plugins.smartfs.file(finalPath).read();
// Create a readable stream from the buffer
const { Readable } = await import('stream');
const finalReadableStream = Readable.from([finalContent]);
// Store the final object
await ctx.store.putObject(bucket, key, finalReadableStream, upload.metadata);
// Clean up multipart upload data
await ctx.multipart.cleanupUpload(uploadId);
// Send XML response
await ctx.sendXML({
CompleteMultipartUploadResult: {
Location: `/${bucket}/${key}`,
Bucket: bucket,
Key: key,
ETag: `"${result.etag}"`,
},
});
}
/**
* Abort Multipart Upload (DELETE with ?uploadId)
*/
private static async abortMultipartUpload(
req: plugins.http.IncomingMessage,
res: plugins.http.ServerResponse,
ctx: S3Context,
params: Record<string, string>
): Promise<void> {
const uploadId = ctx.query.uploadId!;
// Abort and cleanup
await ctx.multipart.abortUpload(uploadId);
ctx.status(204).send('');
}
}
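The complete-multipart handler above extracts parts from the request body with a regex; a sketch of the XML it expects (the ETag values are illustrative placeholders):

const body = `<CompleteMultipartUpload>
  <Part><PartNumber>1</PartNumber><ETag>"etag-of-part-1"</ETag></Part>
  <Part><PartNumber>2</PartNumber><ETag>"etag-of-part-2"</ETag></Part>
</CompleteMultipartUpload>`;
// The regex yields [{ PartNumber: 1, ETag: '"etag-of-part-1"' }, { PartNumber: 2, ETag: '"etag-of-part-2"' }];
// quotes are stripped before each ETag is compared against the stored part ETag.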

View File

@@ -1,35 +0,0 @@
import * as plugins from '../plugins.js';
import type { S3Context } from '../classes/context.js';
/**
* Service-level operations (root /)
*/
export class ServiceController {
/**
* GET / - List all buckets
*/
public static async listBuckets(
req: plugins.http.IncomingMessage,
res: plugins.http.ServerResponse,
ctx: S3Context,
params: Record<string, string>
): Promise<void> {
const buckets = await ctx.store.listBuckets();
await ctx.sendXML({
ListAllMyBucketsResult: {
'@_xmlns': 'http://s3.amazonaws.com/doc/2006-03-01/',
Owner: {
ID: '123456789000',
DisplayName: 'S3rver',
},
Buckets: {
Bucket: buckets.map((bucket) => ({
Name: bucket.name,
CreationDate: bucket.creationDate.toISOString(),
})),
},
},
});
}
}

View File

@@ -1,6 +1,5 @@
import * as plugins from './plugins.js';
import * as paths from './paths.js';
import { Smarts3Server } from './classes/smarts3-server.js';
/**
* Authentication configuration
@@ -165,6 +164,15 @@ function mergeConfig(userConfig: ISmarts3Config): Required<ISmarts3Config> {
};
}
/**
* IPC command type map for RustBridge
*/
type TRustS3Commands = {
start: { params: { config: Required<ISmarts3Config> }; result: {} };
stop: { params: {}; result: {} };
createBucket: { params: { name: string }; result: {} };
};
/**
* Main Smarts3 class - production-ready S3-compatible server
*/
@@ -178,22 +186,28 @@ export class Smarts3 {
// INSTANCE
public config: Required<ISmarts3Config>;
public s3Instance: Smarts3Server;
private bridge: InstanceType<typeof plugins.RustBridge<TRustS3Commands>>;
constructor(configArg: ISmarts3Config = {}) {
this.config = mergeConfig(configArg);
this.bridge = new plugins.RustBridge<TRustS3Commands>({
binaryName: 'rusts3',
localPaths: [
plugins.path.join(paths.packageDir, 'dist_rust', 'rusts3'),
plugins.path.join(paths.packageDir, 'rust', 'target', 'release', 'rusts3'),
plugins.path.join(paths.packageDir, 'rust', 'target', 'debug', 'rusts3'),
],
readyTimeoutMs: 30000,
requestTimeoutMs: 300000,
});
}
public async start() {
this.s3Instance = new Smarts3Server({
port: this.config.server.port,
address: this.config.server.address,
directory: this.config.storage.directory,
cleanSlate: this.config.storage.cleanSlate,
silent: this.config.server.silent,
config: this.config, // Pass full config to server
});
await this.s3Instance.start();
const spawned = await this.bridge.spawn();
if (!spawned) {
throw new Error('Failed to spawn rusts3 binary. Make sure it is compiled (pnpm build).');
}
await this.bridge.sendCommand('start', { config: this.config });
if (!this.config.server.silent) {
console.log('s3 server is running');
@@ -203,7 +217,20 @@ export class Smarts3 {
public async getS3Descriptor(
optionsArg?: Partial<plugins.tsclass.storage.IS3Descriptor>,
): Promise<plugins.tsclass.storage.IS3Descriptor> {
const descriptor = this.s3Instance.getS3Descriptor();
const cred = this.config.auth.credentials[0] || {
accessKeyId: 'S3RVER',
secretAccessKey: 'S3RVER',
};
const descriptor: plugins.tsclass.storage.IS3Descriptor = {
endpoint: this.config.server.address === '0.0.0.0' ? 'localhost' : this.config.server.address!,
port: this.config.server.port!,
useSsl: false,
accessKey: cred.accessKeyId,
accessSecret: cred.secretAccessKey,
bucketName: '',
};
return {
...descriptor,
...(optionsArg ? optionsArg : {}),
@@ -211,15 +238,12 @@ export class Smarts3 {
}
public async createBucket(bucketNameArg: string) {
// Call the filesystem store directly instead of using the client library
await this.s3Instance.store.createBucket(bucketNameArg);
await this.bridge.sendCommand('createBucket', { name: bucketNameArg });
return { name: bucketNameArg };
}
public async stop() {
await this.s3Instance.stop();
await this.bridge.sendCommand('stop', {});
this.bridge.kill();
}
}
// Export the custom server class for direct use
export { Smarts3Server } from './classes/smarts3-server.js';
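An end-to-end sketch of the new flow, using only the API visible in this diff (the port and bucket name are illustrative, and Smarts3 is assumed to be imported from this package's entry point):

const smarts3 = new Smarts3({ server: { port: 3000 }, storage: { cleanSlate: true } });
await smarts3.start();                     // spawns the rusts3 binary and sends the 'start' command
await smarts3.createBucket('test-bucket'); // forwarded over IPC as 'createBucket'
const descriptor = await smarts3.getS3Descriptor();
// { endpoint: 'localhost', port: 3000, useSsl: false, accessKey: 'S3RVER', accessSecret: 'S3RVER', bucketName: '' }
await smarts3.stop();                      // sends 'stop', then kills the bridge process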

View File

@@ -1,20 +1,13 @@
// node native
import * as path from 'path';
import * as http from 'http';
import * as crypto from 'crypto';
import * as url from 'url';
export { path, http, crypto, url };
export { path };
// @push.rocks scope
import { SmartFs, SmartFsProviderNode } from '@push.rocks/smartfs';
import * as smartpath from '@push.rocks/smartpath';
import { SmartXml } from '@push.rocks/smartxml';
import { RustBridge } from '@push.rocks/smartrust';
// Create SmartFs instance with Node.js provider
export const smartfs = new SmartFs(new SmartFsProviderNode());
export { smartpath, SmartXml };
export { smartpath, RustBridge };
// @tsclass scope
import * as tsclass from '@tsclass/tsclass';

View File

@@ -1,39 +0,0 @@
import * as plugins from '../plugins.js';
// Create a singleton instance of SmartXml
const smartXmlInstance = new plugins.SmartXml();
/**
* Parse XML string to JavaScript object
*/
export function parseXml(xmlString: string): any {
return smartXmlInstance.parseXmlToObject(xmlString);
}
/**
* Convert JavaScript object to XML string with XML declaration
*/
export function createXml(obj: any, options: { format?: boolean } = {}): string {
const xml = smartXmlInstance.createXmlFromObject(obj);
// Ensure XML declaration is present
if (!xml.startsWith('<?xml')) {
return `<?xml version="1.0" encoding="UTF-8"?>\n${xml}`;
}
return xml;
}
/**
* Helper to create S3-compatible XML responses with proper namespace
*/
export function createS3Xml(rootElement: string, content: any, namespace = 'http://s3.amazonaws.com/doc/2006-03-01/'): string {
const obj: any = {
[rootElement]: {
'@_xmlns': namespace,
...content,
},
};
return createXml(obj, { format: true });
}
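A usage sketch for the helper above (the bucket data is illustrative; exact whitespace depends on SmartXml):

const xml = createS3Xml('ListAllMyBucketsResult', {
  Owner: { ID: '123456789000', DisplayName: 'S3rver' },
  Buckets: { Bucket: [{ Name: 'my-bucket', CreationDate: new Date().toISOString() }] },
});
// <?xml version="1.0" encoding="UTF-8"?>
// <ListAllMyBucketsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">...</ListAllMyBucketsResult>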