Compare commits

16 Commits

| SHA1 |
|---|
| d437ffc226 |
| e36758f183 |
| adf45dce2d |
| eb232b6e8e |
| 0b9d8c4a72 |
| 65eb266983 |
| 54a0c2fb65 |
| 648ff98c2d |
| d6f178bde6 |
| ffaef5cb15 |
| d4cc1d43ea |
| 759becdd04 |
| 51e8836227 |
| 3c0a54e08b |
| c074a5d2ed |
| a9ba9de6be |
.gitignore (vendored, 1 line changed)

@@ -21,3 +21,4 @@ dist_*/
 .serena/

 #------# custom
+rust/target
(deleted file: Serena project configuration, 68 lines)

@@ -1,68 +0,0 @@
-# language of the project (csharp, python, rust, java, typescript, go, cpp, or ruby)
-# * For C, use cpp
-# * For JavaScript, use typescript
-# Special requirements:
-# * csharp: Requires the presence of a .sln file in the project folder.
-language: typescript
-
-# whether to use the project's gitignore file to ignore files
-# Added on 2025-04-07
-ignore_all_files_in_gitignore: true
-# list of additional paths to ignore
-# same syntax as gitignore, so you can use * and **
-# Was previously called `ignored_dirs`, please update your config if you are using that.
-# Added (renamed) on 2025-04-07
-ignored_paths: []
-
-# whether the project is in read-only mode
-# If set to true, all editing tools will be disabled and attempts to use them will result in an error
-# Added on 2025-04-18
-read_only: false
-
-
-# list of tool names to exclude. We recommend not excluding any tools, see the readme for more details.
-# Below is the complete list of tools for convenience.
-# To make sure you have the latest list of tools, and to view their descriptions,
-# execute `uv run scripts/print_tool_overview.py`.
-#
-# * `activate_project`: Activates a project by name.
-# * `check_onboarding_performed`: Checks whether project onboarding was already performed.
-# * `create_text_file`: Creates/overwrites a file in the project directory.
-# * `delete_lines`: Deletes a range of lines within a file.
-# * `delete_memory`: Deletes a memory from Serena's project-specific memory store.
-# * `execute_shell_command`: Executes a shell command.
-# * `find_referencing_code_snippets`: Finds code snippets in which the symbol at the given location is referenced.
-# * `find_referencing_symbols`: Finds symbols that reference the symbol at the given location (optionally filtered by type).
-# * `find_symbol`: Performs a global (or local) search for symbols with/containing a given name/substring (optionally filtered by type).
-# * `get_current_config`: Prints the current configuration of the agent, including the active and available projects, tools, contexts, and modes.
-# * `get_symbols_overview`: Gets an overview of the top-level symbols defined in a given file.
-# * `initial_instructions`: Gets the initial instructions for the current project.
-#   Should only be used in settings where the system prompt cannot be set,
-#   e.g. in clients you have no control over, like Claude Desktop.
-# * `insert_after_symbol`: Inserts content after the end of the definition of a given symbol.
-# * `insert_at_line`: Inserts content at a given line in a file.
-# * `insert_before_symbol`: Inserts content before the beginning of the definition of a given symbol.
-# * `list_dir`: Lists files and directories in the given directory (optionally with recursion).
-# * `list_memories`: Lists memories in Serena's project-specific memory store.
-# * `onboarding`: Performs onboarding (identifying the project structure and essential tasks, e.g. for testing or building).
-# * `prepare_for_new_conversation`: Provides instructions for preparing for a new conversation (in order to continue with the necessary context).
-# * `read_file`: Reads a file within the project directory.
-# * `read_memory`: Reads the memory with the given name from Serena's project-specific memory store.
-# * `remove_project`: Removes a project from the Serena configuration.
-# * `replace_lines`: Replaces a range of lines within a file with new content.
-# * `replace_symbol_body`: Replaces the full definition of a symbol.
-# * `restart_language_server`: Restarts the language server, may be necessary when edits not through Serena happen.
-# * `search_for_pattern`: Performs a search for a pattern in the project.
-# * `summarize_changes`: Provides instructions for summarizing the changes made to the codebase.
-# * `switch_modes`: Activates modes by providing a list of their names
-# * `think_about_collected_information`: Thinking tool for pondering the completeness of collected information.
-# * `think_about_task_adherence`: Thinking tool for determining whether the agent is still on track with the current task.
-# * `think_about_whether_you_are_done`: Thinking tool for determining whether the task is truly completed.
-# * `write_memory`: Writes a named memory (for future reference) to Serena's project-specific memory store.
-excluded_tools: []
-
-# initial prompt for the project. It will always be given to the LLM upon activating the project
-# (contrary to the memories, which are loaded on demand).
-initial_prompt: ""
-
-project_name: "smarts3"
changelog.md (70 lines changed)

@@ -1,5 +1,75 @@
 # Changelog

+## 2026-02-17 - 5.3.0 - feat(auth)
+add AWS SigV4 authentication and bucket policy support
+
+- Implement AWS SigV4 full verification (constant-time comparison, 15-minute clock skew enforcement) and expose default signing region (server.region = 'us-east-1').
+- Add IAM-style bucket policy engine with Put/Get/Delete policy APIs (GetBucketPolicy/PutBucketPolicy/DeleteBucketPolicy), wildcard action/resource matching, Allow/Deny evaluation, and on-disk persistence under .policies/{bucket}.policy.json.
+- Documentation and README expanded with policy usage, examples, API table entries, and notes about policy CRUD and behavior for anonymous/authenticated requests.
+- Rust code refactors: simplify storage/server result structs and multipart handling (removed several unused size/key/bucket fields), remove S3Error::to_response and error_xml helpers, and other internal cleanup to support new auth/policy features.
+
+## 2026-02-17 - 5.2.0 - feat(auth,policy)
+add AWS SigV4 authentication and S3 bucket policy support
+
+- Implemented real AWS SigV4 verification (HMAC-SHA256), including x-amz-date handling, clock skew enforcement and constant-time signature comparison
+- Added bucket policy model, validator and evaluation engine (Deny > Allow > NoOpinion) with a PolicyStore (RwLock cache + disk-backed .policies/*.policy.json)
+- Integrated action resolution and auth+policy pipeline into the HTTP server: authorization checks run per-request, anonymous requests are denied by default, ListAllMyBuckets requires authentication
+- Added bucket policy CRUD handlers via ?policy query parameter (GET/PUT/DELETE) and cleanup of policies on bucket deletion
+- Storage and config updates: created .policies dir and policy path helpers; default region added to server config (TS + Rust)
+- Added comprehensive tests for auth and policy behavior (policy CRUD, evaluation, per-action enforcement, auth integration)
+- Updated Rust dependencies and Cargo.toml/Cargo.lock to include hmac, sha2, hex, subtle, cpufeatures
+
+## 2026-02-13 - 5.1.1 - fix(smarts3)
+replace TypeScript server with Rust-powered core and IPC bridge
+
+- Major refactor: Node.js/TypeScript in-process server replaced by a Rust crate ('rusts3') with a TypeScript IPC wrapper (RustBridge).
+- Removed many TypeScript server modules (smarts3-server, filesystem-store, multipart-manager, controllers, router, context, logger, xml utils, etc.); Smarts3Server export removed — public API now proxies to the Rust binary.
+- Smarts3 now spawns and communicates with the rusts3 binary via RustBridge IPC (commands include start, stop, createBucket).
+- Build & packaging changes: build script now runs `tsrust` before `tsbuild`; added `@git.zone/tsrust` devDependency; added `dist_rust` artifacts and new cross-compile targets in npmextra.json; .gitignore updated for rust/target.
+- Dependency changes: added `@push.rocks/smartrust` (RustBridge) and simplified plugins surface; previous smartfs/smartxml usage removed from TS code and replaced by the Rust implementation + IPC.
+- Added Rust project files (rust/Cargo.toml, rust/src/*) implementing server, IPC management loop, storage, XML responses, errors, and config.
+- Documentation updated (README and hints) to describe the Rust core, supported prebuilt targets (linux_amd64, linux_arm64), IPC commands, and developer build notes.
+
+## 2025-11-23 - 5.1.0 - feat(multipart)
+Implement full multipart upload support with persistent manager, periodic cleanup, and API integration
+
+- Add IMultipartConfig to server config with defaults (expirationDays: 7, cleanupIntervalMinutes: 60) and merge into existing config flow
+- Introduce MultipartUploadManager: persistent upload metadata on disk, part upload/assembly, restore uploads on startup, listParts/listUploads, abort/cleanup functionality
+- Start and stop multipart cleanup task from Smarts3Server lifecycle (startCleanupTask on start, stopCleanupTask on stop) with configurable interval and expiration
+- ObjectController: support multipart endpoints (initiate, upload part, complete, abort) and move assembled final object into the object store on completion; set ETag headers and return proper XML responses
+- BucketController: support listing in-progress multipart uploads via ?uploads query parameter and return S3-compatible XML
+- Persist multipart state to disk and restore on initialization to survive restarts; perform automatic cleanup of expired uploads
+
+## 2025-11-23 - 5.0.2 - fix(readme)
+Clarify contribution agreement requirement in README
+
+- Updated the Issue Reporting and Security section in readme.md to make it explicit that developers must sign and comply with the contribution agreement (and complete identification) before obtaining a code.foss.global account to submit pull requests.
+
+## 2025-11-23 - 5.0.1 - fix(docs)
+Clarify README wording about S3 compatibility and AWS SDK usage
+
+- Update README wording to "Full S3 API compatibility" and clarify it works seamlessly with AWS SDK v3 and other S3 clients
+
+## 2025-11-23 - 5.0.0 - BREAKING CHANGE(core)
+Production-ready S3-compatible server: nested config, multipart uploads, CORS, structured logging, SmartFS migration and improved error handling
+
+- Breaking change: configuration format migrated from flat to nested structure (server, storage, auth, cors, logging, limits). Update existing configs accordingly.
+- Implemented full multipart upload support (initiate, upload part, complete, abort) with on-disk part management and final assembly.
+- Added CORS middleware with configurable origins, methods, headers, exposed headers, maxAge and credentials support.
+- Structured, configurable logging (levels: error|warn|info|debug; formats: text|json) and request/response logging middleware.
+- Simple static credential authentication middleware (configurable list of credentials).
+- Migrated filesystem operations to @push.rocks/smartfs (Web Streams interoperability) and removed smartbucket from production dependencies.
+- Improved S3-compatible error handling and XML responses (S3Error class and XML utilities).
+- Exposed Smarts3Server and made store/multipart managers accessible for tests and advanced usage; added helper methods like getS3Descriptor and createBucket.
+
+## 2025-11-23 - 4.0.0 - BREAKING CHANGE(Smarts3)
+Migrate Smarts3 configuration to nested server/storage objects and remove legacy flat config support
+
+- Smarts3.createAndStart() and Smarts3 constructor now accept ISmarts3Config with nested `server` and `storage` objects.
+- Removed support for the legacy flat config shape (top-level `port` and `cleanSlate`) / ILegacySmarts3Config.
+- Updated tests to use new config shape (server:{ port, silent } and storage:{ cleanSlate }).
+- mergeConfig and Smarts3Server now rely on the nested config shape; consumers must update their initialization code.
+
 ## 2025-11-23 - 3.2.0 - feat(multipart)
 Add multipart upload support with MultipartUploadManager and controller integration
npmextra.json

@@ -1,5 +1,11 @@
 {
   "gitzone": {
+    "@git.zone/tsrust": {
+      "targets": [
+        "linux_amd64",
+        "linux_arm64"
+      ]
+    },
     "@git.zone/cli": {
       "projectType": "npm",
       "module": {
         "githost": "code.foss.global",
@@ -27,13 +33,19 @@
        "CI/CD Integration",
        "Developer Onboarding"
      ]
    },
    "release": {
      "registries": [
        "https://verdaccio.lossless.digital",
        "https://registry.npmjs.org"
      ],
      "accessLevel": "public"
    }
  },
  "npmci": {
    "npmGlobalTools": [],
    "npmAccessLevel": "public"
  },
  "tsdoc": {
    "@git.zone/tsdoc": {
      "legal": "\n## License and Legal Information\n\nThis repository contains open-source code that is licensed under the MIT License. A copy of the MIT License can be found in the [license](license) file within this repository. \n\n**Please note:** The MIT License does not grant permission to use the trade names, trademarks, service marks, or product names of the project, except as required for reasonable and customary use in describing the origin of the work and reproducing the content of the NOTICE file.\n\n### Trademarks\n\nThis project is owned and maintained by Task Venture Capital GmbH. The names and logos associated with Task Venture Capital GmbH and any related products or services are trademarks of Task Venture Capital GmbH and are not included within the scope of the MIT license granted herein. Use of these trademarks must comply with Task Venture Capital GmbH's Trademark Guidelines, and any usage must be approved in writing by Task Venture Capital GmbH.\n\n### Company Information\n\nTask Venture Capital GmbH \nRegistered at District court Bremen HRB 35230 HB, Germany\n\nFor any legal inquiries or if you require further information, please contact us via email at hello@task.vc.\n\nBy using this repository, you acknowledge that you have read this section, agree to comply with its terms, and understand that the licensing of the code does not imply endorsement by Task Venture Capital GmbH of any derivative works.\n"
    },
    "@ship.zone/szci": {
      "npmGlobalTools": []
    }
  }
package.json

@@ -1,6 +1,6 @@
 {
   "name": "@push.rocks/smarts3",
-  "version": "3.2.0",
+  "version": "5.3.0",
   "private": false,
   "description": "A Node.js TypeScript package to create a local S3 endpoint for simulating AWS S3 operations using mapped local directories for development and testing purposes.",
   "main": "dist_ts/index.js",
@@ -10,7 +10,7 @@
   "license": "MIT",
   "scripts": {
     "test": "(tstest test/ --web --verbose --logfile --timeout 60)",
-    "build": "(tsbuild --web --allowimplicitany)",
+    "build": "(tsrust && tsbuild --web --allowimplicitany)",
     "buildDocs": "tsdoc"
   },
   "devDependencies": {
@@ -20,6 +20,7 @@
     "@git.zone/tsrun": "^2.0.0",
     "@git.zone/tstest": "^3.1.0",
     "@push.rocks/smartbucket": "^4.3.0",
+    "@git.zone/tsrust": "^1.3.0",
     "@types/node": "^22.9.0"
   },
   "browserslist": [
@@ -31,6 +32,7 @@
     "dist/**/*",
     "dist_*/**/*",
     "dist_ts/**/*",
+    "dist_rust/**/*",
     "dist_ts_web/**/*",
     "assets/**/*",
     "cli.js",
@@ -38,9 +40,8 @@
     "readme.md"
   ],
   "dependencies": {
-    "@push.rocks/smartfs": "^1.1.0",
     "@push.rocks/smartpath": "^6.0.0",
-    "@push.rocks/smartxml": "^2.0.0",
+    "@push.rocks/smartrust": "^1.0.0",
     "@tsclass/tsclass": "^9.3.0"
   },
   "keywords": [
readme.hints.md (111 lines changed)

@@ -1,74 +1,69 @@
 # Project Hints for smarts3

-## Current State (v3.0.0)
+## Current State (v6.0.0-dev)

-- Native custom S3 server implementation (Smarts3Server)
-- No longer uses legacy s3rver backend (removed in v3.0.0)
-- Core S3 operations working: PUT, GET, HEAD, DELETE for objects and buckets
-- Multipart upload NOT yet implemented (critical gap for production)
-- Authentication is hardcoded ('S3RVER'/'S3RVER') - not production-ready
-- No CORS support yet
-- No SSL/TLS support yet
+- **Rust-powered S3 server** via `@push.rocks/smartrust` IPC bridge
+- High-performance: streaming I/O, zero-copy, backpressure, range seek
+- TypeScript is thin IPC wrapper; all HTTP/storage/routing in Rust binary `rusts3`
+- Full S3 compatibility: PUT, GET, HEAD, DELETE for objects and buckets
+- Multipart upload support (streaming, no OOM)
+- **Real AWS SigV4 authentication** (cryptographic signature verification)
+- **Bucket policies** (AWS/MinIO-compatible JSON policies, public access support)
+- CORS support
+- ListBuckets, ListObjects (v1/v2), CopyObject

-## Production Readiness
-
-See `production-readiness.md` for the complete gap analysis and implementation plan.
-
-**Key Missing Features for Production:**
-1. Multipart upload support (HIGHEST PRIORITY)
-2. Configurable authentication
-3. CORS middleware
-4. SSL/TLS support
-5. Production configuration system
-6. Production logging
+## Architecture
+
+### Rust Binary (`rust/src/`)
+- `main.rs` - Clap CLI, management mode entry
+- `config.rs` - Serde config structs matching TS interfaces (includes `region`)
+- `management.rs` - IPC loop (newline-delimited JSON over stdin/stdout)
+- `server.rs` - hyper 1.x HTTP server, routing, CORS, auth+policy pipeline, all S3 handlers
+- `storage.rs` - FileStore: filesystem-backed storage, multipart manager, `.policies/` dir
+- `xml_response.rs` - S3 XML response builders
+- `s3_error.rs` - S3 error codes with HTTP status mapping
+- `auth.rs` - AWS SigV4 signature verification (HMAC-SHA256, clock skew, constant-time compare)
+- `action.rs` - S3Action enum + request-to-IAM-action resolver + RequestContext
+- `policy.rs` - BucketPolicy model, evaluation engine (Deny > Allow > NoOpinion), PolicyStore (RwLock cache + disk)
+
+### TypeScript Bridge (`ts/`)
+- `ts/index.ts` - Smarts3 class with RustBridge<TRustS3Commands>
+- `ts/plugins.ts` - path, smartpath, RustBridge, tsclass
+- `ts/paths.ts` - packageDir, bucketsDir defaults

-## Architecture Notes
-
-### File Structure
-- `ts/classes/smarts3-server.ts` - Main server class
-- `ts/classes/filesystem-store.ts` - Storage layer (filesystem-backed)
-- `ts/classes/router.ts` - URL routing with pattern matching
-- `ts/classes/middleware-stack.ts` - Middleware execution
-- `ts/classes/context.ts` - Request/response context
-- `ts/classes/s3-error.ts` - S3-compatible error handling
-- `ts/controllers/` - Service, bucket, and object controllers
-- `ts/index.ts` - Main export (Smarts3 class)
+### IPC Commands
+| Command | Params | Action |
+|---------|--------|--------|
+| `start` | `{ config: ISmarts3Config }` | Init storage + HTTP server |
+| `stop` | `{}` | Graceful shutdown |
+| `createBucket` | `{ name: string }` | Create bucket directory |

-### Storage Layout
-- Objects stored as: `{bucket}/{encodedKey}._S3_object`
-- Metadata stored as: `{bucket}/{encodedKey}._S3_object.metadata.json`
-- MD5 stored as: `{bucket}/{encodedKey}._S3_object.md5`
-- Keys are encoded for Windows compatibility (hex encoding for invalid chars)
+### Storage Layout (backward-compatible)
+- Objects: `{root}/{bucket}/{key}._S3_object`
+- Metadata: `{root}/{bucket}/{key}._S3_object.metadata.json`
+- MD5: `{root}/{bucket}/{key}._S3_object.md5`
+- Multipart: `{root}/.multipart/{upload_id}/part-{N}`
+- Policies: `{root}/.policies/{bucket}.policy.json`

-### Current Limitations
-- Max file size limited by available memory (no streaming multipart)
-- Single server instance only (no clustering)
-- No versioning support
-- No access control beyond basic auth
-
-## Testing
-
-- Main test: `test/test.aws-sdk.node.ts` - Tests AWS SDK v3 compatibility
-- Run with: `pnpm test`
-- Tests run with cleanSlate mode enabled
+## Build
+
+- `pnpm build` runs `tsrust && tsbuild --web --allowimplicitany`
+- `tsrust` compiles Rust to `dist_rust/rusts3`
+- Targets: linux_amd64, linux_arm64 (configured in npmextra.json)

 ## Dependencies

-- `@push.rocks/smartbucket` - S3 abstraction layer
-- `@push.rocks/smartfs` - Modern filesystem operations with Web Streams API (replaced smartfile)
-- `@push.rocks/smartxml` - XML generation/parsing
+- `@push.rocks/smartrust` - RustBridge IPC bridge
 - `@push.rocks/smartpath` - Path utilities
-- `@tsclass/tsclass` - TypeScript utilities
+- `@tsclass/tsclass` - IS3Descriptor type
+- `@git.zone/tsrust` (devDep) - Rust cross-compilation

-## Migration Notes (2025-11-23)
-
-Successfully migrated from `@push.rocks/smartfile` + native `fs` to `@push.rocks/smartfs`:
-- All file/directory operations now use smartfs fluent API
-- Web Streams → Node.js Streams conversion for HTTP compatibility
-- All tests passing ✅
-- Build successful ✅
-
-## Next Steps
-
-Waiting for approval to proceed with production-readiness implementation.
-Priority 1 is implementing multipart uploads.
+## Testing
+
+- `test/test.aws-sdk.node.ts` - AWS SDK v3 compatibility (10 tests, auth disabled, port 3337)
+- `test/test.auth.node.ts` - Auth + bucket policy integration (20 tests, auth enabled, port 3344)
+- `test/test.policy-crud.node.ts` - Policy API CRUD + validation edge cases (17 tests, port 3345)
+- `test/test.policy-eval.node.ts` - Policy evaluation: principals, actions, resources, deny-vs-allow (22 tests, port 3346)
+- `test/test.policy-actions.node.ts` - Per-action policy enforcement (15 tests, port 3347)
+- `test/test.ts` - SmartBucket integration (3 tests)
+- Run: `pnpm test` or `tstest test/test.aws-sdk.node.ts --verbose`
readme.md (689 lines changed)

@@ -1,436 +1,451 @@
 # @push.rocks/smarts3 🚀

-**Mock S3 made simple** - A powerful Node.js TypeScript package for creating a local S3 endpoint that simulates AWS S3 operations using mapped local directories. Perfect for development and testing!
-
-## 🌟 Features
-
-- 🏃 **Lightning-fast local S3 simulation** - No more waiting for cloud operations during development
-- ⚡ **Native custom S3 server** - Built on Node.js http module with zero framework dependencies
-- 🔄 **Full AWS S3 API compatibility** - Drop-in replacement for AWS SDK v3 and other S3 clients
-- 📂 **Local directory mapping** - Your buckets live right on your filesystem with Windows-compatible encoding
-- 🧪 **Perfect for testing** - Reliable, repeatable tests without cloud dependencies
-- 🎯 **TypeScript-first** - Built with TypeScript for excellent type safety and IDE support
-- 🔧 **Zero configuration** - Works out of the box with sensible defaults
-- 🧹 **Clean slate mode** - Start fresh on every test run
+A high-performance, S3-compatible local server powered by a **Rust core** with a clean TypeScript API. Drop-in replacement for AWS S3 during development and testing — no cloud, no Docker, no MinIO. Just `npm install` and go.

 ## Issue Reporting and Security

-For reporting bugs, issues, or security vulnerabilities, please visit [community.foss.global/](https://community.foss.global/). This is the central community hub for all issue reporting. Developers who want to sign a contribution agreement and go through identification can also get a [code.foss.global/](https://code.foss.global/) account to submit Pull Requests directly.
+For reporting bugs, issues, or security vulnerabilities, please visit [community.foss.global/](https://community.foss.global/). This is the central community hub for all issue reporting. Developers who sign and comply with our contribution agreement and go through identification can also get a [code.foss.global/](https://code.foss.global/) account to submit Pull Requests directly.

+## 🌟 Why smarts3?
+
+| Feature | smarts3 | MinIO | s3rver |
+|---------|---------|-------|--------|
+| Install | `pnpm add` | Docker / binary | `npm install` |
+| Startup time | ~20ms | seconds | ~200ms |
+| Large file uploads | ✅ Streaming, zero-copy | ✅ | ❌ OOM risk |
+| Range requests | ✅ Seek-based | ✅ | ❌ Full read |
+| Language | Rust + TypeScript | Go | JavaScript |
+| Multipart uploads | ✅ Full support | ✅ | ❌ |
+| Auth | ✅ AWS SigV4 (full verification) | Full IAM | Basic |
+| Bucket policies | ✅ IAM-style evaluation | ✅ | ❌ |
+
+### Core Features
+
+- ⚡ **Rust-powered HTTP server** — hyper 1.x with streaming I/O, zero-copy, backpressure
+- 🔄 **Full S3 API compatibility** — works with AWS SDK v3, SmartBucket, any S3 client
+- 📂 **Filesystem-backed storage** — buckets map to directories, objects to files
+- 📤 **Streaming multipart uploads** — large files without memory pressure
+- 🎯 **Byte-range requests** — `seek()` directly to the requested byte offset (client-side sketch below)
+- 🔐 **AWS SigV4 authentication** — full signature verification with constant-time comparison and 15-min clock skew enforcement
+- 📜 **Bucket policies** — IAM-style JSON policies with Allow/Deny evaluation, wildcard matching, and anonymous access support
+- 🌐 **CORS middleware** — configurable cross-origin support
+- 📊 **Structured logging** — tracing-based, error through debug levels
+- 🧹 **Clean slate mode** — wipe storage on startup for test isolation
+- 🧪 **Test-first design** — start/stop in milliseconds, no port conflicts
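To see the seek-based range support from the client side, here is a minimal sketch using AWS SDK v3 (it assumes a smarts3 instance on `localhost:3000` with the default `S3RVER` credentials):

```typescript
import { S3Client, GetObjectCommand } from '@aws-sdk/client-s3';

// Assumes a smarts3 instance on localhost:3000 with the default credentials.
const client = new S3Client({
  endpoint: 'http://localhost:3000',
  region: 'us-east-1',
  credentials: { accessKeyId: 'S3RVER', secretAccessKey: 'S3RVER' },
  forcePathStyle: true,
});

// Request only the first KiB; the Rust core seeks to the offset
// instead of reading the whole file.
const { Body, ContentRange } = await client.send(new GetObjectCommand({
  Bucket: 'my-bucket',
  Key: 'large-file.bin',
  Range: 'bytes=0-1023',
}));
const firstKiB = await Body.transformToByteArray();
console.log(ContentRange, firstKiB.length); // e.g. "bytes 0-1023/1048576" 1024
```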
 ## 📦 Installation

 Install using your favorite package manager:

 ```bash
 # Using npm
 npm install @push.rocks/smarts3 --save-dev

 # Using pnpm (recommended)
 pnpm add @push.rocks/smarts3 -D

 # Using yarn
 yarn add @push.rocks/smarts3 --dev
 ```

+> **Note:** The package ships with precompiled Rust binaries for `linux_amd64` and `linux_arm64`. No Rust toolchain needed on your machine.

 ## 🚀 Quick Start

 Get up and running in seconds:

 ```typescript
 import { Smarts3 } from '@push.rocks/smarts3';

-// Start your local S3 server
-const s3Server = await Smarts3.createAndStart({
-  port: 3000,
-  cleanSlate: true, // Start with empty buckets
+// Start a local S3 server
+const s3 = await Smarts3.createAndStart({
+  server: { port: 3000 },
+  storage: { cleanSlate: true },
 });

 // Create a bucket
-const bucket = await s3Server.createBucket('my-awesome-bucket');
+await s3.createBucket('my-bucket');

-// Get S3 connection details for use with AWS SDK or other S3 clients
-const s3Config = await s3Server.getS3Descriptor();
+// Get connection details for any S3 client
+const descriptor = await s3.getS3Descriptor();
+// → { endpoint: 'localhost', port: 3000, accessKey: 'S3RVER', accessSecret: 'S3RVER', useSsl: false }

-// When you're done
-await s3Server.stop();
+// When done
+await s3.stop();
 ```

-## 📖 Detailed Usage Guide
-
-### 🏗️ Setting Up Your S3 Server
-
-The `Smarts3` class provides a simple interface for managing your local S3 server:
+## 📖 Configuration
+
+All config fields are optional — sensible defaults are applied automatically.

 ```typescript
-import { Smarts3 } from '@push.rocks/smarts3';
-
-// Configuration options
-const config = {
-  port: 3000, // Port to run the server on (default: 3000)
-  cleanSlate: true, // Clear all data on start (default: false)
+import { Smarts3, ISmarts3Config } from '@push.rocks/smarts3';
+
+const config: ISmarts3Config = {
+  server: {
+    port: 3000, // Default: 3000
+    address: '0.0.0.0', // Default: '0.0.0.0'
+    silent: false, // Default: false
+    region: 'us-east-1', // Default: 'us-east-1' — used for SigV4 signing
+  },
+  storage: {
+    directory: './my-data', // Default: .nogit/bucketsDir
+    cleanSlate: false, // Default: false — set true to wipe on start
+  },
+  auth: {
+    enabled: false, // Default: false
+    credentials: [{
+      accessKeyId: 'MY_KEY',
+      secretAccessKey: 'MY_SECRET',
+    }],
+  },
+  cors: {
+    enabled: false, // Default: false
+    allowedOrigins: ['*'],
+    allowedMethods: ['GET', 'POST', 'PUT', 'DELETE', 'HEAD', 'OPTIONS'],
+    allowedHeaders: ['*'],
+    exposedHeaders: ['ETag', 'x-amz-request-id', 'x-amz-version-id'],
+    maxAge: 86400,
+    allowCredentials: false,
+  },
+  logging: {
+    level: 'info', // 'error' | 'warn' | 'info' | 'debug'
+    format: 'text', // 'text' | 'json'
+    enabled: true,
+  },
+  limits: {
+    maxObjectSize: 5 * 1024 * 1024 * 1024, // 5 GB
+    maxMetadataSize: 2048,
+    requestTimeout: 300000, // 5 minutes
+  },
+  multipart: {
+    expirationDays: 7,
+    cleanupIntervalMinutes: 60,
+  },
 };

 // Create and start in one go
-const s3Server = await Smarts3.createAndStart(config);
-
-// Or create and start separately
-const s3Server = new Smarts3(config);
-await s3Server.start();
+const s3 = await Smarts3.createAndStart(config);
 ```

-### 🪣 Working with Buckets
-
-Creating and managing buckets is straightforward:
-
-```typescript
-// Create a new bucket
-const bucket = await s3Server.createBucket('my-bucket');
-
-// The bucket is now ready to use!
-console.log(`Created bucket: ${bucket.name}`);
-```
+### Common Configurations
+
+**CI/CD testing** — silent, clean, fast:
+```typescript
+const s3 = await Smarts3.createAndStart({
+  server: { port: 9999, silent: true },
+  storage: { cleanSlate: true },
+});
+```
+
+**Auth enabled:**
+```typescript
+const s3 = await Smarts3.createAndStart({
+  auth: {
+    enabled: true,
+    credentials: [{ accessKeyId: 'test', secretAccessKey: 'test123' }],
+  },
+});
+```
+
+**CORS for local web dev:**
+```typescript
+const s3 = await Smarts3.createAndStart({
+  cors: {
+    enabled: true,
+    allowedOrigins: ['http://localhost:5173'],
+    allowCredentials: true,
+  },
+});
+```
+
+## 📤 Usage with AWS SDK v3
+
+```typescript
+import { S3Client, PutObjectCommand, GetObjectCommand, DeleteObjectCommand } from '@aws-sdk/client-s3';
+
+const descriptor = await s3.getS3Descriptor();
+
+const client = new S3Client({
+  endpoint: `http://${descriptor.endpoint}:${descriptor.port}`,
+  region: 'us-east-1',
+  credentials: {
+    accessKeyId: descriptor.accessKey,
+    secretAccessKey: descriptor.accessSecret,
+  },
+  forcePathStyle: true, // Required for path-style S3
+});
+
+// Upload
+await client.send(new PutObjectCommand({
+  Bucket: 'my-bucket',
+  Key: 'hello.txt',
+  Body: 'Hello, S3!',
+  ContentType: 'text/plain',
+}));
+
+// Download
+const { Body } = await client.send(new GetObjectCommand({
+  Bucket: 'my-bucket',
+  Key: 'hello.txt',
+}));
+const content = await Body.transformToString(); // "Hello, S3!"
+
+// Delete
+await client.send(new DeleteObjectCommand({
+  Bucket: 'my-bucket',
+  Key: 'hello.txt',
+}));
+```

-### 📤 Uploading Files
-
-Use the powerful `SmartBucket` integration for file operations:
+## 🪣 Usage with SmartBucket

 ```typescript
 import { SmartBucket } from '@push.rocks/smartbucket';

-// Get connection configuration
-const s3Config = await s3Server.getS3Descriptor();
-
-// Create a SmartBucket instance
-const smartbucket = new SmartBucket(s3Config);
-
-// Get your bucket
-const bucket = await smartbucket.getBucket('my-bucket');
-
-// Upload a file
-const baseDir = await bucket.getBaseDirectory();
-await baseDir.fastStore('path/to/file.txt', 'Hello, S3! 🎉');
-
-// Upload with more control
-await baseDir.fastPut({
-  path: 'documents/important.pdf',
-  contents: Buffer.from(yourPdfData),
-});
+const smartbucket = new SmartBucket(await s3.getS3Descriptor());
+const bucket = await smartbucket.createBucket('my-bucket');
+const dir = await bucket.getBaseDirectory();
+
+// Upload
+await dir.fastPut({ path: 'docs/readme.txt', contents: 'Hello!' });
+
+// Download
+const content = await dir.fastGet('docs/readme.txt');
+
+// List
+const files = await dir.listFiles();
 ```

-### 📥 Downloading Files
-
-Retrieve your files easily:
-
-```typescript
-// Get file contents as string
-const content = await baseDir.fastGet('path/to/file.txt');
-console.log(content); // "Hello, S3! 🎉"
-
-// Get file as Buffer
-const buffer = await baseDir.fastGetBuffer('documents/important.pdf');
-```
-
-### 📋 Listing Files
-
-Browse your bucket contents:
-
-```typescript
-// List all files in the bucket
-const files = await baseDir.listFiles();
-
-files.forEach((file) => {
-  console.log(`📄 ${file.name} (${file.size} bytes)`);
-});
-
-// List files with a specific prefix
-const docs = await baseDir.listFiles('documents/');
-```
-
-### 🗑️ Deleting Files
-
-Clean up when needed:
-
-```typescript
-// Delete a single file
-await baseDir.fastDelete('old-file.txt');
-
-// Delete multiple files
-const filesToDelete = ['temp1.txt', 'temp2.txt', 'temp3.txt'];
-for (const file of filesToDelete) {
-  await baseDir.fastDelete(file);
-}
-```
+## 📤 Multipart Uploads
+
+For files larger than 5 MB, use multipart uploads. smarts3 handles them with **streaming I/O** — parts are written directly to disk, never buffered in memory.
+
+```typescript
+import {
+  CreateMultipartUploadCommand,
+  UploadPartCommand,
+  CompleteMultipartUploadCommand,
+} from '@aws-sdk/client-s3';
+
+// 1. Initiate
+const { UploadId } = await client.send(new CreateMultipartUploadCommand({
+  Bucket: 'my-bucket',
+  Key: 'large-file.bin',
+}));
+
+// 2. Upload parts
+const parts = [];
+for (let i = 0; i < chunks.length; i++) {
+  const { ETag } = await client.send(new UploadPartCommand({
+    Bucket: 'my-bucket',
+    Key: 'large-file.bin',
+    UploadId,
+    PartNumber: i + 1,
+    Body: chunks[i],
+  }));
+  parts.push({ PartNumber: i + 1, ETag });
+}
+
+// 3. Complete
+await client.send(new CompleteMultipartUploadCommand({
+  Bucket: 'my-bucket',
+  Key: 'large-file.bin',
+  UploadId,
+  MultipartUpload: { Parts: parts },
+}));
+```
+
+## 📜 Bucket Policies
+
+smarts3 supports AWS-style bucket policies for fine-grained access control. Policies use the same IAM JSON format as real S3 — so you can develop and test your policy logic locally before deploying.
+
+When `auth.enabled` is `true`, the auth pipeline works as follows:
+1. **Authenticate** — verify the AWS SigV4 signature (anonymous requests skip this step)
+2. **Authorize** — evaluate bucket policies against the request action, resource, and caller identity
+3. **Default** — authenticated users get full access; anonymous requests are denied unless a policy explicitly allows them
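The precedence (an explicit Deny beats an explicit Allow, which beats the default) can be pictured with a small model — a simplified sketch of the evaluation order, not the actual Rust engine:

```typescript
type Effect = 'Allow' | 'Deny';

// One statement, pre-reduced to whether it matches the current
// action/resource/principal (matching is sketched further below).
interface Statement {
  Effect: Effect;
  matches: boolean;
}

// Simplified model of the evaluation order described above.
function isAllowed(statements: Statement[], isAuthenticated: boolean): boolean {
  const matching = statements.filter((s) => s.matches);
  if (matching.some((s) => s.Effect === 'Deny')) return false; // explicit Deny wins
  if (matching.some((s) => s.Effect === 'Allow')) return true; // then explicit Allow
  return isAuthenticated; // no opinion: authenticated pass, anonymous denied
}
```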
+### Setting a Bucket Policy
+
+Use the S3 `PutBucketPolicy` API (or any S3 client that supports it):
+
+```typescript
+import { PutBucketPolicyCommand } from '@aws-sdk/client-s3';
+
+// Allow anonymous read access to all objects in a bucket
+await client.send(new PutBucketPolicyCommand({
+  Bucket: 'public-assets',
+  Policy: JSON.stringify({
+    Version: '2012-10-17',
+    Statement: [{
+      Sid: 'PublicRead',
+      Effect: 'Allow',
+      Principal: '*',
+      Action: ['s3:GetObject'],
+      Resource: ['arn:aws:s3:::public-assets/*'],
+    }],
+  }),
+}));
+```
+
+### Policy Features
+
+- **Effect**: `Allow` and `Deny` (explicit Deny always wins)
+- **Principal**: `"*"` (everyone) or `{ "AWS": ["arn:..."] }` for specific identities
+- **Action**: IAM-style actions like `s3:GetObject`, `s3:PutObject`, `s3:*`, or prefix wildcards like `s3:Get*`
+- **Resource**: ARN patterns with `*` and `?` wildcards (e.g. `arn:aws:s3:::my-bucket/*`) — see the matching sketch below
+- **Persistence**: Policies survive server restarts — stored as JSON on disk alongside your data
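Action and resource wildcards behave like glob patterns. A rough TypeScript equivalent of the matching rule (illustrative only — the real matcher lives in the Rust policy engine):

```typescript
// Translate an IAM-style pattern ('*' = any sequence, '?' = any single char)
// into an anchored regular expression and test the candidate against it.
function matchesPattern(pattern: string, value: string): boolean {
  const escaped = pattern.replace(/[.+^${}()|[\]\\]/g, '\\$&'); // keep * and ? raw
  const regex = new RegExp('^' + escaped.replace(/\*/g, '.*').replace(/\?/g, '.') + '$');
  return regex.test(value);
}

matchesPattern('s3:Get*', 's3:GetObject'); // true
matchesPattern('arn:aws:s3:::my-bucket/*', 'arn:aws:s3:::my-bucket/docs/a.txt'); // true
```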
+### Policy CRUD Operations
+
+| Operation | AWS SDK Command | HTTP |
+|-----------|----------------|------|
+| Get policy | `GetBucketPolicyCommand` | `GET /{bucket}?policy` |
+| Set policy | `PutBucketPolicyCommand` | `PUT /{bucket}?policy` |
+| Delete policy | `DeleteBucketPolicyCommand` | `DELETE /{bucket}?policy` |
+
+Deleting a bucket automatically removes its associated policy.
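A quick round trip through the policy API, using the same `client` as above (the commands are standard AWS SDK v3; the bucket name is illustrative):

```typescript
import {
  GetBucketPolicyCommand,
  DeleteBucketPolicyCommand,
} from '@aws-sdk/client-s3';

// Read the policy back; the call rejects if no policy is set on the bucket.
const { Policy } = await client.send(
  new GetBucketPolicyCommand({ Bucket: 'public-assets' }),
);
console.log(JSON.parse(Policy!));

// Remove it again.
await client.send(new DeleteBucketPolicyCommand({ Bucket: 'public-assets' }));
```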
 ## 🧪 Testing Integration

-### Using with Jest
-
-```typescript
-import { Smarts3 } from '@push.rocks/smarts3';
-
-describe('S3 Operations', () => {
-  let s3Server: Smarts3;
-
-  beforeAll(async () => {
-    s3Server = await Smarts3.createAndStart({
-      port: 9999,
-      cleanSlate: true,
-    });
-  });
-
-  afterAll(async () => {
-    await s3Server.stop();
-  });
-
-  test('should upload and retrieve a file', async () => {
-    const bucket = await s3Server.createBucket('test-bucket');
-    // Your test logic here
-  });
-});
-```
-
-### Using with Mocha
-
-```typescript
-import { Smarts3 } from '@push.rocks/smarts3';
-import { expect } from 'chai';
-
-describe('S3 Operations', () => {
-  let s3Server: Smarts3;
-
-  before(async () => {
-    s3Server = await Smarts3.createAndStart({
-      port: 9999,
-      cleanSlate: true,
-    });
-  });
-
-  after(async () => {
-    await s3Server.stop();
-  });
-
-  it('should upload and retrieve a file', async () => {
-    const bucket = await s3Server.createBucket('test-bucket');
-    // Your test logic here
-  });
-});
-```
-
-## 🔌 AWS SDK Integration
-
-Use `smarts3` with the official AWS SDK:
-
-```typescript
-import { S3Client, PutObjectCommand } from '@aws-sdk/client-s3';
-import { Smarts3 } from '@push.rocks/smarts3';
-
-// Start local S3
-const s3Server = await Smarts3.createAndStart({ port: 3000 });
-const config = await s3Server.getS3Descriptor();
-
-// Configure AWS SDK
-const s3Client = new S3Client({
-  endpoint: `http://${config.endpoint}:${config.port}`,
-  region: 'us-east-1',
-  credentials: {
-    accessKeyId: config.accessKey,
-    secretAccessKey: config.accessSecret,
-  },
-  forcePathStyle: true,
-});
-
-// Use AWS SDK as normal
-const command = new PutObjectCommand({
-  Bucket: 'my-bucket',
-  Key: 'test-file.txt',
-  Body: 'Hello from AWS SDK!',
-});
-
-await s3Client.send(command);
-```
+```typescript
+import { Smarts3 } from '@push.rocks/smarts3';
+import { tap, expect } from '@git.zone/tstest/tapbundle';
+
+let s3: Smarts3;
+
+tap.test('setup', async () => {
+  s3 = await Smarts3.createAndStart({
+    server: { port: 4567, silent: true },
+    storage: { cleanSlate: true },
+  });
+});
+
+tap.test('should store and retrieve objects', async () => {
+  await s3.createBucket('test');
+  // ... your test logic using AWS SDK or SmartBucket
+});
+
+tap.test('teardown', async () => {
+  await s3.stop();
+});
+
+export default tap.start();
+```

-## 🎯 Real-World Examples
-
-### CI/CD Pipeline Testing
-
-```typescript
-// ci-test.ts
-import { Smarts3 } from '@push.rocks/smarts3';
-
-export async function setupTestEnvironment() {
-  // Start S3 server for CI tests
-  const s3 = await Smarts3.createAndStart({
-    port: process.env.S3_PORT || 3000,
-    cleanSlate: true,
-  });
-
-  // Create test buckets
-  await s3.createBucket('uploads');
-  await s3.createBucket('processed');
-  await s3.createBucket('archive');
-
-  return s3;
-}
-```
-
-### Microservice Development
-
-```typescript
-// dev-server.ts
-import { Smarts3 } from '@push.rocks/smarts3';
-import express from 'express';
-
-async function startDevelopmentServer() {
-  // Start local S3
-  const s3 = await Smarts3.createAndStart({ port: 3000 });
-  await s3.createBucket('user-uploads');
-
-  // Start your API server
-  const app = express();
-
-  app.post('/upload', async (req, res) => {
-    // Your upload logic using local S3
-  });
-
-  app.listen(8080, () => {
-    console.log('🚀 Dev server running with local S3!');
-  });
-}
-```
-
-### Data Migration Testing
-
-```typescript
-import { Smarts3 } from '@push.rocks/smarts3';
-
-async function testDataMigration() {
-  const s3 = await Smarts3.createAndStart({ cleanSlate: true });
-
-  // Create source and destination buckets
-  const sourceBucket = await s3.createBucket('legacy-data');
-  const destBucket = await s3.createBucket('new-data');
-
-  // Populate source with test data
-  const config = await s3.getS3Descriptor();
-  const smartbucket = new SmartBucket(config);
-  const source = await smartbucket.getBucket('legacy-data');
-  const sourceDir = await source.getBaseDirectory();
-
-  // Add test files
-  await sourceDir.fastStore(
-    'user-1.json',
-    JSON.stringify({ id: 1, name: 'Alice' }),
-  );
-  await sourceDir.fastStore(
-    'user-2.json',
-    JSON.stringify({ id: 2, name: 'Bob' }),
-  );
-
-  // Run your migration logic
-  await runMigration(config);
-
-  // Verify migration results
-  const dest = await smartbucket.getBucket('new-data');
-  const destDir = await dest.getBaseDirectory();
-  const migratedFiles = await destDir.listFiles();
-
-  console.log(`✅ Migrated ${migratedFiles.length} files successfully!`);
-}
-```
-
-## 🛠️ Advanced Configuration
-
-### Custom S3 Descriptor Options
-
-When integrating with different S3 clients, you can customize the connection details:
-
-```typescript
-const customDescriptor = await s3Server.getS3Descriptor({
-  endpoint: 'localhost', // Custom endpoint
-  port: 3001, // Different port
-  useSsl: false, // SSL configuration
-  // Add any additional options your S3 client needs
-});
-```
-
-### Environment-Based Configuration
-
-```typescript
-const config = {
-  port: parseInt(process.env.S3_PORT || '3000'),
-  cleanSlate: process.env.NODE_ENV === 'test',
-};
-
-const s3Server = await Smarts3.createAndStart(config);
-```

 ## 🤝 Use Cases

 - **🧪 Unit & Integration Testing** - Test S3 operations without AWS credentials or internet
 - **🏗️ Local Development** - Develop cloud features offline with full S3 compatibility
 - **📚 Teaching & Demos** - Perfect for workshops and tutorials without AWS setup
 - **🔄 CI/CD Pipelines** - Reliable S3 operations in containerized test environments
 - **🎭 Mocking & Stubbing** - Replace real S3 calls in test suites
 - **📊 Data Migration Testing** - Safely test data migrations locally before production

 ## 🔧 API Reference

-### Smarts3 Class
-
-#### Constructor Options
-
-```typescript
-interface ISmarts3ContructorOptions {
-  port?: number; // Server port (default: 3000)
-  cleanSlate?: boolean; // Clear storage on start (default: false)
-}
-```
-
-#### Methods
-
-- `static createAndStart(options)` - Create and start server in one call
-- `start()` - Start the S3 server
-- `stop()` - Stop the S3 server
-- `createBucket(name)` - Create a new bucket
-- `getS3Descriptor(options?)` - Get S3 connection configuration
+### `Smarts3` Class
+
+#### `static createAndStart(config?: ISmarts3Config): Promise<Smarts3>`
+
+Create and start a server in one call.
+
+#### `start(): Promise<void>`
+
+Spawn the Rust binary and start the HTTP server.
+
+#### `stop(): Promise<void>`
+
+Gracefully stop the server and kill the Rust process.
+
+#### `createBucket(name: string): Promise<{ name: string }>`
+
+Create an S3 bucket.
+
+#### `getS3Descriptor(options?): Promise<IS3Descriptor>`
+
+Get connection details for S3 clients. Returns:
+
+| Field | Type | Description |
+|-------|------|-------------|
+| `endpoint` | `string` | Server hostname (`localhost` by default) |
+| `port` | `number` | Server port |
+| `accessKey` | `string` | Access key from first configured credential |
+| `accessSecret` | `string` | Secret key from first configured credential |
+| `useSsl` | `boolean` | Always `false` (plain HTTP) |
+
+## 🏗️ Architecture
+
+smarts3 uses a **hybrid Rust + TypeScript** architecture:
+
+```
+┌─────────────────────────────────┐
+│  Your Code (AWS SDK, etc.)      │
+│    ↕ HTTP (localhost:3000)      │
+├─────────────────────────────────┤
+│  rusts3 binary (Rust)           │
+│  ├─ hyper 1.x HTTP server       │
+│  ├─ S3 path-style routing       │
+│  ├─ Streaming storage layer     │
+│  ├─ Multipart manager           │
+│  ├─ SigV4 auth + policy engine  │
+│  ├─ CORS middleware             │
+│  └─ S3 XML response builder     │
+├─────────────────────────────────┤
+│  TypeScript (thin IPC wrapper)  │
+│  ├─ Smarts3 class               │
+│  ├─ RustBridge (stdin/stdout)   │
+│  └─ Config & S3 descriptor      │
+└─────────────────────────────────┘
+```
+
+**Why Rust?** The TypeScript implementation had critical perf issues: OOM on multipart uploads (parts buffered in memory), double stream copying, file descriptor leaks on HEAD requests, full-file reads for range requests, and no backpressure. The Rust binary solves all of these with streaming I/O, zero-copy, and direct `seek()` for range requests.
+
+**IPC Protocol:** TypeScript spawns the `rusts3` binary with `--management` and communicates via newline-delimited JSON over stdin/stdout. Commands: `start`, `stop`, `createBucket`.
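As an illustration, one exchange over that protocol might look like the sketch below. The exact message shape is an assumption inferred from the command table, not the documented wire format:

```typescript
import { spawn } from 'node:child_process';

// Hypothetical sketch of the IPC handshake — the message shape below is an
// assumption, not the documented wire format.
const rusts3 = spawn('./dist_rust/rusts3', ['--management']);

// Newline-delimited JSON in...
rusts3.stdin.write(
  JSON.stringify({ command: 'createBucket', params: { name: 'my-bucket' } }) + '\n',
);

// ...and newline-delimited JSON back out.
rusts3.stdout.on('data', (chunk) => {
  for (const line of chunk.toString().split('\n').filter(Boolean)) {
    console.log(JSON.parse(line));
  }
});
```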
-## 🐛 Debugging Tips
-
-1. **Enable verbose logging** - The server logs all operations by default
-2. **Check the buckets directory** - Find your data in `.nogit/bucketsDir/`
-3. **Use the correct endpoint** - Remember to use `127.0.0.1` or `localhost`
-4. **Force path style** - Always use path-style URLs with local S3
+### S3 Operations Supported
+
+| Operation | Method | Path |
+|-----------|--------|------|
+| ListBuckets | `GET /` | |
+| CreateBucket | `PUT /{bucket}` | |
+| DeleteBucket | `DELETE /{bucket}` | |
+| HeadBucket | `HEAD /{bucket}` | |
+| ListObjects (v1/v2) | `GET /{bucket}` | `?list-type=2` for v2 |
+| PutObject | `PUT /{bucket}/{key}` | |
+| GetObject | `GET /{bucket}/{key}` | Supports `Range` header |
+| HeadObject | `HEAD /{bucket}/{key}` | |
+| DeleteObject | `DELETE /{bucket}/{key}` | |
+| CopyObject | `PUT /{bucket}/{key}` | `x-amz-copy-source` header |
+| InitiateMultipartUpload | `POST /{bucket}/{key}?uploads` | |
+| UploadPart | `PUT /{bucket}/{key}?partNumber&uploadId` | |
+| CompleteMultipartUpload | `POST /{bucket}/{key}?uploadId` | |
+| AbortMultipartUpload | `DELETE /{bucket}/{key}?uploadId` | |
+| ListMultipartUploads | `GET /{bucket}?uploads` | |
+| GetBucketPolicy | `GET /{bucket}?policy` | |
+| PutBucketPolicy | `PUT /{bucket}?policy` | |
+| DeleteBucketPolicy | `DELETE /{bucket}?policy` | |
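Two of the less obvious rows from the table, exercised from client code with standard AWS SDK v3 commands (bucket and key names are illustrative):

```typescript
import { CopyObjectCommand, ListObjectsV2Command } from '@aws-sdk/client-s3';

// CopyObject: the SDK sends CopySource ("bucket/key") as the
// x-amz-copy-source header noted in the table.
await client.send(new CopyObjectCommand({
  Bucket: 'my-bucket',
  Key: 'backup/hello.txt',
  CopySource: 'my-bucket/hello.txt',
}));

// ListObjects v2: the SDK appends ?list-type=2 for you.
const { Contents } = await client.send(new ListObjectsV2Command({
  Bucket: 'my-bucket',
  Prefix: 'backup/',
}));
```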
-## 📈 Performance
-
-`@push.rocks/smarts3` is optimized for development and testing:
-
-- ⚡ **Instant operations** - No network latency
-- 💾 **Low memory footprint** - Efficient file system usage
-- 🔄 **Fast cleanup** - Clean slate mode for quick test resets
-- 🚀 **Parallel operations** - Handle multiple requests simultaneously
+### On-Disk Format
+
+```
+{storage.directory}/
+  {bucket}/
+    {key}._S3_object                    # Object data
+    {key}._S3_object.metadata.json      # Metadata (content-type, x-amz-meta-*, etc.)
+    {key}._S3_object.md5                # Cached MD5 hash
+  .multipart/
+    {upload-id}/
+      metadata.json                     # Upload metadata (bucket, key, parts)
+      part-1                            # Part data files
+      part-2
+      ...
+  .policies/
+    {bucket}.policy.json                # Bucket policy (IAM JSON format)
+```

 ## 🔗 Related Packages

-- [`@push.rocks/smartbucket`](https://www.npmjs.com/package/@push.rocks/smartbucket) - Powerful S3 abstraction layer
-- [`@push.rocks/smartfs`](https://www.npmjs.com/package/@push.rocks/smartfs) - Modern filesystem with Web Streams support
-- [`@tsclass/tsclass`](https://www.npmjs.com/package/@tsclass/tsclass) - TypeScript class helpers
+- [`@push.rocks/smartbucket`](https://code.foss.global/push.rocks/smartbucket) — High-level S3 abstraction layer
+- [`@push.rocks/smartrust`](https://code.foss.global/push.rocks/smartrust) — TypeScript ↔ Rust IPC bridge
+- [`@git.zone/tsrust`](https://code.foss.global/git.zone/tsrust) — Rust cross-compilation for npm packages

 ## License and Legal Information

-This repository contains open-source code that is licensed under the MIT License. A copy of the MIT License can be found in the [license](license) file within this repository.
+This repository contains open-source code licensed under the MIT License. A copy of the license can be found in the [LICENSE](./LICENSE) file.

 **Please note:** The MIT License does not grant permission to use the trade names, trademarks, service marks, or product names of the project, except as required for reasonable and customary use in describing the origin of the work and reproducing the content of the NOTICE file.

 ### Trademarks

-This project is owned and maintained by Task Venture Capital GmbH. The names and logos associated with Task Venture Capital GmbH and any related products or services are trademarks of Task Venture Capital GmbH and are not included within the scope of the MIT license granted herein. Use of these trademarks must comply with Task Venture Capital GmbH's Trademark Guidelines, and any usage must be approved in writing by Task Venture Capital GmbH.
+This project is owned and maintained by Task Venture Capital GmbH. The names and logos associated with Task Venture Capital GmbH and any related products or services are trademarks of Task Venture Capital GmbH or third parties, and are not included within the scope of the MIT license granted herein.
+
+Use of these trademarks must comply with Task Venture Capital GmbH's Trademark Guidelines or the guidelines of the respective third-party owners, and any usage must be approved in writing. Third-party trademarks used herein are the property of their respective owners and used only in a descriptive manner, e.g. for an implementation of an API or similar.

 ### Company Information

 Task Venture Capital GmbH
-Registered at District court Bremen HRB 35230 HB, Germany
+Registered at District Court Bremen HRB 35230 HB, Germany

-For any legal inquiries or if you require further information, please contact us via email at hello@task.vc.
+For any legal inquiries or further information, please contact us via email at hello@task.vc.

 By using this repository, you acknowledge that you have read this section, agree to comply with its terms, and understand that the licensing of the code does not imply endorsement by Task Venture Capital GmbH of any derivative works.
rust/.cargo/config.toml (new file, 2 lines)

@@ -0,0 +1,2 @@
+[target.aarch64-unknown-linux-gnu]
+linker = "aarch64-linux-gnu-gcc"
rust/Cargo.lock (generated, new file, 1438 lines)

File diff suppressed because it is too large.
rust/Cargo.toml (new file, 33 lines)

@@ -0,0 +1,33 @@
+[package]
+name = "rusts3"
+version = "0.1.0"
+edition = "2021"
+
+[[bin]]
+name = "rusts3"
+path = "src/main.rs"
+
+[dependencies]
+tokio = { version = "1", features = ["full"] }
+hyper = { version = "1", features = ["http1", "server"] }
+hyper-util = { version = "0.1", features = ["tokio", "http1"] }
+http-body-util = "0.1"
+serde = { version = "1", features = ["derive"] }
+serde_json = "1"
+quick-xml = { version = "0.37", features = ["serialize"] }
+md-5 = "0.10"
+tokio-util = { version = "0.7", features = ["io"] }
+bytes = "1"
+uuid = { version = "1", features = ["v4"] }
+clap = { version = "4", features = ["derive"] }
+tracing = "0.1"
+tracing-subscriber = { version = "0.3", features = ["env-filter"] }
+thiserror = "2"
+anyhow = "1"
+percent-encoding = "2"
+url = "2"
+chrono = { version = "0.4", features = ["serde"] }
+futures-core = "0.3"
+hmac = "0.12"
+sha2 = "0.10"
+hex = "0.4"
172
rust/src/action.rs
Normal file
172
rust/src/action.rs
Normal file
@@ -0,0 +1,172 @@
use hyper::body::Incoming;
use hyper::{Method, Request};
use std::collections::HashMap;

/// S3 actions that map to IAM permission strings.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum S3Action {
    ListAllMyBuckets,
    CreateBucket,
    DeleteBucket,
    HeadBucket,
    ListBucket,
    GetObject,
    HeadObject,
    PutObject,
    DeleteObject,
    CopyObject,
    ListBucketMultipartUploads,
    AbortMultipartUpload,
    InitiateMultipartUpload,
    UploadPart,
    CompleteMultipartUpload,
    GetBucketPolicy,
    PutBucketPolicy,
    DeleteBucketPolicy,
}

impl S3Action {
    /// Return the IAM-style action string (e.g. "s3:GetObject").
    pub fn iam_action(&self) -> &'static str {
        match self {
            S3Action::ListAllMyBuckets => "s3:ListAllMyBuckets",
            S3Action::CreateBucket => "s3:CreateBucket",
            S3Action::DeleteBucket => "s3:DeleteBucket",
            S3Action::HeadBucket => "s3:ListBucket",
            S3Action::ListBucket => "s3:ListBucket",
            S3Action::GetObject => "s3:GetObject",
            S3Action::HeadObject => "s3:GetObject",
            S3Action::PutObject => "s3:PutObject",
            S3Action::DeleteObject => "s3:DeleteObject",
            S3Action::CopyObject => "s3:PutObject",
            S3Action::ListBucketMultipartUploads => "s3:ListBucketMultipartUploads",
            S3Action::AbortMultipartUpload => "s3:AbortMultipartUpload",
            S3Action::InitiateMultipartUpload => "s3:PutObject",
            S3Action::UploadPart => "s3:PutObject",
            S3Action::CompleteMultipartUpload => "s3:PutObject",
            S3Action::GetBucketPolicy => "s3:GetBucketPolicy",
            S3Action::PutBucketPolicy => "s3:PutBucketPolicy",
            S3Action::DeleteBucketPolicy => "s3:DeleteBucketPolicy",
        }
    }
}

/// Context extracted from a request, used for policy evaluation.
#[derive(Debug, Clone)]
pub struct RequestContext {
    pub action: S3Action,
    pub bucket: Option<String>,
    pub key: Option<String>,
}

impl RequestContext {
    /// Build the ARN for this request's resource.
    pub fn resource_arn(&self) -> String {
        match (&self.bucket, &self.key) {
            (Some(bucket), Some(key)) => format!("arn:aws:s3:::{}/{}", bucket, key),
            (Some(bucket), None) => format!("arn:aws:s3:::{}", bucket),
            _ => "arn:aws:s3:::*".to_string(),
        }
    }
}

/// Resolve the S3 action from an incoming HTTP request.
pub fn resolve_action(req: &Request<Incoming>) -> RequestContext {
    let method = req.method().clone();
    let path = req.uri().path().to_string();
    let query_string = req.uri().query().unwrap_or("").to_string();
    let query = parse_query_simple(&query_string);

    let segments: Vec<&str> = path
        .trim_start_matches('/')
        .splitn(2, '/')
        .filter(|s| !s.is_empty())
        .collect();

    match segments.len() {
        0 => {
            // Root: GET / -> ListBuckets
            RequestContext {
                action: S3Action::ListAllMyBuckets,
                bucket: None,
                key: None,
            }
        }
        1 => {
            let bucket = percent_decode(segments[0]);
            let has_policy = query.contains_key("policy");
            let has_uploads = query.contains_key("uploads");

            let action = match (&method, has_policy, has_uploads) {
                (&Method::GET, true, _) => S3Action::GetBucketPolicy,
                (&Method::PUT, true, _) => S3Action::PutBucketPolicy,
                (&Method::DELETE, true, _) => S3Action::DeleteBucketPolicy,
                (&Method::GET, _, true) => S3Action::ListBucketMultipartUploads,
                (&Method::GET, _, _) => S3Action::ListBucket,
                (&Method::PUT, _, _) => S3Action::CreateBucket,
                (&Method::DELETE, _, _) => S3Action::DeleteBucket,
                (&Method::HEAD, _, _) => S3Action::HeadBucket,
                _ => S3Action::ListBucket,
            };

            RequestContext {
                action,
                bucket: Some(bucket),
                key: None,
            }
        }
        2 => {
            let bucket = percent_decode(segments[0]);
            let key = percent_decode(segments[1]);

            let has_copy_source = req.headers().contains_key("x-amz-copy-source");
            let has_part_number = query.contains_key("partNumber");
            let has_upload_id = query.contains_key("uploadId");
            let has_uploads = query.contains_key("uploads");

            let action = match &method {
                &Method::PUT if has_part_number && has_upload_id => S3Action::UploadPart,
                &Method::PUT if has_copy_source => S3Action::CopyObject,
                &Method::PUT => S3Action::PutObject,
                &Method::GET => S3Action::GetObject,
                &Method::HEAD => S3Action::HeadObject,
                &Method::DELETE if has_upload_id => S3Action::AbortMultipartUpload,
                &Method::DELETE => S3Action::DeleteObject,
                &Method::POST if has_uploads => S3Action::InitiateMultipartUpload,
                &Method::POST if has_upload_id => S3Action::CompleteMultipartUpload,
                _ => S3Action::GetObject,
            };

            RequestContext {
                action,
                bucket: Some(bucket),
                key: Some(key),
            }
        }
        _ => RequestContext {
            action: S3Action::ListAllMyBuckets,
            bucket: None,
            key: None,
        },
    }
}

fn parse_query_simple(query_string: &str) -> HashMap<String, String> {
    let mut map = HashMap::new();
    if query_string.is_empty() {
        return map;
    }
    for pair in query_string.split('&') {
        let mut parts = pair.splitn(2, '=');
        let key = parts.next().unwrap_or("");
        let value = parts.next().unwrap_or("");
        map.insert(key.to_string(), value.to_string());
    }
    map
}

fn percent_decode(s: &str) -> String {
    percent_encoding::percent_decode_str(s)
        .decode_utf8_lossy()
        .to_string()
}
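As a quick illustration of how these pieces compose (a sketch, not part of the diff; the bucket and key values are made up):

let ctx = RequestContext {
    action: S3Action::GetObject,
    bucket: Some("photos".to_string()),
    key: Some("2024/cat.png".to_string()),
};
// HeadObject maps to the same IAM action, per iam_action() above.
assert_eq!(ctx.action.iam_action(), "s3:GetObject");
// Object requests produce an object-level ARN for policy matching.
assert_eq!(ctx.resource_arn(), "arn:aws:s3:::photos/2024/cat.png");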
310
rust/src/auth.rs
Normal file
@@ -0,0 +1,310 @@
use hmac::{Hmac, Mac};
use hyper::body::Incoming;
use hyper::Request;
use sha2::{Digest, Sha256};
use std::collections::HashMap;

use crate::config::{Credential, S3Config};
use crate::s3_error::S3Error;

type HmacSha256 = Hmac<Sha256>;

/// The identity of an authenticated caller.
#[derive(Debug, Clone)]
pub struct AuthenticatedIdentity {
    pub access_key_id: String,
}

/// Parsed components of an AWS4-HMAC-SHA256 Authorization header.
struct SigV4Header {
    access_key_id: String,
    date_stamp: String,
    region: String,
    signed_headers: Vec<String>,
    signature: String,
}

/// Verify the request's SigV4 signature. Returns the caller identity on success.
pub fn verify_request(
    req: &Request<Incoming>,
    config: &S3Config,
) -> Result<AuthenticatedIdentity, S3Error> {
    let auth_header = req
        .headers()
        .get("authorization")
        .and_then(|v| v.to_str().ok())
        .unwrap_or("");

    // Reject SigV2
    if auth_header.starts_with("AWS ") {
        return Err(S3Error::authorization_header_malformed());
    }

    if !auth_header.starts_with("AWS4-HMAC-SHA256") {
        return Err(S3Error::authorization_header_malformed());
    }

    let parsed = parse_auth_header(auth_header)?;

    // Look up credential
    let credential = find_credential(&parsed.access_key_id, config)
        .ok_or_else(S3Error::invalid_access_key_id)?;

    // Get x-amz-date
    let amz_date = req
        .headers()
        .get("x-amz-date")
        .and_then(|v| v.to_str().ok())
        .or_else(|| {
            req.headers()
                .get("date")
                .and_then(|v| v.to_str().ok())
        })
        .ok_or_else(|| S3Error::missing_security_header("Missing x-amz-date header"))?;

    // Enforce 15-min clock skew
    check_clock_skew(amz_date)?;

    // Get payload hash
    let content_sha256 = req
        .headers()
        .get("x-amz-content-sha256")
        .and_then(|v| v.to_str().ok())
        .unwrap_or("UNSIGNED-PAYLOAD");

    // Build canonical request
    let canonical_request = build_canonical_request(req, &parsed.signed_headers, content_sha256);

    // Build string to sign
    let scope = format!(
        "{}/{}/s3/aws4_request",
        parsed.date_stamp, parsed.region
    );
    let canonical_hash = hex::encode(Sha256::digest(canonical_request.as_bytes()));
    let string_to_sign = format!(
        "AWS4-HMAC-SHA256\n{}\n{}\n{}",
        amz_date, scope, canonical_hash
    );

    // Derive signing key
    let signing_key = derive_signing_key(
        &credential.secret_access_key,
        &parsed.date_stamp,
        &parsed.region,
    );

    // Compute signature
    let computed = hmac_sha256(&signing_key, string_to_sign.as_bytes());
    let computed_hex = hex::encode(&computed);

    // Constant-time comparison
    if !constant_time_eq(computed_hex.as_bytes(), parsed.signature.as_bytes()) {
        return Err(S3Error::signature_does_not_match());
    }

    Ok(AuthenticatedIdentity {
        access_key_id: parsed.access_key_id,
    })
}

/// Parse the Authorization header into its components.
fn parse_auth_header(header: &str) -> Result<SigV4Header, S3Error> {
    // Format: AWS4-HMAC-SHA256 Credential=KEY/YYYYMMDD/region/s3/aws4_request, SignedHeaders=h1;h2, Signature=hex
    let after_algo = header
        .strip_prefix("AWS4-HMAC-SHA256")
        .ok_or_else(S3Error::authorization_header_malformed)?
        .trim();

    let mut credential_str = None;
    let mut signed_headers_str = None;
    let mut signature_str = None;

    for part in after_algo.split(',') {
        let part = part.trim();
        if let Some(val) = part.strip_prefix("Credential=") {
            credential_str = Some(val.trim());
        } else if let Some(val) = part.strip_prefix("SignedHeaders=") {
            signed_headers_str = Some(val.trim());
        } else if let Some(val) = part.strip_prefix("Signature=") {
            signature_str = Some(val.trim());
        }
    }

    let credential_str = credential_str
        .ok_or_else(S3Error::authorization_header_malformed)?;
    let signed_headers_str = signed_headers_str
        .ok_or_else(S3Error::authorization_header_malformed)?;
    let signature = signature_str
        .ok_or_else(S3Error::authorization_header_malformed)?
        .to_string();

    // Parse credential: KEY/YYYYMMDD/region/s3/aws4_request
    let cred_parts: Vec<&str> = credential_str.splitn(5, '/').collect();
    if cred_parts.len() < 5 {
        return Err(S3Error::authorization_header_malformed());
    }

    let access_key_id = cred_parts[0].to_string();
    let date_stamp = cred_parts[1].to_string();
    let region = cred_parts[2].to_string();

    let signed_headers: Vec<String> = signed_headers_str
        .split(';')
        .map(|s| s.trim().to_lowercase())
        .collect();

    Ok(SigV4Header {
        access_key_id,
        date_stamp,
        region,
        signed_headers,
        signature,
    })
}

/// Find a credential by access key ID.
fn find_credential<'a>(access_key_id: &str, config: &'a S3Config) -> Option<&'a Credential> {
    config
        .auth
        .credentials
        .iter()
        .find(|c| c.access_key_id == access_key_id)
}

/// Check clock skew (15 minutes max).
fn check_clock_skew(amz_date: &str) -> Result<(), S3Error> {
    // Parse ISO 8601 basic format: YYYYMMDDTHHMMSSZ
    let parsed = chrono::NaiveDateTime::parse_from_str(amz_date, "%Y%m%dT%H%M%SZ")
        .map_err(|_| S3Error::authorization_header_malformed())?;

    let request_time = chrono::DateTime::<chrono::Utc>::from_naive_utc_and_offset(parsed, chrono::Utc);
    let now = chrono::Utc::now();
    let diff = (now - request_time).num_seconds().unsigned_abs();

    if diff > 15 * 60 {
        return Err(S3Error::request_time_too_skewed());
    }

    Ok(())
}

/// Build the canonical request string.
fn build_canonical_request(
    req: &Request<Incoming>,
    signed_headers: &[String],
    payload_hash: &str,
) -> String {
    let method = req.method().as_str();
    let uri_path = req.uri().path();

    // Canonical URI: the path, already percent-encoded by the client
    let canonical_uri = if uri_path.is_empty() { "/" } else { uri_path };

    // Canonical query string: sorted key=value pairs
    let canonical_query = build_canonical_query(req.uri().query().unwrap_or(""));

    // Canonical headers: sorted by lowercase header name
    let canonical_headers = build_canonical_headers(req, signed_headers);

    // Signed headers string
    let signed_headers_str = signed_headers.join(";");

    // Payload hash: the UNSIGNED-PAYLOAD and STREAMING-AWS4-HMAC-SHA256-PAYLOAD
    // sentinels, as well as a precomputed hex digest, are all used verbatim.
    let effective_payload_hash = payload_hash.to_string();

    format!(
        "{}\n{}\n{}\n{}\n{}\n{}",
        method,
        canonical_uri,
        canonical_query,
        canonical_headers,
        signed_headers_str,
        effective_payload_hash
    )
}

/// Build canonical query string (sorted key=value pairs).
fn build_canonical_query(query: &str) -> String {
    if query.is_empty() {
        return String::new();
    }

    let mut pairs: Vec<(String, String)> = Vec::new();
    for pair in query.split('&') {
        let mut parts = pair.splitn(2, '=');
        let key = parts.next().unwrap_or("");
        let value = parts.next().unwrap_or("");
        pairs.push((key.to_string(), value.to_string()));
    }
    pairs.sort();

    pairs
        .iter()
        .map(|(k, v)| format!("{}={}", k, v))
        .collect::<Vec<_>>()
        .join("&")
}

/// Build canonical headers string.
fn build_canonical_headers(req: &Request<Incoming>, signed_headers: &[String]) -> String {
    let mut header_map: HashMap<String, Vec<String>> = HashMap::new();

    for (name, value) in req.headers() {
        let name_lower = name.as_str().to_lowercase();
        if signed_headers.contains(&name_lower) {
            if let Ok(val) = value.to_str() {
                header_map
                    .entry(name_lower)
                    .or_default()
                    .push(val.trim().to_string());
            }
        }
    }

    let mut result = String::new();
    for header_name in signed_headers {
        let values = header_map
            .get(header_name)
            .map(|v| v.join(","))
            .unwrap_or_default();
        result.push_str(header_name);
        result.push(':');
        result.push_str(&values);
        result.push('\n');
    }
    result
}

/// Derive the signing key via 4-step HMAC chain.
fn derive_signing_key(secret_key: &str, date_stamp: &str, region: &str) -> Vec<u8> {
    let k_secret = format!("AWS4{}", secret_key);
    let k_date = hmac_sha256(k_secret.as_bytes(), date_stamp.as_bytes());
    let k_region = hmac_sha256(&k_date, region.as_bytes());
    let k_service = hmac_sha256(&k_region, b"s3");
    hmac_sha256(&k_service, b"aws4_request")
}

/// Compute HMAC-SHA256.
fn hmac_sha256(key: &[u8], data: &[u8]) -> Vec<u8> {
    let mut mac = HmacSha256::new_from_slice(key).expect("HMAC key length is always valid");
    mac.update(data);
    mac.finalize().into_bytes().to_vec()
}

/// Constant-time byte comparison.
fn constant_time_eq(a: &[u8], b: &[u8]) -> bool {
    if a.len() != b.len() {
        return false;
    }
    let mut diff = 0u8;
    for (x, y) in a.iter().zip(b.iter()) {
        diff |= x ^ y;
    }
    diff == 0
}
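For orientation, the signing chain in miniature (a sketch with fabricated inputs, not part of the diff; derive_signing_key and hmac_sha256 are the private helpers above):

// kSigning = HMAC(HMAC(HMAC(HMAC("AWS4" + secret, date), region), "s3"), "aws4_request")
let signing_key = derive_signing_key("local-dev-secret", "20250101", "us-east-1");
// String-to-sign built exactly as in verify_request (hash input is a stand-in here).
let string_to_sign = format!(
    "AWS4-HMAC-SHA256\n{}\n{}\n{}",
    "20250101T000000Z",
    "20250101/us-east-1/s3/aws4_request",
    hex::encode(Sha256::digest(b"<canonical request bytes>")),
);
let signature = hex::encode(hmac_sha256(&signing_key, string_to_sign.as_bytes()));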
84
rust/src/config.rs
Normal file
@@ -0,0 +1,84 @@
use serde::{Deserialize, Serialize};

#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct S3Config {
    pub server: ServerConfig,
    pub storage: StorageConfig,
    pub auth: AuthConfig,
    pub cors: CorsConfig,
    pub logging: LoggingConfig,
    pub limits: LimitsConfig,
    pub multipart: MultipartConfig,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ServerConfig {
    pub port: u16,
    pub address: String,
    pub silent: bool,
    #[serde(default = "default_region")]
    pub region: String,
}

fn default_region() -> String {
    "us-east-1".to_string()
}

#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct StorageConfig {
    pub directory: String,
    pub clean_slate: bool,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct AuthConfig {
    pub enabled: bool,
    pub credentials: Vec<Credential>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Credential {
    #[serde(rename = "accessKeyId")]
    pub access_key_id: String,
    #[serde(rename = "secretAccessKey")]
    pub secret_access_key: String,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CorsConfig {
    pub enabled: bool,
    pub allowed_origins: Option<Vec<String>>,
    pub allowed_methods: Option<Vec<String>>,
    pub allowed_headers: Option<Vec<String>>,
    pub exposed_headers: Option<Vec<String>>,
    pub max_age: Option<u64>,
    pub allow_credentials: Option<bool>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct LoggingConfig {
    pub level: Option<String>,
    pub format: Option<String>,
    pub enabled: Option<bool>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct LimitsConfig {
    pub max_object_size: Option<u64>,
    pub max_metadata_size: Option<u64>,
    pub request_timeout: Option<u64>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct MultipartConfig {
    pub expiration_days: Option<u64>,
    pub cleanup_interval_minutes: Option<u64>,
}
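A minimal JSON document that these shapes accept (illustrative values only; every top-level section is required, the Option-typed fields inside cors, logging, limits, and multipart may be omitted, and region falls back to "us-east-1" via default_region):

{
  "server": { "port": 9000, "address": "127.0.0.1", "silent": false },
  "storage": { "directory": "./bucketsDir", "cleanSlate": false },
  "auth": { "enabled": true, "credentials": [{ "accessKeyId": "S3KEY", "secretAccessKey": "S3SECRET" }] },
  "cors": { "enabled": false },
  "logging": {},
  "limits": {},
  "multipart": {}
}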
46
rust/src/main.rs
Normal file
@@ -0,0 +1,46 @@
mod action;
mod auth;
mod config;
mod management;
mod policy;
mod s3_error;
mod server;
mod storage;
mod xml_response;

use clap::Parser;

#[derive(Parser)]
#[command(name = "rusts3", about = "High-performance S3-compatible server")]
struct Cli {
    /// Run in management mode (IPC via stdin/stdout)
    #[arg(long)]
    management: bool,

    /// Log level
    #[arg(long, default_value = "info")]
    log_level: String,
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let cli = Cli::parse();

    if cli.management {
        // Init tracing to stderr only (stdout reserved for IPC)
        tracing_subscriber::fmt()
            .with_writer(std::io::stderr)
            .with_env_filter(
                tracing_subscriber::EnvFilter::try_new(&cli.log_level)
                    .unwrap_or_else(|_| tracing_subscriber::EnvFilter::new("info")),
            )
            .init();

        management::management_loop().await?;
    } else {
        eprintln!("rusts3: use --management flag for IPC mode");
        std::process::exit(1);
    }

    Ok(())
}
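In other words, the binary is not meant to run as a standalone daemon: a parent process spawns it as rusts3 --management (optionally with --log-level debug) and drives it over stdin/stdout, while all tracing output stays on stderr.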
155
rust/src/management.rs
Normal file
@@ -0,0 +1,155 @@
use anyhow::Result;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::io::Write;
use tokio::io::{AsyncBufReadExt, BufReader};

use crate::config::S3Config;
use crate::server::S3Server;

#[derive(Deserialize)]
struct IpcRequest {
    id: String,
    method: String,
    params: Value,
}

#[derive(Serialize)]
struct IpcResponse {
    id: String,
    success: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    result: Option<Value>,
    #[serde(skip_serializing_if = "Option::is_none")]
    error: Option<String>,
}

#[derive(Serialize)]
struct IpcEvent {
    event: String,
    data: Value,
}

fn send_line(value: &impl Serialize) {
    let mut stdout = std::io::stdout().lock();
    serde_json::to_writer(&mut stdout, value).ok();
    stdout.write_all(b"\n").ok();
    stdout.flush().ok();
}

fn send_response(id: String, result: Value) {
    send_line(&IpcResponse {
        id,
        success: true,
        result: Some(result),
        error: None,
    });
}

fn send_error(id: String, message: String) {
    send_line(&IpcResponse {
        id,
        success: false,
        result: None,
        error: Some(message),
    });
}

pub async fn management_loop() -> Result<()> {
    // Emit ready event
    send_line(&IpcEvent {
        event: "ready".to_string(),
        data: serde_json::json!({}),
    });

    let mut server: Option<S3Server> = None;
    let stdin = BufReader::new(tokio::io::stdin());
    let mut lines = stdin.lines();

    while let Ok(Some(line)) = lines.next_line().await {
        let line = line.trim().to_string();
        if line.is_empty() {
            continue;
        }

        let req: IpcRequest = match serde_json::from_str(&line) {
            Ok(r) => r,
            Err(e) => {
                tracing::warn!("Invalid IPC request: {}", e);
                continue;
            }
        };

        let id = req.id.clone();
        let method = req.method.as_str();

        match method {
            "start" => {
                #[derive(Deserialize)]
                struct StartParams {
                    config: S3Config,
                }
                match serde_json::from_value::<StartParams>(req.params) {
                    Ok(params) => {
                        match S3Server::start(params.config).await {
                            Ok(s) => {
                                server = Some(s);
                                send_response(id, serde_json::json!({}));
                            }
                            Err(e) => {
                                send_error(id, format!("Failed to start server: {}", e));
                            }
                        }
                    }
                    Err(e) => {
                        send_error(id, format!("Invalid start params: {}", e));
                    }
                }
            }
            "stop" => {
                if let Some(s) = server.take() {
                    s.stop().await;
                }
                send_response(id, serde_json::json!({}));
            }
            "createBucket" => {
                #[derive(Deserialize)]
                struct CreateBucketParams {
                    name: String,
                }
                match serde_json::from_value::<CreateBucketParams>(req.params) {
                    Ok(params) => {
                        if let Some(ref s) = server {
                            match s.store().create_bucket(&params.name).await {
                                Ok(()) => {
                                    send_response(id, serde_json::json!({}));
                                }
                                Err(e) => {
                                    send_error(
                                        id,
                                        format!("Failed to create bucket: {}", e),
                                    );
                                }
                            }
                        } else {
                            send_error(id, "Server not started".to_string());
                        }
                    }
                    Err(e) => {
                        send_error(id, format!("Invalid createBucket params: {}", e));
                    }
                }
            }
            _ => {
                send_error(id, format!("Unknown method: {}", method));
            }
        }
    }

    // Clean shutdown
    if let Some(s) = server.take() {
        s.stop().await;
    }

    Ok(())
}
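A sample session over stdin/stdout, as implied by the handlers above ("->" is parent-to-child, "<-" is child-to-parent; the config object is elided and the ids are arbitrary):

<- {"event":"ready","data":{}}
-> {"id":"1","method":"start","params":{"config":{...}}}
<- {"id":"1","success":true,"result":{}}
-> {"id":"2","method":"createBucket","params":{"name":"demo"}}
<- {"id":"2","success":true,"result":{}}
-> {"id":"3","method":"stop","params":{}}
<- {"id":"3","success":true,"result":{}}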
429
rust/src/policy.rs
Normal file
@@ -0,0 +1,429 @@
use serde::{Deserialize, Deserializer, Serialize};
use std::collections::HashMap;
use std::path::PathBuf;
use tokio::fs;
use tokio::sync::RwLock;

use crate::action::RequestContext;
use crate::auth::AuthenticatedIdentity;
use crate::s3_error::S3Error;

// ============================
// Policy data model
// ============================

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BucketPolicy {
    #[serde(rename = "Version")]
    pub version: String,
    #[serde(rename = "Statement")]
    pub statements: Vec<PolicyStatement>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PolicyStatement {
    #[serde(rename = "Sid", default, skip_serializing_if = "Option::is_none")]
    pub sid: Option<String>,
    #[serde(rename = "Effect")]
    pub effect: PolicyEffect,
    #[serde(rename = "Principal", deserialize_with = "deserialize_principal")]
    pub principal: Principal,
    #[serde(rename = "Action", deserialize_with = "deserialize_string_or_vec")]
    pub action: Vec<String>,
    #[serde(rename = "Resource", deserialize_with = "deserialize_string_or_vec")]
    pub resource: Vec<String>,
}

#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum PolicyEffect {
    Allow,
    Deny,
}

#[derive(Debug, Clone)]
pub enum Principal {
    Wildcard,
    Aws(Vec<String>),
}

impl Serialize for Principal {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        match self {
            Principal::Wildcard => serializer.serialize_str("*"),
            Principal::Aws(ids) => {
                use serde::ser::SerializeMap;
                let mut map = serializer.serialize_map(Some(1))?;
                if ids.len() == 1 {
                    map.serialize_entry("AWS", &ids[0])?;
                } else {
                    map.serialize_entry("AWS", ids)?;
                }
                map.end()
            }
        }
    }
}

fn deserialize_principal<'de, D>(deserializer: D) -> Result<Principal, D::Error>
where
    D: Deserializer<'de>,
{
    #[derive(Deserialize)]
    #[serde(untagged)]
    enum PrincipalRaw {
        Star(String),
        Map(HashMap<String, StringOrVec>),
    }

    let raw = PrincipalRaw::deserialize(deserializer)?;
    match raw {
        PrincipalRaw::Star(s) if s == "*" => Ok(Principal::Wildcard),
        PrincipalRaw::Star(_) => Err(serde::de::Error::custom(
            "Principal string must be \"*\"",
        )),
        PrincipalRaw::Map(map) => {
            if let Some(aws) = map.get("AWS") {
                Ok(Principal::Aws(aws.clone().into_vec()))
            } else {
                Err(serde::de::Error::custom("Principal map must contain \"AWS\" key"))
            }
        }
    }
}

#[derive(Debug, Clone, Deserialize)]
#[serde(untagged)]
enum StringOrVec {
    Single(String),
    Multiple(Vec<String>),
}

impl StringOrVec {
    fn into_vec(self) -> Vec<String> {
        match self {
            StringOrVec::Single(s) => vec![s],
            StringOrVec::Multiple(v) => v,
        }
    }
}

fn deserialize_string_or_vec<'de, D>(deserializer: D) -> Result<Vec<String>, D::Error>
where
    D: Deserializer<'de>,
{
    let raw = StringOrVec::deserialize(deserializer)?;
    Ok(raw.into_vec())
}

// ============================
// Policy evaluation
// ============================

#[derive(Debug, Clone, PartialEq)]
pub enum PolicyDecision {
    Allow,
    Deny,
    NoOpinion,
}

/// Evaluate a bucket policy against a request context and caller identity.
pub fn evaluate_policy(
    policy: &BucketPolicy,
    ctx: &RequestContext,
    identity: Option<&AuthenticatedIdentity>,
) -> PolicyDecision {
    let resource_arn = ctx.resource_arn();
    let iam_action = ctx.action.iam_action();
    let mut has_allow = false;

    for stmt in &policy.statements {
        // Check principal match
        if !principal_matches(&stmt.principal, identity) {
            continue;
        }

        // Check action match
        if !action_matches(&stmt.action, iam_action) {
            continue;
        }

        // Check resource match
        if !resource_matches(&stmt.resource, &resource_arn, ctx.bucket.as_deref()) {
            continue;
        }

        // Statement matches — apply effect
        match stmt.effect {
            PolicyEffect::Deny => return PolicyDecision::Deny,
            PolicyEffect::Allow => has_allow = true,
        }
    }

    if has_allow {
        PolicyDecision::Allow
    } else {
        PolicyDecision::NoOpinion
    }
}

/// Check if the principal matches the caller.
fn principal_matches(principal: &Principal, identity: Option<&AuthenticatedIdentity>) -> bool {
    match principal {
        Principal::Wildcard => true,
        Principal::Aws(ids) => {
            if let Some(id) = identity {
                ids.iter().any(|arn| {
                    // Match against full ARN or just the access key ID
                    arn == "*" || arn.ends_with(&id.access_key_id)
                })
            } else {
                false
            }
        }
    }
}

/// Check if the action matches. Supports wildcard `s3:*` and `*`.
fn action_matches(policy_actions: &[String], request_action: &str) -> bool {
    for pa in policy_actions {
        if pa == "*" || pa == "s3:*" {
            return true;
        }
        if pa.eq_ignore_ascii_case(request_action) {
            return true;
        }
        // Simple prefix wildcard: "s3:Get*" matches "s3:GetObject"
        if let Some(prefix) = pa.strip_suffix('*') {
            if request_action
                .to_lowercase()
                .starts_with(&prefix.to_lowercase())
            {
                return true;
            }
        }
    }
    false
}

/// Check if the resource matches. Supports wildcard patterns.
fn resource_matches(policy_resources: &[String], request_arn: &str, bucket: Option<&str>) -> bool {
    for pr in policy_resources {
        if pr == "*" {
            return true;
        }
        if arn_pattern_matches(pr, request_arn) {
            return true;
        }
        // Also check bucket-level ARN if the request is for an object
        if let Some(b) = bucket {
            let bucket_arn = format!("arn:aws:s3:::{}", b);
            if arn_pattern_matches(pr, &bucket_arn) {
                return true;
            }
        }
    }
    false
}

/// Simple ARN pattern matching with `*` and `?` wildcards.
fn arn_pattern_matches(pattern: &str, value: &str) -> bool {
    // Handle trailing /* specifically: arn:aws:s3:::bucket/* matches arn:aws:s3:::bucket/anything
    if pattern.ends_with("/*") {
        let prefix = &pattern[..pattern.len() - 1]; // Remove trailing *
        if value.starts_with(prefix) {
            return true;
        }
        // Also match exact bucket without trailing /
        let bucket_only = &pattern[..pattern.len() - 2];
        if value == bucket_only {
            return true;
        }
    }

    simple_wildcard_match(pattern, value)
}

fn simple_wildcard_match(pattern: &str, value: &str) -> bool {
    let pat_bytes = pattern.as_bytes();
    let val_bytes = value.as_bytes();
    let mut pi = 0;
    let mut vi = 0;
    let mut star_pi = usize::MAX;
    let mut star_vi = 0;

    while vi < val_bytes.len() {
        if pi < pat_bytes.len() && (pat_bytes[pi] == b'?' || pat_bytes[pi] == val_bytes[vi]) {
            pi += 1;
            vi += 1;
        } else if pi < pat_bytes.len() && pat_bytes[pi] == b'*' {
            star_pi = pi;
            star_vi = vi;
            pi += 1;
        } else if star_pi != usize::MAX {
            pi = star_pi + 1;
            star_vi += 1;
            vi = star_vi;
        } else {
            return false;
        }
    }

    while pi < pat_bytes.len() && pat_bytes[pi] == b'*' {
        pi += 1;
    }

    pi == pat_bytes.len()
}

// ============================
// Policy validation
// ============================

const MAX_POLICY_SIZE: usize = 20 * 1024; // 20 KB

pub fn validate_policy(json: &str) -> Result<BucketPolicy, S3Error> {
    if json.len() > MAX_POLICY_SIZE {
        return Err(S3Error::malformed_policy("Policy exceeds maximum size of 20KB"));
    }

    let policy: BucketPolicy =
        serde_json::from_str(json).map_err(|e| S3Error::malformed_policy(&e.to_string()))?;

    if policy.version != "2012-10-17" {
        return Err(S3Error::malformed_policy(
            "Policy version must be \"2012-10-17\"",
        ));
    }

    if policy.statements.is_empty() {
        return Err(S3Error::malformed_policy(
            "Policy must contain at least one statement",
        ));
    }

    for (i, stmt) in policy.statements.iter().enumerate() {
        if stmt.action.is_empty() {
            return Err(S3Error::malformed_policy(&format!(
                "Statement {} has no actions",
                i
            )));
        }
        for action in &stmt.action {
            if action != "*" && !action.starts_with("s3:") {
                return Err(S3Error::malformed_policy(&format!(
                    "Action \"{}\" must start with \"s3:\"",
                    action
                )));
            }
        }
        if stmt.resource.is_empty() {
            return Err(S3Error::malformed_policy(&format!(
                "Statement {} has no resources",
                i
            )));
        }
        for resource in &stmt.resource {
            if resource != "*" && !resource.starts_with("arn:aws:s3:::") {
                return Err(S3Error::malformed_policy(&format!(
                    "Resource \"{}\" must start with \"arn:aws:s3:::\"",
                    resource
                )));
            }
        }
    }

    Ok(policy)
}

// ============================
// PolicyStore — in-memory cache + disk
// ============================

pub struct PolicyStore {
    policies: RwLock<HashMap<String, BucketPolicy>>,
    policies_dir: PathBuf,
}

impl PolicyStore {
    pub fn new(policies_dir: PathBuf) -> Self {
        Self {
            policies: RwLock::new(HashMap::new()),
            policies_dir,
        }
    }

    /// Load all policies from disk into cache.
    pub async fn load_from_disk(&self) -> anyhow::Result<()> {
        let dir = &self.policies_dir;
        if !dir.exists() {
            return Ok(());
        }

        let mut entries = fs::read_dir(dir).await?;
        let mut policies = HashMap::new();

        while let Some(entry) = entries.next_entry().await? {
            let name = entry.file_name().to_string_lossy().to_string();
            if let Some(bucket) = name.strip_suffix(".policy.json") {
                match fs::read_to_string(entry.path()).await {
                    Ok(json) => match serde_json::from_str::<BucketPolicy>(&json) {
                        Ok(policy) => {
                            tracing::info!("Loaded policy for bucket: {}", bucket);
                            policies.insert(bucket.to_string(), policy);
                        }
                        Err(e) => {
                            tracing::warn!("Failed to parse policy for {}: {}", bucket, e);
                        }
                    },
                    Err(e) => {
                        tracing::warn!("Failed to read policy file {}: {}", name, e);
                    }
                }
            }
        }

        let mut cache = self.policies.write().await;
        *cache = policies;
        Ok(())
    }

    /// Get a policy for a bucket.
    pub async fn get_policy(&self, bucket: &str) -> Option<BucketPolicy> {
        let cache = self.policies.read().await;
        cache.get(bucket).cloned()
    }

    /// Store a policy for a bucket (atomic write + cache update).
    pub async fn put_policy(&self, bucket: &str, policy: BucketPolicy) -> anyhow::Result<()> {
        let json = serde_json::to_string_pretty(&policy)?;

        // Atomic write: temp file + rename
        let policy_path = self.policies_dir.join(format!("{}.policy.json", bucket));
        let temp_path = self
            .policies_dir
            .join(format!("{}.policy.json.tmp", bucket));

        fs::write(&temp_path, &json).await?;
        fs::rename(&temp_path, &policy_path).await?;

        // Update cache
        let mut cache = self.policies.write().await;
        cache.insert(bucket.to_string(), policy);

        Ok(())
    }

    /// Delete a policy for a bucket.
    pub async fn delete_policy(&self, bucket: &str) -> anyhow::Result<()> {
        let policy_path = self.policies_dir.join(format!("{}.policy.json", bucket));
        let _ = fs::remove_file(&policy_path).await;

        let mut cache = self.policies.write().await;
        cache.remove(bucket);

        Ok(())
    }
}
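A policy that validate_policy accepts and that evaluate_policy resolves to Allow for anonymous object reads (illustrative; the bucket name is made up):

{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Sid": "PublicRead",
      "Effect": "Allow",
      "Principal": "*",
      "Action": ["s3:GetObject", "s3:ListBucket"],
      "Resource": "arn:aws:s3:::public-assets/*"
    }
  ]
}

An anonymous GET of arn:aws:s3:::public-assets/logo.png matches the wildcard principal, the s3:GetObject action, and the trailing-/* resource pattern, so the decision is Allow; any other action falls through to NoOpinion and then to the server's default (deny for anonymous callers).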
106
rust/src/s3_error.rs
Normal file
@@ -0,0 +1,106 @@
use hyper::StatusCode;

#[derive(Debug, thiserror::Error)]
#[error("S3Error({code}): {message}")]
pub struct S3Error {
    pub code: String,
    pub message: String,
    pub status: StatusCode,
}

impl S3Error {
    pub fn new(code: &str, message: &str, status: StatusCode) -> Self {
        Self {
            code: code.to_string(),
            message: message.to_string(),
            status,
        }
    }

    pub fn no_such_key() -> Self {
        Self::new("NoSuchKey", "The specified key does not exist.", StatusCode::NOT_FOUND)
    }

    pub fn no_such_bucket() -> Self {
        Self::new("NoSuchBucket", "The specified bucket does not exist", StatusCode::NOT_FOUND)
    }

    pub fn bucket_not_empty() -> Self {
        Self::new("BucketNotEmpty", "The bucket you tried to delete is not empty", StatusCode::CONFLICT)
    }

    pub fn access_denied() -> Self {
        Self::new("AccessDenied", "Access Denied", StatusCode::FORBIDDEN)
    }

    pub fn no_such_upload() -> Self {
        Self::new("NoSuchUpload", "The specified upload does not exist", StatusCode::NOT_FOUND)
    }

    pub fn invalid_part_number() -> Self {
        Self::new("InvalidPartNumber", "Part number must be between 1 and 10000", StatusCode::BAD_REQUEST)
    }

    pub fn internal_error(msg: &str) -> Self {
        Self::new("InternalError", msg, StatusCode::INTERNAL_SERVER_ERROR)
    }

    pub fn invalid_request(msg: &str) -> Self {
        Self::new("InvalidRequest", msg, StatusCode::BAD_REQUEST)
    }

    pub fn signature_does_not_match() -> Self {
        Self::new(
            "SignatureDoesNotMatch",
            "The request signature we calculated does not match the signature you provided.",
            StatusCode::FORBIDDEN,
        )
    }

    pub fn invalid_access_key_id() -> Self {
        Self::new(
            "InvalidAccessKeyId",
            "The AWS Access Key Id you provided does not exist in our records.",
            StatusCode::FORBIDDEN,
        )
    }

    pub fn request_time_too_skewed() -> Self {
        Self::new(
            "RequestTimeTooSkewed",
            "The difference between the request time and the current time is too large.",
            StatusCode::FORBIDDEN,
        )
    }

    pub fn authorization_header_malformed() -> Self {
        Self::new(
            "AuthorizationHeaderMalformed",
            "The authorization header is malformed.",
            StatusCode::BAD_REQUEST,
        )
    }

    pub fn missing_security_header(msg: &str) -> Self {
        Self::new("MissingSecurityHeader", msg, StatusCode::BAD_REQUEST)
    }

    pub fn no_such_bucket_policy() -> Self {
        Self::new(
            "NoSuchBucketPolicy",
            "The bucket policy does not exist.",
            StatusCode::NOT_FOUND,
        )
    }

    pub fn malformed_policy(msg: &str) -> Self {
        Self::new("MalformedPolicy", msg, StatusCode::BAD_REQUEST)
    }

    pub fn to_xml(&self) -> String {
        format!(
            "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<Error><Code>{}</Code><Message>{}</Message></Error>",
            self.code, self.message
        )
    }
}
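On the wire, to_xml renders S3's flat error envelope; for no_such_key(), for example:

<?xml version="1.0" encoding="UTF-8"?>
<Error><Code>NoSuchKey</Code><Message>The specified key does not exist.</Message></Error>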
971
rust/src/server.rs
Normal file
@@ -0,0 +1,971 @@
use anyhow::Result;
use bytes::Bytes;
use futures_core::Stream;
use http_body_util::BodyExt;
use hyper::body::Incoming;
use hyper::server::conn::http1;
use hyper::service::service_fn;
use hyper::{Method, Request, Response, StatusCode};
use hyper_util::rt::TokioIo;
use std::collections::HashMap;
use std::net::SocketAddr;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};
use tokio::io::AsyncReadExt;
use tokio::net::TcpListener;
use tokio::sync::watch;
use tokio_util::io::ReaderStream;
use uuid::Uuid;

use crate::action::{self, RequestContext, S3Action};
use crate::auth::{self, AuthenticatedIdentity};
use crate::config::S3Config;
use crate::policy::{self, PolicyDecision, PolicyStore};
use crate::s3_error::S3Error;
use crate::storage::FileStore;
use crate::xml_response;

pub struct S3Server {
    store: Arc<FileStore>,
    shutdown_tx: watch::Sender<bool>,
    server_handle: tokio::task::JoinHandle<()>,
}

impl S3Server {
    pub async fn start(config: S3Config) -> Result<Self> {
        let store = Arc::new(FileStore::new(config.storage.directory.clone().into()));

        // Initialize or reset storage
        if config.storage.clean_slate {
            store.reset().await?;
        } else {
            store.initialize().await?;
        }

        // Initialize policy store
        let policy_store = Arc::new(PolicyStore::new(store.policies_dir()));
        policy_store.load_from_disk().await?;

        let addr: SocketAddr = format!("{}:{}", config.address(), config.server.port)
            .parse()?;

        let listener = TcpListener::bind(addr).await?;
        let (shutdown_tx, shutdown_rx) = watch::channel(false);

        let server_store = store.clone();
        let server_config = config.clone();
        let server_policy_store = policy_store.clone();

        let server_handle = tokio::spawn(async move {
            loop {
                let mut rx = shutdown_rx.clone();

                tokio::select! {
                    result = listener.accept() => {
                        match result {
                            Ok((stream, _remote_addr)) => {
                                let io = TokioIo::new(stream);
                                let store = server_store.clone();
                                let cfg = server_config.clone();
                                let ps = server_policy_store.clone();

                                tokio::spawn(async move {
                                    let svc = service_fn(move |req: Request<Incoming>| {
                                        let store = store.clone();
                                        let cfg = cfg.clone();
                                        let ps = ps.clone();
                                        async move {
                                            handle_request(req, store, cfg, ps).await
                                        }
                                    });

                                    if let Err(e) = http1::Builder::new()
                                        .keep_alive(true)
                                        .serve_connection(io, svc)
                                        .await
                                    {
                                        if !e.is_incomplete_message() {
                                            tracing::error!("Connection error: {}", e);
                                        }
                                    }
                                });
                            }
                            Err(e) => {
                                tracing::error!("Accept error: {}", e);
                            }
                        }
                    }
                    _ = rx.changed() => {
                        break;
                    }
                }
            }
        });

        if !config.server.silent {
            tracing::info!("S3 server listening on {}", addr);
        }

        Ok(Self {
            store,
            shutdown_tx,
            server_handle,
        })
    }

    pub async fn stop(self) {
        let _ = self.shutdown_tx.send(true);
        let _ = self.server_handle.await;
    }

    pub fn store(&self) -> &FileStore {
        &self.store
    }
}

impl S3Config {
    fn address(&self) -> &str {
        &self.server.address
    }
}

// ============================
// Request handling
// ============================

type BoxBody = http_body_util::combinators::BoxBody<Bytes, Box<dyn std::error::Error + Send + Sync>>;

fn full_body(data: impl Into<Bytes>) -> BoxBody {
    http_body_util::Full::new(data.into())
        .map_err(|never: std::convert::Infallible| -> Box<dyn std::error::Error + Send + Sync> { match never {} })
        .boxed()
}

fn empty_body() -> BoxBody {
    http_body_util::Empty::new()
        .map_err(|never: std::convert::Infallible| -> Box<dyn std::error::Error + Send + Sync> { match never {} })
        .boxed()
}

fn stream_body(reader: tokio::fs::File, content_length: u64) -> BoxBody {
    let stream = ReaderStream::with_capacity(reader.take(content_length), 64 * 1024);
    let mapped = FrameStream { inner: stream };
    http_body_util::StreamBody::new(mapped).boxed()
}

/// Adapter that converts ReaderStream into a Stream of Frame<Bytes>
struct FrameStream {
    inner: ReaderStream<tokio::io::Take<tokio::fs::File>>,
}

impl Stream for FrameStream {
    type Item = Result<hyper::body::Frame<Bytes>, Box<dyn std::error::Error + Send + Sync>>;

    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let inner = unsafe { self.map_unchecked_mut(|s| &mut s.inner) };
        match inner.poll_next(cx) {
            Poll::Ready(Some(Ok(bytes))) => {
                Poll::Ready(Some(Ok(hyper::body::Frame::data(bytes))))
            }
            Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(Box::new(e) as Box<dyn std::error::Error + Send + Sync>))),
            Poll::Ready(None) => Poll::Ready(None),
            Poll::Pending => Poll::Pending,
        }
    }
}

fn xml_response(status: StatusCode, xml: String, request_id: &str) -> Response<BoxBody> {
    Response::builder()
        .status(status)
        .header("content-type", "application/xml")
        .header("x-amz-request-id", request_id)
        .body(full_body(xml))
        .unwrap()
}

fn empty_response(status: StatusCode, request_id: &str) -> Response<BoxBody> {
    Response::builder()
        .status(status)
        .header("x-amz-request-id", request_id)
        .body(empty_body())
        .unwrap()
}

fn s3_error_response(err: &S3Error, request_id: &str) -> Response<BoxBody> {
    let xml = err.to_xml();
    Response::builder()
        .status(err.status)
        .header("content-type", "application/xml")
        .header("x-amz-request-id", request_id)
        .body(full_body(xml))
        .unwrap()
}

async fn handle_request(
    req: Request<Incoming>,
    store: Arc<FileStore>,
    config: S3Config,
    policy_store: Arc<PolicyStore>,
) -> Result<Response<BoxBody>, std::convert::Infallible> {
    let request_id = Uuid::new_v4().to_string();
    let method = req.method().clone();
    let uri = req.uri().clone();
    let start = std::time::Instant::now();

    // Handle CORS preflight
    if config.cors.enabled && method == Method::OPTIONS {
        let resp = build_cors_preflight(&config, &request_id);
        return Ok(resp);
    }

    // Step 1: Resolve S3 action from request
    let request_ctx = action::resolve_action(&req);

    // Step 2: Auth + policy pipeline
    if config.auth.enabled {
        // Attempt authentication
        let identity = {
            let has_auth_header = req
                .headers()
                .get("authorization")
                .and_then(|v| v.to_str().ok())
                .map(|s| !s.is_empty())
                .unwrap_or(false);

            if has_auth_header {
                match auth::verify_request(&req, &config) {
                    Ok(id) => Some(id),
                    Err(e) => {
                        tracing::warn!("Auth failed: {}", e.message);
                        return Ok(s3_error_response(&e, &request_id));
                    }
                }
            } else {
                None // Anonymous request
            }
        };

        // Step 3: Authorization (policy evaluation)
        if let Err(e) = authorize_request(&request_ctx, identity.as_ref(), &policy_store).await {
            return Ok(s3_error_response(&e, &request_id));
        }
    }

    // Route and handle
    let mut response = match route_request(req, store, &config, &request_id, &policy_store).await {
        Ok(resp) => resp,
        Err(err) => {
            if let Some(s3err) = err.downcast_ref::<S3Error>() {
                s3_error_response(s3err, &request_id)
            } else {
                tracing::error!("Internal error: {}", err);
                let s3err = S3Error::internal_error(&err.to_string());
                s3_error_response(&s3err, &request_id)
            }
        }
    };

    // Add CORS headers if enabled
    if config.cors.enabled {
        add_cors_headers(response.headers_mut(), &config);
    }

    let duration = start.elapsed();
    tracing::info!(
        method = %method,
        path = %uri.path(),
        status = %response.status().as_u16(),
        duration_ms = %duration.as_millis(),
        "request"
    );

    Ok(response)
}
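So each request passes through four stages in order: action resolution (which S3 operation this is), authentication (SigV4 verification, skipped for anonymous requests), authorization (bucket-policy evaluation, below), and finally routing to a handler; CORS headers and access logging wrap the result.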
/// Authorize a request based on bucket policies and authentication state.
async fn authorize_request(
    ctx: &RequestContext,
    identity: Option<&AuthenticatedIdentity>,
    policy_store: &PolicyStore,
) -> Result<(), S3Error> {
    // ListAllMyBuckets requires authentication (no bucket to apply policy to)
    if ctx.action == S3Action::ListAllMyBuckets {
        if identity.is_none() {
            return Err(S3Error::access_denied());
        }
        return Ok(());
    }

    // If there's a bucket, check its policy
    if let Some(ref bucket) = ctx.bucket {
        if let Some(bucket_policy) = policy_store.get_policy(bucket).await {
            let decision = policy::evaluate_policy(&bucket_policy, ctx, identity);
            match decision {
                PolicyDecision::Deny => return Err(S3Error::access_denied()),
                PolicyDecision::Allow => return Ok(()),
                PolicyDecision::NoOpinion => {
                    // Fall through to default behavior
                }
            }
        }
    }

    // Default: authenticated users get full access, anonymous denied
    if identity.is_none() {
        return Err(S3Error::access_denied());
    }

    Ok(())
}

// ============================
// Routing
// ============================

async fn route_request(
    req: Request<Incoming>,
    store: Arc<FileStore>,
    _config: &S3Config,
    request_id: &str,
    policy_store: &Arc<PolicyStore>,
) -> Result<Response<BoxBody>> {
    let method = req.method().clone();
    let path = req.uri().path().to_string();
    let query_string = req.uri().query().unwrap_or("").to_string();
    let query = parse_query(&query_string);

    // Parse path: /, /{bucket}, /{bucket}/{key...}
    let segments: Vec<&str> = path
        .trim_start_matches('/')
        .splitn(2, '/')
        .filter(|s| !s.is_empty())
        .collect();

    match segments.len() {
        0 => {
            // Root: GET / -> ListBuckets
            match method {
                Method::GET => handle_list_buckets(store, request_id).await,
                _ => Ok(empty_response(StatusCode::METHOD_NOT_ALLOWED, request_id)),
            }
        }
        1 => {
            // Bucket level: /{bucket}
            let bucket = percent_decode(segments[0]);

            // Check for ?policy query parameter
            if query.contains_key("policy") {
                return match method {
                    Method::GET => handle_get_bucket_policy(policy_store, &bucket, request_id).await,
                    Method::PUT => handle_put_bucket_policy(req, &store, policy_store, &bucket, request_id).await,
                    Method::DELETE => handle_delete_bucket_policy(policy_store, &bucket, request_id).await,
                    _ => Ok(empty_response(StatusCode::METHOD_NOT_ALLOWED, request_id)),
                };
            }

            match method {
                Method::GET => {
                    if query.contains_key("uploads") {
                        handle_list_multipart_uploads(store, &bucket, request_id).await
                    } else {
                        handle_list_objects(store, &bucket, &query, request_id).await
                    }
                }
                Method::PUT => handle_create_bucket(store, &bucket, request_id).await,
                Method::DELETE => handle_delete_bucket(store, &bucket, request_id, policy_store).await,
                Method::HEAD => handle_head_bucket(store, &bucket, request_id).await,
                _ => Ok(empty_response(StatusCode::METHOD_NOT_ALLOWED, request_id)),
            }
        }
        2 => {
            // Object level: /{bucket}/{key...}
            let bucket = percent_decode(segments[0]);
            let key = percent_decode(segments[1]);

            match method {
                Method::PUT => {
                    if query.contains_key("partNumber") && query.contains_key("uploadId") {
                        handle_upload_part(req, store, &query, request_id).await
                    } else if req.headers().contains_key("x-amz-copy-source") {
                        handle_copy_object(req, store, &bucket, &key, request_id).await
                    } else {
                        handle_put_object(req, store, &bucket, &key, request_id).await
                    }
                }
                Method::GET => {
                    handle_get_object(req, store, &bucket, &key, request_id).await
                }
                Method::HEAD => {
                    handle_head_object(store, &bucket, &key, request_id).await
                }
                Method::DELETE => {
                    if query.contains_key("uploadId") {
                        let upload_id = query.get("uploadId").unwrap();
                        handle_abort_multipart(store, upload_id, request_id).await
                    } else {
                        handle_delete_object(store, &bucket, &key, request_id).await
                    }
                }
                Method::POST => {
                    if query.contains_key("uploads") {
                        handle_initiate_multipart(req, store, &bucket, &key, request_id).await
                    } else if query.contains_key("uploadId") {
                        let upload_id = query.get("uploadId").unwrap().clone();
                        handle_complete_multipart(req, store, &bucket, &key, &upload_id, request_id).await
                    } else {
                        let err = S3Error::invalid_request("Invalid POST request");
                        Ok(s3_error_response(&err, request_id))
                    }
                }
                _ => Ok(empty_response(StatusCode::METHOD_NOT_ALLOWED, request_id)),
            }
        }
        _ => Ok(empty_response(StatusCode::BAD_REQUEST, request_id)),
    }
}

// ============================
// Handlers
// ============================

async fn handle_list_buckets(
    store: Arc<FileStore>,
    request_id: &str,
) -> Result<Response<BoxBody>> {
    let buckets = store.list_buckets().await?;
    let xml = xml_response::list_buckets_xml(&buckets);
    Ok(xml_response(StatusCode::OK, xml, request_id))
}

async fn handle_create_bucket(
    store: Arc<FileStore>,
    bucket: &str,
    request_id: &str,
) -> Result<Response<BoxBody>> {
    store.create_bucket(bucket).await?;
    Ok(empty_response(StatusCode::OK, request_id))
}

async fn handle_delete_bucket(
    store: Arc<FileStore>,
    bucket: &str,
    request_id: &str,
    policy_store: &Arc<PolicyStore>,
) -> Result<Response<BoxBody>> {
    store.delete_bucket(bucket).await?;
    // Clean up bucket policy on deletion
    let _ = policy_store.delete_policy(bucket).await;
    Ok(empty_response(StatusCode::NO_CONTENT, request_id))
}

async fn handle_head_bucket(
    store: Arc<FileStore>,
    bucket: &str,
    request_id: &str,
) -> Result<Response<BoxBody>> {
    if store.bucket_exists(bucket).await {
        Ok(empty_response(StatusCode::OK, request_id))
    } else {
        Err(S3Error::no_such_bucket().into())
    }
}

async fn handle_list_objects(
    store: Arc<FileStore>,
    bucket: &str,
    query: &HashMap<String, String>,
    request_id: &str,
) -> Result<Response<BoxBody>> {
    let prefix = query.get("prefix").map(|s| s.as_str()).unwrap_or("");
    let delimiter = query.get("delimiter").map(|s| s.as_str()).unwrap_or("");
    let max_keys = query
        .get("max-keys")
        .and_then(|s| s.parse().ok())
        .unwrap_or(1000usize);
    let continuation_token = query.get("continuation-token").map(|s| s.as_str());
    let is_v2 = query.get("list-type").map(|s| s.as_str()) == Some("2");

    let result = store
        .list_objects(bucket, prefix, delimiter, max_keys, continuation_token)
        .await?;

    let xml = if is_v2 {
        xml_response::list_objects_v2_xml(bucket, &result)
    } else {
        xml_response::list_objects_v1_xml(bucket, &result)
    };

    Ok(xml_response(StatusCode::OK, xml, request_id))
}

async fn handle_put_object(
    req: Request<Incoming>,
    store: Arc<FileStore>,
    bucket: &str,
    key: &str,
    request_id: &str,
) -> Result<Response<BoxBody>> {
    let metadata = extract_metadata(req.headers());
    let body = req.into_body();

    let result = store.put_object(bucket, key, body, metadata).await?;

    let resp = Response::builder()
        .status(StatusCode::OK)
        .header("ETag", format!("\"{}\"", result.md5))
        .header("x-amz-request-id", request_id)
        .body(empty_body())
        .unwrap();

    Ok(resp)
}

async fn handle_get_object(
    req: Request<Incoming>,
    store: Arc<FileStore>,
    bucket: &str,
    key: &str,
    request_id: &str,
) -> Result<Response<BoxBody>> {
    // Parse Range header
    let range = parse_range_header(req.headers());

    let result = store.get_object(bucket, key, range).await?;

    let content_type = result
        .metadata
        .get("content-type")
        .cloned()
        .unwrap_or_else(|| "binary/octet-stream".to_string());

    let mut builder = Response::builder()
        .header("ETag", format!("\"{}\"", result.md5))
        .header("Last-Modified", result.last_modified.format("%a, %d %b %Y %H:%M:%S GMT").to_string())
        .header("Content-Type", &content_type)
        .header("Accept-Ranges", "bytes")
        .header("x-amz-request-id", request_id);

    // Add custom metadata headers
    for (k, v) in &result.metadata {
        if k.starts_with("x-amz-meta-") {
            builder = builder.header(k.as_str(), v.as_str());
        }
    }

    if let Some((start, end)) = range {
        let content_length = end - start + 1;
        let resp = builder
            .status(StatusCode::PARTIAL_CONTENT)
            .header("Content-Length", content_length.to_string())
            .header(
                "Content-Range",
                format!("bytes {}-{}/{}", start, end, result.size),
            )
            .body(stream_body(result.body, content_length))
            .unwrap();
        Ok(resp)
    } else {
        let resp = builder
            .status(StatusCode::OK)
            .header("Content-Length", result.size.to_string())
            .body(stream_body(result.body, result.content_length))
            .unwrap();
        Ok(resp)
    }
}

async fn handle_head_object(
    store: Arc<FileStore>,
    bucket: &str,
    key: &str,
    request_id: &str,
) -> Result<Response<BoxBody>> {
    let result = store.head_object(bucket, key).await?;

    let content_type = result
        .metadata
        .get("content-type")
        .cloned()
        .unwrap_or_else(|| "binary/octet-stream".to_string());

    let mut builder = Response::builder()
        .status(StatusCode::OK)
        .header("ETag", format!("\"{}\"", result.md5))
        .header("Last-Modified", result.last_modified.format("%a, %d %b %Y %H:%M:%S GMT").to_string())
        .header("Content-Type", &content_type)
        .header("Content-Length", result.size.to_string())
        .header("Accept-Ranges", "bytes")
        .header("x-amz-request-id", request_id);

    for (k, v) in &result.metadata {
        if k.starts_with("x-amz-meta-") {
            builder = builder.header(k.as_str(), v.as_str());
        }
    }

    Ok(builder.body(empty_body()).unwrap())
|
||||
}
|
||||
|
||||
async fn handle_delete_object(
|
||||
store: Arc<FileStore>,
|
||||
bucket: &str,
|
||||
key: &str,
|
||||
request_id: &str,
|
||||
) -> Result<Response<BoxBody>> {
|
||||
store.delete_object(bucket, key).await?;
|
||||
Ok(empty_response(StatusCode::NO_CONTENT, request_id))
|
||||
}
|
||||
|
||||
async fn handle_copy_object(
|
||||
req: Request<Incoming>,
|
||||
store: Arc<FileStore>,
|
||||
dest_bucket: &str,
|
||||
dest_key: &str,
|
||||
request_id: &str,
|
||||
) -> Result<Response<BoxBody>> {
|
||||
let copy_source = req
|
||||
.headers()
|
||||
.get("x-amz-copy-source")
|
||||
.and_then(|v| v.to_str().ok())
|
||||
.unwrap_or("")
|
||||
.to_string();
|
||||
|
||||
let metadata_directive = req
|
||||
.headers()
|
||||
.get("x-amz-metadata-directive")
|
||||
.and_then(|v| v.to_str().ok())
|
||||
.unwrap_or("COPY")
|
||||
.to_uppercase();
|
||||
|
||||
// Parse source: /bucket/key or bucket/key
|
||||
let source = copy_source.trim_start_matches('/');
|
||||
let first_slash = source.find('/').unwrap_or(source.len());
|
||||
let src_bucket = percent_decode(&source[..first_slash]);
|
||||
let src_key = if first_slash < source.len() {
|
||||
percent_decode(&source[first_slash + 1..])
|
||||
} else {
|
||||
String::new()
|
||||
};
|
||||
|
||||
let new_metadata = if metadata_directive == "REPLACE" {
|
||||
Some(extract_metadata(req.headers()))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let result = store
|
||||
.copy_object(&src_bucket, &src_key, dest_bucket, dest_key, &metadata_directive, new_metadata)
|
||||
.await?;
|
||||
|
||||
let xml = xml_response::copy_object_result_xml(&result.md5, &result.last_modified.to_rfc3339());
|
||||
Ok(xml_response(StatusCode::OK, xml, request_id))
|
||||
}
|
||||
|
||||
// ============================
|
||||
// Policy handlers
|
||||
// ============================
|
||||
|
||||
async fn handle_get_bucket_policy(
|
||||
policy_store: &Arc<PolicyStore>,
|
||||
bucket: &str,
|
||||
request_id: &str,
|
||||
) -> Result<Response<BoxBody>> {
|
||||
match policy_store.get_policy(bucket).await {
|
||||
Some(p) => {
|
||||
let json = serde_json::to_string_pretty(&p)?;
|
||||
let resp = Response::builder()
|
||||
.status(StatusCode::OK)
|
||||
.header("content-type", "application/json")
|
||||
.header("x-amz-request-id", request_id)
|
||||
.body(full_body(json))
|
||||
.unwrap();
|
||||
Ok(resp)
|
||||
}
|
||||
None => Err(S3Error::no_such_bucket_policy().into()),
|
||||
}
|
||||
}
|
||||
|
||||
async fn handle_put_bucket_policy(
|
||||
req: Request<Incoming>,
|
||||
store: &Arc<FileStore>,
|
||||
policy_store: &Arc<PolicyStore>,
|
||||
bucket: &str,
|
||||
request_id: &str,
|
||||
) -> Result<Response<BoxBody>> {
|
||||
// Verify bucket exists
|
||||
if !store.bucket_exists(bucket).await {
|
||||
return Err(S3Error::no_such_bucket().into());
|
||||
}
|
||||
|
||||
// Read body
|
||||
let body_bytes = req.collect().await.map_err(|e| anyhow::anyhow!("Body error: {}", e))?.to_bytes();
|
||||
let body_str = String::from_utf8_lossy(&body_bytes);
|
||||
|
||||
// Validate and parse
|
||||
let validated_policy = policy::validate_policy(&body_str)?;
|
||||
|
||||
// Store
|
||||
policy_store
|
||||
.put_policy(bucket, validated_policy)
|
||||
.await
|
||||
.map_err(|e| S3Error::internal_error(&e.to_string()))?;
|
||||
|
||||
Ok(empty_response(StatusCode::NO_CONTENT, request_id))
|
||||
}
|
||||
|
||||
async fn handle_delete_bucket_policy(
|
||||
policy_store: &Arc<PolicyStore>,
|
||||
bucket: &str,
|
||||
request_id: &str,
|
||||
) -> Result<Response<BoxBody>> {
|
||||
policy_store
|
||||
.delete_policy(bucket)
|
||||
.await
|
||||
.map_err(|e| S3Error::internal_error(&e.to_string()))?;
|
||||
Ok(empty_response(StatusCode::NO_CONTENT, request_id))
|
||||
}
|
||||
|
||||
// ============================
|
||||
// Multipart handlers
|
||||
// ============================
|
||||
|
||||
async fn handle_initiate_multipart(
|
||||
req: Request<Incoming>,
|
||||
store: Arc<FileStore>,
|
||||
bucket: &str,
|
||||
key: &str,
|
||||
request_id: &str,
|
||||
) -> Result<Response<BoxBody>> {
|
||||
let metadata = extract_metadata(req.headers());
|
||||
let upload_id = store.initiate_multipart(bucket, key, metadata).await?;
|
||||
let xml = xml_response::initiate_multipart_xml(bucket, key, &upload_id);
|
||||
Ok(xml_response(StatusCode::OK, xml, request_id))
|
||||
}
|
||||
|
||||
async fn handle_upload_part(
|
||||
req: Request<Incoming>,
|
||||
store: Arc<FileStore>,
|
||||
query: &HashMap<String, String>,
|
||||
request_id: &str,
|
||||
) -> Result<Response<BoxBody>> {
|
||||
let upload_id = query.get("uploadId").unwrap();
|
||||
let part_number: u32 = query
|
||||
.get("partNumber")
|
||||
.and_then(|s| s.parse().ok())
|
||||
.unwrap_or(0);
|
||||
|
||||
if part_number < 1 || part_number > 10000 {
|
||||
return Err(S3Error::invalid_part_number().into());
|
||||
}
|
||||
|
||||
let body = req.into_body();
|
||||
let (etag, _size) = store.upload_part(upload_id, part_number, body).await?;
|
||||
|
||||
let resp = Response::builder()
|
||||
.status(StatusCode::OK)
|
||||
.header("ETag", format!("\"{}\"", etag))
|
||||
.header("x-amz-request-id", request_id)
|
||||
.body(empty_body())
|
||||
.unwrap();
|
||||
|
||||
Ok(resp)
|
||||
}
|
||||
|
||||
async fn handle_complete_multipart(
|
||||
req: Request<Incoming>,
|
||||
store: Arc<FileStore>,
|
||||
bucket: &str,
|
||||
key: &str,
|
||||
upload_id: &str,
|
||||
request_id: &str,
|
||||
) -> Result<Response<BoxBody>> {
|
||||
// Read request body (XML)
|
||||
let body_bytes = req.collect().await.map_err(|e| anyhow::anyhow!("Body error: {}", e))?.to_bytes();
|
||||
let body_str = String::from_utf8_lossy(&body_bytes);
|
||||
|
||||
// Parse parts from XML using regex-like approach
|
||||
let parts = parse_complete_multipart_xml(&body_str);
|
||||
|
||||
let result = store.complete_multipart(upload_id, &parts).await?;
|
||||
|
||||
let xml = xml_response::complete_multipart_xml(bucket, key, &result.etag);
|
||||
Ok(xml_response(StatusCode::OK, xml, request_id))
|
||||
}
|
||||
|
||||
async fn handle_abort_multipart(
|
||||
store: Arc<FileStore>,
|
||||
upload_id: &str,
|
||||
request_id: &str,
|
||||
) -> Result<Response<BoxBody>> {
|
||||
store.abort_multipart(upload_id).await?;
|
||||
Ok(empty_response(StatusCode::NO_CONTENT, request_id))
|
||||
}
|
||||
|
||||
async fn handle_list_multipart_uploads(
|
||||
store: Arc<FileStore>,
|
||||
bucket: &str,
|
||||
request_id: &str,
|
||||
) -> Result<Response<BoxBody>> {
|
||||
let uploads = store.list_multipart_uploads(bucket).await?;
|
||||
let xml = xml_response::list_multipart_uploads_xml(bucket, &uploads);
|
||||
Ok(xml_response(StatusCode::OK, xml, request_id))
|
||||
}
|
||||
|
||||
// ============================
|
||||
// Helpers
|
||||
// ============================
|
||||
|
||||
fn parse_query(query_string: &str) -> HashMap<String, String> {
|
||||
let mut map = HashMap::new();
|
||||
if query_string.is_empty() {
|
||||
return map;
|
||||
}
|
||||
for pair in query_string.split('&') {
|
||||
let mut parts = pair.splitn(2, '=');
|
||||
let key = parts.next().unwrap_or("");
|
||||
let value = parts.next().unwrap_or("");
|
||||
let key = percent_decode(key);
|
||||
let value = percent_decode(value);
|
||||
map.insert(key, value);
|
||||
}
|
||||
map
|
||||
}
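
// A minimal illustrative sketch (not part of the original commit) of how
// parse_query behaves: values are percent-decoded via the helper below, and a
// bare key such as the "uploads" marker maps to an empty value.
#[cfg(test)]
mod parse_query_tests {
    use super::parse_query;

    #[test]
    fn decodes_pairs_and_bare_keys() {
        let q = parse_query("prefix=a%2Fb&list-type=2&uploads");
        assert_eq!(q.get("prefix").map(String::as_str), Some("a/b"));
        assert_eq!(q.get("list-type").map(String::as_str), Some("2"));
        // A bare key without '=' is stored with an empty value
        assert_eq!(q.get("uploads").map(String::as_str), Some(""));
    }
}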

fn percent_decode(s: &str) -> String {
    percent_encoding::percent_decode_str(s)
        .decode_utf8_lossy()
        .to_string()
}

fn extract_metadata(headers: &hyper::HeaderMap) -> HashMap<String, String> {
    let mut metadata = HashMap::new();

    for (name, value) in headers {
        let name_str = name.as_str().to_lowercase();
        if let Ok(val) = value.to_str() {
            match name_str.as_str() {
                "content-type" | "cache-control" | "content-disposition"
                | "content-encoding" | "content-language" | "expires" => {
                    metadata.insert(name_str, val.to_string());
                }
                _ if name_str.starts_with("x-amz-meta-") => {
                    metadata.insert(name_str, val.to_string());
                }
                _ => {}
            }
        }
    }

    // Default content-type
    if !metadata.contains_key("content-type") {
        metadata.insert("content-type".to_string(), "binary/octet-stream".to_string());
    }

    metadata
}

fn parse_range_header(headers: &hyper::HeaderMap) -> Option<(u64, u64)> {
    let range_val = headers.get("range")?.to_str().ok()?;
    let bytes_prefix = "bytes=";
    if !range_val.starts_with(bytes_prefix) {
        return None;
    }
    let range_spec = &range_val[bytes_prefix.len()..];
    let mut parts = range_spec.splitn(2, '-');
    let start: u64 = parts.next()?.parse().ok()?;
    let end_str = parts.next()?;
    let end: u64 = if end_str.is_empty() {
        // If no end is specified, it is resolved later against the file size
        u64::MAX
    } else {
        end_str.parse().ok()?
    };
    Some((start, end))
}
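
// A minimal illustrative sketch (not part of the original commit) of
// parse_range_header's contract: bounded ranges come back as (start, end),
// open-ended ranges map the end to u64::MAX, and non-byte units are rejected.
// Assumes hyper re-exports http's HeaderMap, as the signatures above do.
#[cfg(test)]
mod range_header_tests {
    use super::parse_range_header;

    #[test]
    fn bounded_open_ended_and_invalid_ranges() {
        let mut headers = hyper::HeaderMap::new();

        headers.insert("range", "bytes=0-99".parse().unwrap());
        assert_eq!(parse_range_header(&headers), Some((0, 99)));

        // Open-ended: the end is a sentinel to be clamped by the store
        headers.insert("range", "bytes=100-".parse().unwrap());
        assert_eq!(parse_range_header(&headers), Some((100, u64::MAX)));

        // Only the "bytes=" unit is supported
        headers.insert("range", "items=0-1".parse().unwrap());
        assert_eq!(parse_range_header(&headers), None);
    }
}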

fn parse_complete_multipart_xml(xml: &str) -> Vec<(u32, String)> {
    let mut parts = Vec::new();

    // Simple XML parsing for <Part><PartNumber>N</PartNumber><ETag>...</ETag></Part>
    let mut remaining = xml;
    while let Some(part_start) = remaining.find("<Part>") {
        let after_part = &remaining[part_start + 6..];
        if let Some(part_end) = after_part.find("</Part>") {
            let part_content = &after_part[..part_end];

            let part_number = extract_xml_value(part_content, "PartNumber")
                .and_then(|s| s.parse::<u32>().ok());
            let etag = extract_xml_value(part_content, "ETag")
                .map(|s| s.replace('"', ""));

            if let (Some(pn), Some(et)) = (part_number, etag) {
                parts.push((pn, et));
            }

            remaining = &after_part[part_end + 7..];
        } else {
            break;
        }
    }

    parts.sort_by_key(|(pn, _)| *pn);
    parts
}

fn extract_xml_value(xml: &str, tag: &str) -> Option<String> {
    let open = format!("<{}>", tag);
    let close = format!("</{}>", tag);
    let start = xml.find(&open)? + open.len();
    let end = xml.find(&close)?;
    Some(xml[start..end].to_string())
}
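
// A small illustrative sketch (not part of the original commit) of the
// multipart-complete parser above: <Part> elements may arrive out of order and
// ETags may be quoted; the parser strips the quotes and sorts by part number.
#[cfg(test)]
mod multipart_xml_tests {
    use super::parse_complete_multipart_xml;

    #[test]
    fn parses_quoted_etags_and_sorts_by_part_number() {
        let xml = "<CompleteMultipartUpload>\
                   <Part><PartNumber>2</PartNumber><ETag>\"bbb\"</ETag></Part>\
                   <Part><PartNumber>1</PartNumber><ETag>\"aaa\"</ETag></Part>\
                   </CompleteMultipartUpload>";
        let parts = parse_complete_multipart_xml(xml);
        assert_eq!(parts, vec![(1, "aaa".to_string()), (2, "bbb".to_string())]);
    }
}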

// ============================
// CORS
// ============================

fn build_cors_preflight(config: &S3Config, request_id: &str) -> Response<BoxBody> {
    let mut builder = Response::builder()
        .status(StatusCode::NO_CONTENT)
        .header("x-amz-request-id", request_id);

    if let Some(ref origins) = config.cors.allowed_origins {
        builder = builder.header("Access-Control-Allow-Origin", origins.join(", "));
    }
    if let Some(ref methods) = config.cors.allowed_methods {
        builder = builder.header("Access-Control-Allow-Methods", methods.join(", "));
    }
    if let Some(ref headers) = config.cors.allowed_headers {
        builder = builder.header("Access-Control-Allow-Headers", headers.join(", "));
    }
    if let Some(max_age) = config.cors.max_age {
        builder = builder.header("Access-Control-Max-Age", max_age.to_string());
    }
    if config.cors.allow_credentials == Some(true) {
        builder = builder.header("Access-Control-Allow-Credentials", "true");
    }

    builder.body(empty_body()).unwrap()
}

fn add_cors_headers(headers: &mut hyper::HeaderMap, config: &S3Config) {
    if let Some(ref origins) = config.cors.allowed_origins {
        headers.insert(
            "access-control-allow-origin",
            origins.join(", ").parse().unwrap(),
        );
    }
    if let Some(ref exposed) = config.cors.exposed_headers {
        headers.insert(
            "access-control-expose-headers",
            exposed.join(", ").parse().unwrap(),
        );
    }
    if config.cors.allow_credentials == Some(true) {
        headers.insert(
            "access-control-allow-credentials",
            "true".parse().unwrap(),
        );
    }
}
838
rust/src/storage.rs
Normal file
@@ -0,0 +1,838 @@
use anyhow::Result;
use chrono::{DateTime, Utc};
use http_body_util::BodyExt;
use hyper::body::Incoming;
use md5::{Digest, Md5};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use tokio::fs;
use tokio::io::{AsyncReadExt, AsyncSeekExt, AsyncWriteExt, BufWriter};
use uuid::Uuid;

use crate::s3_error::S3Error;

// ============================
// Result types
// ============================

pub struct PutResult {
    pub md5: String,
}

pub struct GetResult {
    pub size: u64,
    pub last_modified: DateTime<Utc>,
    pub md5: String,
    pub metadata: HashMap<String, String>,
    pub body: tokio::fs::File,
    pub content_length: u64,
}

pub struct HeadResult {
    pub size: u64,
    pub last_modified: DateTime<Utc>,
    pub md5: String,
    pub metadata: HashMap<String, String>,
}

pub struct CopyResult {
    pub md5: String,
    pub last_modified: DateTime<Utc>,
}

pub struct ListObjectEntry {
    pub key: String,
    pub size: u64,
    pub last_modified: DateTime<Utc>,
    pub md5: String,
}

pub struct ListObjectsResult {
    pub contents: Vec<ListObjectEntry>,
    pub common_prefixes: Vec<String>,
    pub is_truncated: bool,
    pub next_continuation_token: Option<String>,
    pub prefix: String,
    pub delimiter: String,
    pub max_keys: usize,
}

pub struct BucketInfo {
    pub name: String,
    pub creation_date: DateTime<Utc>,
}

pub struct MultipartUploadInfo {
    pub upload_id: String,
    pub key: String,
    pub initiated: DateTime<Utc>,
}

pub struct CompleteMultipartResult {
    pub etag: String,
}

// ============================
// Multipart metadata (disk format, compatible with TS)
// ============================

#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct MultipartMetadata {
    upload_id: String,
    bucket: String,
    key: String,
    initiated: String,
    metadata: HashMap<String, String>,
    parts: Vec<PartMetadata>,
}

#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct PartMetadata {
    part_number: u32,
    etag: String,
    size: u64,
    last_modified: String,
}

// ============================
// FileStore
// ============================

pub struct FileStore {
    root_dir: PathBuf,
}

impl FileStore {
    pub fn new(root_dir: PathBuf) -> Self {
        Self { root_dir }
    }

    pub async fn initialize(&self) -> Result<()> {
        fs::create_dir_all(&self.root_dir).await?;
        fs::create_dir_all(self.policies_dir()).await?;
        Ok(())
    }

    pub fn policies_dir(&self) -> PathBuf {
        self.root_dir.join(".policies")
    }

    pub async fn reset(&self) -> Result<()> {
        if self.root_dir.exists() {
            fs::remove_dir_all(&self.root_dir).await?;
        }
        fs::create_dir_all(&self.root_dir).await?;
        fs::create_dir_all(self.policies_dir()).await?;
        Ok(())
    }

    // ============================
    // Bucket operations
    // ============================

    pub async fn list_buckets(&self) -> Result<Vec<BucketInfo>> {
        let mut buckets = Vec::new();
        let mut entries = fs::read_dir(&self.root_dir).await?;

        while let Some(entry) = entries.next_entry().await? {
            let meta = entry.metadata().await?;
            if meta.is_dir() {
                let name = entry.file_name().to_string_lossy().to_string();
                // Skip hidden dirs like .multipart
                if name.starts_with('.') {
                    continue;
                }
                let creation_date: DateTime<Utc> = meta
                    .created()
                    .unwrap_or(meta.modified().unwrap_or(std::time::SystemTime::UNIX_EPOCH))
                    .into();
                buckets.push(BucketInfo {
                    name,
                    creation_date,
                });
            }
        }

        buckets.sort_by(|a, b| a.name.cmp(&b.name));
        Ok(buckets)
    }

    pub async fn bucket_exists(&self, bucket: &str) -> bool {
        self.root_dir.join(bucket).is_dir()
    }

    pub async fn create_bucket(&self, bucket: &str) -> Result<()> {
        let bucket_path = self.root_dir.join(bucket);
        fs::create_dir_all(&bucket_path).await?;
        Ok(())
    }

    pub async fn delete_bucket(&self, bucket: &str) -> Result<()> {
        let bucket_path = self.root_dir.join(bucket);

        if !bucket_path.is_dir() {
            return Err(S3Error::no_such_bucket().into());
        }

        // Check that the bucket is empty
        let mut entries = fs::read_dir(&bucket_path).await?;
        while let Some(_entry) = entries.next_entry().await? {
            return Err(S3Error::bucket_not_empty().into());
        }

        fs::remove_dir_all(&bucket_path).await?;
        Ok(())
    }

    // ============================
    // Object operations
    // ============================

    pub async fn put_object(
        &self,
        bucket: &str,
        key: &str,
        body: Incoming,
        metadata: HashMap<String, String>,
    ) -> Result<PutResult> {
        if !self.bucket_exists(bucket).await {
            return Err(S3Error::no_such_bucket().into());
        }

        let object_path = self.object_path(bucket, key);
        if let Some(parent) = object_path.parent() {
            fs::create_dir_all(parent).await?;
        }

        let file = fs::File::create(&object_path).await?;
        let mut writer = BufWriter::new(file);
        let mut hasher = Md5::new();

        // Stream body frames directly to file
        let mut body = body;
        loop {
            match body.frame().await {
                Some(Ok(frame)) => {
                    if let Ok(data) = frame.into_data() {
                        hasher.update(&data);
                        writer.write_all(&data).await?;
                    }
                }
                Some(Err(e)) => {
                    return Err(anyhow::anyhow!("Body read error: {}", e));
                }
                None => break,
            }
        }

        writer.flush().await?;
        drop(writer);

        let md5_hex = format!("{:x}", hasher.finalize());

        // Write MD5 sidecar
        let md5_path = format!("{}.md5", object_path.display());
        fs::write(&md5_path, &md5_hex).await?;

        // Write metadata sidecar
        let metadata_path = format!("{}.metadata.json", object_path.display());
        let metadata_json = serde_json::to_string_pretty(&metadata)?;
        fs::write(&metadata_path, metadata_json).await?;

        Ok(PutResult {
            md5: md5_hex,
        })
    }

    pub async fn get_object(
        &self,
        bucket: &str,
        key: &str,
        range: Option<(u64, u64)>,
    ) -> Result<GetResult> {
        let object_path = self.object_path(bucket, key);

        if !object_path.exists() {
            return Err(S3Error::no_such_key().into());
        }

        let file_meta = fs::metadata(&object_path).await?;
        let size = file_meta.len();
        let last_modified: DateTime<Utc> = file_meta.modified()?.into();

        let md5 = self.read_md5(&object_path).await;
        let metadata = self.read_metadata(&object_path).await;

        let mut file = fs::File::open(&object_path).await?;

        let content_length = if let Some((start, end)) = range {
            file.seek(std::io::SeekFrom::Start(start)).await?;
            end - start + 1
        } else {
            size
        };

        Ok(GetResult {
            size,
            last_modified,
            md5,
            metadata,
            body: file,
            content_length,
        })
    }

    pub async fn head_object(&self, bucket: &str, key: &str) -> Result<HeadResult> {
        let object_path = self.object_path(bucket, key);

        if !object_path.exists() {
            return Err(S3Error::no_such_key().into());
        }

        // Only stat the file, don't open it
        let file_meta = fs::metadata(&object_path).await?;
        let size = file_meta.len();
        let last_modified: DateTime<Utc> = file_meta.modified()?.into();

        let md5 = self.read_md5(&object_path).await;
        let metadata = self.read_metadata(&object_path).await;

        Ok(HeadResult {
            size,
            last_modified,
            md5,
            metadata,
        })
    }

    pub async fn delete_object(&self, bucket: &str, key: &str) -> Result<()> {
        let object_path = self.object_path(bucket, key);
        let md5_path = format!("{}.md5", object_path.display());
        let metadata_path = format!("{}.metadata.json", object_path.display());

        // S3 doesn't error if object doesn't exist
        let _ = fs::remove_file(&object_path).await;
        let _ = fs::remove_file(&md5_path).await;
        let _ = fs::remove_file(&metadata_path).await;

        // Clean up empty parent directories up to bucket level
        let bucket_path = self.root_dir.join(bucket);
        let mut current = object_path.parent().map(|p| p.to_path_buf());
        while let Some(dir) = current {
            if dir == bucket_path {
                break;
            }
            if fs::read_dir(&dir).await.is_ok() {
                let mut entries = fs::read_dir(&dir).await?;
                if entries.next_entry().await?.is_none() {
                    let _ = fs::remove_dir(&dir).await;
                } else {
                    break;
                }
            }
            current = dir.parent().map(|p| p.to_path_buf());
        }

        Ok(())
    }

    pub async fn copy_object(
        &self,
        src_bucket: &str,
        src_key: &str,
        dest_bucket: &str,
        dest_key: &str,
        metadata_directive: &str,
        new_metadata: Option<HashMap<String, String>>,
    ) -> Result<CopyResult> {
        let src_path = self.object_path(src_bucket, src_key);
        let dest_path = self.object_path(dest_bucket, dest_key);

        if !src_path.exists() {
            return Err(S3Error::no_such_key().into());
        }

        if !self.bucket_exists(dest_bucket).await {
            return Err(S3Error::no_such_bucket().into());
        }

        if let Some(parent) = dest_path.parent() {
            fs::create_dir_all(parent).await?;
        }

        // Copy object file
        fs::copy(&src_path, &dest_path).await?;

        // Handle metadata
        if metadata_directive == "COPY" {
            let src_meta_path = format!("{}.metadata.json", src_path.display());
            let dest_meta_path = format!("{}.metadata.json", dest_path.display());
            let _ = fs::copy(&src_meta_path, &dest_meta_path).await;
        } else if let Some(meta) = new_metadata {
            let dest_meta_path = format!("{}.metadata.json", dest_path.display());
            let json = serde_json::to_string_pretty(&meta)?;
            fs::write(&dest_meta_path, json).await?;
        }

        // Copy MD5
        let src_md5_path = format!("{}.md5", src_path.display());
        let dest_md5_path = format!("{}.md5", dest_path.display());
        let _ = fs::copy(&src_md5_path, &dest_md5_path).await;

        let file_meta = fs::metadata(&dest_path).await?;
        let md5 = self.read_md5(&dest_path).await;
        let last_modified: DateTime<Utc> = file_meta.modified()?.into();

        Ok(CopyResult {
            md5,
            last_modified,
        })
    }

    pub async fn list_objects(
        &self,
        bucket: &str,
        prefix: &str,
        delimiter: &str,
        max_keys: usize,
        continuation_token: Option<&str>,
    ) -> Result<ListObjectsResult> {
        let bucket_path = self.root_dir.join(bucket);

        if !bucket_path.is_dir() {
            return Err(S3Error::no_such_bucket().into());
        }

        // Collect all object keys recursively
        let mut keys = Vec::new();
        self.collect_keys(&bucket_path, &bucket_path, &mut keys)
            .await?;

        // Apply prefix filter
        if !prefix.is_empty() {
            keys.retain(|k| k.starts_with(prefix));
        }

        keys.sort();

        // Handle continuation token
        if let Some(token) = continuation_token {
            if let Some(pos) = keys.iter().position(|k| k.as_str() > token) {
                keys = keys[pos..].to_vec();
            } else {
                keys.clear();
            }
        }

        // Handle delimiter and pagination
        let mut common_prefixes: Vec<String> = Vec::new();
        let mut common_prefix_set = std::collections::HashSet::new();
        let mut contents: Vec<ListObjectEntry> = Vec::new();
        let mut is_truncated = false;

        for key in &keys {
            if !delimiter.is_empty() {
                let remaining = &key[prefix.len()..];
                if let Some(delim_idx) = remaining.find(delimiter) {
                    let cp = format!(
                        "{}{}",
                        prefix,
                        &remaining[..delim_idx + delimiter.len()]
                    );
                    if common_prefix_set.insert(cp.clone()) {
                        common_prefixes.push(cp);
                    }
                    continue;
                }
            }

            if contents.len() >= max_keys {
                is_truncated = true;
                break;
            }

            let object_path = self.object_path(bucket, key);
            if let Ok(meta) = fs::metadata(&object_path).await {
                let md5 = self.read_md5(&object_path).await;
                let last_modified: DateTime<Utc> = meta.modified().unwrap_or(std::time::SystemTime::UNIX_EPOCH).into();
                contents.push(ListObjectEntry {
                    key: key.clone(),
                    size: meta.len(),
                    last_modified,
                    md5,
                });
            }
        }

        let next_continuation_token = if is_truncated {
            contents.last().map(|e| e.key.clone())
        } else {
            None
        };

        common_prefixes.sort();

        Ok(ListObjectsResult {
            contents,
            common_prefixes,
            is_truncated,
            next_continuation_token,
            prefix: prefix.to_string(),
            delimiter: delimiter.to_string(),
            max_keys,
        })
    }

    // ============================
    // Multipart operations
    // ============================

    fn multipart_dir(&self) -> PathBuf {
        self.root_dir.join(".multipart")
    }

    pub async fn initiate_multipart(
        &self,
        bucket: &str,
        key: &str,
        metadata: HashMap<String, String>,
    ) -> Result<String> {
        let upload_id = Uuid::new_v4().to_string().replace('-', "");
        let upload_dir = self.multipart_dir().join(&upload_id);
        fs::create_dir_all(&upload_dir).await?;

        let meta = MultipartMetadata {
            upload_id: upload_id.clone(),
            bucket: bucket.to_string(),
            key: key.to_string(),
            initiated: Utc::now().to_rfc3339(),
            metadata,
            parts: Vec::new(),
        };

        let meta_path = upload_dir.join("metadata.json");
        let json = serde_json::to_string_pretty(&meta)?;
        fs::write(&meta_path, json).await?;

        Ok(upload_id)
    }

    pub async fn upload_part(
        &self,
        upload_id: &str,
        part_number: u32,
        body: Incoming,
    ) -> Result<(String, u64)> {
        let upload_dir = self.multipart_dir().join(upload_id);
        if !upload_dir.is_dir() {
            return Err(S3Error::no_such_upload().into());
        }

        let part_path = upload_dir.join(format!("part-{}", part_number));
        let file = fs::File::create(&part_path).await?;
        let mut writer = BufWriter::new(file);
        let mut hasher = Md5::new();
        let mut size: u64 = 0;

        let mut body = body;
        loop {
            match body.frame().await {
                Some(Ok(frame)) => {
                    if let Ok(data) = frame.into_data() {
                        hasher.update(&data);
                        size += data.len() as u64;
                        writer.write_all(&data).await?;
                    }
                }
                Some(Err(e)) => {
                    return Err(anyhow::anyhow!("Body read error: {}", e));
                }
                None => break,
            }
        }

        writer.flush().await?;
        drop(writer);

        let etag = format!("{:x}", hasher.finalize());

        // Update metadata
        self.update_multipart_metadata(upload_id, part_number, &etag, size)
            .await?;

        Ok((etag, size))
    }

    async fn update_multipart_metadata(
        &self,
        upload_id: &str,
        part_number: u32,
        etag: &str,
        size: u64,
    ) -> Result<()> {
        let meta_path = self.multipart_dir().join(upload_id).join("metadata.json");
        let content = fs::read_to_string(&meta_path).await?;
        let mut meta: MultipartMetadata = serde_json::from_str(&content)?;

        // Remove existing part with same number
        meta.parts.retain(|p| p.part_number != part_number);

        meta.parts.push(PartMetadata {
            part_number,
            etag: etag.to_string(),
            size,
            last_modified: Utc::now().to_rfc3339(),
        });

        meta.parts.sort_by_key(|p| p.part_number);

        let json = serde_json::to_string_pretty(&meta)?;
        fs::write(&meta_path, json).await?;

        Ok(())
    }

    pub async fn complete_multipart(
        &self,
        upload_id: &str,
        parts: &[(u32, String)],
    ) -> Result<CompleteMultipartResult> {
        let upload_dir = self.multipart_dir().join(upload_id);
        if !upload_dir.is_dir() {
            return Err(S3Error::no_such_upload().into());
        }

        // Read metadata to get bucket/key
        let meta_path = upload_dir.join("metadata.json");
        let content = fs::read_to_string(&meta_path).await?;
        let meta: MultipartMetadata = serde_json::from_str(&content)?;

        let object_path = self.object_path(&meta.bucket, &meta.key);
        if let Some(parent) = object_path.parent() {
            fs::create_dir_all(parent).await?;
        }

        // Concatenate parts into the final object, streaming each part
        let dest_file = fs::File::create(&object_path).await?;
        let mut writer = BufWriter::new(dest_file);
        let mut hasher = Md5::new();

        for (part_number, _etag) in parts {
            let part_path = upload_dir.join(format!("part-{}", part_number));
            if !part_path.exists() {
                return Err(anyhow::anyhow!("Part {} not found", part_number));
            }

            let mut part_file = fs::File::open(&part_path).await?;
            let mut buf = vec![0u8; 64 * 1024]; // 64KB buffer
            loop {
                let n = part_file.read(&mut buf).await?;
                if n == 0 {
                    break;
                }
                hasher.update(&buf[..n]);
                writer.write_all(&buf[..n]).await?;
            }
        }

        writer.flush().await?;
        drop(writer);

        let etag = format!("{:x}", hasher.finalize());

        // Write MD5 sidecar
        let md5_path = format!("{}.md5", object_path.display());
        fs::write(&md5_path, &etag).await?;

        // Write metadata sidecar
        let metadata_path = format!("{}.metadata.json", object_path.display());
        let metadata_json = serde_json::to_string_pretty(&meta.metadata)?;
        fs::write(&metadata_path, metadata_json).await?;

        // Clean up multipart directory
        let _ = fs::remove_dir_all(&upload_dir).await;

        Ok(CompleteMultipartResult {
            etag,
        })
    }

    pub async fn abort_multipart(&self, upload_id: &str) -> Result<()> {
        let upload_dir = self.multipart_dir().join(upload_id);
        if !upload_dir.is_dir() {
            return Err(S3Error::no_such_upload().into());
        }
        fs::remove_dir_all(&upload_dir).await?;
        Ok(())
    }

    pub async fn list_multipart_uploads(
        &self,
        bucket: &str,
    ) -> Result<Vec<MultipartUploadInfo>> {
        let multipart_dir = self.multipart_dir();
        if !multipart_dir.is_dir() {
            return Ok(Vec::new());
        }

        let mut uploads = Vec::new();
        let mut entries = fs::read_dir(&multipart_dir).await?;

        while let Some(entry) = entries.next_entry().await? {
            if !entry.metadata().await?.is_dir() {
                continue;
            }

            let meta_path = entry.path().join("metadata.json");
            if let Ok(content) = fs::read_to_string(&meta_path).await {
                if let Ok(meta) = serde_json::from_str::<MultipartMetadata>(&content) {
                    if meta.bucket == bucket {
                        let initiated = DateTime::parse_from_rfc3339(&meta.initiated)
                            .map(|dt| dt.with_timezone(&Utc))
                            .unwrap_or_else(|_| Utc::now());

                        uploads.push(MultipartUploadInfo {
                            upload_id: meta.upload_id,
                            key: meta.key,
                            initiated,
                        });
                    }
                }
            }
        }

        Ok(uploads)
    }

    // ============================
    // Helpers
    // ============================

    fn object_path(&self, bucket: &str, key: &str) -> PathBuf {
        let encoded = encode_key(key);
        self.root_dir
            .join(bucket)
            .join(format!("{}._S3_object", encoded))
    }

    async fn read_md5(&self, object_path: &Path) -> String {
        let md5_path = format!("{}.md5", object_path.display());
        match fs::read_to_string(&md5_path).await {
            Ok(s) => s.trim().to_string(),
            Err(_) => {
                // Calculate the MD5 if the sidecar is missing
                match self.calculate_md5(object_path).await {
                    Ok(hash) => {
                        let _ = fs::write(&md5_path, &hash).await;
                        hash
                    }
                    Err(_) => String::new(),
                }
            }
        }
    }

    async fn calculate_md5(&self, path: &Path) -> Result<String> {
        let mut file = fs::File::open(path).await?;
        let mut hasher = Md5::new();
        let mut buf = vec![0u8; 64 * 1024];
        loop {
            let n = file.read(&mut buf).await?;
            if n == 0 {
                break;
            }
            hasher.update(&buf[..n]);
        }
        Ok(format!("{:x}", hasher.finalize()))
    }

    async fn read_metadata(&self, object_path: &Path) -> HashMap<String, String> {
        let meta_path = format!("{}.metadata.json", object_path.display());
        match fs::read_to_string(&meta_path).await {
            Ok(s) => serde_json::from_str(&s).unwrap_or_default(),
            Err(_) => HashMap::new(),
        }
    }

    fn collect_keys<'a>(
        &'a self,
        bucket_path: &'a Path,
        dir: &'a Path,
        keys: &'a mut Vec<String>,
    ) -> std::pin::Pin<Box<dyn std::future::Future<Output = Result<()>> + Send + 'a>> {
        Box::pin(async move {
            let mut entries = match fs::read_dir(dir).await {
                Ok(e) => e,
                Err(_) => return Ok(()),
            };

            while let Some(entry) = entries.next_entry().await? {
                let meta = entry.metadata().await?;
                let name = entry.file_name().to_string_lossy().to_string();

                if meta.is_dir() {
                    self.collect_keys(bucket_path, &entry.path(), keys).await?;
                } else if name.ends_with("._S3_object")
                    && !name.ends_with(".metadata.json")
                    && !name.ends_with(".md5")
                {
                    let relative = entry
                        .path()
                        .strip_prefix(bucket_path)
                        .unwrap_or(Path::new(""))
                        .to_string_lossy()
                        .to_string();
                    let key = decode_key(relative.trim_end_matches("._S3_object"));
                    keys.push(key);
                }
            }

            Ok(())
        })
    }
}
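
// An illustrative round trip over the bucket-level API above; a sketch only,
// assuming tokio is available as a dev-dependency and that a throwaway
// directory under the system temp dir is acceptable as the store root.
#[cfg(test)]
mod filestore_bucket_tests {
    use super::FileStore;

    #[tokio::test]
    async fn create_list_and_delete_a_bucket() -> anyhow::Result<()> {
        let root = std::env::temp_dir().join("smarts3-filestore-demo");
        let store = FileStore::new(root);
        store.reset().await?; // clean slate; also recreates .policies

        store.create_bucket("demo").await?;
        assert!(store.bucket_exists("demo").await);
        assert!(store.list_buckets().await?.iter().any(|b| b.name == "demo"));

        store.delete_bucket("demo").await?; // succeeds because the bucket is empty
        assert!(!store.bucket_exists("demo").await);
        Ok(())
    }
}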

// ============================
// Key encoding (identity on Linux)
// ============================

fn encode_key(key: &str) -> String {
    if cfg!(windows) {
        key.chars()
            .map(|c| match c {
                '<' | '>' | ':' | '"' | '\\' | '|' | '?' | '*' => {
                    format!("&{:02x}", c as u32)
                }
                _ => c.to_string(),
            })
            .collect()
    } else {
        key.to_string()
    }
}

fn decode_key(encoded: &str) -> String {
    if cfg!(windows) {
        let mut result = String::new();
        let mut chars = encoded.chars();
        while let Some(c) = chars.next() {
            if c == '&' {
                let hex: String = chars.by_ref().take(2).collect();
                if let Ok(byte) = u8::from_str_radix(&hex, 16) {
                    result.push(byte as char);
                } else {
                    result.push('&');
                    result.push_str(&hex);
                }
            } else {
                result.push(c);
            }
        }
        result
    } else {
        encoded.to_string()
    }
}
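
// An illustrative round trip for the key codec above (a sketch, not part of
// the original commit): on Windows, reserved characters such as ':' and '?'
// become &hh hex escapes and decode_key reverses them; on other platforms
// both functions are the identity, so the round trip holds everywhere.
#[cfg(test)]
mod key_encoding_tests {
    use super::{decode_key, encode_key};

    #[test]
    fn round_trips_reserved_characters() {
        let key = "reports/2024:q1?.txt";
        assert_eq!(decode_key(&encode_key(key)), key);
    }
}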
211
rust/src/xml_response.rs
Normal file
@@ -0,0 +1,211 @@
use crate::storage::{BucketInfo, ListObjectsResult, MultipartUploadInfo};

const XML_DECL: &str = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>";
const S3_NS: &str = "http://s3.amazonaws.com/doc/2006-03-01/";

fn xml_escape(s: &str) -> String {
    s.replace('&', "&amp;")
        .replace('<', "&lt;")
        .replace('>', "&gt;")
        .replace('"', "&quot;")
        .replace('\'', "&apos;")
}
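
// A minimal illustration of xml_escape (a sketch, not part of the original
// commit): the five XML-special characters map to their predefined entities,
// with the ampersand replaced first so later-inserted entities are not
// escaped a second time.
#[cfg(test)]
mod xml_escape_tests {
    use super::xml_escape;

    #[test]
    fn escapes_the_five_special_characters() {
        assert_eq!(xml_escape(r#"a<b>&"c'"#), "a&lt;b&gt;&amp;&quot;c&apos;");
    }
}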

pub fn list_buckets_xml(buckets: &[BucketInfo]) -> String {
    let mut xml = format!(
        "{}\n<ListAllMyBucketsResult xmlns=\"{}\">\
         <Owner><ID>123456789000</ID><DisplayName>S3rver</DisplayName></Owner>\
         <Buckets>",
        XML_DECL, S3_NS
    );

    for b in buckets {
        xml.push_str(&format!(
            "<Bucket><Name>{}</Name><CreationDate>{}</CreationDate></Bucket>",
            xml_escape(&b.name),
            b.creation_date.to_rfc3339()
        ));
    }

    xml.push_str("</Buckets></ListAllMyBucketsResult>");
    xml
}

pub fn list_objects_v1_xml(bucket: &str, result: &ListObjectsResult) -> String {
    let mut xml = format!(
        "{}\n<ListBucketResult xmlns=\"{}\">\
         <Name>{}</Name>\
         <Prefix>{}</Prefix>\
         <MaxKeys>{}</MaxKeys>\
         <IsTruncated>{}</IsTruncated>",
        XML_DECL,
        S3_NS,
        xml_escape(bucket),
        xml_escape(&result.prefix),
        result.max_keys,
        result.is_truncated
    );

    if !result.delimiter.is_empty() {
        xml.push_str(&format!("<Delimiter>{}</Delimiter>", xml_escape(&result.delimiter)));
    }

    for entry in &result.contents {
        xml.push_str(&format!(
            "<Contents>\
             <Key>{}</Key>\
             <LastModified>{}</LastModified>\
             <ETag>\"{}\"</ETag>\
             <Size>{}</Size>\
             <StorageClass>STANDARD</StorageClass>\
             </Contents>",
            xml_escape(&entry.key),
            entry.last_modified.to_rfc3339(),
            xml_escape(&entry.md5),
            entry.size
        ));
    }

    for cp in &result.common_prefixes {
        xml.push_str(&format!(
            "<CommonPrefixes><Prefix>{}</Prefix></CommonPrefixes>",
            xml_escape(cp)
        ));
    }

    xml.push_str("</ListBucketResult>");
    xml
}

pub fn list_objects_v2_xml(bucket: &str, result: &ListObjectsResult) -> String {
    let mut xml = format!(
        "{}\n<ListBucketResult xmlns=\"{}\">\
         <Name>{}</Name>\
         <Prefix>{}</Prefix>\
         <MaxKeys>{}</MaxKeys>\
         <KeyCount>{}</KeyCount>\
         <IsTruncated>{}</IsTruncated>",
        XML_DECL,
        S3_NS,
        xml_escape(bucket),
        xml_escape(&result.prefix),
        result.max_keys,
        result.contents.len(),
        result.is_truncated
    );

    if !result.delimiter.is_empty() {
        xml.push_str(&format!("<Delimiter>{}</Delimiter>", xml_escape(&result.delimiter)));
    }

    if let Some(ref token) = result.next_continuation_token {
        xml.push_str(&format!(
            "<NextContinuationToken>{}</NextContinuationToken>",
            xml_escape(token)
        ));
    }

    for entry in &result.contents {
        xml.push_str(&format!(
            "<Contents>\
             <Key>{}</Key>\
             <LastModified>{}</LastModified>\
             <ETag>\"{}\"</ETag>\
             <Size>{}</Size>\
             <StorageClass>STANDARD</StorageClass>\
             </Contents>",
            xml_escape(&entry.key),
            entry.last_modified.to_rfc3339(),
            xml_escape(&entry.md5),
            entry.size
        ));
    }

    for cp in &result.common_prefixes {
        xml.push_str(&format!(
            "<CommonPrefixes><Prefix>{}</Prefix></CommonPrefixes>",
            xml_escape(cp)
        ));
    }

    xml.push_str("</ListBucketResult>");
    xml
}

pub fn copy_object_result_xml(etag: &str, last_modified: &str) -> String {
    format!(
        "{}\n<CopyObjectResult>\
         <LastModified>{}</LastModified>\
         <ETag>\"{}\"</ETag>\
         </CopyObjectResult>",
        XML_DECL,
        xml_escape(last_modified),
        xml_escape(etag)
    )
}

pub fn initiate_multipart_xml(bucket: &str, key: &str, upload_id: &str) -> String {
    format!(
        "{}\n<InitiateMultipartUploadResult xmlns=\"{}\">\
         <Bucket>{}</Bucket>\
         <Key>{}</Key>\
         <UploadId>{}</UploadId>\
         </InitiateMultipartUploadResult>",
        XML_DECL,
        S3_NS,
        xml_escape(bucket),
        xml_escape(key),
        xml_escape(upload_id)
    )
}

pub fn complete_multipart_xml(bucket: &str, key: &str, etag: &str) -> String {
    format!(
        "{}\n<CompleteMultipartUploadResult xmlns=\"{}\">\
         <Location>/{}/{}</Location>\
         <Bucket>{}</Bucket>\
         <Key>{}</Key>\
         <ETag>\"{}\"</ETag>\
         </CompleteMultipartUploadResult>",
        XML_DECL,
        S3_NS,
        xml_escape(bucket),
        xml_escape(key),
        xml_escape(bucket),
        xml_escape(key),
        xml_escape(etag)
    )
}

pub fn list_multipart_uploads_xml(bucket: &str, uploads: &[MultipartUploadInfo]) -> String {
    let mut xml = format!(
        "{}\n<ListMultipartUploadsResult xmlns=\"{}\">\
         <Bucket>{}</Bucket>\
         <KeyMarker></KeyMarker>\
         <UploadIdMarker></UploadIdMarker>\
         <MaxUploads>1000</MaxUploads>\
         <IsTruncated>false</IsTruncated>",
        XML_DECL,
        S3_NS,
        xml_escape(bucket)
    );

    for u in uploads {
        xml.push_str(&format!(
            "<Upload>\
             <Key>{}</Key>\
             <UploadId>{}</UploadId>\
             <Initiator><ID>S3RVER</ID><DisplayName>S3RVER</DisplayName></Initiator>\
             <Owner><ID>S3RVER</ID><DisplayName>S3RVER</DisplayName></Owner>\
             <StorageClass>STANDARD</StorageClass>\
             <Initiated>{}</Initiated>\
             </Upload>",
            xml_escape(&u.key),
            xml_escape(&u.upload_id),
            u.initiated.to_rfc3339()
        ));
    }

    xml.push_str("</ListMultipartUploadsResult>");
    xml
}
301
test/test.auth.node.ts
Normal file
@@ -0,0 +1,301 @@
import { expect, tap } from '@git.zone/tstest/tapbundle';
import {
  S3Client,
  CreateBucketCommand,
  ListBucketsCommand,
  PutObjectCommand,
  GetObjectCommand,
  DeleteObjectCommand,
  DeleteBucketCommand,
  PutBucketPolicyCommand,
  GetBucketPolicyCommand,
  DeleteBucketPolicyCommand,
} from '@aws-sdk/client-s3';
import { Readable } from 'stream';
import * as smarts3 from '../ts/index.js';

let testSmarts3Instance: smarts3.Smarts3;
let authClient: S3Client;
let wrongClient: S3Client;

const TEST_PORT = 3344;
const ACCESS_KEY = 'TESTAKID';
const SECRET_KEY = 'TESTSECRETKEY123';

async function streamToString(stream: Readable): Promise<string> {
  const chunks: Buffer[] = [];
  return new Promise((resolve, reject) => {
    stream.on('data', (chunk) => chunks.push(Buffer.from(chunk)));
    stream.on('error', reject);
    stream.on('end', () => resolve(Buffer.concat(chunks).toString('utf8')));
  });
}

// ============================
// Server setup
// ============================

tap.test('should start S3 server with auth enabled', async () => {
  testSmarts3Instance = await smarts3.Smarts3.createAndStart({
    server: {
      port: TEST_PORT,
      silent: true,
      region: 'us-east-1',
    },
    storage: {
      cleanSlate: true,
    },
    auth: {
      enabled: true,
      credentials: [
        {
          accessKeyId: ACCESS_KEY,
          secretAccessKey: SECRET_KEY,
        },
      ],
    },
  });

  // Authenticated client with correct credentials
  authClient = new S3Client({
    endpoint: `http://localhost:${TEST_PORT}`,
    region: 'us-east-1',
    credentials: {
      accessKeyId: ACCESS_KEY,
      secretAccessKey: SECRET_KEY,
    },
    forcePathStyle: true,
  });

  // Client with wrong credentials
  wrongClient = new S3Client({
    endpoint: `http://localhost:${TEST_PORT}`,
    region: 'us-east-1',
    credentials: {
      accessKeyId: 'WRONGKEY',
      secretAccessKey: 'WRONGSECRET',
    },
    forcePathStyle: true,
  });
});

// ============================
// Authenticated CRUD
// ============================

tap.test('authenticated: should list buckets', async () => {
  const response = await authClient.send(new ListBucketsCommand({}));
  expect(response.$metadata.httpStatusCode).toEqual(200);
  expect(Array.isArray(response.Buckets)).toEqual(true);
});

tap.test('authenticated: should create a bucket', async () => {
  const response = await authClient.send(new CreateBucketCommand({ Bucket: 'auth-test-bucket' }));
  expect(response.$metadata.httpStatusCode).toEqual(200);
});

tap.test('authenticated: should upload an object', async () => {
  const response = await authClient.send(
    new PutObjectCommand({
      Bucket: 'auth-test-bucket',
      Key: 'hello.txt',
      Body: 'Hello authenticated world!',
      ContentType: 'text/plain',
    }),
  );
  expect(response.$metadata.httpStatusCode).toEqual(200);
});

tap.test('authenticated: should download the object', async () => {
  const response = await authClient.send(
    new GetObjectCommand({
      Bucket: 'auth-test-bucket',
      Key: 'hello.txt',
    }),
  );
  expect(response.$metadata.httpStatusCode).toEqual(200);
  const content = await streamToString(response.Body as Readable);
  expect(content).toEqual('Hello authenticated world!');
});

// ============================
// Wrong credentials → 403
// ============================

tap.test('wrong credentials: should fail to list buckets', async () => {
  await expect(wrongClient.send(new ListBucketsCommand({}))).rejects.toThrow();
});

tap.test('wrong credentials: should fail to get object', async () => {
  await expect(
    wrongClient.send(
      new GetObjectCommand({
        Bucket: 'auth-test-bucket',
        Key: 'hello.txt',
      }),
    ),
  ).rejects.toThrow();
});

// ============================
// Anonymous → 403 (no policy yet)
// ============================

tap.test('anonymous: should fail to list buckets', async () => {
  const resp = await fetch(`http://localhost:${TEST_PORT}/`);
  expect(resp.status).toEqual(403);
});

tap.test('anonymous: should fail to get object (no policy)', async () => {
  const resp = await fetch(`http://localhost:${TEST_PORT}/auth-test-bucket/hello.txt`);
  expect(resp.status).toEqual(403);
});

// ============================
// Bucket policy: public read
// ============================

tap.test('should PUT a public-read bucket policy', async () => {
  const policy = {
    Version: '2012-10-17',
    Statement: [
      {
        Sid: 'PublicRead',
        Effect: 'Allow',
        Principal: '*',
        Action: ['s3:GetObject'],
        Resource: [`arn:aws:s3:::auth-test-bucket/*`],
      },
    ],
  };

  const response = await authClient.send(
    new PutBucketPolicyCommand({
      Bucket: 'auth-test-bucket',
      Policy: JSON.stringify(policy),
    }),
  );
  expect(response.$metadata.httpStatusCode).toEqual(204);
});

tap.test('should GET the bucket policy', async () => {
  const response = await authClient.send(
    new GetBucketPolicyCommand({
      Bucket: 'auth-test-bucket',
    }),
  );
  expect(response.$metadata.httpStatusCode).toEqual(200);
  const policy = JSON.parse(response.Policy!);
  expect(policy.Statement[0].Sid).toEqual('PublicRead');
});

tap.test('anonymous: should GET object after public-read policy', async () => {
  const resp = await fetch(`http://localhost:${TEST_PORT}/auth-test-bucket/hello.txt`);
  expect(resp.status).toEqual(200);
  const content = await resp.text();
  expect(content).toEqual('Hello authenticated world!');
});

tap.test('anonymous: should still fail to PUT object (policy only allows GET)', async () => {
  const resp = await fetch(`http://localhost:${TEST_PORT}/auth-test-bucket/anon-file.txt`, {
    method: 'PUT',
    body: 'should fail',
  });
  expect(resp.status).toEqual(403);
});

// ============================
// Deny policy
// ============================

tap.test('should PUT a deny policy that blocks authenticated delete', async () => {
  const policy = {
    Version: '2012-10-17',
    Statement: [
      {
        Sid: 'PublicRead',
        Effect: 'Allow',
        Principal: '*',
        Action: ['s3:GetObject'],
        Resource: [`arn:aws:s3:::auth-test-bucket/*`],
      },
      {
        Sid: 'DenyDelete',
        Effect: 'Deny',
        Principal: '*',
        Action: ['s3:DeleteObject'],
        Resource: [`arn:aws:s3:::auth-test-bucket/*`],
      },
    ],
  };

  const response = await authClient.send(
    new PutBucketPolicyCommand({
      Bucket: 'auth-test-bucket',
      Policy: JSON.stringify(policy),
    }),
  );
  expect(response.$metadata.httpStatusCode).toEqual(204);
});

tap.test('authenticated: should be denied delete by policy', async () => {
  await expect(
    authClient.send(
      new DeleteObjectCommand({
        Bucket: 'auth-test-bucket',
        Key: 'hello.txt',
      }),
    ),
  ).rejects.toThrow();
});

// ============================
// DELETE bucket policy
// ============================

tap.test('should DELETE the bucket policy', async () => {
  const response = await authClient.send(
    new DeleteBucketPolicyCommand({
      Bucket: 'auth-test-bucket',
    }),
  );
  expect(response.$metadata.httpStatusCode).toEqual(204);
});

tap.test('should GET policy → 404 after deletion', async () => {
  await expect(
    authClient.send(
      new GetBucketPolicyCommand({
        Bucket: 'auth-test-bucket',
      }),
    ),
  ).rejects.toThrow();
});

// ============================
// Cleanup
// ============================

tap.test('authenticated: delete object after policy removed', async () => {
  const response = await authClient.send(
    new DeleteObjectCommand({
      Bucket: 'auth-test-bucket',
      Key: 'hello.txt',
    }),
  );
  expect(response.$metadata.httpStatusCode).toEqual(204);
});

tap.test('authenticated: delete the bucket', async () => {
  const response = await authClient.send(
    new DeleteBucketCommand({ Bucket: 'auth-test-bucket' }),
  );
  expect(response.$metadata.httpStatusCode).toEqual(204);
});

tap.test('should stop the S3 server', async () => {
  await testSmarts3Instance.stop();
});

export default tap.start();
@@ -18,9 +18,13 @@ async function streamToString(stream: Readable): Promise<string> {

tap.test('should start the S3 server and configure client', async () => {
  testSmarts3Instance = await smarts3.Smarts3.createAndStart({
    server: {
      port: 3337,
      cleanSlate: true,
      silent: true,
    },
    storage: {
      cleanSlate: true,
    },
  });

  const descriptor = await testSmarts3Instance.getS3Descriptor();
335
test/test.policy-actions.node.ts
Normal file
@@ -0,0 +1,335 @@
import { expect, tap } from '@git.zone/tstest/tapbundle';
import {
  S3Client,
  CreateBucketCommand,
  DeleteBucketCommand,
  ListBucketsCommand,
  ListObjectsV2Command,
  PutObjectCommand,
  GetObjectCommand,
  DeleteObjectCommand,
  CopyObjectCommand,
  HeadBucketCommand,
  PutBucketPolicyCommand,
  GetBucketPolicyCommand,
  DeleteBucketPolicyCommand,
} from '@aws-sdk/client-s3';
import * as smarts3 from '../ts/index.js';

let testSmarts3Instance: smarts3.Smarts3;
let authClient: S3Client;

const TEST_PORT = 3347;
const ACCESS_KEY = 'TESTAKID';
const SECRET_KEY = 'TESTSECRETKEY123';
const BUCKET = 'actions-bucket';
const BASE_URL = `http://localhost:${TEST_PORT}`;

async function putPolicy(statements: any[]) {
  await authClient.send(
    new PutBucketPolicyCommand({
      Bucket: BUCKET,
      Policy: JSON.stringify({ Version: '2012-10-17', Statement: statements }),
    })
  );
}

async function clearPolicy() {
  await authClient.send(new DeleteBucketPolicyCommand({ Bucket: BUCKET }));
}

function denyStatement(action: string) {
  return {
    Sid: `Deny_${action.replace(':', '_')}`,
    Effect: 'Deny' as const,
    Principal: '*',
    Action: action,
    Resource: [
      `arn:aws:s3:::${BUCKET}`,
      `arn:aws:s3:::${BUCKET}/*`,
    ],
  };
}
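
// e.g. denyStatement('s3:GetObject') yields Sid 'Deny_s3_GetObject' and denies
// that single action for every principal, on both the bucket ARN and all keys.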

// ============================
// Server setup
// ============================

tap.test('setup: start server, create bucket, upload object', async () => {
  testSmarts3Instance = await smarts3.Smarts3.createAndStart({
    server: { port: TEST_PORT, silent: true, region: 'us-east-1' },
    storage: { cleanSlate: true },
    auth: {
      enabled: true,
      credentials: [{ accessKeyId: ACCESS_KEY, secretAccessKey: SECRET_KEY }],
    },
  });

  authClient = new S3Client({
    endpoint: BASE_URL,
    region: 'us-east-1',
    credentials: { accessKeyId: ACCESS_KEY, secretAccessKey: SECRET_KEY },
    forcePathStyle: true,
  });

  await authClient.send(new CreateBucketCommand({ Bucket: BUCKET }));
  await authClient.send(
    new PutObjectCommand({
      Bucket: BUCKET,
      Key: 'obj.txt',
      Body: 'test content for actions',
      ContentType: 'text/plain',
    })
  );
});

// ============================
// Per-action deny enforcement
// ============================

tap.test('Deny s3:ListBucket → authenticated ListObjects fails', async () => {
  await putPolicy([denyStatement('s3:ListBucket')]);

  await expect(
    authClient.send(new ListObjectsV2Command({ Bucket: BUCKET }))
  ).rejects.toThrow();

  await clearPolicy();
});

tap.test('Deny s3:CreateBucket → not enforceable via bucket policy (bucket does not exist yet)', async () => {
  // CreateBucket cannot be denied via a bucket policy: the target bucket does
  // not exist yet, so there is no policy to load and evaluate. This matches
  // AWS behavior, where bucket creation is governed by identity policies
  // instead. So rather than asserting a deny, verify that CreateBucket (and
  // the matching DeleteBucket) still work.
  await authClient.send(new CreateBucketCommand({ Bucket: 'new-test-bucket' }));
  await authClient.send(new DeleteBucketCommand({ Bucket: 'new-test-bucket' }));
});
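
// Note (illustrative only — not exercised by this suite): in real AWS the
// equivalent restriction would live in an IAM identity policy attached to the
// caller, e.g. a statement like:
//   { Effect: 'Deny', Action: 's3:CreateBucket', Resource: 'arn:aws:s3:::*' }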

tap.test('Deny s3:DeleteBucket → authenticated DeleteBucket fails', async () => {
  await putPolicy([denyStatement('s3:DeleteBucket')]);

  await expect(
    authClient.send(new DeleteBucketCommand({ Bucket: BUCKET }))
  ).rejects.toThrow();

  await clearPolicy();
});

tap.test('Deny s3:GetObject → authenticated GetObject fails', async () => {
  await putPolicy([denyStatement('s3:GetObject')]);

  await expect(
    authClient.send(new GetObjectCommand({ Bucket: BUCKET, Key: 'obj.txt' }))
  ).rejects.toThrow();

  await clearPolicy();
});

tap.test('Deny s3:PutObject → authenticated PutObject fails', async () => {
  await putPolicy([denyStatement('s3:PutObject')]);

  await expect(
    authClient.send(
      new PutObjectCommand({
        Bucket: BUCKET,
        Key: 'new-obj.txt',
        Body: 'should fail',
      })
    )
  ).rejects.toThrow();

  await clearPolicy();
});

tap.test('Deny s3:DeleteObject → authenticated DeleteObject fails', async () => {
  await putPolicy([denyStatement('s3:DeleteObject')]);

  await expect(
    authClient.send(new DeleteObjectCommand({ Bucket: BUCKET, Key: 'obj.txt' }))
  ).rejects.toThrow();

  await clearPolicy();
});

tap.test('Deny s3:PutObject → authenticated CopyObject fails (maps to s3:PutObject)', async () => {
  await putPolicy([denyStatement('s3:PutObject')]);

  await expect(
    authClient.send(
      new CopyObjectCommand({
        Bucket: BUCKET,
        Key: 'obj-copy.txt',
        CopySource: `${BUCKET}/obj.txt`,
      })
    )
  ).rejects.toThrow();

  await clearPolicy();
});

tap.test('Deny s3:GetBucketPolicy → authenticated GetBucketPolicy fails', async () => {
  // Put a policy that denies GetBucketPolicy, then try to read it back.
  await putPolicy([denyStatement('s3:GetBucketPolicy')]);

  await expect(
    authClient.send(new GetBucketPolicyCommand({ Bucket: BUCKET }))
  ).rejects.toThrow();

  // Clean up via DeleteBucketPolicy, which this policy does not deny.
  await clearPolicy();
});

tap.test('Deny s3:PutBucketPolicy → authenticated PutBucketPolicy fails (for second policy)', async () => {
  // First put a policy that denies PutBucketPolicy
  await putPolicy([denyStatement('s3:PutBucketPolicy')]);

  // Now try to put another policy — should fail
  await expect(
    authClient.send(
      new PutBucketPolicyCommand({
        Bucket: BUCKET,
        Policy: JSON.stringify({
          Version: '2012-10-17',
          Statement: [
            {
              Sid: 'SomeOtherPolicy',
              Effect: 'Allow',
              Principal: '*',
              Action: 's3:GetObject',
              Resource: `arn:aws:s3:::${BUCKET}/*`,
            },
          ],
        }),
      })
    )
  ).rejects.toThrow();

  await clearPolicy();
});

tap.test('Deny s3:DeleteBucketPolicy → authenticated DeleteBucketPolicy fails', async () => {
  await putPolicy([denyStatement('s3:DeleteBucketPolicy')]);

  await expect(
    authClient.send(new DeleteBucketPolicyCommand({ Bucket: BUCKET }))
  ).rejects.toThrow();

  // The deny applies to every principal, so the policy cannot be cleared via
  // DeleteBucketPolicy here. The next test recovers by overwriting the policy
  // (s3:PutBucketPolicy is not denied) and then deleting it normally; the
  // cleanSlate option also wipes all state on the next server start.
});

tap.test('Recovery: remove deny policy → authenticated operations resume working', async () => {
  // The previous test left a deny on s3:DeleteBucketPolicy, but it did not
  // deny s3:PutBucketPolicy, so we can overwrite the policy with a permissive
  // one and then delete it normally.
  await authClient.send(
    new PutBucketPolicyCommand({
      Bucket: BUCKET,
      Policy: JSON.stringify({
        Version: '2012-10-17',
        Statement: [
          {
            Sid: 'AllowAll',
            Effect: 'Allow',
            Principal: '*',
            Action: 's3:*',
            Resource: [`arn:aws:s3:::${BUCKET}`, `arn:aws:s3:::${BUCKET}/*`],
          },
        ],
      }),
    })
  );

  // Now all operations should work again
  const getResp = await authClient.send(
    new GetObjectCommand({ Bucket: BUCKET, Key: 'obj.txt' })
  );
  expect(getResp.$metadata.httpStatusCode).toEqual(200);

  const listResp = await authClient.send(
    new ListObjectsV2Command({ Bucket: BUCKET })
  );
  expect(listResp.$metadata.httpStatusCode).toEqual(200);

  await clearPolicy();
});

// ============================
// Special cases
// ============================

tap.test('ListAllMyBuckets always requires auth → anonymous fetch to / returns 403', async () => {
  const resp = await fetch(`${BASE_URL}/`);
  expect(resp.status).toEqual(403);
});

tap.test('Auth disabled mode → anonymous full access works', async () => {
  // Start a second server with auth disabled
  const noAuthInstance = await smarts3.Smarts3.createAndStart({
    server: { port: 3348, silent: true, region: 'us-east-1' },
    storage: { cleanSlate: true },
    auth: { enabled: false, credentials: [] },
  });

  // Anonymous operations should all work
  const listResp = await fetch('http://localhost:3348/');
  expect(listResp.status).toEqual(200);

  // Create bucket via fetch
  const createResp = await fetch('http://localhost:3348/anon-bucket', { method: 'PUT' });
  expect(createResp.status).toEqual(200);

  // Put object
  const putResp = await fetch('http://localhost:3348/anon-bucket/file.txt', {
    method: 'PUT',
    body: 'hello anon',
  });
  expect(putResp.status).toEqual(200);

  // Get object
  const getResp = await fetch('http://localhost:3348/anon-bucket/file.txt');
  expect(getResp.status).toEqual(200);
  const text = await getResp.text();
  expect(text).toEqual('hello anon');

  // Delete object
  const delObjResp = await fetch('http://localhost:3348/anon-bucket/file.txt', { method: 'DELETE' });
  expect(delObjResp.status).toEqual(204);

  // Delete bucket
  const delBucketResp = await fetch('http://localhost:3348/anon-bucket', { method: 'DELETE' });
  expect(delBucketResp.status).toEqual(204);

  await noAuthInstance.stop();
});

// ============================
// Teardown
// ============================

tap.test('teardown: clean up and stop server', async () => {
  // Clean up any remaining objects
  try {
    await authClient.send(new DeleteObjectCommand({ Bucket: BUCKET, Key: 'obj.txt' }));
  } catch {
    // May already be deleted
  }
  try {
    await authClient.send(new DeleteBucketCommand({ Bucket: BUCKET }));
  } catch {
    // May already be deleted
  }
  await testSmarts3Instance.stop();
});

export default tap.start();
252 test/test.policy-crud.node.ts Normal file
@@ -0,0 +1,252 @@
import { expect, tap } from '@git.zone/tstest/tapbundle';
import {
  S3Client,
  CreateBucketCommand,
  DeleteBucketCommand,
  PutBucketPolicyCommand,
  GetBucketPolicyCommand,
  DeleteBucketPolicyCommand,
} from '@aws-sdk/client-s3';
import * as smarts3 from '../ts/index.js';

let testSmarts3Instance: smarts3.Smarts3;
let authClient: S3Client;

const TEST_PORT = 3345;
const ACCESS_KEY = 'TESTAKID';
const SECRET_KEY = 'TESTSECRETKEY123';
const BUCKET = 'policy-crud-bucket';

function makePolicy(statements: any[]) {
  return JSON.stringify({ Version: '2012-10-17', Statement: statements });
}

const validStatement = {
  Sid: 'Test1',
  Effect: 'Allow',
  Principal: '*',
  Action: ['s3:GetObject'],
  Resource: [`arn:aws:s3:::${BUCKET}/*`],
};

// ============================
// Server setup
// ============================

tap.test('setup: start S3 server with auth enabled', async () => {
  testSmarts3Instance = await smarts3.Smarts3.createAndStart({
    server: { port: TEST_PORT, silent: true, region: 'us-east-1' },
    storage: { cleanSlate: true },
    auth: {
      enabled: true,
      credentials: [{ accessKeyId: ACCESS_KEY, secretAccessKey: SECRET_KEY }],
    },
  });

  authClient = new S3Client({
    endpoint: `http://localhost:${TEST_PORT}`,
    region: 'us-east-1',
    credentials: { accessKeyId: ACCESS_KEY, secretAccessKey: SECRET_KEY },
    forcePathStyle: true,
  });
});

tap.test('setup: create bucket', async () => {
  await authClient.send(new CreateBucketCommand({ Bucket: BUCKET }));
});

// ============================
// CRUD tests
// ============================

tap.test('GET policy on bucket with no policy → throws (NoSuchBucketPolicy)', async () => {
  await expect(
    authClient.send(new GetBucketPolicyCommand({ Bucket: BUCKET }))
  ).rejects.toThrow();
});

tap.test('PUT valid policy → 204', async () => {
  const response = await authClient.send(
    new PutBucketPolicyCommand({
      Bucket: BUCKET,
      Policy: makePolicy([validStatement]),
    })
  );
  expect(response.$metadata.httpStatusCode).toEqual(204);
});

tap.test('GET policy back → returns matching JSON', async () => {
  const response = await authClient.send(
    new GetBucketPolicyCommand({ Bucket: BUCKET })
  );
  expect(response.$metadata.httpStatusCode).toEqual(200);
  const policy = JSON.parse(response.Policy!);
  expect(policy.Version).toEqual('2012-10-17');
  expect(policy.Statement[0].Sid).toEqual('Test1');
  expect(policy.Statement[0].Effect).toEqual('Allow');
});

tap.test('PUT updated policy (overwrite) → 204, GET returns new version', async () => {
  const updatedStatement = {
    Sid: 'Updated',
    Effect: 'Deny',
    Principal: '*',
    Action: ['s3:DeleteObject'],
    Resource: [`arn:aws:s3:::${BUCKET}/*`],
  };

  const putResp = await authClient.send(
    new PutBucketPolicyCommand({
      Bucket: BUCKET,
      Policy: makePolicy([updatedStatement]),
    })
  );
  expect(putResp.$metadata.httpStatusCode).toEqual(204);

  const getResp = await authClient.send(
    new GetBucketPolicyCommand({ Bucket: BUCKET })
  );
  const policy = JSON.parse(getResp.Policy!);
  expect(policy.Statement[0].Sid).toEqual('Updated');
  expect(policy.Statement[0].Effect).toEqual('Deny');
});

tap.test('DELETE policy → 204', async () => {
  const response = await authClient.send(
    new DeleteBucketPolicyCommand({ Bucket: BUCKET })
  );
  expect(response.$metadata.httpStatusCode).toEqual(204);
});

tap.test('DELETE policy again (idempotent) → 204', async () => {
  const response = await authClient.send(
    new DeleteBucketPolicyCommand({ Bucket: BUCKET })
  );
  expect(response.$metadata.httpStatusCode).toEqual(204);
});

tap.test('GET policy after delete → throws', async () => {
  await expect(
    authClient.send(new GetBucketPolicyCommand({ Bucket: BUCKET }))
  ).rejects.toThrow();
});

tap.test('PUT policy on non-existent bucket → throws (NoSuchBucket)', async () => {
  await expect(
    authClient.send(
      new PutBucketPolicyCommand({
        Bucket: 'nonexistent-bucket-xyz',
        Policy: makePolicy([validStatement]),
      })
    )
  ).rejects.toThrow();
});

tap.test('PUT invalid JSON → throws (MalformedPolicy)', async () => {
  await expect(
    authClient.send(
      new PutBucketPolicyCommand({
        Bucket: BUCKET,
        Policy: '{not valid json!!!',
      })
    )
  ).rejects.toThrow();
});

tap.test('PUT policy with wrong version → throws (MalformedPolicy)', async () => {
  await expect(
    authClient.send(
      new PutBucketPolicyCommand({
        Bucket: BUCKET,
        Policy: JSON.stringify({
          Version: '2023-01-01',
          Statement: [validStatement],
        }),
      })
    )
  ).rejects.toThrow();
});

tap.test('PUT policy with empty statements array → throws (MalformedPolicy)', async () => {
  await expect(
    authClient.send(
      new PutBucketPolicyCommand({
        Bucket: BUCKET,
        Policy: JSON.stringify({
          Version: '2012-10-17',
          Statement: [],
        }),
      })
    )
  ).rejects.toThrow();
});

tap.test('PUT policy with action missing s3: prefix → throws (MalformedPolicy)', async () => {
  await expect(
    authClient.send(
      new PutBucketPolicyCommand({
        Bucket: BUCKET,
        Policy: makePolicy([
          {
            Sid: 'BadAction',
            Effect: 'Allow',
            Principal: '*',
            Action: ['GetObject'],
            Resource: [`arn:aws:s3:::${BUCKET}/*`],
          },
        ]),
      })
    )
  ).rejects.toThrow();
});

tap.test('PUT policy with resource missing arn:aws:s3::: prefix → throws (MalformedPolicy)', async () => {
  await expect(
    authClient.send(
      new PutBucketPolicyCommand({
        Bucket: BUCKET,
        Policy: makePolicy([
          {
            Sid: 'BadResource',
            Effect: 'Allow',
            Principal: '*',
            Action: ['s3:GetObject'],
            Resource: ['policy-crud-bucket/*'],
          },
        ]),
      })
    )
  ).rejects.toThrow();
});

tap.test('Bucket deletion cleans up associated policy', async () => {
  // PUT a policy
  await authClient.send(
    new PutBucketPolicyCommand({
      Bucket: BUCKET,
      Policy: makePolicy([validStatement]),
    })
  );

  // Delete the bucket
  await authClient.send(new DeleteBucketCommand({ Bucket: BUCKET }));

  // Re-create the bucket
  await authClient.send(new CreateBucketCommand({ Bucket: BUCKET }));

  // GET policy should now be gone
  await expect(
    authClient.send(new GetBucketPolicyCommand({ Bucket: BUCKET }))
  ).rejects.toThrow();
});

// ============================
// Teardown
// ============================

tap.test('teardown: delete bucket and stop server', async () => {
  await authClient.send(new DeleteBucketCommand({ Bucket: BUCKET }));
  await testSmarts3Instance.stop();
});

export default tap.start();
517 test/test.policy-eval.node.ts Normal file
@@ -0,0 +1,517 @@
import { expect, tap } from '@git.zone/tstest/tapbundle';
import {
  S3Client,
  CreateBucketCommand,
  DeleteBucketCommand,
  PutObjectCommand,
  GetObjectCommand,
  DeleteObjectCommand,
  PutBucketPolicyCommand,
  DeleteBucketPolicyCommand,
} from '@aws-sdk/client-s3';
import { Readable } from 'stream';
import * as smarts3 from '../ts/index.js';

let testSmarts3Instance: smarts3.Smarts3;
let authClient: S3Client;

const TEST_PORT = 3346;
const ACCESS_KEY = 'TESTAKID';
const SECRET_KEY = 'TESTSECRETKEY123';
const BUCKET = 'eval-bucket';
const BASE_URL = `http://localhost:${TEST_PORT}`;

async function streamToString(stream: Readable): Promise<string> {
  const chunks: Buffer[] = [];
  return new Promise((resolve, reject) => {
    stream.on('data', (chunk) => chunks.push(Buffer.from(chunk)));
    stream.on('error', reject);
    stream.on('end', () => resolve(Buffer.concat(chunks).toString('utf8')));
  });
}

async function putPolicy(statements: any[]) {
  await authClient.send(
    new PutBucketPolicyCommand({
      Bucket: BUCKET,
      Policy: JSON.stringify({ Version: '2012-10-17', Statement: statements }),
    })
  );
}

async function clearPolicy() {
  await authClient.send(new DeleteBucketPolicyCommand({ Bucket: BUCKET }));
}

// ============================
// Server setup
// ============================

tap.test('setup: start server, create bucket, upload object', async () => {
  testSmarts3Instance = await smarts3.Smarts3.createAndStart({
    server: { port: TEST_PORT, silent: true, region: 'us-east-1' },
    storage: { cleanSlate: true },
    auth: {
      enabled: true,
      credentials: [{ accessKeyId: ACCESS_KEY, secretAccessKey: SECRET_KEY }],
    },
  });

  authClient = new S3Client({
    endpoint: BASE_URL,
    region: 'us-east-1',
    credentials: { accessKeyId: ACCESS_KEY, secretAccessKey: SECRET_KEY },
    forcePathStyle: true,
  });

  await authClient.send(new CreateBucketCommand({ Bucket: BUCKET }));
  await authClient.send(
    new PutObjectCommand({
      Bucket: BUCKET,
      Key: 'test-obj.txt',
      Body: 'hello policy eval',
      ContentType: 'text/plain',
    })
  );
});

// ============================
// Principal matching
// ============================

tap.test('Principal: "*" → anonymous fetch GET succeeds', async () => {
  await putPolicy([
    {
      Sid: 'PrincipalWildcard',
      Effect: 'Allow',
      Principal: '*',
      Action: 's3:GetObject',
      Resource: `arn:aws:s3:::${BUCKET}/*`,
    },
  ]);

  const resp = await fetch(`${BASE_URL}/${BUCKET}/test-obj.txt`);
  expect(resp.status).toEqual(200);
  const text = await resp.text();
  expect(text).toEqual('hello policy eval');
  await clearPolicy();
});

tap.test('Principal: {"AWS": "*"} → anonymous GET fails, authenticated GET succeeds', async () => {
  await putPolicy([
    {
      Sid: 'AwsWildcard',
      Effect: 'Allow',
      Principal: { AWS: '*' },
      Action: 's3:GetObject',
      Resource: `arn:aws:s3:::${BUCKET}/*`,
    },
  ]);

  // Anonymous → no identity → Principal AWS:* doesn't match anonymous → NoOpinion → denied
  const anonResp = await fetch(`${BASE_URL}/${BUCKET}/test-obj.txt`);
  expect(anonResp.status).toEqual(403);

  // Authenticated → has identity → Principal AWS:* matches → Allow
  const authResp = await authClient.send(
    new GetObjectCommand({ Bucket: BUCKET, Key: 'test-obj.txt' })
  );
  expect(authResp.$metadata.httpStatusCode).toEqual(200);
  await clearPolicy();
});

tap.test('Principal: {"AWS": "arn:aws:iam::TESTAKID"} → authenticated GET succeeds', async () => {
  await putPolicy([
    {
      Sid: 'SpecificPrincipal',
      Effect: 'Allow',
      Principal: { AWS: `arn:aws:iam::${ACCESS_KEY}` },
      Action: 's3:GetObject',
      Resource: `arn:aws:s3:::${BUCKET}/*`,
    },
  ]);

  const resp = await authClient.send(
    new GetObjectCommand({ Bucket: BUCKET, Key: 'test-obj.txt' })
  );
  expect(resp.$metadata.httpStatusCode).toEqual(200);
  await clearPolicy();
});

tap.test('Principal: {"AWS": "arn:aws:iam::WRONGKEY"} → authenticated GET still succeeds (default allow)', async () => {
  await putPolicy([
    {
      Sid: 'WrongPrincipal',
      Effect: 'Allow',
      Principal: { AWS: 'arn:aws:iam::WRONGKEY' },
      Action: 's3:GetObject',
      Resource: `arn:aws:s3:::${BUCKET}/*`,
    },
  ]);

  // Principal doesn't match our key → NoOpinion → default allow for authenticated
  const resp = await authClient.send(
    new GetObjectCommand({ Bucket: BUCKET, Key: 'test-obj.txt' })
  );
  expect(resp.$metadata.httpStatusCode).toEqual(200);
  await clearPolicy();
});

// ============================
// Action matching
// ============================

tap.test('Action: "s3:*" → anonymous can GET and PUT (wildcard matches all)', async () => {
  await putPolicy([
    {
      Sid: 'S3Wildcard',
      Effect: 'Allow',
      Principal: '*',
      Action: 's3:*',
      Resource: `arn:aws:s3:::${BUCKET}/*`,
    },
  ]);

  const getResp = await fetch(`${BASE_URL}/${BUCKET}/test-obj.txt`);
  expect(getResp.status).toEqual(200);

  const putResp = await fetch(`${BASE_URL}/${BUCKET}/anon-wildcard.txt`, {
    method: 'PUT',
    body: 'wildcard put',
  });
  expect(putResp.status).toEqual(200);

  // Clean up the object we created
  await authClient.send(new DeleteObjectCommand({ Bucket: BUCKET, Key: 'anon-wildcard.txt' }));
  await clearPolicy();
});

tap.test('Action: "*" → global wildcard matches all actions', async () => {
  await putPolicy([
    {
      Sid: 'GlobalWildcard',
      Effect: 'Allow',
      Principal: '*',
      Action: '*',
      Resource: `arn:aws:s3:::${BUCKET}/*`,
    },
  ]);

  const getResp = await fetch(`${BASE_URL}/${BUCKET}/test-obj.txt`);
  expect(getResp.status).toEqual(200);

  const putResp = await fetch(`${BASE_URL}/${BUCKET}/anon-global.txt`, {
    method: 'PUT',
    body: 'global wildcard',
  });
  expect(putResp.status).toEqual(200);

  await authClient.send(new DeleteObjectCommand({ Bucket: BUCKET, Key: 'anon-global.txt' }));
  await clearPolicy();
});

tap.test('Action: "s3:Get*" → anonymous can GET but not PUT (prefix wildcard)', async () => {
  await putPolicy([
    {
      Sid: 'PrefixWildcard',
      Effect: 'Allow',
      Principal: '*',
      Action: 's3:Get*',
      Resource: `arn:aws:s3:::${BUCKET}/*`,
    },
  ]);

  const getResp = await fetch(`${BASE_URL}/${BUCKET}/test-obj.txt`);
  expect(getResp.status).toEqual(200);

  const putResp = await fetch(`${BASE_URL}/${BUCKET}/anon-prefix.txt`, {
    method: 'PUT',
    body: 'should fail',
  });
  expect(putResp.status).toEqual(403);
  await clearPolicy();
});

tap.test('Action: ["s3:GetObject", "s3:PutObject"] → anonymous can GET and PUT but not DELETE', async () => {
  await putPolicy([
    {
      Sid: 'MultiAction',
      Effect: 'Allow',
      Principal: '*',
      Action: ['s3:GetObject', 's3:PutObject'],
      Resource: `arn:aws:s3:::${BUCKET}/*`,
    },
  ]);

  const getResp = await fetch(`${BASE_URL}/${BUCKET}/test-obj.txt`);
  expect(getResp.status).toEqual(200);

  const putResp = await fetch(`${BASE_URL}/${BUCKET}/anon-multi.txt`, {
    method: 'PUT',
    body: 'multi action',
  });
  expect(putResp.status).toEqual(200);

  const delResp = await fetch(`${BASE_URL}/${BUCKET}/anon-multi.txt`, {
    method: 'DELETE',
  });
  expect(delResp.status).toEqual(403);

  // Clean up
  await authClient.send(new DeleteObjectCommand({ Bucket: BUCKET, Key: 'anon-multi.txt' }));
  await clearPolicy();
});

// ============================
// Resource ARN matching
// ============================

tap.test('Resource: "arn:aws:s3:::eval-bucket/*" → anonymous GET of object succeeds', async () => {
  await putPolicy([
    {
      Sid: 'ResourceWildcard',
      Effect: 'Allow',
      Principal: '*',
      Action: 's3:GetObject',
      Resource: `arn:aws:s3:::${BUCKET}/*`,
    },
  ]);

  const resp = await fetch(`${BASE_URL}/${BUCKET}/test-obj.txt`);
  expect(resp.status).toEqual(200);
  await clearPolicy();
});

tap.test('Resource: exact key → anonymous GET of that key succeeds, other key fails', async () => {
  await putPolicy([
    {
      Sid: 'ExactResource',
      Effect: 'Allow',
      Principal: '*',
      Action: 's3:GetObject',
      Resource: `arn:aws:s3:::${BUCKET}/test-obj.txt`,
    },
  ]);

  const goodResp = await fetch(`${BASE_URL}/${BUCKET}/test-obj.txt`);
  expect(goodResp.status).toEqual(200);

  // Other key → resource doesn't match → NoOpinion → denied for anonymous
  const badResp = await fetch(`${BASE_URL}/${BUCKET}/nonexistent.txt`);
  expect(badResp.status).toEqual(403);
  await clearPolicy();
});

tap.test('Resource: wrong bucket ARN → NoOpinion → anonymous GET denied', async () => {
  await putPolicy([
    {
      Sid: 'WrongBucket',
      Effect: 'Allow',
      Principal: '*',
      Action: 's3:GetObject',
      Resource: 'arn:aws:s3:::other-bucket/*',
    },
  ]);

  const resp = await fetch(`${BASE_URL}/${BUCKET}/test-obj.txt`);
  expect(resp.status).toEqual(403);
  await clearPolicy();
});

tap.test('Resource: "*" → matches everything, anonymous GET succeeds', async () => {
  await putPolicy([
    {
      Sid: 'StarResource',
      Effect: 'Allow',
      Principal: '*',
      Action: 's3:GetObject',
      Resource: '*',
    },
  ]);

  const resp = await fetch(`${BASE_URL}/${BUCKET}/test-obj.txt`);
  expect(resp.status).toEqual(200);
  await clearPolicy();
});

// ============================
// Deny-over-Allow priority
// ============================

tap.test('Allow + Deny same action → anonymous GET denied', async () => {
  await putPolicy([
    {
      Sid: 'AllowGet',
      Effect: 'Allow',
      Principal: '*',
      Action: 's3:GetObject',
      Resource: `arn:aws:s3:::${BUCKET}/*`,
    },
    {
      Sid: 'DenyGet',
      Effect: 'Deny',
      Principal: '*',
      Action: 's3:GetObject',
      Resource: `arn:aws:s3:::${BUCKET}/*`,
    },
  ]);

  const resp = await fetch(`${BASE_URL}/${BUCKET}/test-obj.txt`);
  expect(resp.status).toEqual(403);
  await clearPolicy();
});

tap.test('Allow s3:* + Deny s3:DeleteObject → anonymous GET succeeds, DELETE denied', async () => {
  await putPolicy([
    {
      Sid: 'AllowAll',
      Effect: 'Allow',
      Principal: '*',
      Action: 's3:*',
      Resource: `arn:aws:s3:::${BUCKET}/*`,
    },
    {
      Sid: 'DenyDelete',
      Effect: 'Deny',
      Principal: '*',
      Action: 's3:DeleteObject',
      Resource: `arn:aws:s3:::${BUCKET}/*`,
    },
  ]);

  const getResp = await fetch(`${BASE_URL}/${BUCKET}/test-obj.txt`);
  expect(getResp.status).toEqual(200);

  const delResp = await fetch(`${BASE_URL}/${BUCKET}/test-obj.txt`, { method: 'DELETE' });
  expect(delResp.status).toEqual(403);
  await clearPolicy();
});

tap.test('Statement order does not matter: Deny first, Allow second → still denied', async () => {
  await putPolicy([
    {
      Sid: 'DenyFirst',
      Effect: 'Deny',
      Principal: '*',
      Action: 's3:GetObject',
      Resource: `arn:aws:s3:::${BUCKET}/*`,
    },
    {
      Sid: 'AllowSecond',
      Effect: 'Allow',
      Principal: '*',
      Action: 's3:GetObject',
      Resource: `arn:aws:s3:::${BUCKET}/*`,
    },
  ]);

  const resp = await fetch(`${BASE_URL}/${BUCKET}/test-obj.txt`);
  expect(resp.status).toEqual(403);
  await clearPolicy();
});
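
// A minimal sketch of the evaluation order these tests exercise (an assumption
// about smarts3 internals, mirroring standard S3 semantics): any matching Deny
// wins, then any matching Allow; with no matching statement the result is
// "NoOpinion", which falls back to allow for authenticated callers and deny
// for anonymous ones.
type Verdict = 'Deny' | 'Allow' | 'NoOpinion';
function combineVerdicts(verdicts: Verdict[], authenticated: boolean): boolean {
  if (verdicts.includes('Deny')) return false; // explicit deny always wins
  if (verdicts.includes('Allow')) return true; // otherwise any allow grants access
  return authenticated; // NoOpinion fallback
}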

// ============================
// NoOpinion fallback
// ============================

tap.test('NoOpinion: policy allows PutObject only → authenticated GET falls through (default allow)', async () => {
  await putPolicy([
    {
      Sid: 'AllowPutOnly',
      Effect: 'Allow',
      Principal: '*',
      Action: 's3:PutObject',
      Resource: `arn:aws:s3:::${BUCKET}/*`,
    },
  ]);

  // Authenticated → NoOpinion → default allow
  const resp = await authClient.send(
    new GetObjectCommand({ Bucket: BUCKET, Key: 'test-obj.txt' })
  );
  expect(resp.$metadata.httpStatusCode).toEqual(200);
  await clearPolicy();
});

tap.test('NoOpinion: same policy → anonymous GET falls through → default deny (403)', async () => {
  await putPolicy([
    {
      Sid: 'AllowPutOnly',
      Effect: 'Allow',
      Principal: '*',
      Action: 's3:PutObject',
      Resource: `arn:aws:s3:::${BUCKET}/*`,
    },
  ]);

  // Anonymous → NoOpinion for GetObject → default deny
  const resp = await fetch(`${BASE_URL}/${BUCKET}/test-obj.txt`);
  expect(resp.status).toEqual(403);
  await clearPolicy();
});

// ============================
// IAM action mapping
// ============================

tap.test('Policy allows s3:GetObject → anonymous HEAD object succeeds (HeadObject maps to s3:GetObject)', async () => {
  await putPolicy([
    {
      Sid: 'AllowGet',
      Effect: 'Allow',
      Principal: '*',
      Action: 's3:GetObject',
      Resource: `arn:aws:s3:::${BUCKET}/*`,
    },
  ]);

  const resp = await fetch(`${BASE_URL}/${BUCKET}/test-obj.txt`, { method: 'HEAD' });
  expect(resp.status).toEqual(200);
  await clearPolicy();
});

tap.test('Policy allows s3:ListBucket → anonymous HEAD bucket succeeds', async () => {
  await putPolicy([
    {
      Sid: 'AllowList',
      Effect: 'Allow',
      Principal: '*',
      Action: 's3:ListBucket',
      Resource: `arn:aws:s3:::${BUCKET}`,
    },
  ]);

  const resp = await fetch(`${BASE_URL}/${BUCKET}`, { method: 'HEAD' });
  expect(resp.status).toEqual(200);
  await clearPolicy();
});

tap.test('Policy allows s3:ListBucket → anonymous GET bucket (list objects) succeeds', async () => {
  await putPolicy([
    {
      Sid: 'AllowList',
      Effect: 'Allow',
      Principal: '*',
      Action: 's3:ListBucket',
      Resource: `arn:aws:s3:::${BUCKET}`,
    },
  ]);

  const resp = await fetch(`${BASE_URL}/${BUCKET}`);
  expect(resp.status).toEqual(200);
  const text = await resp.text();
  expect(text).toInclude('ListBucketResult');
  await clearPolicy();
});

// ============================
// Teardown
// ============================

tap.test('teardown: clean up and stop server', async () => {
  await authClient.send(new DeleteObjectCommand({ Bucket: BUCKET, Key: 'test-obj.txt' }));
  await authClient.send(new DeleteBucketCommand({ Bucket: BUCKET }));
  await testSmarts3Instance.stop();
});

export default tap.start();
@@ -7,8 +7,12 @@ let testSmarts3Instance: smarts3.Smarts3;
tap.test('should create a smarts3 instance and run it', async (toolsArg) => {
  testSmarts3Instance = await smarts3.Smarts3.createAndStart({
    server: {
      port: 3333,
    },
    storage: {
      cleanSlate: true,
    },
  });
  console.log(`Let the instance run for 2 seconds`);
  await toolsArg.delayFor(2000);
@@ -3,6 +3,6 @@
 */
export const commitinfo = {
  name: '@push.rocks/smarts3',
  version: '3.2.0',
  version: '5.3.0',
  description: 'A Node.js TypeScript package to create a local S3 endpoint for simulating AWS S3 operations using mapped local directories for development and testing purposes.'
}
@@ -1,118 +0,0 @@
import * as plugins from '../plugins.js';
import { S3Error } from './s3-error.js';
import { createXml } from '../utils/xml.utils.js';
import type { FilesystemStore } from './filesystem-store.js';
import type { MultipartUploadManager } from './multipart-manager.js';
import type { Readable } from 'stream';

/**
 * S3 request context with helper methods
 */
export class S3Context {
  public method: string;
  public url: URL;
  public headers: plugins.http.IncomingHttpHeaders;
  public params: Record<string, string> = {};
  public query: Record<string, string> = {};
  public store: FilesystemStore;
  public multipart: MultipartUploadManager;

  private req: plugins.http.IncomingMessage;
  private res: plugins.http.ServerResponse;
  private statusCode: number = 200;
  private responseHeaders: Record<string, string> = {};

  constructor(
    req: plugins.http.IncomingMessage,
    res: plugins.http.ServerResponse,
    store: FilesystemStore,
    multipart: MultipartUploadManager
  ) {
    this.req = req;
    this.res = res;
    this.store = store;
    this.multipart = multipart;
    this.method = req.method || 'GET';
    this.headers = req.headers;

    // Parse URL and query string
    const fullUrl = `http://${req.headers.host || 'localhost'}${req.url || '/'}`;
    this.url = new URL(fullUrl);

    // Parse query string into object
    this.url.searchParams.forEach((value, key) => {
      this.query[key] = value;
    });
  }

  /**
   * Set response status code
   */
  public status(code: number): this {
    this.statusCode = code;
    return this;
  }

  /**
   * Set response header
   */
  public setHeader(name: string, value: string | number): this {
    this.responseHeaders[name] = value.toString();
    return this;
  }

  /**
   * Send response body (string, Buffer, or Stream)
   */
  public async send(body: string | Buffer | Readable | NodeJS.ReadableStream): Promise<void> {
    // Write status and headers
    this.res.writeHead(this.statusCode, this.responseHeaders);

    // Handle different body types
    if (typeof body === 'string' || body instanceof Buffer) {
      this.res.end(body);
    } else if (body && typeof (body as any).pipe === 'function') {
      // It's a stream
      (body as Readable).pipe(this.res);
    } else {
      this.res.end();
    }
  }

  /**
   * Send XML response
   */
  public async sendXML(obj: any): Promise<void> {
    const xml = createXml(obj, { format: true });
    this.setHeader('Content-Type', 'application/xml');
    this.setHeader('Content-Length', Buffer.byteLength(xml));
    await this.send(xml);
  }

  /**
   * Throw an S3 error
   */
  public throw(code: string, message: string, detail?: Record<string, any>): never {
    throw new S3Error(code, message, detail);
  }

  /**
   * Read and parse request body as string
   */
  public async readBody(): Promise<string> {
    return new Promise((resolve, reject) => {
      const chunks: Buffer[] = [];

      this.req.on('data', (chunk) => chunks.push(chunk));
      this.req.on('end', () => resolve(Buffer.concat(chunks).toString('utf8')));
      this.req.on('error', reject);
    });
  }

  /**
   * Get the request stream (for streaming uploads)
   */
  public getRequestStream(): NodeJS.ReadableStream {
    return this.req;
  }
}
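
// Hypothetical handler usage (illustrative only; actual handler wiring lives
// elsewhere in the server):
//   ctx.status(200).setHeader('Content-Type', 'text/plain');
//   await ctx.send('ok');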
@@ -1,562 +0,0 @@
import * as plugins from '../plugins.js';
import { S3Error } from './s3-error.js';
import { Readable } from 'stream';

export interface IS3Bucket {
  name: string;
  creationDate: Date;
}

export interface IS3Object {
  key: string;
  size: number;
  lastModified: Date;
  md5: string;
  metadata: Record<string, string>;
  content?: Readable;
}

export interface IListObjectsOptions {
  prefix?: string;
  delimiter?: string;
  maxKeys?: number;
  continuationToken?: string;
}

export interface IListObjectsResult {
  contents: IS3Object[];
  commonPrefixes: string[];
  isTruncated: boolean;
  nextContinuationToken?: string;
  prefix: string;
  delimiter: string;
  maxKeys: number;
}

export interface IRangeOptions {
  start: number;
  end: number;
}

/**
 * Filesystem-backed storage for S3 objects using smartfs
 */
export class FilesystemStore {
  constructor(private rootDir: string) {}

  /**
   * Initialize store (ensure root directory exists)
   */
  public async initialize(): Promise<void> {
    await plugins.smartfs.directory(this.rootDir).recursive().create();
  }

  /**
   * Reset store (delete all buckets)
   */
  public async reset(): Promise<void> {
    // Delete directory and recreate it
    const exists = await plugins.smartfs.directory(this.rootDir).exists();
    if (exists) {
      await plugins.smartfs.directory(this.rootDir).recursive().delete();
    }
    await plugins.smartfs.directory(this.rootDir).recursive().create();
  }

  // ============================
  // BUCKET OPERATIONS
  // ============================

  /**
   * List all buckets
   */
  public async listBuckets(): Promise<IS3Bucket[]> {
    const entries = await plugins.smartfs.directory(this.rootDir).includeStats().list();
    const buckets: IS3Bucket[] = [];

    for (const entry of entries) {
      if (entry.isDirectory && entry.stats) {
        buckets.push({
          name: entry.name,
          creationDate: entry.stats.birthtime,
        });
      }
    }

    return buckets.sort((a, b) => a.name.localeCompare(b.name));
  }

  /**
   * Check if bucket exists
   */
  public async bucketExists(bucket: string): Promise<boolean> {
    const bucketPath = this.getBucketPath(bucket);
    return plugins.smartfs.directory(bucketPath).exists();
  }

  /**
   * Create bucket
   */
  public async createBucket(bucket: string): Promise<void> {
    const bucketPath = this.getBucketPath(bucket);
    await plugins.smartfs.directory(bucketPath).recursive().create();
  }

  /**
   * Delete bucket (must be empty)
   */
  public async deleteBucket(bucket: string): Promise<void> {
    const bucketPath = this.getBucketPath(bucket);

    // Check if bucket exists
    if (!(await this.bucketExists(bucket))) {
      throw new S3Error('NoSuchBucket', 'The specified bucket does not exist');
    }

    // Check if bucket is empty
    const files = await plugins.smartfs.directory(bucketPath).recursive().list();
    if (files.length > 0) {
      throw new S3Error('BucketNotEmpty', 'The bucket you tried to delete is not empty');
    }

    await plugins.smartfs.directory(bucketPath).recursive().delete();
  }

  // ============================
  // OBJECT OPERATIONS
  // ============================

  /**
   * List objects in bucket
   */
  public async listObjects(
    bucket: string,
    options: IListObjectsOptions = {}
  ): Promise<IListObjectsResult> {
    const bucketPath = this.getBucketPath(bucket);

    if (!(await this.bucketExists(bucket))) {
      throw new S3Error('NoSuchBucket', 'The specified bucket does not exist');
    }

    const {
      prefix = '',
      delimiter = '',
      maxKeys = 1000,
      continuationToken,
    } = options;

    // List all object files recursively with filter
    const entries = await plugins.smartfs
      .directory(bucketPath)
      .recursive()
      .filter((entry) => entry.name.endsWith('._S3_object'))
      .list();

    // Convert file paths to keys
    let keys = entries.map((entry) => {
      const relativePath = plugins.path.relative(bucketPath, entry.path);
      const key = this.decodeKey(relativePath.replace(/\._S3_object$/, ''));
      return key;
    });

    // Apply prefix filter
    if (prefix) {
      keys = keys.filter((key) => key.startsWith(prefix));
    }

    // Sort keys
    keys = keys.sort();

    // Handle continuation token (simple implementation using key name)
    if (continuationToken) {
      const startIndex = keys.findIndex((key) => key > continuationToken);
      if (startIndex > 0) {
        keys = keys.slice(startIndex);
      }
    }

    // Handle delimiter (common prefixes)
    const commonPrefixes: Set<string> = new Set();
    const contents: IS3Object[] = [];

    for (const key of keys) {
      if (delimiter) {
        // Find first delimiter after prefix
        const remainingKey = key.slice(prefix.length);
        const delimiterIndex = remainingKey.indexOf(delimiter);

        if (delimiterIndex !== -1) {
          // This key has a delimiter, add to common prefixes
          const commonPrefix = prefix + remainingKey.slice(0, delimiterIndex + delimiter.length);
          commonPrefixes.add(commonPrefix);
          continue;
        }
      }

      // Add to contents (limited by maxKeys)
      if (contents.length >= maxKeys) {
        break;
      }

      try {
        const objectInfo = await this.getObjectInfo(bucket, key);
        contents.push(objectInfo);
      } catch (err) {
        // Skip if object no longer exists
        continue;
      }
    }

    const isTruncated = keys.length > contents.length + commonPrefixes.size;
    const nextContinuationToken = isTruncated
      ? contents[contents.length - 1]?.key
      : undefined;

    return {
      contents,
      commonPrefixes: Array.from(commonPrefixes).sort(),
      isTruncated,
      nextContinuationToken,
      prefix,
      delimiter,
      maxKeys,
    };
  }
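
  // Note on the continuation token above: keys are only sliced when findIndex
  // returns a positive index; a token greater than every remaining key
  // (findIndex === -1) falls through and re-lists from the start, so this is a
  // simplification rather than exact AWS pagination behavior.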

  /**
   * Get object info (without content)
   */
  private async getObjectInfo(bucket: string, key: string): Promise<IS3Object> {
    const objectPath = this.getObjectPath(bucket, key);
    const metadataPath = `${objectPath}.metadata.json`;
    const md5Path = `${objectPath}.md5`;

    const [stats, metadata, md5] = await Promise.all([
      plugins.smartfs.file(objectPath).stat(),
      this.readMetadata(metadataPath),
      this.readMD5(objectPath, md5Path),
    ]);

    return {
      key,
      size: stats.size,
      lastModified: stats.mtime,
      md5,
      metadata,
    };
  }

  /**
   * Check if object exists
   */
  public async objectExists(bucket: string, key: string): Promise<boolean> {
    const objectPath = this.getObjectPath(bucket, key);
    return plugins.smartfs.file(objectPath).exists();
  }

  /**
   * Put object (upload with streaming)
   */
  public async putObject(
    bucket: string,
    key: string,
    stream: NodeJS.ReadableStream,
    metadata: Record<string, string> = {}
  ): Promise<{ size: number; md5: string }> {
    const objectPath = this.getObjectPath(bucket, key);

    // Ensure bucket exists
    if (!(await this.bucketExists(bucket))) {
      throw new S3Error('NoSuchBucket', 'The specified bucket does not exist');
    }

    // Ensure parent directory exists
    const parentDir = plugins.path.dirname(objectPath);
    await plugins.smartfs.directory(parentDir).recursive().create();

    // Write with MD5 calculation
    const result = await this.writeStreamWithMD5(stream, objectPath);

    // Save metadata
    const metadataPath = `${objectPath}.metadata.json`;
    await plugins.smartfs.file(metadataPath).write(JSON.stringify(metadata, null, 2));

    return result;
  }

  /**
   * Get object (download with streaming)
   */
  public async getObject(
    bucket: string,
    key: string,
    range?: IRangeOptions
  ): Promise<IS3Object> {
    const objectPath = this.getObjectPath(bucket, key);

    if (!(await this.objectExists(bucket, key))) {
      throw new S3Error('NoSuchKey', 'The specified key does not exist');
    }

    const info = await this.getObjectInfo(bucket, key);

    // Get Web ReadableStream from smartfs
    const webStream = await plugins.smartfs.file(objectPath).readStream();

    // Convert Web Stream to Node.js Readable stream
    let nodeStream = Readable.fromWeb(webStream as any);

    // Handle range requests if needed
    if (range) {
      // For range requests, we need to skip bytes and limit output
      let bytesRead = 0;
      const rangeStart = range.start;
      const rangeEnd = range.end;

      nodeStream = nodeStream.pipe(new (require('stream').Transform)({
        transform(chunk: Buffer, encoding, callback) {
          const chunkStart = bytesRead;
          const chunkEnd = bytesRead + chunk.length - 1;
          bytesRead += chunk.length;

          // Skip chunks before range
          if (chunkEnd < rangeStart) {
            callback();
            return;
          }

          // Stop after range
          if (chunkStart > rangeEnd) {
            this.end();
            callback();
            return;
          }

          // Slice chunk to fit range
          const sliceStart = Math.max(0, rangeStart - chunkStart);
          const sliceEnd = Math.min(chunk.length, rangeEnd - chunkStart + 1);

          callback(null, chunk.slice(sliceStart, sliceEnd));
        }
      }));
    }

    return {
      ...info,
      content: nodeStream,
    };
  }

  /**
   * Delete object
   */
  public async deleteObject(bucket: string, key: string): Promise<void> {
    const objectPath = this.getObjectPath(bucket, key);
    const metadataPath = `${objectPath}.metadata.json`;
    const md5Path = `${objectPath}.md5`;

    // S3 doesn't throw error if object doesn't exist
    await Promise.all([
      plugins.smartfs.file(objectPath).delete().catch(() => {}),
      plugins.smartfs.file(metadataPath).delete().catch(() => {}),
      plugins.smartfs.file(md5Path).delete().catch(() => {}),
    ]);
  }

  /**
   * Copy object
   */
  public async copyObject(
    srcBucket: string,
    srcKey: string,
    destBucket: string,
    destKey: string,
    metadataDirective: 'COPY' | 'REPLACE' = 'COPY',
    newMetadata?: Record<string, string>
  ): Promise<{ size: number; md5: string }> {
    const srcObjectPath = this.getObjectPath(srcBucket, srcKey);
    const destObjectPath = this.getObjectPath(destBucket, destKey);

    // Check source exists
    if (!(await this.objectExists(srcBucket, srcKey))) {
      throw new S3Error('NoSuchKey', 'The specified key does not exist');
    }

    // Ensure dest bucket exists
    if (!(await this.bucketExists(destBucket))) {
      throw new S3Error('NoSuchBucket', 'The specified bucket does not exist');
    }

    // Ensure parent directory exists
    const parentDir = plugins.path.dirname(destObjectPath);
    await plugins.smartfs.directory(parentDir).recursive().create();

    // Copy object file
    await plugins.smartfs.file(srcObjectPath).copy(destObjectPath);

    // Handle metadata
    if (metadataDirective === 'COPY') {
      // Copy metadata
      const srcMetadataPath = `${srcObjectPath}.metadata.json`;
      const destMetadataPath = `${destObjectPath}.metadata.json`;
      await plugins.smartfs.file(srcMetadataPath).copy(destMetadataPath).catch(() => {});
    } else if (newMetadata) {
      // Replace with new metadata
      const destMetadataPath = `${destObjectPath}.metadata.json`;
      await plugins.smartfs.file(destMetadataPath).write(JSON.stringify(newMetadata, null, 2));
    }

    // Copy MD5
    const srcMD5Path = `${srcObjectPath}.md5`;
    const destMD5Path = `${destObjectPath}.md5`;
    await plugins.smartfs.file(srcMD5Path).copy(destMD5Path).catch(() => {});

    // Get result info
    const stats = await plugins.smartfs.file(destObjectPath).stat();
    const md5 = await this.readMD5(destObjectPath, destMD5Path);

    return { size: stats.size, md5 };
  }

  // ============================
  // HELPER METHODS
  // ============================

  /**
   * Get bucket directory path
   */
  private getBucketPath(bucket: string): string {
    return plugins.path.join(this.rootDir, bucket);
  }

  /**
   * Get object file path
   */
  private getObjectPath(bucket: string, key: string): string {
    return plugins.path.join(
      this.rootDir,
      bucket,
      this.encodeKey(key) + '._S3_object'
    );
  }

  /**
   * Encode key for Windows compatibility
   */
  private encodeKey(key: string): string {
    if (process.platform === 'win32') {
      // Replace invalid Windows filename chars with hex encoding
      return key.replace(/[<>:"\\|?*]/g, (ch) =>
        '&' + Buffer.from(ch, 'utf8').toString('hex')
      );
    }
    return key;
  }

  /**
   * Decode key from filesystem path
   */
  private decodeKey(encodedKey: string): string {
    if (process.platform === 'win32') {
      // Decode hex-encoded chars
      return encodedKey.replace(/&([0-9a-f]{2})/gi, (_, hex) =>
        Buffer.from(hex, 'hex').toString('utf8')
      );
    }
    return encodedKey;
  }
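
  // Example (illustrative): on win32, encodeKey('a:b?c') yields 'a&3ab&3fc'
  // (':' → 0x3a, '?' → 0x3f), and decodeKey reverses it, so keys containing
  // characters that are invalid in Windows filenames round-trip losslessly.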

  /**
   * Write stream to file with MD5 calculation
   */
  private async writeStreamWithMD5(
    input: NodeJS.ReadableStream,
    destPath: string
  ): Promise<{ size: number; md5: string }> {
    const hash = plugins.crypto.createHash('md5');
    let totalSize = 0;

    return new Promise(async (resolve, reject) => {
      // Get Web WritableStream from smartfs
      const webWriteStream = await plugins.smartfs.file(destPath).writeStream();
      const writer = webWriteStream.getWriter();

      // Read from Node.js stream and write to Web stream
      input.on('data', async (chunk: Buffer) => {
        hash.update(chunk);
        totalSize += chunk.length;

        try {
          await writer.write(new Uint8Array(chunk));
        } catch (err) {
          reject(err);
        }
      });

      input.on('error', (err) => {
        writer.abort(err);
        reject(err);
      });

      input.on('end', async () => {
        try {
          await writer.close();
          const md5 = hash.digest('hex');

          // Save MD5 to separate file
          const md5Path = `${destPath}.md5`;
          await plugins.smartfs.file(md5Path).write(md5);

          resolve({ size: totalSize, md5 });
        } catch (err) {
          reject(err);
        }
      });
    });
  }
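
  // Note: the 'data' handler above does not propagate backpressure (the input
  // keeps flowing while writer.write() promises queue up). That is acceptable
  // for the small fixtures a local test server handles; a production store
  // would pause the input or use stream.pipeline instead.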
|
||||
/**
|
||||
* Read MD5 hash (calculate if missing)
|
||||
*/
|
||||
private async readMD5(objectPath: string, md5Path: string): Promise<string> {
|
||||
try {
|
||||
// Try to read cached MD5
|
||||
const md5 = await plugins.smartfs.file(md5Path).encoding('utf8').read() as string;
|
||||
return md5.trim();
|
||||
} catch (err) {
|
||||
// Calculate MD5 if not cached
|
||||
return new Promise(async (resolve, reject) => {
|
||||
const hash = plugins.crypto.createHash('md5');
|
||||
|
||||
try {
|
||||
const webStream = await plugins.smartfs.file(objectPath).readStream();
|
||||
const nodeStream = Readable.fromWeb(webStream as any);
|
||||
|
||||
nodeStream.on('data', (chunk: Buffer) => hash.update(chunk));
|
||||
nodeStream.on('end', async () => {
|
||||
const md5 = hash.digest('hex');
|
||||
// Cache it
|
||||
await plugins.smartfs.file(md5Path).write(md5);
|
||||
resolve(md5);
|
||||
});
|
||||
nodeStream.on('error', reject);
|
||||
} catch (err) {
|
||||
reject(err);
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
  /**
   * Read metadata from JSON file
   */
  private async readMetadata(metadataPath: string): Promise<Record<string, string>> {
    try {
      const content = await plugins.smartfs.file(metadataPath).encoding('utf8').read() as string;
      return JSON.parse(content);
    } catch (err) {
      return {};
    }
  }
}
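The encodeKey/decodeKey pair above is easiest to see as a round trip. A minimal standalone sketch (hypothetical key; this is the win32 branch):

// '<' (0x3c) encodes to '&3c' and '>' (0x3e) to '&3e'
const key = 'reports/q1<draft>.txt';
const encoded = key.replace(/[<>:"\\|?*]/g, (ch) =>
  '&' + Buffer.from(ch, 'utf8').toString('hex')
);
// encoded === 'reports/q1&3cdraft&3e.txt'
const decoded = encoded.replace(/&([0-9a-f]{2})/gi, (_, hex) =>
  Buffer.from(hex, 'hex').toString('utf8')
);
// decoded === key

One caveat worth noting: a key that legitimately contains '&' followed by two hex digits would also be rewritten by decodeKey, so the encoding is not strictly reversible for such keys.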
@@ -1,130 +0,0 @@
import type { ILoggingConfig } from '../index.js';

/**
 * Log levels in order of severity
 */
const LOG_LEVELS = {
  error: 0,
  warn: 1,
  info: 2,
  debug: 3,
} as const;

type LogLevel = keyof typeof LOG_LEVELS;

/**
 * Structured logger with configurable levels and formats
 */
export class Logger {
  private config: Required<ILoggingConfig>;
  private minLevel: number;

  constructor(config: ILoggingConfig) {
    // Apply defaults for any missing config
    this.config = {
      level: config.level ?? 'info',
      format: config.format ?? 'text',
      enabled: config.enabled ?? true,
    };
    this.minLevel = LOG_LEVELS[this.config.level];
  }

  /**
   * Check if a log level should be output
   */
  private shouldLog(level: LogLevel): boolean {
    if (!this.config.enabled) {
      return false;
    }
    return LOG_LEVELS[level] <= this.minLevel;
  }

  /**
   * Format a log message
   */
  private format(level: LogLevel, message: string, meta?: Record<string, any>): string {
    const timestamp = new Date().toISOString();

    if (this.config.format === 'json') {
      return JSON.stringify({
        timestamp,
        level,
        message,
        ...(meta || {}),
      });
    }

    // Text format
    const metaStr = meta ? ` ${JSON.stringify(meta)}` : '';
    return `[${timestamp}] ${level.toUpperCase()}: ${message}${metaStr}`;
  }

  /**
   * Log at error level
   */
  public error(message: string, meta?: Record<string, any>): void {
    if (this.shouldLog('error')) {
      console.error(this.format('error', message, meta));
    }
  }

  /**
   * Log at warn level
   */
  public warn(message: string, meta?: Record<string, any>): void {
    if (this.shouldLog('warn')) {
      console.warn(this.format('warn', message, meta));
    }
  }

  /**
   * Log at info level
   */
  public info(message: string, meta?: Record<string, any>): void {
    if (this.shouldLog('info')) {
      console.log(this.format('info', message, meta));
    }
  }

  /**
   * Log at debug level
   */
  public debug(message: string, meta?: Record<string, any>): void {
    if (this.shouldLog('debug')) {
      console.log(this.format('debug', message, meta));
    }
  }

  /**
   * Log HTTP request
   */
  public request(method: string, url: string, meta?: Record<string, any>): void {
    this.info(`→ ${method} ${url}`, meta);
  }

  /**
   * Log HTTP response
   */
  public response(method: string, url: string, statusCode: number, duration: number): void {
    const level: LogLevel = statusCode >= 500 ? 'error' : statusCode >= 400 ? 'warn' : 'info';

    if (this.shouldLog(level)) {
      const message = `← ${method} ${url} - ${statusCode} (${duration}ms)`;

      if (level === 'error') {
        this.error(message, { statusCode, duration });
      } else if (level === 'warn') {
        this.warn(message, { statusCode, duration });
      } else {
        this.info(message, { statusCode, duration });
      }
    }
  }

  /**
   * Log S3 error
   */
  public s3Error(code: string, message: string, status: number): void {
    this.error(`[S3Error] ${code}: ${message}`, { code, status });
  }
}
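A minimal usage sketch for the Logger above (config values are illustrative):

const logger = new Logger({ level: 'debug', format: 'text', enabled: true });
logger.request('GET', '/my-bucket?list-type=2');
logger.debug('matched route', { pattern: '/:bucket' });
logger.response('GET', '/my-bucket?list-type=2', 200, 12);
// With level: 'info', the debug call above would be filtered out by shouldLog().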
@@ -1,43 +0,0 @@
import * as plugins from '../plugins.js';
import type { S3Context } from './context.js';

export type Middleware = (
  req: plugins.http.IncomingMessage,
  res: plugins.http.ServerResponse,
  ctx: S3Context,
  next: () => Promise<void>
) => Promise<void>;

/**
 * Middleware stack for composing request handlers
 */
export class MiddlewareStack {
  private middlewares: Middleware[] = [];

  /**
   * Add middleware to the stack
   */
  public use(middleware: Middleware): void {
    this.middlewares.push(middleware);
  }

  /**
   * Execute all middlewares in order
   */
  public async execute(
    req: plugins.http.IncomingMessage,
    res: plugins.http.ServerResponse,
    ctx: S3Context
  ): Promise<void> {
    let index = 0;

    const next = async (): Promise<void> => {
      if (index < this.middlewares.length) {
        const middleware = this.middlewares[index++];
        await middleware(req, res, ctx, next);
      }
    };

    await next();
  }
}
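A sketch of how the stack above composes; handlers run in registration order, and each resumes after its await next() once the rest of the chain finishes (the ctx argument is assumed to be an already-constructed S3Context):

const stack = new MiddlewareStack();
stack.use(async (req, res, ctx, next) => {
  const start = Date.now();
  await next(); // run the remaining middlewares, then resume here
  console.log(`${req.method} ${req.url} took ${Date.now() - start}ms`);
});
stack.use(async (req, res, ctx, next) => {
  res.setHeader('x-request-id', Math.random().toString(16).slice(2));
  await next();
});
// In the server: await stack.execute(req, res, ctx);

A middleware that returns without calling next() short-circuits the rest of the chain, which is how the CORS preflight handling later in this diff answers OPTIONS requests.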
@@ -1,238 +0,0 @@
import * as plugins from '../plugins.js';
import { Readable } from 'stream';

/**
 * Multipart upload metadata
 */
export interface IMultipartUpload {
  uploadId: string;
  bucket: string;
  key: string;
  initiated: Date;
  parts: Map<number, IPartInfo>;
  metadata: Record<string, string>;
}

/**
 * Part information
 */
export interface IPartInfo {
  partNumber: number;
  etag: string;
  size: number;
  lastModified: Date;
}

/**
 * Manages multipart upload state and storage
 */
export class MultipartUploadManager {
  private uploads: Map<string, IMultipartUpload> = new Map();
  private uploadDir: string;

  constructor(private rootDir: string) {
    this.uploadDir = plugins.path.join(rootDir, '.multipart');
  }

  /**
   * Initialize multipart uploads directory
   */
  public async initialize(): Promise<void> {
    await plugins.smartfs.directory(this.uploadDir).recursive().create();
  }

  /**
   * Generate a unique upload ID
   */
  private generateUploadId(): string {
    return plugins.crypto.randomBytes(16).toString('hex');
  }

  /**
   * Initiate a new multipart upload
   */
  public async initiateUpload(
    bucket: string,
    key: string,
    metadata: Record<string, string>
  ): Promise<string> {
    const uploadId = this.generateUploadId();

    this.uploads.set(uploadId, {
      uploadId,
      bucket,
      key,
      initiated: new Date(),
      parts: new Map(),
      metadata,
    });

    // Create directory for this upload's parts
    const uploadPath = plugins.path.join(this.uploadDir, uploadId);
    await plugins.smartfs.directory(uploadPath).recursive().create();

    return uploadId;
  }

  /**
   * Upload a part
   */
  public async uploadPart(
    uploadId: string,
    partNumber: number,
    stream: Readable
  ): Promise<IPartInfo> {
    const upload = this.uploads.get(uploadId);
    if (!upload) {
      throw new Error('No such upload');
    }

    const partPath = plugins.path.join(this.uploadDir, uploadId, `part-${partNumber}`);

    // Write part to disk
    const webWriteStream = await plugins.smartfs.file(partPath).writeStream();
    const writer = webWriteStream.getWriter();

    let size = 0;
    const hash = plugins.crypto.createHash('md5');

    for await (const chunk of stream) {
      const buffer = Buffer.isBuffer(chunk) ? chunk : Buffer.from(chunk);
      await writer.write(new Uint8Array(buffer));
      hash.update(buffer);
      size += buffer.length;
    }

    await writer.close();

    const etag = hash.digest('hex');

    const partInfo: IPartInfo = {
      partNumber,
      etag,
      size,
      lastModified: new Date(),
    };

    upload.parts.set(partNumber, partInfo);

    return partInfo;
  }

  /**
   * Complete multipart upload - combine all parts
   */
  public async completeUpload(
    uploadId: string,
    parts: Array<{ PartNumber: number; ETag: string }>
  ): Promise<{ etag: string; size: number }> {
    const upload = this.uploads.get(uploadId);
    if (!upload) {
      throw new Error('No such upload');
    }

    // Verify all parts are uploaded
    for (const part of parts) {
      const uploadedPart = upload.parts.get(part.PartNumber);
      if (!uploadedPart) {
        throw new Error(`Part ${part.PartNumber} not uploaded`);
      }
      // Normalize ETag format (remove quotes if present)
      const normalizedETag = part.ETag.replace(/"/g, '');
      if (uploadedPart.etag !== normalizedETag) {
        throw new Error(`Part ${part.PartNumber} ETag mismatch`);
      }
    }

    // Sort parts by part number
    const sortedParts = parts.sort((a, b) => a.PartNumber - b.PartNumber);

    // Combine parts into final object
    const finalPath = plugins.path.join(this.uploadDir, uploadId, 'final');
    const webWriteStream = await plugins.smartfs.file(finalPath).writeStream();
    const writer = webWriteStream.getWriter();

    const hash = plugins.crypto.createHash('md5');
    let totalSize = 0;

    for (const part of sortedParts) {
      const partPath = plugins.path.join(this.uploadDir, uploadId, `part-${part.PartNumber}`);

      // Read part and write to final file
      const partContent = await plugins.smartfs.file(partPath).read();
      const buffer = Buffer.isBuffer(partContent) ? partContent : Buffer.from(partContent as string);

      await writer.write(new Uint8Array(buffer));
      hash.update(buffer);
      totalSize += buffer.length;
    }

    await writer.close();

    const etag = hash.digest('hex');

    return { etag, size: totalSize };
  }

  /**
   * Get the final combined file path
   */
  public getFinalPath(uploadId: string): string {
    return plugins.path.join(this.uploadDir, uploadId, 'final');
  }

  /**
   * Get upload metadata
   */
  public getUpload(uploadId: string): IMultipartUpload | undefined {
    return this.uploads.get(uploadId);
  }

  /**
   * Abort multipart upload - clean up parts
   */
  public async abortUpload(uploadId: string): Promise<void> {
    const upload = this.uploads.get(uploadId);
    if (!upload) {
      throw new Error('No such upload');
    }

    // Delete upload directory
    const uploadPath = plugins.path.join(this.uploadDir, uploadId);
    await plugins.smartfs.directory(uploadPath).recursive().delete();

    // Remove from memory
    this.uploads.delete(uploadId);
  }

  /**
   * Clean up upload after completion
   */
  public async cleanupUpload(uploadId: string): Promise<void> {
    const uploadPath = plugins.path.join(this.uploadDir, uploadId);
    await plugins.smartfs.directory(uploadPath).recursive().delete();
    this.uploads.delete(uploadId);
  }

  /**
   * List all in-progress uploads for a bucket
   */
  public listUploads(bucket?: string): IMultipartUpload[] {
    const uploads = Array.from(this.uploads.values());
    if (bucket) {
      return uploads.filter((u) => u.bucket === bucket);
    }
    return uploads;
  }

  /**
   * List parts for an upload
   */
  public listParts(uploadId: string): IPartInfo[] {
    const upload = this.uploads.get(uploadId);
    if (!upload) {
      throw new Error('No such upload');
    }
    return Array.from(upload.parts.values()).sort((a, b) => a.partNumber - b.partNumber);
  }
}
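The intended call sequence for the manager above, sketched end to end with in-memory streams (bucket, key, and sizes are illustrative):

import { Readable } from 'stream';

const manager = new MultipartUploadManager('.nogit/bucketsDir');
await manager.initialize();

const uploadId = await manager.initiateUpload('my-bucket', 'big.bin', {});
const part1 = await manager.uploadPart(uploadId, 1, Readable.from([Buffer.alloc(1024, 1)]));
const part2 = await manager.uploadPart(uploadId, 2, Readable.from([Buffer.alloc(1024, 2)]));

// The caller echoes back the part numbers and ETags it was given:
const { etag, size } = await manager.completeUpload(uploadId, [
  { PartNumber: 1, ETag: part1.etag },
  { PartNumber: 2, ETag: part2.etag },
]);
// size === 2048; the combined file sits at manager.getFinalPath(uploadId)
// until cleanupUpload(uploadId) removes it.

Note that the returned etag is the MD5 of the concatenated bytes, not the "md5-of-part-md5s-N" format real S3 reports for multipart uploads, so strict clients that validate multipart ETags may notice the difference.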
@@ -1,129 +0,0 @@
import * as plugins from '../plugins.js';
import type { S3Context } from './context.js';

export type RouteHandler = (
  req: plugins.http.IncomingMessage,
  res: plugins.http.ServerResponse,
  ctx: S3Context,
  params: Record<string, string>
) => Promise<void>;

export interface IRouteMatch {
  handler: RouteHandler;
  params: Record<string, string>;
}

interface IRoute {
  method: string;
  pattern: RegExp;
  paramNames: string[];
  handler: RouteHandler;
}

/**
 * Simple HTTP router with pattern matching for S3 routes
 */
export class S3Router {
  private routes: IRoute[] = [];

  /**
   * Add a route with pattern matching
   * Supports patterns like:
   * - "/" (exact match)
   * - "/:bucket" (single param)
   * - "/:bucket/:key*" (param with wildcard - captures everything after)
   */
  public add(method: string, pattern: string, handler: RouteHandler): void {
    const { regex, paramNames } = this.convertPatternToRegex(pattern);

    this.routes.push({
      method: method.toUpperCase(),
      pattern: regex,
      paramNames,
      handler,
    });
  }

  /**
   * Match a request to a route
   */
  public match(method: string, pathname: string): IRouteMatch | null {
    // Normalize pathname: remove trailing slash unless it's root
    const normalizedPath = pathname === '/' ? pathname : pathname.replace(/\/$/, '');

    for (const route of this.routes) {
      if (route.method !== method.toUpperCase()) {
        continue;
      }

      const match = normalizedPath.match(route.pattern);
      if (match) {
        // Extract params from captured groups
        const params: Record<string, string> = {};
        for (let i = 0; i < route.paramNames.length; i++) {
          params[route.paramNames[i]] = decodeURIComponent(match[i + 1] || '');
        }

        return {
          handler: route.handler,
          params,
        };
      }
    }

    return null;
  }

  /**
   * Convert path pattern to RegExp
   * Examples:
   * - "/" → /^\/$/
   * - "/:bucket" → /^\/([^/]+)$/
   * - "/:bucket/:key*" → /^\/([^/]+)\/(.+)$/
   */
  private convertPatternToRegex(pattern: string): { regex: RegExp; paramNames: string[] } {
    const paramNames: string[] = [];
    let regexStr = pattern;

    // Process all params in a single pass to maintain order
    regexStr = regexStr.replace(/:(\w+)(\*)?/g, (match, paramName, isWildcard) => {
      paramNames.push(paramName);
      // :param* captures rest of path, :param captures single segment
      return isWildcard ? '(.+)' : '([^/]+)';
    });

    // Escape forward slashes (the only special characters these patterns contain)
    regexStr = regexStr.replace(/\//g, '\\/');

    // Add anchors
    regexStr = `^${regexStr}$`;

    return {
      regex: new RegExp(regexStr),
      paramNames,
    };
  }

  /**
   * Convenience methods for common HTTP methods
   */
  public get(pattern: string, handler: RouteHandler): void {
    this.add('GET', pattern, handler);
  }

  public put(pattern: string, handler: RouteHandler): void {
    this.add('PUT', pattern, handler);
  }

  public post(pattern: string, handler: RouteHandler): void {
    this.add('POST', pattern, handler);
  }

  public delete(pattern: string, handler: RouteHandler): void {
    this.add('DELETE', pattern, handler);
  }

  public head(pattern: string, handler: RouteHandler): void {
    this.add('HEAD', pattern, handler);
  }
}
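A registration-and-match sketch for the router above (handler body is a stub):

const router = new S3Router();
router.get('/:bucket/:key*', async (req, res, ctx, params) => {
  // params.bucket and params.key arrive already decodeURIComponent-ed
});

const match = router.match('GET', '/my-bucket/photos/2024/cat.jpg');
// match.params => { bucket: 'my-bucket', key: 'photos/2024/cat.jpg' }
// because ':key*' compiles to '(.+)' and captures across slashes.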
@@ -1,145 +0,0 @@
import * as plugins from '../plugins.js';

/**
 * S3 error codes mapped to HTTP status codes
 */
const S3_ERROR_CODES: Record<string, number> = {
  'AccessDenied': 403,
  'BadDigest': 400,
  'BadRequest': 400,
  'BucketAlreadyExists': 409,
  'BucketAlreadyOwnedByYou': 409,
  'BucketNotEmpty': 409,
  'CredentialsNotSupported': 400,
  'EntityTooSmall': 400,
  'EntityTooLarge': 400,
  'ExpiredToken': 400,
  'IncompleteBody': 400,
  'IncorrectNumberOfFilesInPostRequest': 400,
  'InlineDataTooLarge': 400,
  'InternalError': 500,
  'InvalidArgument': 400,
  'InvalidBucketName': 400,
  'InvalidDigest': 400,
  'InvalidLocationConstraint': 400,
  'InvalidPart': 400,
  'InvalidPartOrder': 400,
  'InvalidRange': 416,
  'InvalidRequest': 400,
  'InvalidSecurity': 403,
  'InvalidSOAPRequest': 400,
  'InvalidStorageClass': 400,
  'InvalidTargetBucketForLogging': 400,
  'InvalidToken': 400,
  'InvalidURI': 400,
  'KeyTooLongError': 400,
  'MalformedACLError': 400,
  'MalformedPOSTRequest': 400,
  'MalformedXML': 400,
  'MaxMessageLengthExceeded': 400,
  'MaxPostPreDataLengthExceededError': 400,
  'MetadataTooLarge': 400,
  'MethodNotAllowed': 405,
  'MissingContentLength': 411,
  'MissingRequestBodyError': 400,
  'MissingSecurityElement': 400,
  'MissingSecurityHeader': 400,
  'NoLoggingStatusForKey': 400,
  'NoSuchBucket': 404,
  'NoSuchKey': 404,
  'NoSuchLifecycleConfiguration': 404,
  'NoSuchUpload': 404,
  'NoSuchVersion': 404,
  'NotImplemented': 501,
  'NotSignedUp': 403,
  'OperationAborted': 409,
  'PermanentRedirect': 301,
  'PreconditionFailed': 412,
  'Redirect': 307,
  'RequestIsNotMultiPartContent': 400,
  'RequestTimeout': 400,
  'RequestTimeTooSkewed': 403,
  'RequestTorrentOfBucketError': 400,
  'SignatureDoesNotMatch': 403,
  'ServiceUnavailable': 503,
  'SlowDown': 503,
  'TemporaryRedirect': 307,
  'TokenRefreshRequired': 400,
  'TooManyBuckets': 400,
  'UnexpectedContent': 400,
  'UnresolvableGrantByEmailAddress': 400,
  'UserKeyMustBeSpecified': 400,
};

/**
 * S3-compatible error class that formats errors as XML responses
 */
export class S3Error extends Error {
  public status: number;
  public code: string;
  public detail: Record<string, any>;

  constructor(
    code: string,
    message: string,
    detail: Record<string, any> = {}
  ) {
    super(message);
    this.name = 'S3Error';
    this.code = code;
    this.status = S3_ERROR_CODES[code] || 500;
    this.detail = detail;

    // Maintain proper stack trace
    if (Error.captureStackTrace) {
      Error.captureStackTrace(this, S3Error);
    }
  }

  /**
   * Convert error to S3-compatible XML format
   */
  public toXML(): string {
    const smartXmlInstance = new plugins.SmartXml();
    const errorObj: any = {
      Error: {
        Code: this.code,
        Message: this.message,
        ...this.detail,
      },
    };

    const xml = smartXmlInstance.createXmlFromObject(errorObj);

    // Ensure XML declaration
    if (!xml.startsWith('<?xml')) {
      return `<?xml version="1.0" encoding="UTF-8"?>\n${xml}`;
    }

    return xml;
  }

  /**
   * Create S3Error from a generic Error
   */
  public static fromError(err: any): S3Error {
    if (err instanceof S3Error) {
      return err;
    }

    // Map common errors
    if (err.code === 'ENOENT') {
      return new S3Error('NoSuchKey', 'The specified key does not exist.');
    }
    if (err.code === 'EACCES') {
      return new S3Error('AccessDenied', 'Access Denied');
    }

    // Default to internal error
    return new S3Error(
      'InternalError',
      'We encountered an internal error. Please try again.',
      { OriginalError: err.message }
    );
  }
}
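A usage sketch for the error class above; the HTTP status comes from the S3_ERROR_CODES table:

try {
  throw new S3Error('NoSuchBucket', 'The specified bucket does not exist', {
    BucketName: 'missing-bucket',
  });
} catch (err) {
  const s3err = S3Error.fromError(err);
  console.log(s3err.status); // 404, looked up from S3_ERROR_CODES
  console.log(s3err.toXML()); // XML declaration plus <Error><Code>NoSuchBucket</Code>...
}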
@@ -1,388 +0,0 @@
import * as plugins from '../plugins.js';
import { S3Router } from './router.js';
import { MiddlewareStack } from './middleware-stack.js';
import { S3Context } from './context.js';
import { FilesystemStore } from './filesystem-store.js';
import { S3Error } from './s3-error.js';
import { Logger } from './logger.js';
import { MultipartUploadManager } from './multipart-manager.js';
import { ServiceController } from '../controllers/service.controller.js';
import { BucketController } from '../controllers/bucket.controller.js';
import { ObjectController } from '../controllers/object.controller.js';
import type { ISmarts3Config } from '../index.js';

export interface ISmarts3ServerOptions {
  port?: number;
  address?: string;
  directory?: string;
  cleanSlate?: boolean;
  silent?: boolean;
  config?: Required<ISmarts3Config>;
}

/**
 * Custom S3-compatible server implementation
 * Built on native Node.js http module with zero framework dependencies
 */
export class Smarts3Server {
  private httpServer?: plugins.http.Server;
  private router: S3Router;
  private middlewares: MiddlewareStack;
  public store: FilesystemStore; // Made public for direct access from Smarts3 class
  public multipart: MultipartUploadManager; // Made public for controller access
  private options: Required<Omit<ISmarts3ServerOptions, 'config'>>;
  private config: Required<ISmarts3Config>;
  private logger: Logger;

  constructor(options: ISmarts3ServerOptions = {}) {
    this.options = {
      port: options.port ?? 3000,
      address: options.address ?? '0.0.0.0',
      directory: options.directory ?? plugins.path.join(process.cwd(), '.nogit/bucketsDir'),
      cleanSlate: options.cleanSlate ?? false,
      silent: options.silent ?? false,
    };

    // Store config for middleware and feature configuration
    // If no config provided, create minimal default (for backward compatibility)
    this.config = options.config ?? {
      server: {
        port: this.options.port,
        address: this.options.address,
        silent: this.options.silent,
      },
      storage: {
        directory: this.options.directory,
        cleanSlate: this.options.cleanSlate,
      },
      auth: {
        enabled: false,
        credentials: [{ accessKeyId: 'S3RVER', secretAccessKey: 'S3RVER' }],
      },
      cors: {
        enabled: false,
        allowedOrigins: ['*'],
        allowedMethods: ['GET', 'POST', 'PUT', 'DELETE', 'HEAD', 'OPTIONS'],
        allowedHeaders: ['*'],
        exposedHeaders: ['ETag', 'x-amz-request-id', 'x-amz-version-id'],
        maxAge: 86400,
        allowCredentials: false,
      },
      logging: {
        level: 'info',
        format: 'text',
        enabled: true,
      },
      limits: {
        maxObjectSize: 5 * 1024 * 1024 * 1024,
        maxMetadataSize: 2048,
        requestTimeout: 300000,
      },
    };

    this.logger = new Logger(this.config.logging);
    this.store = new FilesystemStore(this.options.directory);
    this.multipart = new MultipartUploadManager(this.options.directory);
    this.router = new S3Router();
    this.middlewares = new MiddlewareStack();

    this.setupMiddlewares();
    this.setupRoutes();
  }

  /**
   * Setup middleware stack
   */
  private setupMiddlewares(): void {
    // CORS middleware (must be first to handle preflight requests)
    if (this.config.cors.enabled) {
      this.middlewares.use(async (req, res, ctx, next) => {
        const origin = req.headers.origin || req.headers.referer;

        // Check if origin is allowed
        const allowedOrigins = this.config.cors.allowedOrigins || ['*'];
        const isOriginAllowed =
          allowedOrigins.includes('*') ||
          (origin && allowedOrigins.includes(origin));

        if (isOriginAllowed) {
          // Set CORS headers
          res.setHeader(
            'Access-Control-Allow-Origin',
            allowedOrigins.includes('*') ? '*' : origin || '*'
          );

          if (this.config.cors.allowCredentials) {
            res.setHeader('Access-Control-Allow-Credentials', 'true');
          }

          // Handle preflight OPTIONS request
          if (req.method === 'OPTIONS') {
            res.setHeader(
              'Access-Control-Allow-Methods',
              (this.config.cors.allowedMethods || []).join(', ')
            );
            res.setHeader(
              'Access-Control-Allow-Headers',
              (this.config.cors.allowedHeaders || []).join(', ')
            );
            if (this.config.cors.maxAge) {
              res.setHeader(
                'Access-Control-Max-Age',
                String(this.config.cors.maxAge)
              );
            }
            res.writeHead(204);
            res.end();
            return; // Don't call next() for OPTIONS
          }

          // Set exposed headers for actual requests
          if (this.config.cors.exposedHeaders && this.config.cors.exposedHeaders.length > 0) {
            res.setHeader(
              'Access-Control-Expose-Headers',
              this.config.cors.exposedHeaders.join(', ')
            );
          }
        }

        await next();
      });
    }

    // Authentication middleware (simple static credentials)
    if (this.config.auth.enabled) {
      this.middlewares.use(async (req, res, ctx, next) => {
        const authHeader = req.headers.authorization;

        // Extract access key from Authorization header
        let accessKeyId: string | undefined;

        if (authHeader) {
          // Support multiple auth formats:
          // 1. AWS accessKeyId:signature
          // 2. AWS4-HMAC-SHA256 Credential=accessKeyId/date/region/service/aws4_request, ...
          if (authHeader.startsWith('AWS ')) {
            accessKeyId = authHeader.substring(4).split(':')[0];
          } else if (authHeader.startsWith('AWS4-HMAC-SHA256')) {
            const credentialMatch = authHeader.match(/Credential=([^/]+)\//);
            accessKeyId = credentialMatch ? credentialMatch[1] : undefined;
          }
        }

        // Check if access key is valid
        const isValid = this.config.auth.credentials.some(
          (cred) => cred.accessKeyId === accessKeyId
        );

        if (!isValid) {
          ctx.throw('AccessDenied', 'Access Denied');
          return;
        }

        await next();
      });
    }

    // Logger middleware
    if (!this.options.silent && this.config.logging.enabled) {
      this.middlewares.use(async (req, res, ctx, next) => {
        const start = Date.now();

        // Log request
        this.logger.request(req.method || 'UNKNOWN', req.url || '/', {
          headers: req.headers,
        });

        await next();

        // Log response
        const duration = Date.now() - start;
        this.logger.response(
          req.method || 'UNKNOWN',
          req.url || '/',
          res.statusCode || 500,
          duration
        );
      });
    }
  }

  /**
   * Setup routes
   */
  private setupRoutes(): void {
    // Service level (/)
    this.router.get('/', ServiceController.listBuckets);

    // Bucket level (/:bucket)
    this.router.put('/:bucket', BucketController.createBucket);
    this.router.delete('/:bucket', BucketController.deleteBucket);
    this.router.get('/:bucket', BucketController.listObjects);
    this.router.head('/:bucket', BucketController.headBucket);

    // Object level (/:bucket/:key*)
    this.router.put('/:bucket/:key*', ObjectController.putObject);
    this.router.post('/:bucket/:key*', ObjectController.postObject); // For multipart operations
    this.router.get('/:bucket/:key*', ObjectController.getObject);
    this.router.head('/:bucket/:key*', ObjectController.headObject);
    this.router.delete('/:bucket/:key*', ObjectController.deleteObject);
  }

  /**
   * Handle incoming HTTP request
   */
  private async handleRequest(
    req: plugins.http.IncomingMessage,
    res: plugins.http.ServerResponse
  ): Promise<void> {
    const context = new S3Context(req, res, this.store, this.multipart);

    try {
      // Execute middleware stack
      await this.middlewares.execute(req, res, context);

      // Route to handler
      const match = this.router.match(context.method, context.url.pathname);

      if (match) {
        context.params = match.params;
        await match.handler(req, res, context, match.params);
      } else {
        context.throw('NoSuchKey', 'The specified resource does not exist');
      }
    } catch (err) {
      await this.handleError(err, context, res);
    }
  }

  /**
   * Handle errors and send S3-compatible error responses
   */
  private async handleError(
    err: any,
    context: S3Context,
    res: plugins.http.ServerResponse
  ): Promise<void> {
    const s3Error = err instanceof S3Error ? err : S3Error.fromError(err);

    // Log the error
    this.logger.s3Error(s3Error.code, s3Error.message, s3Error.status);

    // Log stack trace for server errors
    if (s3Error.status >= 500) {
      this.logger.debug('Error stack trace', {
        stack: err.stack || err.toString(),
      });
    }

    // Send error response
    const errorXml = s3Error.toXML();

    res.writeHead(s3Error.status, {
      'Content-Type': 'application/xml',
      'Content-Length': Buffer.byteLength(errorXml),
    });

    res.end(errorXml);
  }

  /**
   * Start the server
   */
  public async start(): Promise<void> {
    // Initialize store
    await this.store.initialize();

    // Initialize multipart upload manager
    await this.multipart.initialize();

    // Clean slate if requested
    if (this.options.cleanSlate) {
      await this.store.reset();
    }

    // Create HTTP server
    this.httpServer = plugins.http.createServer((req, res) => {
      this.handleRequest(req, res).catch((err) => {
        this.logger.error('Fatal error in request handler', {
          error: err.message,
          stack: err.stack,
        });
        if (!res.headersSent) {
          res.writeHead(500, { 'Content-Type': 'text/plain' });
          res.end('Internal Server Error');
        }
      });
    });

    // Start listening; the listen callback takes no arguments, so bind
    // failures surface via the 'error' event instead
    await new Promise<void>((resolve, reject) => {
      this.httpServer!.once('error', reject);
      this.httpServer!.listen(this.options.port, this.options.address, () => {
        this.logger.info(`S3 server listening on ${this.options.address}:${this.options.port}`);
        resolve();
      });
    });
  }

  /**
   * Stop the server
   */
  public async stop(): Promise<void> {
    if (!this.httpServer) {
      return;
    }

    await new Promise<void>((resolve, reject) => {
      this.httpServer!.close((err?: Error) => {
        if (err) {
          reject(err);
        } else {
          this.logger.info('S3 server stopped');
          resolve();
        }
      });
    });

    this.httpServer = undefined;
  }

  /**
   * Get server port (useful for testing with random ports)
   */
  public getPort(): number {
    if (!this.httpServer) {
      throw new Error('Server not started');
    }

    const address = this.httpServer.address();
    if (typeof address === 'string') {
      throw new Error('Unix socket not supported');
    }

    return address?.port || this.options.port;
  }

  /**
   * Get S3 descriptor for client configuration
   */
  public getS3Descriptor(): {
    accessKey: string;
    accessSecret: string;
    endpoint: string;
    port: number;
    useSsl: boolean;
  } {
    return {
      accessKey: 'S3RVER',
      accessSecret: 'S3RVER',
      endpoint: this.options.address === '0.0.0.0' ? '127.0.0.1' : this.options.address,
      port: this.getPort(),
      useSsl: false,
    };
  }
}
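A start-to-stop sketch for the server class above (options are illustrative; port 0 asks the OS for a free port, which getPort() then reports):

const server = new Smarts3Server({
  port: 0,
  directory: '.nogit/bucketsDir',
  cleanSlate: true,
});
await server.start();

const { endpoint, port } = server.getS3Descriptor();
console.log(`listening on http://${endpoint}:${port}`);

await server.store.createBucket('test-bucket');
await server.stop();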
@@ -1,130 +0,0 @@
import * as plugins from '../plugins.js';
import type { S3Context } from '../classes/context.js';

/**
 * Bucket-level operations
 */
export class BucketController {
  /**
   * HEAD /:bucket - Check if bucket exists
   */
  public static async headBucket(
    req: plugins.http.IncomingMessage,
    res: plugins.http.ServerResponse,
    ctx: S3Context,
    params: Record<string, string>
  ): Promise<void> {
    const { bucket } = params;

    if (await ctx.store.bucketExists(bucket)) {
      ctx.status(200).send('');
    } else {
      ctx.throw('NoSuchBucket', 'The specified bucket does not exist');
    }
  }

  /**
   * PUT /:bucket - Create bucket
   */
  public static async createBucket(
    req: plugins.http.IncomingMessage,
    res: plugins.http.ServerResponse,
    ctx: S3Context,
    params: Record<string, string>
  ): Promise<void> {
    const { bucket } = params;

    await ctx.store.createBucket(bucket);
    ctx.status(200).send('');
  }

  /**
   * DELETE /:bucket - Delete bucket
   */
  public static async deleteBucket(
    req: plugins.http.IncomingMessage,
    res: plugins.http.ServerResponse,
    ctx: S3Context,
    params: Record<string, string>
  ): Promise<void> {
    const { bucket } = params;

    await ctx.store.deleteBucket(bucket);
    ctx.status(204).send('');
  }

  /**
   * GET /:bucket - List objects
   * Supports both V1 and V2 listing (V2 uses list-type=2 query param)
   */
  public static async listObjects(
    req: plugins.http.IncomingMessage,
    res: plugins.http.ServerResponse,
    ctx: S3Context,
    params: Record<string, string>
  ): Promise<void> {
    const { bucket } = params;
    const isV2 = ctx.query['list-type'] === '2';

    const result = await ctx.store.listObjects(bucket, {
      prefix: ctx.query.prefix,
      delimiter: ctx.query.delimiter,
      maxKeys: ctx.query['max-keys'] ? parseInt(ctx.query['max-keys']) : 1000,
      continuationToken: ctx.query['continuation-token'],
    });

    if (isV2) {
      // List Objects V2 response
      await ctx.sendXML({
        ListBucketResult: {
          '@_xmlns': 'http://s3.amazonaws.com/doc/2006-03-01/',
          Name: bucket,
          Prefix: result.prefix || '',
          MaxKeys: result.maxKeys,
          KeyCount: result.contents.length,
          IsTruncated: result.isTruncated,
          ...(result.delimiter && { Delimiter: result.delimiter }),
          ...(result.nextContinuationToken && {
            NextContinuationToken: result.nextContinuationToken,
          }),
          ...(result.commonPrefixes.length > 0 && {
            CommonPrefixes: result.commonPrefixes.map((prefix) => ({
              Prefix: prefix,
            })),
          }),
          Contents: result.contents.map((obj) => ({
            Key: obj.key,
            LastModified: obj.lastModified.toISOString(),
            ETag: `"${obj.md5}"`,
            Size: obj.size,
            StorageClass: 'STANDARD',
          })),
        },
      });
    } else {
      // List Objects V1 response
      await ctx.sendXML({
        ListBucketResult: {
          '@_xmlns': 'http://s3.amazonaws.com/doc/2006-03-01/',
          Name: bucket,
          Prefix: result.prefix || '',
          MaxKeys: result.maxKeys,
          IsTruncated: result.isTruncated,
          ...(result.delimiter && { Delimiter: result.delimiter }),
          ...(result.commonPrefixes.length > 0 && {
            CommonPrefixes: result.commonPrefixes.map((prefix) => ({
              Prefix: prefix,
            })),
          }),
          Contents: result.contents.map((obj) => ({
            Key: obj.key,
            LastModified: obj.lastModified.toISOString(),
            ETag: `"${obj.md5}"`,
            Size: obj.size,
            StorageClass: 'STANDARD',
          })),
        },
      });
    }
  }
}
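A sketch of exercising the V2 listing above over plain HTTP (assumes a server on 127.0.0.1:3000 and Node's global fetch, available since Node 18):

const res = await fetch(
  'http://127.0.0.1:3000/my-bucket?list-type=2&prefix=photos/&delimiter=/'
);
const xml = await res.text();
// The XML carries <KeyCount> plus one <Contents> entry per key directly under
// photos/, and <CommonPrefixes> entries for deeper "folders". Omitting
// list-type=2 returns the V1 shape, which drops KeyCount and the
// continuation-token fields.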
@@ -1,378 +0,0 @@
import * as plugins from '../plugins.js';
import type { S3Context } from '../classes/context.js';

/**
 * Object-level operations
 */
export class ObjectController {
  /**
   * PUT /:bucket/:key* - Upload object, copy object, or upload part
   */
  public static async putObject(
    req: plugins.http.IncomingMessage,
    res: plugins.http.ServerResponse,
    ctx: S3Context,
    params: Record<string, string>
  ): Promise<void> {
    const { bucket, key } = params;

    // Check if this is a multipart upload part
    if (ctx.query.partNumber && ctx.query.uploadId) {
      return ObjectController.uploadPart(req, res, ctx, params);
    }

    // Check if this is a COPY operation
    const copySource = ctx.headers['x-amz-copy-source'] as string | undefined;
    if (copySource) {
      return ObjectController.copyObject(req, res, ctx, params);
    }

    // Extract metadata from headers
    const metadata: Record<string, string> = {};
    for (const [header, value] of Object.entries(ctx.headers)) {
      if (header.startsWith('x-amz-meta-')) {
        metadata[header] = value as string;
      }
      if (header === 'content-type' && value) {
        metadata['content-type'] = value as string;
      }
      if (header === 'cache-control' && value) {
        metadata['cache-control'] = value as string;
      }
    }

    // If no content-type, default to binary/octet-stream
    if (!metadata['content-type']) {
      metadata['content-type'] = 'binary/octet-stream';
    }

    // Stream upload
    const result = await ctx.store.putObject(bucket, key, ctx.getRequestStream(), metadata);

    ctx.setHeader('ETag', `"${result.md5}"`);
    ctx.status(200).send('');
  }

  /**
   * GET /:bucket/:key* - Download object
   */
  public static async getObject(
    req: plugins.http.IncomingMessage,
    res: plugins.http.ServerResponse,
    ctx: S3Context,
    params: Record<string, string>
  ): Promise<void> {
    const { bucket, key } = params;

    // Parse Range header if present
    const rangeHeader = ctx.headers.range as string | undefined;
    let range: { start: number; end: number } | undefined;

    if (rangeHeader) {
      const matches = rangeHeader.match(/bytes=(\d+)-(\d*)/);
      if (matches) {
        const start = parseInt(matches[1]);
        const end = matches[2] ? parseInt(matches[2]) : undefined;
        range = { start, end: end || start + 1024 * 1024 }; // Default to 1MB if no end
      }
    }

    // Get object
    const object = await ctx.store.getObject(bucket, key, range);

    // Set response headers
    ctx.setHeader('ETag', `"${object.md5}"`);
    ctx.setHeader('Last-Modified', object.lastModified.toUTCString());
    ctx.setHeader('Content-Type', object.metadata['content-type'] || 'binary/octet-stream');
    ctx.setHeader('Accept-Ranges', 'bytes');

    // Handle custom metadata headers
    for (const [key, value] of Object.entries(object.metadata)) {
      if (key.startsWith('x-amz-meta-')) {
        ctx.setHeader(key, value);
      }
    }

    if (range) {
      ctx.status(206);
      ctx.setHeader('Content-Length', (range.end - range.start + 1).toString());
      ctx.setHeader('Content-Range', `bytes ${range.start}-${range.end}/${object.size}`);
    } else {
      ctx.status(200);
      ctx.setHeader('Content-Length', object.size.toString());
    }

    // Stream response
    await ctx.send(object.content!);
  }

  /**
   * HEAD /:bucket/:key* - Get object metadata
   */
  public static async headObject(
    req: plugins.http.IncomingMessage,
    res: plugins.http.ServerResponse,
    ctx: S3Context,
    params: Record<string, string>
  ): Promise<void> {
    const { bucket, key } = params;

    // Get object (without content)
    const object = await ctx.store.getObject(bucket, key);

    // Set response headers (same as GET but no body)
    ctx.setHeader('ETag', `"${object.md5}"`);
    ctx.setHeader('Last-Modified', object.lastModified.toUTCString());
    ctx.setHeader('Content-Type', object.metadata['content-type'] || 'binary/octet-stream');
    ctx.setHeader('Content-Length', object.size.toString());
    ctx.setHeader('Accept-Ranges', 'bytes');

    // Handle custom metadata headers
    for (const [key, value] of Object.entries(object.metadata)) {
      if (key.startsWith('x-amz-meta-')) {
        ctx.setHeader(key, value);
      }
    }

    ctx.status(200).send('');
  }

  /**
   * DELETE /:bucket/:key* - Delete object or abort multipart upload
   */
  public static async deleteObject(
    req: plugins.http.IncomingMessage,
    res: plugins.http.ServerResponse,
    ctx: S3Context,
    params: Record<string, string>
  ): Promise<void> {
    const { bucket, key } = params;

    // Check if this is an abort multipart upload
    if (ctx.query.uploadId) {
      return ObjectController.abortMultipartUpload(req, res, ctx, params);
    }

    await ctx.store.deleteObject(bucket, key);
    ctx.status(204).send('');
  }

  /**
   * COPY operation (PUT with x-amz-copy-source header)
   */
  private static async copyObject(
    req: plugins.http.IncomingMessage,
    res: plugins.http.ServerResponse,
    ctx: S3Context,
    params: Record<string, string>
  ): Promise<void> {
    const { bucket: destBucket, key: destKey } = params;
    const copySource = ctx.headers['x-amz-copy-source'] as string;

    // Parse source bucket and key from copy source
    // Format: /bucket/key or bucket/key
    const sourcePath = copySource.startsWith('/') ? copySource.slice(1) : copySource;
    const firstSlash = sourcePath.indexOf('/');
    const srcBucket = decodeURIComponent(sourcePath.slice(0, firstSlash));
    const srcKey = decodeURIComponent(sourcePath.slice(firstSlash + 1));

    // Get metadata directive (COPY or REPLACE)
    const metadataDirective = (ctx.headers['x-amz-metadata-directive'] as string)?.toUpperCase() || 'COPY';

    // Extract new metadata if REPLACE
    let newMetadata: Record<string, string> | undefined;
    if (metadataDirective === 'REPLACE') {
      newMetadata = {};
      for (const [header, value] of Object.entries(ctx.headers)) {
        if (header.startsWith('x-amz-meta-')) {
          newMetadata[header] = value as string;
        }
        if (header === 'content-type' && value) {
          newMetadata['content-type'] = value as string;
        }
      }
    }

    // Perform copy
    const result = await ctx.store.copyObject(
      srcBucket,
      srcKey,
      destBucket,
      destKey,
      metadataDirective as 'COPY' | 'REPLACE',
      newMetadata
    );

    // Send XML response
    await ctx.sendXML({
      CopyObjectResult: {
        LastModified: new Date().toISOString(),
        ETag: `"${result.md5}"`,
      },
    });
  }

  /**
   * POST /:bucket/:key* - Initiate or complete multipart upload
   */
  public static async postObject(
    req: plugins.http.IncomingMessage,
    res: plugins.http.ServerResponse,
    ctx: S3Context,
    params: Record<string, string>
  ): Promise<void> {
    // Check if this is initiate multipart upload
    if (ctx.query.uploads !== undefined) {
      return ObjectController.initiateMultipartUpload(req, res, ctx, params);
    }

    // Check if this is complete multipart upload
    if (ctx.query.uploadId) {
      return ObjectController.completeMultipartUpload(req, res, ctx, params);
    }

    ctx.throw('InvalidRequest', 'Invalid POST request');
  }

  /**
   * Initiate Multipart Upload (POST with ?uploads)
   */
  private static async initiateMultipartUpload(
    req: plugins.http.IncomingMessage,
    res: plugins.http.ServerResponse,
    ctx: S3Context,
    params: Record<string, string>
  ): Promise<void> {
    const { bucket, key } = params;

    // Extract metadata from headers
    const metadata: Record<string, string> = {};
    for (const [header, value] of Object.entries(ctx.headers)) {
      if (header.startsWith('x-amz-meta-')) {
        metadata[header] = value as string;
      }
      if (header === 'content-type' && value) {
        metadata['content-type'] = value as string;
      }
    }

    // Initiate upload
    const uploadId = await ctx.multipart.initiateUpload(bucket, key, metadata);

    // Send XML response
    await ctx.sendXML({
      InitiateMultipartUploadResult: {
        Bucket: bucket,
        Key: key,
        UploadId: uploadId,
      },
    });
  }

  /**
   * Upload Part (PUT with ?partNumber&uploadId)
   */
  private static async uploadPart(
    req: plugins.http.IncomingMessage,
    res: plugins.http.ServerResponse,
    ctx: S3Context,
    params: Record<string, string>
  ): Promise<void> {
    const uploadId = ctx.query.uploadId!;
    const partNumber = parseInt(ctx.query.partNumber!);

    if (isNaN(partNumber) || partNumber < 1 || partNumber > 10000) {
      ctx.throw('InvalidPartNumber', 'Part number must be between 1 and 10000');
    }

    // Upload the part
    const partInfo = await ctx.multipart.uploadPart(
      uploadId,
      partNumber,
      ctx.getRequestStream() as any as import('stream').Readable
    );

    // Set ETag header (part ETag)
    ctx.setHeader('ETag', `"${partInfo.etag}"`);
    ctx.status(200).send('');
  }

  /**
   * Complete Multipart Upload (POST with ?uploadId)
   */
  private static async completeMultipartUpload(
    req: plugins.http.IncomingMessage,
    res: plugins.http.ServerResponse,
    ctx: S3Context,
    params: Record<string, string>
  ): Promise<void> {
    const { bucket, key } = params;
    const uploadId = ctx.query.uploadId!;

    // Read and parse request body (XML with part list)
    const body = await ctx.readBody();

    // Parse XML to extract parts
    // Expected format: <CompleteMultipartUpload><Part><PartNumber>1</PartNumber><ETag>"etag"</ETag></Part>...</CompleteMultipartUpload>
    const partMatches = body.matchAll(/<Part>.*?<PartNumber>(\d+)<\/PartNumber>.*?<ETag>(.*?)<\/ETag>.*?<\/Part>/gs);
    const parts: Array<{ PartNumber: number; ETag: string }> = [];

    for (const match of partMatches) {
      parts.push({
        PartNumber: parseInt(match[1]),
        ETag: match[2],
      });
    }

    // Complete the upload
    const result = await ctx.multipart.completeUpload(uploadId, parts);

    // Get upload metadata
    const upload = ctx.multipart.getUpload(uploadId);
    if (!upload) {
      ctx.throw('NoSuchUpload', 'The specified upload does not exist');
    }

    // Move final file to object store
    const finalPath = ctx.multipart.getFinalPath(uploadId);
    const finalContent = await plugins.smartfs.file(finalPath).read();

    // Create a readable stream from the buffer
    const { Readable } = await import('stream');
    const finalReadableStream = Readable.from([finalContent]);

    // Store the final object
    await ctx.store.putObject(bucket, key, finalReadableStream, upload.metadata);

    // Clean up multipart upload data
    await ctx.multipart.cleanupUpload(uploadId);

    // Send XML response
    await ctx.sendXML({
      CompleteMultipartUploadResult: {
        Location: `/${bucket}/${key}`,
        Bucket: bucket,
        Key: key,
        ETag: `"${result.etag}"`,
      },
    });
  }

  /**
   * Abort Multipart Upload (DELETE with ?uploadId)
   */
  private static async abortMultipartUpload(
    req: plugins.http.IncomingMessage,
    res: plugins.http.ServerResponse,
    ctx: S3Context,
    params: Record<string, string>
  ): Promise<void> {
    const uploadId = ctx.query.uploadId!;

    // Abort and cleanup
    await ctx.multipart.abortUpload(uploadId);

    ctx.status(204).send('');
  }
}
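The regex-based part extraction in completeMultipartUpload above expects a body shaped like this sketch (ETag values are hypothetical):

const body = `<CompleteMultipartUpload>
  <Part><PartNumber>1</PartNumber><ETag>"9b2cf535f27731c974343645a3985328"</ETag></Part>
  <Part><PartNumber>2</PartNumber><ETag>"e1c06d85ae7b8b032bef47e42e4c08f9"</ETag></Part>
</CompleteMultipartUpload>`;

const partMatches = body.matchAll(
  /<Part>.*?<PartNumber>(\d+)<\/PartNumber>.*?<ETag>(.*?)<\/ETag>.*?<\/Part>/gs
);
for (const m of partMatches) {
  console.log(m[1], m[2]); // logs the part number and the still-quoted ETag; completeUpload strips the quotes
}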
@@ -1,35 +0,0 @@
import * as plugins from '../plugins.js';
import type { S3Context } from '../classes/context.js';

/**
 * Service-level operations (root /)
 */
export class ServiceController {
  /**
   * GET / - List all buckets
   */
  public static async listBuckets(
    req: plugins.http.IncomingMessage,
    res: plugins.http.ServerResponse,
    ctx: S3Context,
    params: Record<string, string>
  ): Promise<void> {
    const buckets = await ctx.store.listBuckets();

    await ctx.sendXML({
      ListAllMyBucketsResult: {
        '@_xmlns': 'http://s3.amazonaws.com/doc/2006-03-01/',
        Owner: {
          ID: '123456789000',
          DisplayName: 'S3rver',
        },
        Buckets: {
          Bucket: buckets.map((bucket) => ({
            Name: bucket.name,
            CreationDate: bucket.creationDate.toISOString(),
          })),
        },
      },
    });
  }
}
83 ts/index.ts
@@ -1,6 +1,5 @@
import * as plugins from './plugins.js';
import * as paths from './paths.js';
import { Smarts3Server } from './classes/smarts3-server.js';

/**
 * Authentication configuration
@@ -44,6 +43,14 @@ export interface ILimitsConfig {
  requestTimeout?: number;
}

/**
 * Multipart upload configuration
 */
export interface IMultipartConfig {
  expirationDays?: number;
  cleanupIntervalMinutes?: number;
}

/**
 * Server configuration
 */
@@ -51,6 +58,7 @@ export interface IServerConfig {
  port?: number;
  address?: string;
  silent?: boolean;
  region?: string;
}

/**
@@ -71,6 +79,7 @@ export interface ISmarts3Config {
  cors?: ICorsConfig;
  logging?: ILoggingConfig;
  limits?: ILimitsConfig;
  multipart?: IMultipartConfig;
}

/**
@@ -81,6 +90,7 @@ const DEFAULT_CONFIG: ISmarts3Config = {
    port: 3000,
    address: '0.0.0.0',
    silent: false,
    region: 'us-east-1',
  },
  storage: {
    directory: paths.bucketsDir,
@@ -114,6 +124,10 @@ const DEFAULT_CONFIG: ISmarts3Config = {
    maxMetadataSize: 2048,
    requestTimeout: 300000, // 5 minutes
  },
  multipart: {
    expirationDays: 7,
    cleanupIntervalMinutes: 60,
  },
};

/**
@@ -145,15 +159,28 @@ function mergeConfig(userConfig: ISmarts3Config): Required<ISmarts3Config> {
      ...DEFAULT_CONFIG.limits!,
      ...(userConfig.limits || {}),
    },
    multipart: {
      ...DEFAULT_CONFIG.multipart!,
      ...(userConfig.multipart || {}),
    },
  };
}

/**
 * IPC command type map for RustBridge
 */
type TRustS3Commands = {
  start: { params: { config: Required<ISmarts3Config> }; result: {} };
  stop: { params: {}; result: {} };
  createBucket: { params: { name: string }; result: {} };
};

/**
 * Main Smarts3 class - production-ready S3-compatible server
 */
export class Smarts3 {
  // STATIC
  public static async createAndStart(configArg: ISmarts3Config | ILegacySmarts3Config = {}) {
  public static async createAndStart(configArg: ISmarts3Config = {}) {
    const smartS3Instance = new Smarts3(configArg);
    await smartS3Instance.start();
    return smartS3Instance;
@@ -161,22 +188,28 @@ export class Smarts3 {

  // INSTANCE
  public config: Required<ISmarts3Config>;
  public s3Instance: Smarts3Server;
  private bridge: InstanceType<typeof plugins.RustBridge<TRustS3Commands>>;

  constructor(configArg: ISmarts3Config | ILegacySmarts3Config = {}) {
  constructor(configArg: ISmarts3Config = {}) {
    this.config = mergeConfig(configArg);
    this.bridge = new plugins.RustBridge<TRustS3Commands>({
      binaryName: 'rusts3',
      localPaths: [
        plugins.path.join(paths.packageDir, 'dist_rust', 'rusts3'),
        plugins.path.join(paths.packageDir, 'rust', 'target', 'release', 'rusts3'),
        plugins.path.join(paths.packageDir, 'rust', 'target', 'debug', 'rusts3'),
      ],
      readyTimeoutMs: 30000,
      requestTimeoutMs: 300000,
    });
  }

  public async start() {
    this.s3Instance = new Smarts3Server({
      port: this.config.server.port,
      address: this.config.server.address,
      directory: this.config.storage.directory,
      cleanSlate: this.config.storage.cleanSlate,
      silent: this.config.server.silent,
      config: this.config, // Pass full config to server
    });
    await this.s3Instance.start();
    const spawned = await this.bridge.spawn();
    if (!spawned) {
      throw new Error('Failed to spawn rusts3 binary. Make sure it is compiled (pnpm build).');
    }
    await this.bridge.sendCommand('start', { config: this.config });

    if (!this.config.server.silent) {
      console.log('s3 server is running');
@@ -186,7 +219,20 @@
  public async getS3Descriptor(
    optionsArg?: Partial<plugins.tsclass.storage.IS3Descriptor>,
  ): Promise<plugins.tsclass.storage.IS3Descriptor> {
    const descriptor = this.s3Instance.getS3Descriptor();
    const cred = this.config.auth.credentials[0] || {
      accessKeyId: 'S3RVER',
      secretAccessKey: 'S3RVER',
    };

    const descriptor: plugins.tsclass.storage.IS3Descriptor = {
      endpoint: this.config.server.address === '0.0.0.0' ? 'localhost' : this.config.server.address!,
      port: this.config.server.port!,
      useSsl: false,
      accessKey: cred.accessKeyId,
      accessSecret: cred.secretAccessKey,
      bucketName: '',
    };

    return {
      ...descriptor,
      ...(optionsArg ? optionsArg : {}),
@@ -194,15 +240,12 @@
  }

  public async createBucket(bucketNameArg: string) {
    // Call the filesystem store directly instead of using the client library
    await this.s3Instance.store.createBucket(bucketNameArg);
    await this.bridge.sendCommand('createBucket', { name: bucketNameArg });
    return { name: bucketNameArg };
  }

  public async stop() {
    await this.s3Instance.stop();
    await this.bridge.sendCommand('stop', {});
    this.bridge.kill();
  }
}

// Export the custom server class for direct use
export { Smarts3Server } from './classes/smarts3-server.js';
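A minimal sketch of the post-refactor API this diff lands on (the import path is assumed from the @push.rocks scope used above, and the compiled rusts3 binary must be present):

import { Smarts3 } from '@push.rocks/smarts3'; // assumed package name

const smarts3 = await Smarts3.createAndStart({
  server: { port: 3333, silent: true },
  storage: { cleanSlate: true },
});

const descriptor = await smarts3.getS3Descriptor();
// { endpoint: 'localhost', port: 3333, accessKey: 'S3RVER', ... }

await smarts3.createBucket('demo-bucket'); // forwarded over the RustBridge IPC
await smarts3.stop();                      // sends 'stop', then kills the child process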
@@ -1,20 +1,13 @@
// node native
import * as path from 'path';
import * as http from 'http';
import * as crypto from 'crypto';
import * as url from 'url';

export { path, http, crypto, url };
export { path };

// @push.rocks scope
import { SmartFs, SmartFsProviderNode } from '@push.rocks/smartfs';
import * as smartpath from '@push.rocks/smartpath';
import { SmartXml } from '@push.rocks/smartxml';
import { RustBridge } from '@push.rocks/smartrust';

// Create SmartFs instance with Node.js provider
export const smartfs = new SmartFs(new SmartFsProviderNode());

export { smartpath, SmartXml };
export { smartpath, RustBridge };

// @tsclass scope
import * as tsclass from '@tsclass/tsclass';
@@ -1,39 +0,0 @@
import * as plugins from '../plugins.js';

// Create a singleton instance of SmartXml
const smartXmlInstance = new plugins.SmartXml();

/**
 * Parse XML string to JavaScript object
 */
export function parseXml(xmlString: string): any {
  return smartXmlInstance.parseXmlToObject(xmlString);
}

/**
 * Convert JavaScript object to XML string with XML declaration
 */
export function createXml(obj: any, options: { format?: boolean } = {}): string {
  const xml = smartXmlInstance.createXmlFromObject(obj);

  // Ensure XML declaration is present
  if (!xml.startsWith('<?xml')) {
    return `<?xml version="1.0" encoding="UTF-8"?>\n${xml}`;
  }

  return xml;
}

/**
 * Helper to create S3-compatible XML responses with proper namespace
 */
export function createS3Xml(rootElement: string, content: any, namespace = 'http://s3.amazonaws.com/doc/2006-03-01/'): string {
  const obj: any = {
    [rootElement]: {
      '@_xmlns': namespace,
      ...content,
    },
  };

  return createXml(obj, { format: true });
}
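A usage sketch for the helpers above:

const xml = createS3Xml('ListAllMyBucketsResult', {
  Owner: { ID: '123456789000', DisplayName: 'S3rver' },
  Buckets: { Bucket: [{ Name: 'demo', CreationDate: new Date(0).toISOString() }] },
});
// Produces the '<?xml version="1.0" encoding="UTF-8"?>' declaration followed by
// <ListAllMyBucketsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">...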