diff --git a/changelog.md b/changelog.md index 81850ad..3e014b3 100644 --- a/changelog.md +++ b/changelog.md @@ -1,5 +1,18 @@ # Changelog +## 2026-03-14 - 6.0.0 - BREAKING CHANGE(core) +Rebrand from smarts3 to smartstorage + +- Package renamed from @push.rocks/smarts3 to @push.rocks/smartstorage +- Class renamed from Smarts3 to SmartStorage (no backward-compatible re-export) +- Interface renamed from ISmarts3Config to ISmartStorageConfig +- Method renamed from getS3Descriptor to getStorageDescriptor +- Rust binary renamed from rusts3 to ruststorage +- Rust types renamed: S3Error→StorageError, S3Action→StorageAction, S3Config→SmartStorageConfig, S3Server→StorageServer +- On-disk file extension changed from ._S3_object to ._storage_object (BREAKING for existing stored data) +- Default credentials changed from S3RVER to STORAGE +- All internal S3 branding removed; AWS S3 protocol compatibility (IAM actions, ARNs, SigV4) fully maintained + ## 2026-02-17 - 5.3.0 - feat(auth) add AWS SigV4 authentication and bucket policy support diff --git a/npmextra.json b/npmextra.json index a71fb8e..9a5f270 100644 --- a/npmextra.json +++ b/npmextra.json @@ -10,14 +10,15 @@ "module": { "githost": "code.foss.global", "gitscope": "push.rocks", - "gitrepo": "smarts3", - "description": "A Node.js TypeScript package to create a local S3 endpoint for simulating AWS S3 operations using mapped local directories for development and testing purposes.", - "npmPackagename": "@push.rocks/smarts3", + "gitrepo": "smartstorage", + "description": "A Node.js TypeScript package to create a local S3-compatible storage server using mapped local directories for development and testing purposes.", + "npmPackagename": "@push.rocks/smartstorage", "license": "MIT", "projectDomain": "push.rocks", "keywords": [ - "S3 Mock Server", - "Local S3", + "smartstorage", + "S3 Compatible", + "Local Storage Server", "Node.js", "TypeScript", "Local Development", @@ -26,8 +27,8 @@ "File Storage", "AWS S3 
Compatibility", "Development Tool", - "S3 Endpoint", - "S3 Simulation", + "Storage Endpoint", + "Storage Simulation", "Bucket Management", "File Upload", "CI/CD Integration", diff --git a/package.json b/package.json index 94b58b4..539ce6a 100644 --- a/package.json +++ b/package.json @@ -1,8 +1,8 @@ { - "name": "@push.rocks/smarts3", - "version": "5.3.0", + "name": "@push.rocks/smartstorage", + "version": "6.0.0", "private": false, - "description": "A Node.js TypeScript package to create a local S3 endpoint for simulating AWS S3 operations using mapped local directories for development and testing purposes.", + "description": "A Node.js TypeScript package to create a local S3-compatible storage server using mapped local directories for development and testing purposes.", "main": "dist_ts/index.js", "typings": "dist_ts/index.d.ts", "type": "module", @@ -45,8 +45,9 @@ "@tsclass/tsclass": "^9.3.0" }, "keywords": [ - "S3 Mock Server", - "Local S3", + "smartstorage", + "S3 Compatible", + "Local Storage Server", "Node.js", "TypeScript", "Local Development", @@ -55,20 +56,20 @@ "File Storage", "AWS S3 Compatibility", "Development Tool", - "S3 Endpoint", - "S3 Simulation", + "Storage Endpoint", + "Storage Simulation", "Bucket Management", "File Upload", "CI/CD Integration", "Developer Onboarding" ], - "homepage": "https://code.foss.global/push.rocks/smarts3#readme", + "homepage": "https://code.foss.global/push.rocks/smartstorage#readme", "repository": { "type": "git", - "url": "https://code.foss.global/push.rocks/smarts3.git" + "url": "ssh://git@code.foss.global:29419/push.rocks/smartstorage.git" }, "bugs": { - "url": "https://code.foss.global/push.rocks/smarts3/issues" + "url": "https://code.foss.global/push.rocks/smartstorage/issues" }, "packageManager": "pnpm@10.14.0+sha512.ad27a79641b49c3e481a16a805baa71817a04bbe06a38d17e60e2eaee83f6a146c6a688125f5792e48dd5ba30e7da52a5cda4c3992b9ccf333f9ce223af84748", "pnpm": { diff --git a/production-readiness.md 
b/production-readiness.md index abf70d6..b512ed3 100644 --- a/production-readiness.md +++ b/production-readiness.md @@ -1,6 +1,6 @@ -# Production-Readiness Plan for smarts3 +# Production-Readiness Plan for smartstorage -**Goal:** Make smarts3 production-ready as a MinIO alternative for use cases where: +**Goal:** Make smartstorage production-ready as a MinIO alternative for use cases where: - Running MinIO is out of scope - You have a program written for S3 and want to use the local filesystem - You need a lightweight, zero-dependency S3-compatible server @@ -31,7 +31,7 @@ ### 1. Multipart Upload Support πŸš€ **HIGHEST PRIORITY** -**Why:** Essential for uploading files >5MB efficiently. Without this, smarts3 can't handle real-world production workloads. +**Why:** Essential for uploading files >5MB efficiently. Without this, smartstorage can't handle real-world production workloads. **Implementation Required:** - `POST /:bucket/:key?uploads` - CreateMultipartUpload @@ -46,13 +46,13 @@ **Files to Create/Modify:** - `ts/controllers/multipart.controller.ts` (new) - `ts/classes/filesystem-store.ts` (add multipart methods) -- `ts/classes/smarts3-server.ts` (add multipart routes) +- `ts/classes/smartstorage-server.ts` (add multipart routes) --- ### 2. Configurable Authentication πŸ” -**Why:** Currently hardcoded credentials ('S3RVER'/'S3RVER'). Production needs custom credentials. +**Why:** Currently hardcoded credentials ('STORAGE'/'STORAGE'). Production needs custom credentials. 
**Implementation Required:** - Support custom access keys and secrets via configuration @@ -75,7 +75,7 @@ interface IAuthConfig { **Files to Create/Modify:** - `ts/classes/auth-middleware.ts` (new) - `ts/classes/signature-validator.ts` (new) -- `ts/classes/smarts3-server.ts` (integrate auth middleware) +- `ts/classes/smartstorage-server.ts` (integrate auth middleware) - `ts/index.ts` (add auth config options) --- @@ -105,7 +105,7 @@ interface ICorsConfig { **Files to Create/Modify:** - `ts/classes/cors-middleware.ts` (new) -- `ts/classes/smarts3-server.ts` (integrate CORS middleware) +- `ts/classes/smartstorage-server.ts` (integrate CORS middleware) - `ts/index.ts` (add CORS config options) --- @@ -131,7 +131,7 @@ interface ISslConfig { ``` **Files to Create/Modify:** -- `ts/classes/smarts3-server.ts` (add HTTPS server creation) +- `ts/classes/smartstorage-server.ts` (add HTTPS server creation) - `ts/index.ts` (add SSL config options) --- @@ -147,7 +147,7 @@ interface ISslConfig { - Sensible production defaults - Example configurations for common use cases -**Configuration File Example (`smarts3.config.json`):** +**Configuration File Example (`smartstorage.config.json`):** ```json { "server": { @@ -220,7 +220,7 @@ interface ISslConfig { **Files to Create/Modify:** - `ts/classes/logger.ts` (new - use @push.rocks/smartlog?) 
- `ts/classes/access-logger-middleware.ts` (new) -- `ts/classes/smarts3-server.ts` (replace console.log with logger) +- `ts/classes/smartstorage-server.ts` (replace console.log with logger) - All controller files (use structured logging) --- @@ -238,7 +238,7 @@ interface ISslConfig { **Files to Create/Modify:** - `ts/controllers/health.controller.ts` (new) - `ts/classes/metrics-collector.ts` (new) -- `ts/classes/smarts3-server.ts` (add health routes) +- `ts/classes/smartstorage-server.ts` (add health routes) --- @@ -266,7 +266,7 @@ interface ISslConfig { **Files to Create/Modify:** - `ts/classes/validation-middleware.ts` (new) - `ts/utils/validators.ts` (new) -- `ts/classes/smarts3-server.ts` (integrate validation middleware) +- `ts/classes/smartstorage-server.ts` (integrate validation middleware) --- @@ -291,7 +291,7 @@ interface ISslConfig { - SIGTERM/SIGINT handling **Files to Create/Modify:** -- `ts/classes/smarts3-server.ts` (add graceful shutdown logic) +- `ts/classes/smartstorage-server.ts` (add graceful shutdown logic) - `ts/index.ts` (add signal handlers) --- @@ -336,7 +336,7 @@ interface ISslConfig { 4. βœ… Production configuration system 5. βœ… Production logging -**Outcome:** smarts3 can handle real production workloads +**Outcome:** smartstorage can handle real production workloads --- @@ -350,7 +350,7 @@ interface ISslConfig { 9. βœ… Graceful shutdown 10. βœ… Batch operations -**Outcome:** smarts3 is operationally mature +**Outcome:** smartstorage is operationally mature --- @@ -363,7 +363,7 @@ interface ISslConfig { 13. βœ… Comprehensive test suite 14. βœ… Documentation updates -**Outcome:** smarts3 has broad S3 API compatibility +**Outcome:** smartstorage has broad S3 API compatibility --- @@ -375,7 +375,7 @@ interface ISslConfig { 16. βœ… Performance optimization 17. 
βœ… Advanced features based on user feedback -**Outcome:** smarts3 is a complete MinIO alternative +**Outcome:** smartstorage is a complete MinIO alternative --- @@ -392,7 +392,7 @@ interface ISslConfig { ## 🎯 Target Use Cases -**With this plan implemented, smarts3 will be a solid MinIO alternative for:** +**With this plan implemented, smartstorage will be a solid MinIO alternative for:** βœ… **Local S3 development** - Fast, simple, no Docker required βœ… **Testing S3 integrations** - Reliable, repeatable tests diff --git a/readme.hints.md b/readme.hints.md index b7c7624..8bebd74 100644 --- a/readme.hints.md +++ b/readme.hints.md @@ -1,10 +1,10 @@ -# Project Hints for smarts3 +# Project Hints for smartstorage -## Current State (v6.0.0-dev) +## Current State (v6.0.0) -- **Rust-powered S3 server** via `@push.rocks/smartrust` IPC bridge +- **Rust-powered S3-compatible storage server** via `@push.rocks/smartrust` IPC bridge - High-performance: streaming I/O, zero-copy, backpressure, range seek -- TypeScript is thin IPC wrapper; all HTTP/storage/routing in Rust binary `rusts3` +- TypeScript is thin IPC wrapper; all HTTP/storage/routing in Rust binary `ruststorage` - Full S3 compatibility: PUT, GET, HEAD, DELETE for objects and buckets - Multipart upload support (streaming, no OOM) - **Real AWS SigV4 authentication** (cryptographic signature verification) @@ -18,37 +18,37 @@ - `main.rs` - Clap CLI, management mode entry - `config.rs` - Serde config structs matching TS interfaces (includes `region`) - `management.rs` - IPC loop (newline-delimited JSON over stdin/stdout) -- `server.rs` - hyper 1.x HTTP server, routing, CORS, auth+policy pipeline, all S3 handlers +- `server.rs` - hyper 1.x HTTP server, routing, CORS, auth+policy pipeline, all S3-compatible handlers - `storage.rs` - FileStore: filesystem-backed storage, multipart manager, `.policies/` dir -- `xml_response.rs` - S3 XML response builders -- `s3_error.rs` - S3 error codes with HTTP status mapping +- 
`xml_response.rs` - S3-compatible XML response builders +- `error.rs` - StorageError codes with HTTP status mapping - `auth.rs` - AWS SigV4 signature verification (HMAC-SHA256, clock skew, constant-time compare) -- `action.rs` - S3Action enum + request-to-IAM-action resolver + RequestContext +- `action.rs` - StorageAction enum + request-to-IAM-action resolver + RequestContext - `policy.rs` - BucketPolicy model, evaluation engine (Deny > Allow > NoOpinion), PolicyStore (RwLock cache + disk) ### TypeScript Bridge (`ts/`) -- `ts/index.ts` - Smarts3 class with RustBridge +- `ts/index.ts` - SmartStorage class with RustBridge - `ts/plugins.ts` - path, smartpath, RustBridge, tsclass - `ts/paths.ts` - packageDir, bucketsDir defaults ### IPC Commands | Command | Params | Action | |---------|--------|--------| -| `start` | `{ config: ISmarts3Config }` | Init storage + HTTP server | +| `start` | `{ config: ISmartStorageConfig }` | Init storage + HTTP server | | `stop` | `{}` | Graceful shutdown | | `createBucket` | `{ name: string }` | Create bucket directory | -### Storage Layout (backward-compatible) -- Objects: `{root}/{bucket}/{key}._S3_object` -- Metadata: `{root}/{bucket}/{key}._S3_object.metadata.json` -- MD5: `{root}/{bucket}/{key}._S3_object.md5` +### Storage Layout +- Objects: `{root}/{bucket}/{key}._storage_object` +- Metadata: `{root}/{bucket}/{key}._storage_object.metadata.json` +- MD5: `{root}/{bucket}/{key}._storage_object.md5` - Multipart: `{root}/.multipart/{upload_id}/part-{N}` - Policies: `{root}/.policies/{bucket}.policy.json` ## Build - `pnpm build` runs `tsrust && tsbuild --web --allowimplicitany` -- `tsrust` compiles Rust to `dist_rust/rusts3` +- `tsrust` compiles Rust to `dist_rust/ruststorage` - Targets: linux_amd64, linux_arm64 (configured in npmextra.json) ## Dependencies diff --git a/readme.md b/readme.md index 7f04472..700a2f0 100644 --- a/readme.md +++ b/readme.md @@ -1,76 +1,76 @@ -# @push.rocks/smarts3 πŸš€ +# @push.rocks/smartstorage -A 
high-performance, S3-compatible local server powered by a **Rust core** with a clean TypeScript API. Drop-in replacement for AWS S3 during development and testing β€” no cloud, no Docker, no MinIO. Just `npm install` and go. +A high-performance, S3-compatible local storage server powered by a **Rust core** with a clean TypeScript API. Drop-in replacement for AWS S3 during development and testing β€” no cloud, no Docker, no MinIO. Just `npm install` and go. ## Issue Reporting and Security For reporting bugs, issues, or security vulnerabilities, please visit [community.foss.global/](https://community.foss.global/). This is the central community hub for all issue reporting. Developers who sign and comply with our contribution agreement and go through identification can also get a [code.foss.global/](https://code.foss.global/) account to submit Pull Requests directly. -## 🌟 Why smarts3? +## Why smartstorage? -| Feature | smarts3 | MinIO | s3rver | -|---------|---------|-------|--------| +| Feature | smartstorage | MinIO | s3rver | +|---------|-------------|-------|--------| | Install | `pnpm add` | Docker / binary | `npm install` | | Startup time | ~20ms | seconds | ~200ms | -| Large file uploads | βœ… Streaming, zero-copy | βœ… | ❌ OOM risk | -| Range requests | βœ… Seek-based | βœ… | ❌ Full read | +| Large file uploads | Streaming, zero-copy | Yes | OOM risk | +| Range requests | Seek-based | Yes | Full read | | Language | Rust + TypeScript | Go | JavaScript | -| Multipart uploads | βœ… Full support | βœ… | ❌ | -| Auth | βœ… AWS SigV4 (full verification) | Full IAM | Basic | -| Bucket policies | βœ… IAM-style evaluation | βœ… | ❌ | +| Multipart uploads | Full support | Yes | No | +| Auth | AWS SigV4 (full verification) | Full IAM | Basic | +| Bucket policies | IAM-style evaluation | Yes | No | ### Core Features -- ⚑ **Rust-powered HTTP server** β€” hyper 1.x with streaming I/O, zero-copy, backpressure -- πŸ”„ **Full S3 API compatibility** β€” works with AWS SDK v3, 
SmartBucket, any S3 client -- πŸ“‚ **Filesystem-backed storage** β€” buckets map to directories, objects to files -- πŸ“€ **Streaming multipart uploads** β€” large files without memory pressure -- 🎯 **Byte-range requests** β€” `seek()` directly to the requested byte offset -- πŸ” **AWS SigV4 authentication** β€” full signature verification with constant-time comparison and 15-min clock skew enforcement -- πŸ“œ **Bucket policies** β€” IAM-style JSON policies with Allow/Deny evaluation, wildcard matching, and anonymous access support -- 🌐 **CORS middleware** β€” configurable cross-origin support -- πŸ“Š **Structured logging** β€” tracing-based, error through debug levels -- 🧹 **Clean slate mode** β€” wipe storage on startup for test isolation -- πŸ§ͺ **Test-first design** β€” start/stop in milliseconds, no port conflicts +- **Rust-powered HTTP server** β€” hyper 1.x with streaming I/O, zero-copy, backpressure +- **Full S3-compatible API** β€” works with AWS SDK v3, SmartBucket, any S3 client +- **Filesystem-backed storage** β€” buckets map to directories, objects to files +- **Streaming multipart uploads** β€” large files without memory pressure +- **Byte-range requests** β€” `seek()` directly to the requested byte offset +- **AWS SigV4 authentication** β€” full signature verification with constant-time comparison and 15-min clock skew enforcement +- **Bucket policies** β€” IAM-style JSON policies with Allow/Deny evaluation, wildcard matching, and anonymous access support +- **CORS middleware** β€” configurable cross-origin support +- **Structured logging** β€” tracing-based, error through debug levels +- **Clean slate mode** β€” wipe storage on startup for test isolation +- **Test-first design** β€” start/stop in milliseconds, no port conflicts -## πŸ“¦ Installation +## Installation ```bash -pnpm add @push.rocks/smarts3 -D +pnpm add @push.rocks/smartstorage -D ``` > **Note:** The package ships with precompiled Rust binaries for `linux_amd64` and `linux_arm64`. 
No Rust toolchain needed on your machine. -## πŸš€ Quick Start +## Quick Start ```typescript -import { Smarts3 } from '@push.rocks/smarts3'; +import { SmartStorage } from '@push.rocks/smartstorage'; -// Start a local S3 server -const s3 = await Smarts3.createAndStart({ +// Start a local S3-compatible storage server +const storage = await SmartStorage.createAndStart({ server: { port: 3000 }, storage: { cleanSlate: true }, }); // Create a bucket -await s3.createBucket('my-bucket'); +await storage.createBucket('my-bucket'); // Get connection details for any S3 client -const descriptor = await s3.getS3Descriptor(); -// β†’ { endpoint: 'localhost', port: 3000, accessKey: 'S3RVER', accessSecret: 'S3RVER', useSsl: false } +const descriptor = await storage.getStorageDescriptor(); +// β†’ { endpoint: 'localhost', port: 3000, accessKey: 'STORAGE', accessSecret: 'STORAGE', useSsl: false } // When done -await s3.stop(); +await storage.stop(); ``` -## πŸ“– Configuration +## Configuration All config fields are optional β€” sensible defaults are applied automatically. 
```typescript -import { Smarts3, ISmarts3Config } from '@push.rocks/smarts3'; +import { SmartStorage, ISmartStorageConfig } from '@push.rocks/smartstorage'; -const config: ISmarts3Config = { +const config: ISmartStorageConfig = { server: { port: 3000, // Default: 3000 address: '0.0.0.0', // Default: '0.0.0.0' @@ -113,14 +113,14 @@ const config: ISmarts3Config = { }, }; -const s3 = await Smarts3.createAndStart(config); +const storage = await SmartStorage.createAndStart(config); ``` ### Common Configurations **CI/CD testing** β€” silent, clean, fast: ```typescript -const s3 = await Smarts3.createAndStart({ +const storage = await SmartStorage.createAndStart({ server: { port: 9999, silent: true }, storage: { cleanSlate: true }, }); @@ -128,7 +128,7 @@ const s3 = await Smarts3.createAndStart({ **Auth enabled:** ```typescript -const s3 = await Smarts3.createAndStart({ +const storage = await SmartStorage.createAndStart({ auth: { enabled: true, credentials: [{ accessKeyId: 'test', secretAccessKey: 'test123' }], @@ -138,7 +138,7 @@ const s3 = await Smarts3.createAndStart({ **CORS for local web dev:** ```typescript -const s3 = await Smarts3.createAndStart({ +const storage = await SmartStorage.createAndStart({ cors: { enabled: true, allowedOrigins: ['http://localhost:5173'], @@ -147,12 +147,12 @@ const s3 = await Smarts3.createAndStart({ }); ``` -## πŸ“€ Usage with AWS SDK v3 +## Usage with AWS SDK v3 ```typescript import { S3Client, PutObjectCommand, GetObjectCommand, DeleteObjectCommand } from '@aws-sdk/client-s3'; -const descriptor = await s3.getS3Descriptor(); +const descriptor = await storage.getStorageDescriptor(); const client = new S3Client({ endpoint: `http://${descriptor.endpoint}:${descriptor.port}`, @@ -161,14 +161,14 @@ const client = new S3Client({ accessKeyId: descriptor.accessKey, secretAccessKey: descriptor.accessSecret, }, - forcePathStyle: true, // Required for path-style S3 + forcePathStyle: true, // Required for path-style access }); // Upload await 
client.send(new PutObjectCommand({ Bucket: 'my-bucket', Key: 'hello.txt', - Body: 'Hello, S3!', + Body: 'Hello, Storage!', ContentType: 'text/plain', })); @@ -177,7 +177,7 @@ const { Body } = await client.send(new GetObjectCommand({ Bucket: 'my-bucket', Key: 'hello.txt', })); -const content = await Body.transformToString(); // "Hello, S3!" +const content = await Body.transformToString(); // "Hello, Storage!" // Delete await client.send(new DeleteObjectCommand({ @@ -186,12 +186,12 @@ await client.send(new DeleteObjectCommand({ })); ``` -## πŸͺ£ Usage with SmartBucket +## Usage with SmartBucket ```typescript import { SmartBucket } from '@push.rocks/smartbucket'; -const smartbucket = new SmartBucket(await s3.getS3Descriptor()); +const smartbucket = new SmartBucket(await storage.getStorageDescriptor()); const bucket = await smartbucket.createBucket('my-bucket'); const dir = await bucket.getBaseDirectory(); @@ -205,9 +205,9 @@ const content = await dir.fastGet('docs/readme.txt'); const files = await dir.listFiles(); ``` -## πŸ“€ Multipart Uploads +## Multipart Uploads -For files larger than 5 MB, use multipart uploads. smarts3 handles them with **streaming I/O** β€” parts are written directly to disk, never buffered in memory. +For files larger than 5 MB, use multipart uploads. smartstorage handles them with **streaming I/O** β€” parts are written directly to disk, never buffered in memory. ```typescript import { @@ -244,9 +244,9 @@ await client.send(new CompleteMultipartUploadCommand({ })); ``` -## πŸ“œ Bucket Policies +## Bucket Policies -smarts3 supports AWS-style bucket policies for fine-grained access control. Policies use the same IAM JSON format as real S3 β€” so you can develop and test your policy logic locally before deploying. +smartstorage supports AWS-style bucket policies for fine-grained access control. Policies use the same IAM JSON format as real S3 β€” so you can develop and test your policy logic locally before deploying. 
When `auth.enabled` is `true`, the auth pipeline works as follows: 1. **Authenticate** β€” verify the AWS SigV4 signature (anonymous requests skip this step) @@ -294,38 +294,38 @@ await client.send(new PutBucketPolicyCommand({ Deleting a bucket automatically removes its associated policy. -## πŸ§ͺ Testing Integration +## Testing Integration ```typescript -import { Smarts3 } from '@push.rocks/smarts3'; +import { SmartStorage } from '@push.rocks/smartstorage'; import { tap, expect } from '@git.zone/tstest/tapbundle'; -let s3: Smarts3; +let storage: SmartStorage; tap.test('setup', async () => { - s3 = await Smarts3.createAndStart({ + storage = await SmartStorage.createAndStart({ server: { port: 4567, silent: true }, storage: { cleanSlate: true }, }); }); tap.test('should store and retrieve objects', async () => { - await s3.createBucket('test'); + await storage.createBucket('test'); // ... your test logic using AWS SDK or SmartBucket }); tap.test('teardown', async () => { - await s3.stop(); + await storage.stop(); }); export default tap.start(); ``` -## πŸ”§ API Reference +## API Reference -### `Smarts3` Class +### `SmartStorage` Class -#### `static createAndStart(config?: ISmarts3Config): Promise` +#### `static createAndStart(config?: ISmartStorageConfig): Promise` Create and start a server in one call. @@ -339,11 +339,11 @@ Gracefully stop the server and kill the Rust process. #### `createBucket(name: string): Promise<{ name: string }>` -Create an S3 bucket. +Create a storage bucket. -#### `getS3Descriptor(options?): Promise` +#### `getStorageDescriptor(options?): Promise` -Get connection details for S3 clients. Returns: +Get connection details for S3-compatible clients. Returns: | Field | Type | Description | |-------|------|-------------| @@ -353,16 +353,16 @@ Get connection details for S3 clients. 
Returns: | `accessSecret` | `string` | Secret key from first configured credential | | `useSsl` | `boolean` | Always `false` (plain HTTP) | -## πŸ—οΈ Architecture +## Architecture -smarts3 uses a **hybrid Rust + TypeScript** architecture: +smartstorage uses a **hybrid Rust + TypeScript** architecture: ``` β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ Your Code (AWS SDK, etc.) β”‚ β”‚ ↕ HTTP (localhost:3000) β”‚ β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ -β”‚ rusts3 binary (Rust) β”‚ +β”‚ ruststorage binary (Rust) β”‚ β”‚ β”œβ”€ hyper 1.x HTTP server β”‚ β”‚ β”œβ”€ S3 path-style routing β”‚ β”‚ β”œβ”€ Streaming storage layer β”‚ @@ -372,7 +372,7 @@ smarts3 uses a **hybrid Rust + TypeScript** architecture: β”‚ └─ S3 XML response builder β”‚ β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ β”‚ TypeScript (thin IPC wrapper) β”‚ -β”‚ β”œβ”€ Smarts3 class β”‚ +β”‚ β”œβ”€ SmartStorage class β”‚ β”‚ β”œβ”€ RustBridge (stdin/stdout) β”‚ β”‚ └─ Config & S3 descriptor β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ @@ -380,9 +380,9 @@ smarts3 uses a **hybrid Rust + TypeScript** architecture: **Why Rust?** The TypeScript implementation had critical perf issues: OOM on multipart uploads (parts buffered in memory), double stream copying, file descriptor leaks on HEAD requests, full-file reads for range requests, and no backpressure. The Rust binary solves all of these with streaming I/O, zero-copy, and direct `seek()` for range requests. -**IPC Protocol:** TypeScript spawns the `rusts3` binary with `--management` and communicates via newline-delimited JSON over stdin/stdout. Commands: `start`, `stop`, `createBucket`. 
+**IPC Protocol:** TypeScript spawns the `ruststorage` binary with `--management` and communicates via newline-delimited JSON over stdin/stdout. Commands: `start`, `stop`, `createBucket`. -### S3 Operations Supported +### S3-Compatible Operations Supported | Operation | Method | Path | |-----------|--------|------| @@ -410,9 +410,9 @@ smarts3 uses a **hybrid Rust + TypeScript** architecture: ``` {storage.directory}/ {bucket}/ - {key}._S3_object # Object data - {key}._S3_object.metadata.json # Metadata (content-type, x-amz-meta-*, etc.) - {key}._S3_object.md5 # Cached MD5 hash + {key}._storage_object # Object data + {key}._storage_object.metadata.json # Metadata (content-type, x-amz-meta-*, etc.) + {key}._storage_object.md5 # Cached MD5 hash .multipart/ {upload-id}/ metadata.json # Upload metadata (bucket, key, parts) @@ -423,10 +423,10 @@ smarts3 uses a **hybrid Rust + TypeScript** architecture: {bucket}.policy.json # Bucket policy (IAM JSON format) ``` -## πŸ”— Related Packages +## Related Packages -- [`@push.rocks/smartbucket`](https://code.foss.global/push.rocks/smartbucket) β€” High-level S3 abstraction layer -- [`@push.rocks/smartrust`](https://code.foss.global/push.rocks/smartrust) β€” TypeScript ↔ Rust IPC bridge +- [`@push.rocks/smartbucket`](https://code.foss.global/push.rocks/smartbucket) β€” High-level S3-compatible abstraction layer +- [`@push.rocks/smartrust`](https://code.foss.global/push.rocks/smartrust) β€” TypeScript <-> Rust IPC bridge - [`@git.zone/tsrust`](https://code.foss.global/git.zone/tsrust) β€” Rust cross-compilation for npm packages ## License and Legal Information diff --git a/rust/Cargo.lock b/rust/Cargo.lock index 58fd6de..fc9a864 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ -765,7 +765,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a96887878f22d7bad8a3b6dc5b7440e0ada9a245242924394987b21cf2210a4c" [[package]] -name = "rusts3" +name = "ruststorage" version = "0.1.0" dependencies = [ "anyhow", 
diff --git a/rust/Cargo.toml b/rust/Cargo.toml index 9e7b4e1..550c54a 100644 --- a/rust/Cargo.toml +++ b/rust/Cargo.toml @@ -1,10 +1,10 @@ [package] -name = "rusts3" +name = "ruststorage" version = "0.1.0" edition = "2021" [[bin]] -name = "rusts3" +name = "ruststorage" path = "src/main.rs" [dependencies] diff --git a/rust/src/action.rs b/rust/src/action.rs index 2c1cedc..51adaaf 100644 --- a/rust/src/action.rs +++ b/rust/src/action.rs @@ -2,9 +2,9 @@ use hyper::body::Incoming; use hyper::{Method, Request}; use std::collections::HashMap; -/// S3 actions that map to IAM permission strings. +/// Storage actions that map to IAM permission strings. #[derive(Debug, Clone, PartialEq, Eq)] -pub enum S3Action { +pub enum StorageAction { ListAllMyBuckets, CreateBucket, DeleteBucket, @@ -25,28 +25,28 @@ pub enum S3Action { DeleteBucketPolicy, } -impl S3Action { +impl StorageAction { /// Return the IAM-style action string (e.g. "s3:GetObject"). pub fn iam_action(&self) -> &'static str { match self { - S3Action::ListAllMyBuckets => "s3:ListAllMyBuckets", - S3Action::CreateBucket => "s3:CreateBucket", - S3Action::DeleteBucket => "s3:DeleteBucket", - S3Action::HeadBucket => "s3:ListBucket", - S3Action::ListBucket => "s3:ListBucket", - S3Action::GetObject => "s3:GetObject", - S3Action::HeadObject => "s3:GetObject", - S3Action::PutObject => "s3:PutObject", - S3Action::DeleteObject => "s3:DeleteObject", - S3Action::CopyObject => "s3:PutObject", - S3Action::ListBucketMultipartUploads => "s3:ListBucketMultipartUploads", - S3Action::AbortMultipartUpload => "s3:AbortMultipartUpload", - S3Action::InitiateMultipartUpload => "s3:PutObject", - S3Action::UploadPart => "s3:PutObject", - S3Action::CompleteMultipartUpload => "s3:PutObject", - S3Action::GetBucketPolicy => "s3:GetBucketPolicy", - S3Action::PutBucketPolicy => "s3:PutBucketPolicy", - S3Action::DeleteBucketPolicy => "s3:DeleteBucketPolicy", + StorageAction::ListAllMyBuckets => "s3:ListAllMyBuckets", + StorageAction::CreateBucket => 
"s3:CreateBucket", + StorageAction::DeleteBucket => "s3:DeleteBucket", + StorageAction::HeadBucket => "s3:ListBucket", + StorageAction::ListBucket => "s3:ListBucket", + StorageAction::GetObject => "s3:GetObject", + StorageAction::HeadObject => "s3:GetObject", + StorageAction::PutObject => "s3:PutObject", + StorageAction::DeleteObject => "s3:DeleteObject", + StorageAction::CopyObject => "s3:PutObject", + StorageAction::ListBucketMultipartUploads => "s3:ListBucketMultipartUploads", + StorageAction::AbortMultipartUpload => "s3:AbortMultipartUpload", + StorageAction::InitiateMultipartUpload => "s3:PutObject", + StorageAction::UploadPart => "s3:PutObject", + StorageAction::CompleteMultipartUpload => "s3:PutObject", + StorageAction::GetBucketPolicy => "s3:GetBucketPolicy", + StorageAction::PutBucketPolicy => "s3:PutBucketPolicy", + StorageAction::DeleteBucketPolicy => "s3:DeleteBucketPolicy", } } } @@ -54,7 +54,7 @@ impl S3Action { /// Context extracted from a request, used for policy evaluation. #[derive(Debug, Clone)] pub struct RequestContext { - pub action: S3Action, + pub action: StorageAction, pub bucket: Option, pub key: Option, } @@ -70,7 +70,7 @@ impl RequestContext { } } -/// Resolve the S3 action from an incoming HTTP request. +/// Resolve the storage action from an incoming HTTP request. 
pub fn resolve_action(req: &Request) -> RequestContext { let method = req.method().clone(); let path = req.uri().path().to_string(); @@ -87,7 +87,7 @@ pub fn resolve_action(req: &Request) -> RequestContext { 0 => { // Root: GET / -> ListBuckets RequestContext { - action: S3Action::ListAllMyBuckets, + action: StorageAction::ListAllMyBuckets, bucket: None, key: None, } @@ -98,15 +98,15 @@ pub fn resolve_action(req: &Request) -> RequestContext { let has_uploads = query.contains_key("uploads"); let action = match (&method, has_policy, has_uploads) { - (&Method::GET, true, _) => S3Action::GetBucketPolicy, - (&Method::PUT, true, _) => S3Action::PutBucketPolicy, - (&Method::DELETE, true, _) => S3Action::DeleteBucketPolicy, - (&Method::GET, _, true) => S3Action::ListBucketMultipartUploads, - (&Method::GET, _, _) => S3Action::ListBucket, - (&Method::PUT, _, _) => S3Action::CreateBucket, - (&Method::DELETE, _, _) => S3Action::DeleteBucket, - (&Method::HEAD, _, _) => S3Action::HeadBucket, - _ => S3Action::ListBucket, + (&Method::GET, true, _) => StorageAction::GetBucketPolicy, + (&Method::PUT, true, _) => StorageAction::PutBucketPolicy, + (&Method::DELETE, true, _) => StorageAction::DeleteBucketPolicy, + (&Method::GET, _, true) => StorageAction::ListBucketMultipartUploads, + (&Method::GET, _, _) => StorageAction::ListBucket, + (&Method::PUT, _, _) => StorageAction::CreateBucket, + (&Method::DELETE, _, _) => StorageAction::DeleteBucket, + (&Method::HEAD, _, _) => StorageAction::HeadBucket, + _ => StorageAction::ListBucket, }; RequestContext { @@ -125,16 +125,16 @@ pub fn resolve_action(req: &Request) -> RequestContext { let has_uploads = query.contains_key("uploads"); let action = match &method { - &Method::PUT if has_part_number && has_upload_id => S3Action::UploadPart, - &Method::PUT if has_copy_source => S3Action::CopyObject, - &Method::PUT => S3Action::PutObject, - &Method::GET => S3Action::GetObject, - &Method::HEAD => S3Action::HeadObject, - &Method::DELETE if 
has_upload_id => S3Action::AbortMultipartUpload, - &Method::DELETE => S3Action::DeleteObject, - &Method::POST if has_uploads => S3Action::InitiateMultipartUpload, - &Method::POST if has_upload_id => S3Action::CompleteMultipartUpload, - _ => S3Action::GetObject, + &Method::PUT if has_part_number && has_upload_id => StorageAction::UploadPart, + &Method::PUT if has_copy_source => StorageAction::CopyObject, + &Method::PUT => StorageAction::PutObject, + &Method::GET => StorageAction::GetObject, + &Method::HEAD => StorageAction::HeadObject, + &Method::DELETE if has_upload_id => StorageAction::AbortMultipartUpload, + &Method::DELETE => StorageAction::DeleteObject, + &Method::POST if has_uploads => StorageAction::InitiateMultipartUpload, + &Method::POST if has_upload_id => StorageAction::CompleteMultipartUpload, + _ => StorageAction::GetObject, }; RequestContext { @@ -144,7 +144,7 @@ pub fn resolve_action(req: &Request) -> RequestContext { } } _ => RequestContext { - action: S3Action::ListAllMyBuckets, + action: StorageAction::ListAllMyBuckets, bucket: None, key: None, }, diff --git a/rust/src/auth.rs b/rust/src/auth.rs index 8087c25..ba374cf 100644 --- a/rust/src/auth.rs +++ b/rust/src/auth.rs @@ -4,8 +4,8 @@ use hyper::Request; use sha2::{Digest, Sha256}; use std::collections::HashMap; -use crate::config::{Credential, S3Config}; -use crate::s3_error::S3Error; +use crate::config::{Credential, SmartStorageConfig}; +use crate::error::StorageError; type HmacSha256 = Hmac<Sha256>; @@ -27,8 +27,8 @@ struct SigV4Header { /// Verify the request's SigV4 signature. Returns the caller identity on success. 
pub fn verify_request( req: &Request, - config: &S3Config, -) -> Result<AuthenticatedIdentity, S3Error> { + config: &SmartStorageConfig, +) -> Result<AuthenticatedIdentity, StorageError> { let auth_header = req .headers() .get("authorization") @@ -37,18 +37,18 @@ pub fn verify_request( // Reject SigV2 if auth_header.starts_with("AWS ") { - return Err(S3Error::authorization_header_malformed()); + return Err(StorageError::authorization_header_malformed()); } if !auth_header.starts_with("AWS4-HMAC-SHA256") { - return Err(S3Error::authorization_header_malformed()); + return Err(StorageError::authorization_header_malformed()); } let parsed = parse_auth_header(auth_header)?; // Look up credential let credential = find_credential(&parsed.access_key_id, config) - .ok_or_else(S3Error::invalid_access_key_id)?; + .ok_or_else(StorageError::invalid_access_key_id)?; // Get x-amz-date let amz_date = req @@ -60,7 +60,7 @@ pub fn verify_request( .get("date") .and_then(|v| v.to_str().ok()) }) - .ok_or_else(|| S3Error::missing_security_header("Missing x-amz-date header"))?; + .ok_or_else(|| StorageError::missing_security_header("Missing x-amz-date header"))?; // Enforce 15-min clock skew check_clock_skew(amz_date)?; @@ -99,7 +99,7 @@ pub fn verify_request( // Constant-time comparison if !constant_time_eq(computed_hex.as_bytes(), parsed.signature.as_bytes()) { - return Err(S3Error::signature_does_not_match()); + return Err(StorageError::signature_does_not_match()); } Ok(AuthenticatedIdentity { @@ -108,11 +108,11 @@ pub fn verify_request( } /// Parse the Authorization header into its components. -fn parse_auth_header(header: &str) -> Result<SigV4Header, S3Error> { +fn parse_auth_header(header: &str) -> Result<SigV4Header, StorageError> { // Format: AWS4-HMAC-SHA256 Credential=KEY/YYYYMMDD/region/s3/aws4_request, SignedHeaders=h1;h2, Signature=hex let after_algo = header .strip_prefix("AWS4-HMAC-SHA256") - .ok_or_else(S3Error::authorization_header_malformed)? + .ok_or_else(StorageError::authorization_header_malformed)? 
.trim(); let mut credential_str = None; @@ -131,17 +131,17 @@ fn parse_auth_header(header: &str) -> Result { } let credential_str = credential_str - .ok_or_else(S3Error::authorization_header_malformed)?; + .ok_or_else(StorageError::authorization_header_malformed)?; let signed_headers_str = signed_headers_str - .ok_or_else(S3Error::authorization_header_malformed)?; + .ok_or_else(StorageError::authorization_header_malformed)?; let signature = signature_str - .ok_or_else(S3Error::authorization_header_malformed)? + .ok_or_else(StorageError::authorization_header_malformed)? .to_string(); // Parse credential: KEY/YYYYMMDD/region/s3/aws4_request let cred_parts: Vec<&str> = credential_str.splitn(5, '/').collect(); if cred_parts.len() < 5 { - return Err(S3Error::authorization_header_malformed()); + return Err(StorageError::authorization_header_malformed()); } let access_key_id = cred_parts[0].to_string(); @@ -163,7 +163,7 @@ fn parse_auth_header(header: &str) -> Result { } /// Find a credential by access key ID. -fn find_credential<'a>(access_key_id: &str, config: &'a S3Config) -> Option<&'a Credential> { +fn find_credential<'a>(access_key_id: &str, config: &'a SmartStorageConfig) -> Option<&'a Credential> { config .auth .credentials @@ -172,17 +172,17 @@ fn find_credential<'a>(access_key_id: &str, config: &'a S3Config) -> Option<&'a } /// Check clock skew (15 minutes max). 
-fn check_clock_skew(amz_date: &str) -> Result<(), S3Error> { +fn check_clock_skew(amz_date: &str) -> Result<(), StorageError> { // Parse ISO 8601 basic format: YYYYMMDDTHHMMSSZ let parsed = chrono::NaiveDateTime::parse_from_str(amz_date, "%Y%m%dT%H%M%SZ") - .map_err(|_| S3Error::authorization_header_malformed())?; + .map_err(|_| StorageError::authorization_header_malformed())?; let request_time = chrono::DateTime::<chrono::Utc>::from_naive_utc_and_offset(parsed, chrono::Utc); let now = chrono::Utc::now(); let diff = (now - request_time).num_seconds().unsigned_abs(); if diff > 15 * 60 { - return Err(S3Error::request_time_too_skewed()); + return Err(StorageError::request_time_too_skewed()); } Ok(()) diff --git a/rust/src/config.rs b/rust/src/config.rs index ed86f5c..aa25604 100644 --- a/rust/src/config.rs +++ b/rust/src/config.rs @@ -2,7 +2,7 @@ use serde::{Deserialize, Serialize}; #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] -pub struct S3Config { +pub struct SmartStorageConfig { pub server: ServerConfig, pub storage: StorageConfig, pub auth: AuthConfig, diff --git a/rust/src/s3_error.rs b/rust/src/error.rs similarity index 97% rename from rust/src/s3_error.rs rename to rust/src/error.rs index 571142b..415b107 100644 --- a/rust/src/s3_error.rs +++ b/rust/src/error.rs @@ -1,14 +1,14 @@ use hyper::StatusCode; #[derive(Debug, thiserror::Error)] -#[error("S3Error({code}): {message}")] -pub struct S3Error { +#[error("StorageError({code}): {message}")] +pub struct StorageError { pub code: String, pub message: String, pub status: StatusCode, } -impl S3Error { +impl StorageError { pub fn new(code: &str, message: &str, status: StatusCode) -> Self { Self { code: code.to_string(), diff --git a/rust/src/main.rs b/rust/src/main.rs index ec13723..3970501 100644 --- a/rust/src/main.rs +++ b/rust/src/main.rs @@ -3,7 +3,7 @@ mod auth; mod config; mod management; mod policy; -mod s3_error; +mod error; mod server; mod storage; mod xml_response; @@ -11,7 +11,7 
@@ mod xml_response; use clap::Parser; #[derive(Parser)] -#[command(name = "rusts3", about = "High-performance S3-compatible server")] +#[command(name = "ruststorage", about = "High-performance S3-compatible storage server")] struct Cli { /// Run in management mode (IPC via stdin/stdout) #[arg(long)] @@ -38,7 +38,7 @@ async fn main() -> anyhow::Result<()> { management::management_loop().await?; } else { - eprintln!("rusts3: use --management flag for IPC mode"); + eprintln!("ruststorage: use --management flag for IPC mode"); std::process::exit(1); } diff --git a/rust/src/management.rs b/rust/src/management.rs index e5654fc..68e86ad 100644 --- a/rust/src/management.rs +++ b/rust/src/management.rs @@ -4,8 +4,8 @@ use serde_json::Value; use std::io::Write; use tokio::io::{AsyncBufReadExt, BufReader}; -use crate::config::S3Config; -use crate::server::S3Server; +use crate::config::SmartStorageConfig; +use crate::server::StorageServer; #[derive(Deserialize)] struct IpcRequest { @@ -62,7 +62,7 @@ pub async fn management_loop() -> Result<()> { data: serde_json::json!({}), }); - let mut server: Option<S3Server> = None; + let mut server: Option<StorageServer> = None; let stdin = BufReader::new(tokio::io::stdin()); let mut lines = stdin.lines(); @@ -87,11 +87,11 @@ pub async fn management_loop() -> Result<()> { "start" => { #[derive(Deserialize)] struct StartParams { - config: S3Config, + config: SmartStorageConfig, } match serde_json::from_value::<StartParams>(req.params) { Ok(params) => { - match S3Server::start(params.config).await { + match StorageServer::start(params.config).await { Ok(s) => { server = Some(s); send_response(id, serde_json::json!({})); diff --git a/rust/src/policy.rs b/rust/src/policy.rs index 349f80b..f029af0 100644 --- a/rust/src/policy.rs +++ b/rust/src/policy.rs @@ -6,7 +6,7 @@ use tokio::sync::RwLock; use crate::action::RequestContext; use crate::auth::AuthenticatedIdentity; -use crate::s3_error::S3Error; +use crate::error::StorageError; // ============================ // Policy data 
model @@ -284,50 +284,50 @@ fn simple_wildcard_match(pattern: &str, value: &str) -> bool { const MAX_POLICY_SIZE: usize = 20 * 1024; // 20 KB -pub fn validate_policy(json: &str) -> Result { +pub fn validate_policy(json: &str) -> Result { if json.len() > MAX_POLICY_SIZE { - return Err(S3Error::malformed_policy("Policy exceeds maximum size of 20KB")); + return Err(StorageError::malformed_policy("Policy exceeds maximum size of 20KB")); } let policy: BucketPolicy = - serde_json::from_str(json).map_err(|e| S3Error::malformed_policy(&e.to_string()))?; + serde_json::from_str(json).map_err(|e| StorageError::malformed_policy(&e.to_string()))?; if policy.version != "2012-10-17" { - return Err(S3Error::malformed_policy( + return Err(StorageError::malformed_policy( "Policy version must be \"2012-10-17\"", )); } if policy.statements.is_empty() { - return Err(S3Error::malformed_policy( + return Err(StorageError::malformed_policy( "Policy must contain at least one statement", )); } for (i, stmt) in policy.statements.iter().enumerate() { if stmt.action.is_empty() { - return Err(S3Error::malformed_policy(&format!( + return Err(StorageError::malformed_policy(&format!( "Statement {} has no actions", i ))); } for action in &stmt.action { if action != "*" && !action.starts_with("s3:") { - return Err(S3Error::malformed_policy(&format!( + return Err(StorageError::malformed_policy(&format!( "Action \"{}\" must start with \"s3:\"", action ))); } } if stmt.resource.is_empty() { - return Err(S3Error::malformed_policy(&format!( + return Err(StorageError::malformed_policy(&format!( "Statement {} has no resources", i ))); } for resource in &stmt.resource { if resource != "*" && !resource.starts_with("arn:aws:s3:::") { - return Err(S3Error::malformed_policy(&format!( + return Err(StorageError::malformed_policy(&format!( "Resource \"{}\" must start with \"arn:aws:s3:::\"", resource ))); diff --git a/rust/src/server.rs b/rust/src/server.rs index 796f8c2..706b61f 100644 --- a/rust/src/server.rs +++ 
b/rust/src/server.rs @@ -18,22 +18,22 @@ use tokio::sync::watch; use tokio_util::io::ReaderStream; use uuid::Uuid; -use crate::action::{self, RequestContext, S3Action}; +use crate::action::{self, RequestContext, StorageAction}; use crate::auth::{self, AuthenticatedIdentity}; -use crate::config::S3Config; +use crate::config::SmartStorageConfig; use crate::policy::{self, PolicyDecision, PolicyStore}; -use crate::s3_error::S3Error; +use crate::error::StorageError; use crate::storage::FileStore; use crate::xml_response; -pub struct S3Server { +pub struct StorageServer { store: Arc, shutdown_tx: watch::Sender, server_handle: tokio::task::JoinHandle<()>, } -impl S3Server { - pub async fn start(config: S3Config) -> Result { +impl StorageServer { + pub async fn start(config: SmartStorageConfig) -> Result { let store = Arc::new(FileStore::new(config.storage.directory.clone().into())); // Initialize or reset storage @@ -104,7 +104,7 @@ impl S3Server { }); if !config.server.silent { - tracing::info!("S3 server listening on {}", addr); + tracing::info!("Storage server listening on {}", addr); } Ok(Self { @@ -124,7 +124,7 @@ impl S3Server { } } -impl S3Config { +impl SmartStorageConfig { fn address(&self) -> &str { &self.server.address } @@ -192,7 +192,7 @@ fn empty_response(status: StatusCode, request_id: &str) -> Response { .unwrap() } -fn s3_error_response(err: &S3Error, request_id: &str) -> Response { +fn storage_error_response(err: &StorageError, request_id: &str) -> Response { let xml = err.to_xml(); Response::builder() .status(err.status) @@ -205,7 +205,7 @@ fn s3_error_response(err: &S3Error, request_id: &str) -> Response { async fn handle_request( req: Request, store: Arc, - config: S3Config, + config: SmartStorageConfig, policy_store: Arc, ) -> Result, std::convert::Infallible> { let request_id = Uuid::new_v4().to_string(); @@ -219,7 +219,7 @@ async fn handle_request( return Ok(resp); } - // Step 1: Resolve S3 action from request + // Step 1: Resolve storage action 
from request let request_ctx = action::resolve_action(&req); // Step 2: Auth + policy pipeline @@ -238,7 +238,7 @@ async fn handle_request( Ok(id) => Some(id), Err(e) => { tracing::warn!("Auth failed: {}", e.message); - return Ok(s3_error_response(&e, &request_id)); + return Ok(storage_error_response(&e, &request_id)); } } } else { @@ -248,7 +248,7 @@ async fn handle_request( // Step 3: Authorization (policy evaluation) if let Err(e) = authorize_request(&request_ctx, identity.as_ref(), &policy_store).await { - return Ok(s3_error_response(&e, &request_id)); + return Ok(storage_error_response(&e, &request_id)); } } @@ -256,12 +256,12 @@ async fn handle_request( let mut response = match route_request(req, store, &config, &request_id, &policy_store).await { Ok(resp) => resp, Err(err) => { - if let Some(s3err) = err.downcast_ref::() { - s3_error_response(s3err, &request_id) + if let Some(s3err) = err.downcast_ref::() { + storage_error_response(s3err, &request_id) } else { tracing::error!("Internal error: {}", err); - let s3err = S3Error::internal_error(&err.to_string()); - s3_error_response(&s3err, &request_id) + let s3err = StorageError::internal_error(&err.to_string()); + storage_error_response(&s3err, &request_id) } } }; @@ -288,11 +288,11 @@ async fn authorize_request( ctx: &RequestContext, identity: Option<&AuthenticatedIdentity>, policy_store: &PolicyStore, -) -> Result<(), S3Error> { +) -> Result<(), StorageError> { // ListAllMyBuckets requires authentication (no bucket to apply policy to) - if ctx.action == S3Action::ListAllMyBuckets { + if ctx.action == StorageAction::ListAllMyBuckets { if identity.is_none() { - return Err(S3Error::access_denied()); + return Err(StorageError::access_denied()); } return Ok(()); } @@ -302,7 +302,7 @@ async fn authorize_request( if let Some(bucket_policy) = policy_store.get_policy(bucket).await { let decision = policy::evaluate_policy(&bucket_policy, ctx, identity); match decision { - PolicyDecision::Deny => return 
Err(S3Error::access_denied()), + PolicyDecision::Deny => return Err(StorageError::access_denied()), PolicyDecision::Allow => return Ok(()), PolicyDecision::NoOpinion => { // Fall through to default behavior @@ -313,7 +313,7 @@ async fn authorize_request( // Default: authenticated users get full access, anonymous denied if identity.is_none() { - return Err(S3Error::access_denied()); + return Err(StorageError::access_denied()); } Ok(()) @@ -326,7 +326,7 @@ async fn authorize_request( async fn route_request( req: Request, store: Arc, - _config: &S3Config, + _config: &SmartStorageConfig, request_id: &str, policy_store: &Arc, ) -> Result> { @@ -414,8 +414,8 @@ async fn route_request( let upload_id = query.get("uploadId").unwrap().clone(); handle_complete_multipart(req, store, &bucket, &key, &upload_id, request_id).await } else { - let err = S3Error::invalid_request("Invalid POST request"); - Ok(s3_error_response(&err, request_id)) + let err = StorageError::invalid_request("Invalid POST request"); + Ok(storage_error_response(&err, request_id)) } } _ => Ok(empty_response(StatusCode::METHOD_NOT_ALLOWED, request_id)), @@ -467,7 +467,7 @@ async fn handle_head_bucket( if store.bucket_exists(bucket).await { Ok(empty_response(StatusCode::OK, request_id)) } else { - Err(S3Error::no_such_bucket().into()) + Err(StorageError::no_such_bucket().into()) } } @@ -682,7 +682,7 @@ async fn handle_get_bucket_policy( .unwrap(); Ok(resp) } - None => Err(S3Error::no_such_bucket_policy().into()), + None => Err(StorageError::no_such_bucket_policy().into()), } } @@ -695,7 +695,7 @@ async fn handle_put_bucket_policy( ) -> Result> { // Verify bucket exists if !store.bucket_exists(bucket).await { - return Err(S3Error::no_such_bucket().into()); + return Err(StorageError::no_such_bucket().into()); } // Read body @@ -709,7 +709,7 @@ async fn handle_put_bucket_policy( policy_store .put_policy(bucket, validated_policy) .await - .map_err(|e| S3Error::internal_error(&e.to_string()))?; + .map_err(|e| 
StorageError::internal_error(&e.to_string()))?; Ok(empty_response(StatusCode::NO_CONTENT, request_id)) } @@ -722,7 +722,7 @@ async fn handle_delete_bucket_policy( policy_store .delete_policy(bucket) .await - .map_err(|e| S3Error::internal_error(&e.to_string()))?; + .map_err(|e| StorageError::internal_error(&e.to_string()))?; Ok(empty_response(StatusCode::NO_CONTENT, request_id)) } @@ -756,7 +756,7 @@ async fn handle_upload_part( .unwrap_or(0); if part_number < 1 || part_number > 10000 { - return Err(S3Error::invalid_part_number().into()); + return Err(StorageError::invalid_part_number().into()); } let body = req.into_body(); @@ -925,7 +925,7 @@ fn extract_xml_value<'a>(xml: &'a str, tag: &str) -> Option { // CORS // ============================ -fn build_cors_preflight(config: &S3Config, request_id: &str) -> Response { +fn build_cors_preflight(config: &SmartStorageConfig, request_id: &str) -> Response { let mut builder = Response::builder() .status(StatusCode::NO_CONTENT) .header("x-amz-request-id", request_id); @@ -949,7 +949,7 @@ fn build_cors_preflight(config: &S3Config, request_id: &str) -> Response