Compare commits

38 Commits

Author SHA1 Message Date
d437ffc226 v5.3.0
Some checks failed
Default (tags) / security (push) Successful in 37s
Default (tags) / test (push) Failing after 26s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-17 16:50:04 +00:00
e36758f183 feat(auth): add AWS SigV4 authentication and bucket policy support 2026-02-17 16:50:04 +00:00
adf45dce2d v5.2.0
Some checks failed
Default (tags) / security (push) Successful in 40s
Default (tags) / test (push) Failing after 27s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-17 16:28:50 +00:00
eb232b6e8e feat(auth,policy): add AWS SigV4 authentication and S3 bucket policy support 2026-02-17 16:28:50 +00:00
0b9d8c4a72 v5.1.1
Some checks failed
Default (tags) / security (push) Has been cancelled
Default (tags) / test (push) Has been cancelled
Default (tags) / release (push) Has been cancelled
Default (tags) / metadata (push) Has been cancelled
2026-02-13 13:59:44 +00:00
65eb266983 fix(smarts3): replace TypeScript server with Rust-powered core and IPC bridge 2026-02-13 13:59:44 +00:00
54a0c2fb65 v5.1.0
Some checks failed
Default (tags) / security (push) Successful in 38s
Default (tags) / test (push) Failing after 37s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2025-11-23 23:31:26 +00:00
648ff98c2d feat(multipart): Implement full multipart upload support with persistent manager, periodic cleanup, and API integration 2025-11-23 23:31:26 +00:00
d6f178bde6 v5.0.2
Some checks failed
Default (tags) / security (push) Successful in 24s
Default (tags) / test (push) Failing after 35s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2025-11-23 22:53:39 +00:00
ffaef5cb15 fix(readme): Clarify contribution agreement requirement in README 2025-11-23 22:53:39 +00:00
d4cc1d43ea v5.0.1
Some checks failed
Default (tags) / security (push) Successful in 35s
Default (tags) / test (push) Failing after 35s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2025-11-23 22:52:19 +00:00
759becdd04 fix(docs): Clarify README wording about S3 compatibility and AWS SDK usage 2025-11-23 22:52:19 +00:00
51e8836227 v5.0.0
Some checks failed
Default (tags) / security (push) Successful in 25s
Default (tags) / test (push) Failing after 35s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2025-11-23 22:46:42 +00:00
3c0a54e08b BREAKING CHANGE(core): Production-ready S3-compatible server: nested config, multipart uploads, CORS, structured logging, SmartFS migration and improved error handling 2025-11-23 22:46:42 +00:00
c074a5d2ed v4.0.0
Some checks failed
Default (tags) / security (push) Successful in 36s
Default (tags) / test (push) Failing after 37s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2025-11-23 22:42:47 +00:00
a9ba9de6be BREAKING CHANGE(Smarts3): Migrate Smarts3 configuration to nested server/storage objects and remove legacy flat config support 2025-11-23 22:42:47 +00:00
263e7a58b9 v3.2.0
Some checks failed
Default (tags) / security (push) Successful in 25s
Default (tags) / test (push) Failing after 35s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2025-11-23 22:41:46 +00:00
74b81d7ba8 feat(multipart): Add multipart upload support with MultipartUploadManager and controller integration 2025-11-23 22:41:46 +00:00
0d4837184f v3.1.0
Some checks failed
Default (tags) / security (push) Successful in 38s
Default (tags) / test (push) Failing after 36s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2025-11-23 22:37:32 +00:00
7f3de92961 feat(logging): Add structured Logger and integrate into Smarts3Server; pass full config to server 2025-11-23 22:37:32 +00:00
a7bc902dd0 v3.0.4
Some checks failed
Default (tags) / security (push) Successful in 34s
Default (tags) / test (push) Failing after 36s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2025-11-23 22:31:44 +00:00
95d78d0d08 fix(smarts3): Use filesystem store for bucket creation and remove smartbucket runtime dependency 2025-11-23 22:31:44 +00:00
b62cb0bc97 v3.0.3
Some checks failed
Default (tags) / security (push) Successful in 39s
Default (tags) / test (push) Failing after 37s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2025-11-23 22:12:29 +00:00
32346636e0 fix(filesystem): Migrate filesystem implementation to @push.rocks/smartfs and add Web Streams handling 2025-11-23 22:12:29 +00:00
415ba3e76d v3.0.2
Some checks failed
Default (tags) / security (push) Successful in 41s
Default (tags) / test (push) Failing after 36s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2025-11-21 18:36:27 +00:00
6594f67d3e fix(smarts3): Prepare patch release 3.0.2 — no code changes detected 2025-11-21 18:36:27 +00:00
61974e0b54 v3.0.1
Some checks failed
Default (tags) / security (push) Successful in 40s
Default (tags) / test (push) Failing after 46s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2025-11-21 17:09:16 +00:00
fc845956fa fix(readme): Add Issue Reporting and Security section to README 2025-11-21 17:09:16 +00:00
eec1e09d2b v3.0.0
Some checks failed
Default (tags) / security (push) Successful in 25s
Default (tags) / test (push) Failing after 35s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2025-11-21 14:36:30 +00:00
c3daf9d3f7 BREAKING CHANGE(Smarts3): Remove legacy s3rver backend, simplify Smarts3 server API, and bump dependencies 2025-11-21 14:36:30 +00:00
654f47b7fc v2.3.0
Some checks failed
Default (tags) / security (push) Successful in 39s
Default (tags) / test (push) Failing after 36s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2025-11-21 14:32:19 +00:00
18a2eb7e3f feat(smarts3-server): Introduce native custom S3 server implementation (Smarts3Server) with routing, middleware, context, filesystem store, controllers and XML utilities; add SmartXml and AWS SDK test; keep optional legacy s3rver backend. 2025-11-21 14:32:19 +00:00
3ab667049a v2.2.7
Some checks failed
Default (tags) / security (push) Successful in 42s
Default (tags) / test (push) Failing after 37s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2025-11-20 08:10:17 +00:00
871f0e0b78 fix(core): Update dependencies, code style and project config; add pnpm overrides and ignore AI folders 2025-11-20 08:10:17 +00:00
f2bf3f1314 2.2.6
Some checks failed
Default (tags) / security (push) Successful in 42s
Default (tags) / test (push) Successful in 53s
Default (tags) / release (push) Failing after 44s
Default (tags) / metadata (push) Successful in 54s
2025-08-16 16:22:15 +00:00
a3b40923a5 fix(Smarts3): Allow overriding S3 descriptor; update dependencies, test config and documentation 2025-08-16 16:22:15 +00:00
bdcfcee37a 2.2.5
Some checks failed
Default (tags) / security (push) Successful in 41s
Default (tags) / test (push) Successful in 1m54s
Default (tags) / release (push) Failing after 1m38s
Default (tags) / metadata (push) Successful in 2m13s
2024-11-06 17:03:10 +01:00
d41878721c fix(ci): Corrected docker image URLs in Gitea workflows to match the correct domain format. 2024-11-06 17:03:10 +01:00
33 changed files with 14568 additions and 6714 deletions

.gitea/workflows/default_nottags.yaml

@@ -6,7 +6,7 @@ on:
      - '**'
env:
-  IMAGE: code.foss.global/hosttoday/ht-docker-node:npmci
+  IMAGE: code.foss.global/host.today/ht-docker-node:npmci
  NPMCI_COMPUTED_REPOURL: https://${{gitea.repository_owner}}:${{secrets.GITEA_TOKEN}}@/${{gitea.repository}}.git
  NPMCI_TOKEN_NPM: ${{secrets.NPMCI_TOKEN_NPM}}
  NPMCI_TOKEN_NPM2: ${{secrets.NPMCI_TOKEN_NPM2}}

.gitea/workflows/default_tags.yaml

@@ -6,7 +6,7 @@ on:
      - '*'
env:
-  IMAGE: code.foss.global/hosttoday/ht-docker-node:npmci
+  IMAGE: code.foss.global/host.today/ht-docker-node:npmci
  NPMCI_COMPUTED_REPOURL: https://${{gitea.repository_owner}}:${{secrets.GITEA_TOKEN}}@/${{gitea.repository}}.git
  NPMCI_TOKEN_NPM: ${{secrets.NPMCI_TOKEN_NPM}}
  NPMCI_TOKEN_NPM2: ${{secrets.NPMCI_TOKEN_NPM2}}

.gitignore (vendored, 8 lines changed)

@@ -3,7 +3,6 @@
# artifacts
coverage/
public/
-pages/
# installs
node_modules/
@@ -17,4 +16,9 @@ node_modules/
dist/
dist_*/
-# custom
+# AI
+.claude/
+.serena/
+#------# custom
+rust/target

changelog.md

@@ -1,6 +1,176 @@
# Changelog
## 2026-02-17 - 5.3.0 - feat(auth)
add AWS SigV4 authentication and bucket policy support
- Implement AWS SigV4 full verification (constant-time comparison, 15-minute clock skew enforcement) and expose default signing region (server.region = 'us-east-1').
- Add IAM-style bucket policy engine with Put/Get/Delete policy APIs (GetBucketPolicy/PutBucketPolicy/DeleteBucketPolicy), wildcard action/resource matching, Allow/Deny evaluation, and on-disk persistence under .policies/{bucket}.policy.json.
- Documentation and README expanded with policy usage, examples, API table entries, and notes about policy CRUD and behavior for anonymous/authenticated requests.
- Rust code refactors: simplify storage/server result structs and multipart handling (removed several unused size/key/bucket fields), remove S3Error::to_response and error_xml helpers, and other internal cleanup to support new auth/policy features.
## 2026-02-17 - 5.2.0 - feat(auth,policy)
add AWS SigV4 authentication and S3 bucket policy support
- Implemented real AWS SigV4 verification (HMAC-SHA256), including x-amz-date handling, clock skew enforcement and constant-time signature comparison
- Added bucket policy model, validator and evaluation engine (Deny > Allow > NoOpinion) with a PolicyStore (RwLock cache + disk-backed .policies/*.policy.json)
- Integrated action resolution and auth+policy pipeline into the HTTP server: authorization checks run per-request, anonymous requests are denied by default, ListAllMyBuckets requires authentication
- Added bucket policy CRUD handlers via ?policy query parameter (GET/PUT/DELETE) and cleanup of policies on bucket deletion
- Storage and config updates: created .policies dir and policy path helpers; default region added to server config (TS + Rust)
- Added comprehensive tests for auth and policy behavior (policy CRUD, evaluation, per-action enforcement, auth integration)
- Updated Rust dependencies and Cargo.toml/Cargo.lock to include hmac, sha2, hex, subtle, cpufeatures
## 2026-02-13 - 5.1.1 - fix(smarts3)
replace TypeScript server with Rust-powered core and IPC bridge
- Major refactor: Node.js/TypeScript in-process server replaced by a Rust crate ('rusts3') with a TypeScript IPC wrapper (RustBridge).
- Removed many TypeScript server modules (smarts3-server, filesystem-store, multipart-manager, controllers, router, context, logger, xml utils, etc.); Smarts3Server export removed — public API now proxies to the Rust binary.
- Smarts3 now spawns and communicates with the rusts3 binary via RustBridge IPC (commands include start, stop, createBucket).
- Build & packaging changes: build script now runs `tsrust` before `tsbuild`; added `@git.zone/tsrust` devDependency; added `dist_rust` artifacts and new cross-compile targets in npmextra.json; .gitignore updated for rust/target.
- Dependency changes: added `@push.rocks/smartrust` (RustBridge) and simplified plugins surface; previous smartfs/smartxml usage removed from TS code and replaced by the Rust implementation + IPC.
- Added Rust project files (rust/Cargo.toml, rust/src/*) implementing server, IPC management loop, storage, XML responses, errors, and config.
- Documentation updated (README and hints) to describe the Rust core, supported prebuilt targets (linux_amd64, linux_arm64), IPC commands, and developer build notes.
## 2025-11-23 - 5.1.0 - feat(multipart)
Implement full multipart upload support with persistent manager, periodic cleanup, and API integration
- Add IMultipartConfig to server config with defaults (expirationDays: 7, cleanupIntervalMinutes: 60) and merge into existing config flow
- Introduce MultipartUploadManager: persistent upload metadata on disk, part upload/assembly, restore uploads on startup, listParts/listUploads, abort/cleanup functionality
- Start and stop multipart cleanup task from Smarts3Server lifecycle (startCleanupTask on start, stopCleanupTask on stop) with configurable interval and expiration
- ObjectController: support multipart endpoints (initiate, upload part, complete, abort) and move assembled final object into the object store on completion; set ETag headers and return proper XML responses
- BucketController: support listing in-progress multipart uploads via ?uploads query parameter and return S3-compatible XML
- Persist multipart state to disk and restore on initialization to survive restarts; perform automatic cleanup of expired uploads
## 2025-11-23 - 5.0.2 - fix(readme)
Clarify contribution agreement requirement in README
- Updated the Issue Reporting and Security section in readme.md to make it explicit that developers must sign and comply with the contribution agreement (and complete identification) before obtaining a code.foss.global account to submit pull requests.
## 2025-11-23 - 5.0.1 - fix(docs)
Clarify README wording about S3 compatibility and AWS SDK usage
- Update README wording to "Full S3 API compatibility" and clarify it works seamlessly with AWS SDK v3 and other S3 clients
## 2025-11-23 - 5.0.0 - BREAKING CHANGE(core)
Production-ready S3-compatible server: nested config, multipart uploads, CORS, structured logging, SmartFS migration and improved error handling
- Breaking change: configuration format migrated from flat to nested structure (server, storage, auth, cors, logging, limits). Update existing configs accordingly.
- Implemented full multipart upload support (initiate, upload part, complete, abort) with on-disk part management and final assembly.
- Added CORS middleware with configurable origins, methods, headers, exposed headers, maxAge and credentials support.
- Structured, configurable logging (levels: error|warn|info|debug; formats: text|json) and request/response logging middleware.
- Simple static credential authentication middleware (configurable list of credentials).
- Migrated filesystem operations to @push.rocks/smartfs (Web Streams interoperability) and removed smartbucket from production dependencies.
- Improved S3-compatible error handling and XML responses (S3Error class and XML utilities).
- Exposed Smarts3Server and made store/multipart managers accessible for tests and advanced usage; added helper methods like getS3Descriptor and createBucket.
## 2025-11-23 - 4.0.0 - BREAKING CHANGE(Smarts3)
Migrate Smarts3 configuration to nested server/storage objects and remove legacy flat config support
- Smarts3.createAndStart() and Smarts3 constructor now accept ISmarts3Config with nested `server` and `storage` objects.
- Removed support for the legacy flat config shape (top-level `port` and `cleanSlate`) / ILegacySmarts3Config.
- Updated tests to use new config shape (server:{ port, silent } and storage:{ cleanSlate }).
- mergeConfig and Smarts3Server now rely on the nested config shape; consumers must update their initialization code.
## 2025-11-23 - 3.2.0 - feat(multipart)
Add multipart upload support with MultipartUploadManager and controller integration
- Introduce MultipartUploadManager (ts/classes/multipart-manager.ts) to manage multipart upload lifecycle and store parts on disk
- Wire multipart manager into server and request context (S3Context, Smarts3Server) and initialize multipart storage on server start
- Add multipart-related routes and handlers in ObjectController: initiate (POST ?uploads), upload part (PUT ?partNumber&uploadId), complete (POST ?uploadId), and abort (DELETE ?uploadId)
- On complete, combine parts into final object and store via existing FilesystemStore workflow
- Expose multipart manager on Smarts3Server for controller access
## 2025-11-23 - 3.1.0 - feat(logging)
Add structured Logger and integrate into Smarts3Server; pass full config to server
- Introduce a new Logger class (ts/classes/logger.ts) providing leveled logging (error, warn, info, debug), text/json formats and an enable flag.
- Integrate Logger into Smarts3Server: use structured logging for server lifecycle events, HTTP request/response logging and S3 errors instead of direct console usage.
- Smarts3 now passes the full merged configuration into Smarts3Server (config.logging can control logging behavior).
- Server start/stop messages and internal request/error logs are emitted via the Logger and respect the configured logging level/format and silent option.
## 2025-11-23 - 3.0.4 - fix(smarts3)
Use filesystem store for bucket creation and remove smartbucket runtime dependency
- Switched createBucket to call the internal FilesystemStore.createBucket instead of using @push.rocks/smartbucket
- Made Smarts3Server.store public so Smarts3 can access the filesystem store directly
- Removed runtime import/export of @push.rocks/smartbucket from plugins and moved @push.rocks/smartbucket to devDependencies in package.json
- Updated createBucket to return a simple { name } object after creating the bucket via the filesystem store
## 2025-11-23 - 3.0.3 - fix(filesystem)
Migrate filesystem implementation to @push.rocks/smartfs and add Web Streams handling
- Replace dependency @push.rocks/smartfile with @push.rocks/smartfs and update README references
- plugins: instantiate SmartFs with SmartFsProviderNode and export smartfs (remove direct fs export)
- Refactor FilesystemStore to use smartfs directory/file APIs for initialize, reset, list, read, write, copy and delete
- Implement Web Stream ↔ Node.js stream conversion for uploads/downloads (Readable.fromWeb and writer.write with Uint8Array)
- Persist and read metadata (.metadata.json) and cached MD5 (.md5) via smartfs APIs
- Update readme.hints and documentation to note successful migration and next steps
## 2025-11-21 - 3.0.2 - fix(smarts3)
Prepare patch release 3.0.2 — no code changes detected
- No source changes in the diff
- Bump patch version from 3.0.1 to 3.0.2 for maintenance/release bookkeeping
## 2025-11-21 - 3.0.1 - fix(readme)
Add Issue Reporting and Security section to README
- Add guidance to report bugs, issues, and security vulnerabilities via community.foss.global
- Inform developers how to sign a contribution agreement and get a code.foss.global account to submit pull requests
## 2025-11-21 - 3.0.0 - BREAKING CHANGE(Smarts3)
Remove legacy s3rver backend, simplify Smarts3 server API, and bump dependencies
- Remove legacy s3rver backend: s3rver and its types were removed from dependencies and are no longer exported from plugins.
- Simplify Smarts3 API: removed useCustomServer option; Smarts3 now always uses the built-in Smarts3Server (s3Instance is Smarts3Server) and stop() always calls Smarts3Server.stop().
- Update README to remove legacy s3rver compatibility mention.
- Dependency updates: bumped @push.rocks/smartbucket to ^4.3.0 and @push.rocks/smartxml to ^2.0.0 (major upgrades), removed s3rver/@types/s3rver, bumped @aws-sdk/client-s3 to ^3.937.0 and @git.zone/tstest to ^3.1.0.
## 2025-11-21 - 2.3.0 - feat(smarts3-server)
Introduce native custom S3 server implementation (Smarts3Server) with routing, middleware, context, filesystem store, controllers and XML utilities; add SmartXml and AWS SDK test; keep optional legacy s3rver backend.
- Add Smarts3Server: native, Node.js http-based S3-compatible server (ts/classes/smarts3-server.ts)
- New routing and middleware system: S3Router and MiddlewareStack for pattern matching and middleware composition (ts/classes/router.ts, ts/classes/middleware-stack.ts)
- Introduce request context and helpers: S3Context for parsing requests, sending responses and XML (ts/classes/context.ts)
- Filesystem-backed storage: FilesystemStore with bucket/object operations, streaming uploads, MD5 handling and Windows-safe key encoding (ts/classes/filesystem-store.ts)
- S3 error handling: S3Error class that maps S3 error codes and produces XML error responses (ts/classes/s3-error.ts)
- Controllers for service, bucket and object operations with S3-compatible XML responses and copy/range support (ts/controllers/*.ts)
- XML utilities and SmartXml integration for consistent XML generation/parsing (ts/utils/xml.utils.ts, ts/plugins.ts)
- Expose native plugins (http, crypto, url, fs) and SmartXml via plugins.ts
- ts/index.ts: add useCustomServer option, default to custom server, export Smarts3Server and handle start/stop for both custom and legacy backends
- Add AWS SDK v3 integration test (test/test.aws-sdk.node.ts) to validate compatibility
- package.json: add @aws-sdk/client-s3 devDependency and @push.rocks/smartxml dependency
- Documentation: readme.md updated to describe native custom server and legacy s3rver compatibility
## 2025-11-20 - 2.2.7 - fix(core)
Update dependencies, code style and project config; add pnpm overrides and ignore AI folders
- Bump devDependencies and runtime dependencies (@git.zone/*, @push.rocks/*, @tsclass/tsclass, s3rver) to newer compatible versions
- Add pnpm.overrides entry to package.json and normalize repository URL format
- Code style and formatting fixes in TypeScript sources (ts/index.ts, ts/00_commitinfo_data.ts): whitespace, trailing commas, parameter formatting and minor API-return typing preserved
- tsconfig.json: simplify compiler options and compact exclude list
- Update .gitignore to add AI-related folders (.claude/, .serena/) to avoid accidental commits
- Documentation and changelog formatting tweaks (readme.md, changelog.md, npmextra.json) — whitespace/newline cleanups and expanded changelog entries
## 2025-08-16 - 2.2.6 - fix(Smarts3)
Allow overriding S3 descriptor; update dependencies, test config and documentation
- ts/index.ts: getS3Descriptor now accepts an optional Partial<IS3Descriptor> to override defaults (backwards compatible)
- package.json: updated devDependencies and runtime dependency versions (tstest, smartpath, tsclass, s3rver, etc.) and added packageManager field
- package.json: expanded test script to run tstest with --web --verbose --logfile --timeout 60
- test/test.ts: test instance port changed to 3333
- readme.md: major rewrite and expansion of usage examples, API reference and guides
- added project config files: .claude/settings.local.json and .serena/project.yml
## 2024-11-06 - 2.2.5 - fix(ci)
Corrected docker image URLs in Gitea workflows to match the correct domain format.
- Updated IMAGE environment variable in .gitea/workflows/default_nottags.yaml
- Updated IMAGE environment variable in .gitea/workflows/default_tags.yaml
## 2024-11-06 - 2.2.4 - fix(core)
Improve code style and update dependencies
- Updated @push.rocks/tapbundle to version ^5.4.3 in package.json.
@@ -8,29 +178,34 @@ Improve code style and update dependencies
- Improved code consistency in ts/00_commitinfo_data.ts, ts/plugins.ts, and test/test.ts.
## 2024-11-06 - 2.2.3 - fix(core)
Fix endpoint address from 'localhost' to '127.0.0.1' for better compatibility in Smarts3.getS3Descriptor
- Corrected the endpoint address in Smarts3.getS3Descriptor to ensure proper functioning across different environments.
## 2024-11-06 - 2.2.2 - fix(core)
Fixed function call for fastPut in the test suite to ensure proper file upload handling.
- Updated dependencies in package.json to newer versions.
- Corrected the function call in test suite for file upload.
## 2024-10-26 - 2.2.1 - fix(core)
Fix import and typings for improved compatibility
- Corrected the type signature for `getS3Descriptor` to return `IS3Descriptor`.
- Fixed import structure and updated dependencies for consistent namespace usage across plugins.
## 2024-10-26 - 2.2.0 - feat(ci)
Migrate CI/CD workflow from GitLab CI to Gitea CI
- Added new Gitea CI workflows for both non-tag and tag-based pushes
- Removed existing GitLab CI configuration
## 2024-05-29 - 2.1.1 - Updates and minor changes
Updates and changes based on minor configuration improvements and organizational shifts.
- Updated description file.
@@ -39,22 +214,26 @@ Updates and changes based on minor configuration improvements and organizational
- Shifted to new organizational scheme.
## 2022-07-30 - 2.1.0 - Core improvements and fixes
Minor improvements and important core changes.
- Removed tslint from the core setup.
## 2022-07-30 - 2.0.2 - Bucket creation improvement
Enhanced file structure management.
- Improved bucket creation to store locally within the .nogit directory.
## 2022-04-14 - 2.0.0 to 2.0.1 - Structural updates and fixes
This release focused on core updates and structural changes.
- Reformatted the project structure.
- Core updates with minor fixes.
## 2021-12-20 - 1.0.10 - ESM Transition
Breaking changes and minor fixes, transitioning to ES Modules.
- BREAKING CHANGE: Transitioned core setup to ESM.

npmextra.json

@@ -1,5 +1,11 @@
{
-  "gitzone": {
+  "@git.zone/tsrust": {
+    "targets": [
+      "linux_amd64",
+      "linux_arm64"
+    ]
+  },
+  "@git.zone/cli": {
    "projectType": "npm",
    "module": {
      "githost": "code.foss.global",
@@ -27,13 +33,19 @@
        "CI/CD Integration",
        "Developer Onboarding"
      ]
+    },
+    "release": {
+      "registries": [
+        "https://verdaccio.lossless.digital",
+        "https://registry.npmjs.org"
+      ],
+      "accessLevel": "public"
    }
  },
-  "npmci": {
-    "npmGlobalTools": [],
-    "npmAccessLevel": "public"
-  },
-  "tsdoc": {
+  "@git.zone/tsdoc": {
    "legal": "\n## License and Legal Information\n\nThis repository contains open-source code that is licensed under the MIT License. A copy of the MIT License can be found in the [license](license) file within this repository. \n\n**Please note:** The MIT License does not grant permission to use the trade names, trademarks, service marks, or product names of the project, except as required for reasonable and customary use in describing the origin of the work and reproducing the content of the NOTICE file.\n\n### Trademarks\n\nThis project is owned and maintained by Task Venture Capital GmbH. The names and logos associated with Task Venture Capital GmbH and any related products or services are trademarks of Task Venture Capital GmbH and are not included within the scope of the MIT license granted herein. Use of these trademarks must comply with Task Venture Capital GmbH's Trademark Guidelines, and any usage must be approved in writing by Task Venture Capital GmbH.\n\n### Company Information\n\nTask Venture Capital GmbH \nRegistered at District court Bremen HRB 35230 HB, Germany\n\nFor any legal inquiries or if you require further information, please contact us via email at hello@task.vc.\n\nBy using this repository, you acknowledge that you have read this section, agree to comply with its terms, and understand that the licensing of the code does not imply endorsement by Task Venture Capital GmbH of any derivative works.\n"
+  },
+  "@ship.zone/szci": {
+    "npmGlobalTools": []
  }
}

package.json

@@ -1,6 +1,6 @@
{
  "name": "@push.rocks/smarts3",
-  "version": "2.2.4",
+  "version": "5.3.0",
  "private": false,
  "description": "A Node.js TypeScript package to create a local S3 endpoint for simulating AWS S3 operations using mapped local directories for development and testing purposes.",
  "main": "dist_ts/index.js",
@@ -9,16 +9,18 @@
  "author": "Lossless GmbH",
  "license": "MIT",
  "scripts": {
-    "test": "(tstest test/ --web)",
-    "build": "(tsbuild --web --allowimplicitany)",
+    "test": "(tstest test/ --web --verbose --logfile --timeout 60)",
+    "build": "(tsrust && tsbuild --web --allowimplicitany)",
    "buildDocs": "tsdoc"
  },
  "devDependencies": {
-    "@git.zone/tsbuild": "^2.2.0",
-    "@git.zone/tsbundle": "^2.1.0",
-    "@git.zone/tsrun": "^1.3.3",
-    "@git.zone/tstest": "^1.0.72",
-    "@push.rocks/tapbundle": "^5.4.3",
+    "@aws-sdk/client-s3": "^3.937.0",
+    "@git.zone/tsbuild": "^3.1.0",
+    "@git.zone/tsbundle": "^2.5.2",
+    "@git.zone/tsrun": "^2.0.0",
+    "@git.zone/tstest": "^3.1.0",
+    "@push.rocks/smartbucket": "^4.3.0",
+    "@git.zone/tsrust": "^1.3.0",
    "@types/node": "^22.9.0"
  },
  "browserslist": [
@@ -30,6 +32,7 @@
    "dist/**/*",
    "dist_*/**/*",
    "dist_ts/**/*",
+    "dist_rust/**/*",
    "dist_ts_web/**/*",
    "assets/**/*",
    "cli.js",
@@ -37,12 +40,9 @@
    "readme.md"
  ],
  "dependencies": {
-    "@push.rocks/smartbucket": "^3.0.23",
-    "@push.rocks/smartfile": "^11.0.21",
-    "@push.rocks/smartpath": "^5.0.5",
-    "@tsclass/tsclass": "^4.1.2",
-    "@types/s3rver": "^3.7.0",
-    "s3rver": "^3.7.1"
+    "@push.rocks/smartpath": "^6.0.0",
+    "@push.rocks/smartrust": "^1.0.0",
+    "@tsclass/tsclass": "^9.3.0"
  },
  "keywords": [
    "S3 Mock Server",
@@ -65,9 +65,13 @@
  "homepage": "https://code.foss.global/push.rocks/smarts3#readme",
  "repository": {
    "type": "git",
-    "url": "git+https://code.foss.global/push.rocks/smarts3.git"
+    "url": "https://code.foss.global/push.rocks/smarts3.git"
  },
  "bugs": {
    "url": "https://code.foss.global/push.rocks/smarts3/issues"
-  }
+  },
+  "packageManager": "pnpm@10.14.0+sha512.ad27a79641b49c3e481a16a805baa71817a04bbe06a38d17e60e2eaee83f6a146c6a688125f5792e48dd5ba30e7da52a5cda4c3992b9ccf333f9ce223af84748",
+  "pnpm": {
+    "overrides": {}
+  }
}

pnpm-lock.yaml (generated, 13416 lines changed)

File diff suppressed because it is too large.

production-readiness.md (new file, 438 lines)

@@ -0,0 +1,438 @@
# Production-Readiness Plan for smarts3
**Goal:** Make smarts3 production-ready as a MinIO alternative for use cases where:
- Running MinIO is out of scope
- You have a program written for S3 and want to use the local filesystem
- You need a lightweight, zero-dependency S3-compatible server
---
## 🔍 Current State Analysis
### ✅ What's Working
- **Native S3 server** with zero framework dependencies
- **Core S3 operations:** PUT, GET, HEAD, DELETE (objects & buckets)
- **List buckets and objects** (V1 and V2 API)
- **Object copy** with metadata handling
- **Range requests** for partial downloads
- **MD5 checksums** and ETag support
- **Custom metadata** (x-amz-meta-*)
- **Filesystem-backed storage** with Windows compatibility
- **S3-compatible XML error responses**
- **Middleware system** and routing
- **AWS SDK v3 compatibility** (tested)
### ❌ Production Gaps Identified
---
## 🎯 Critical Features (Required for Production)
### 1. Multipart Upload Support 🚀 **HIGHEST PRIORITY**
**Why:** Essential for uploading files >5MB efficiently. Without this, smarts3 can't handle real-world production workloads.
**Implementation Required:**
- `POST /:bucket/:key?uploads` - CreateMultipartUpload
- `PUT /:bucket/:key?partNumber=X&uploadId=Y` - UploadPart
- `POST /:bucket/:key?uploadId=X` - CompleteMultipartUpload
- `DELETE /:bucket/:key?uploadId=X` - AbortMultipartUpload
- `GET /:bucket/:key?uploadId=X` - ListParts
- Multipart state management (temp storage for parts)
- Part ETag tracking and validation
- Automatic cleanup of abandoned uploads
**Files to Create/Modify:**
- `ts/controllers/multipart.controller.ts` (new)
- `ts/classes/filesystem-store.ts` (add multipart methods)
- `ts/classes/smarts3-server.ts` (add multipart routes)
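To ground the state-management bullets above, here is a minimal sketch of what a part-tracking manager could look like. All names (`MultipartUploadManagerSketch`, `IMultipartUpload`) and the on-disk layout are hypothetical illustrations, not the eventual smarts3 API:
```typescript
// Hypothetical sketch only: names and on-disk layout are illustrative,
// not the actual smarts3 implementation.
import * as path from 'path';
import * as crypto from 'crypto';
import { promises as fs } from 'fs';

interface IMultipartUpload {
  uploadId: string;
  bucket: string;
  key: string;
  createdAt: number;
  parts: Map<number, { etag: string; size: number }>;
}

class MultipartUploadManagerSketch {
  private uploads = new Map<string, IMultipartUpload>();
  constructor(private tempDir: string) {}

  initiate(bucket: string, key: string): string {
    const uploadId = crypto.randomBytes(16).toString('hex');
    this.uploads.set(uploadId, { uploadId, bucket, key, createdAt: Date.now(), parts: new Map() });
    return uploadId;
  }

  async uploadPart(uploadId: string, partNumber: number, body: Buffer): Promise<string> {
    const upload = this.uploads.get(uploadId);
    if (!upload) throw new Error('NoSuchUpload');
    const partPath = path.join(this.tempDir, uploadId, `part-${partNumber}`);
    await fs.mkdir(path.dirname(partPath), { recursive: true });
    await fs.writeFile(partPath, body);
    const etag = crypto.createHash('md5').update(body).digest('hex'); // part ETag = MD5, as S3 does
    upload.parts.set(partNumber, { etag, size: body.length });
    return etag;
  }

  abort(uploadId: string): void {
    this.uploads.delete(uploadId); // part files would be removed here too
  }
}
```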
---
### 2. Configurable Authentication 🔐
**Why:** Credentials are currently hardcoded ('S3RVER'/'S3RVER'); production deployments need custom credentials.
**Implementation Required:**
- Support custom access keys and secrets via configuration
- Implement AWS Signature V4 verification
- Support multiple credential pairs (IAM-like users)
- Optional: Disable authentication for local dev use
**Configuration Example:**
```typescript
interface IAuthConfig {
enabled: boolean;
credentials: Array<{
accessKeyId: string;
secretAccessKey: string;
}>;
signatureVersion: 'v4' | 'none';
}
```
**Files to Create/Modify:**
- `ts/classes/auth-middleware.ts` (new)
- `ts/classes/signature-validator.ts` (new)
- `ts/classes/smarts3-server.ts` (integrate auth middleware)
- `ts/index.ts` (add auth config options)
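The signature check itself follows AWS's published SigV4 algorithm; the part every verifier must reproduce is the HMAC-SHA256 signing-key chain. A sketch of that fixed derivation (the verifier plumbing around it is omitted):
```typescript
import * as crypto from 'crypto';

// AWS-documented SigV4 signing-key derivation: a fixed HMAC-SHA256 chain.
const hmac = (key: crypto.BinaryLike, data: string): Buffer =>
  crypto.createHmac('sha256', key).update(data).digest();

function deriveSigningKey(secret: string, date: string, region: string, service: string): Buffer {
  const kDate = hmac('AWS4' + secret, date); // date in YYYYMMDD form
  const kRegion = hmac(kDate, region);       // e.g. 'us-east-1'
  const kService = hmac(kRegion, service);   // 's3'
  return hmac(kService, 'aws4_request');     // final signing key
}

// The request signature is HMAC(signingKey, stringToSign); compare it to the
// client-supplied signature in constant time to avoid timing leaks.
function signaturesMatch(expected: Buffer, actual: Buffer): boolean {
  return expected.length === actual.length && crypto.timingSafeEqual(expected, actual);
}
```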
---
### 3. CORS Support 🌐
**Why:** Required for browser-based uploads and modern web apps.
**Implementation Required:**
- Add CORS middleware
- Support preflight OPTIONS requests
- Configurable CORS origins, methods, headers
- Per-bucket CORS configuration (optional)
**Configuration Example:**
```typescript
interface ICorsConfig {
enabled: boolean;
allowedOrigins: string[]; // ['*'] or ['https://example.com']
allowedMethods: string[]; // ['GET', 'POST', 'PUT', 'DELETE']
allowedHeaders: string[]; // ['*'] or specific headers
exposedHeaders: string[]; // ['ETag', 'x-amz-*']
maxAge: number; // 3600 (seconds)
allowCredentials: boolean;
}
```
**Files to Create/Modify:**
- `ts/classes/cors-middleware.ts` (new)
- `ts/classes/smarts3-server.ts` (integrate CORS middleware)
- `ts/index.ts` (add CORS config options)
---
### 4. SSL/TLS Support 🔒
**Why:** Production systems require encrypted connections.
**Implementation Required:**
- HTTPS server option with cert/key configuration
- Auto-redirect HTTP to HTTPS (optional)
- Support for self-signed certs in dev mode
**Configuration Example:**
```typescript
interface ISslConfig {
enabled: boolean;
cert: string; // Path to certificate file or cert content
key: string; // Path to key file or key content
ca?: string; // Optional CA cert
redirectHttp?: boolean; // Redirect HTTP to HTTPS
}
```
**Files to Create/Modify:**
- `ts/classes/smarts3-server.ts` (add HTTPS server creation)
- `ts/index.ts` (add SSL config options)
---
### 5. Production Configuration System ⚙️
**Why:** Production needs flexible configuration, not just constructor options.
**Implementation Required:**
- Support configuration file (JSON/YAML)
- Environment variable support
- Configuration validation
- Sensible production defaults
- Example configurations for common use cases
**Configuration File Example (`smarts3.config.json`):**
```json
{
"server": {
"port": 3000,
"address": "0.0.0.0",
"ssl": {
"enabled": true,
"cert": "./certs/server.crt",
"key": "./certs/server.key"
}
},
"storage": {
"directory": "./s3-data",
"cleanSlate": false
},
"auth": {
"enabled": true,
"credentials": [
{
"accessKeyId": "AKIAIOSFODNN7EXAMPLE",
"secretAccessKey": "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
}
]
},
"cors": {
"enabled": true,
"allowedOrigins": ["*"],
"allowedMethods": ["GET", "POST", "PUT", "DELETE", "HEAD"],
"allowedHeaders": ["*"]
},
"limits": {
"maxObjectSize": 5368709120,
"maxMetadataSize": 2048,
"requestTimeout": 300000
},
"logging": {
"level": "info",
"format": "json",
"accessLog": {
"enabled": true,
"path": "./logs/access.log"
},
"errorLog": {
"enabled": true,
"path": "./logs/error.log"
}
}
}
```
**Files to Create/Modify:**
- `ts/classes/config-loader.ts` (new)
- `ts/classes/config-validator.ts` (new)
- `ts/index.ts` (use config loader)
- Create example config files in root
---
### 6. Production Logging 📝
**Why:** Console logs aren't suitable for production monitoring.
**Implementation Required:**
- Structured logging (JSON format option)
- Log levels (ERROR, WARN, INFO, DEBUG)
- File rotation support
- Access logs (S3 standard format)
- Integration with logging library
**Files to Create/Modify:**
- `ts/classes/logger.ts` (new - use @push.rocks/smartlog?)
- `ts/classes/access-logger-middleware.ts` (new)
- `ts/classes/smarts3-server.ts` (replace console.log with logger)
- All controller files (use structured logging)
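A minimal sketch of the leveled, text/JSON logger this section calls for (illustrative shape; the real class may differ):
```typescript
type LogLevel = 'error' | 'warn' | 'info' | 'debug';
const LEVELS: LogLevel[] = ['error', 'warn', 'info', 'debug'];

// Illustrative leveled logger with text/json output; not the final API.
class LoggerSketch {
  constructor(
    private level: LogLevel = 'info',
    private format: 'text' | 'json' = 'text',
  ) {}

  private log(level: LogLevel, message: string, meta: Record<string, unknown> = {}): void {
    if (LEVELS.indexOf(level) > LEVELS.indexOf(this.level)) return; // below threshold
    const entry = { time: new Date().toISOString(), level, message, ...meta };
    console.log(this.format === 'json'
      ? JSON.stringify(entry)
      : `${entry.time} [${level.toUpperCase()}] ${message}`);
  }

  info(message: string, meta?: Record<string, unknown>) { this.log('info', message, meta); }
  error(message: string, meta?: Record<string, unknown>) { this.log('error', message, meta); }
}
```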
---
## 🔧 Important Features (Should Have)
### 7. Health Check & Metrics 💊
**Implementation Required:**
- `GET /_health` endpoint (non-S3, for monitoring)
- `GET /_metrics` endpoint (Prometheus format?)
- Server stats (requests/sec, storage used, uptime)
- Readiness/liveness probes for Kubernetes
**Files to Create/Modify:**
- `ts/controllers/health.controller.ts` (new)
- `ts/classes/metrics-collector.ts` (new)
- `ts/classes/smarts3-server.ts` (add health routes)
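A sketch of the `/_health` handler; the response shape here is a proposal, not a settled format:
```typescript
import * as http from 'http';

// Proposed /_health response; fields are illustrative.
function handleHealth(res: http.ServerResponse, startedAtMs: number): void {
  const body = JSON.stringify({
    status: 'ok',
    uptimeSeconds: Math.floor((Date.now() - startedAtMs) / 1000),
  });
  res.writeHead(200, { 'Content-Type': 'application/json' });
  res.end(body);
}
```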
---
### 8. Batch Operations 📦
**Implementation Required:**
- `POST /:bucket?delete` - DeleteObjects (delete multiple objects in one request)
- Essential for efficient cleanup operations
**Files to Create/Modify:**
- `ts/controllers/object.controller.ts` (add deleteObjects method)
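From the client side this is the standard `DeleteObjectsCommand` of AWS SDK v3, which is the request shape the new handler would have to serve:
```typescript
import { S3Client, DeleteObjectsCommand } from '@aws-sdk/client-s3';

// Standard AWS SDK v3 batch delete against a local endpoint.
const client = new S3Client({
  endpoint: 'http://localhost:3000',
  region: 'us-east-1',
  forcePathStyle: true,
  credentials: { accessKeyId: 'S3RVER', secretAccessKey: 'S3RVER' },
});

await client.send(new DeleteObjectsCommand({
  Bucket: 'my-bucket',
  Delete: {
    Objects: [{ Key: 'old/a.txt' }, { Key: 'old/b.txt' }],
    Quiet: true, // report only failures in the response
  },
}));
```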
---
### 9. Request Size Limits & Validation 🛡️
**Implementation Required:**
- Max object size configuration
- Max metadata size limits
- Request timeout configuration
- Body size limits
- Bucket name validation (S3 rules)
- Key name validation
**Files to Create/Modify:**
- `ts/classes/validation-middleware.ts` (new)
- `ts/utils/validators.ts` (new)
- `ts/classes/smarts3-server.ts` (integrate validation middleware)
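Bucket name validation can follow the published S3 rules directly; a sketch:
```typescript
// S3 bucket naming rules (per AWS documentation): 3-63 characters,
// lowercase letters, digits, dots and hyphens, starting and ending with
// a letter or digit, and not shaped like an IPv4 address.
function isValidBucketName(name: string): boolean {
  if (name.length < 3 || name.length > 63) return false;
  if (!/^[a-z0-9][a-z0-9.-]*[a-z0-9]$/.test(name)) return false;
  if (/^\d{1,3}(\.\d{1,3}){3}$/.test(name)) return false; // no IP-style names
  if (name.includes('..')) return false; // no consecutive dots
  return true;
}
```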
---
### 10. Conditional Requests 🔄
**Implementation Required:**
- If-Match / If-None-Match (ETag validation)
- If-Modified-Since / If-Unmodified-Since
- Required for caching and conflict prevention
**Files to Create/Modify:**
- `ts/controllers/object.controller.ts` (add conditional logic to GET/HEAD)
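The pre-check reduces to a small function run before serving the object body; a simplified sketch (weak ETags and the full header-precedence rules of RFC 9110 are ignored here):
```typescript
// Simplified conditional-request pre-check: returns a status code to
// short-circuit with, or null to serve the object normally.
function checkConditional(
  headers: Record<string, string | undefined>,
  etag: string,
  lastModified: Date,
): number | null {
  const ifMatch = headers['if-match'];
  if (ifMatch && ifMatch !== '*' && !ifMatch.split(',').map((s) => s.trim()).includes(etag)) {
    return 412; // Precondition Failed
  }
  const ifNoneMatch = headers['if-none-match'];
  if (ifNoneMatch && (ifNoneMatch === '*' || ifNoneMatch.split(',').map((s) => s.trim()).includes(etag))) {
    return 304; // Not Modified (for GET/HEAD)
  }
  const ifModifiedSince = headers['if-modified-since'];
  if (!ifNoneMatch && ifModifiedSince && lastModified.getTime() <= Date.parse(ifModifiedSince)) {
    return 304;
  }
  return null;
}
```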
---
### 11. Graceful Shutdown 👋
**Implementation Required:**
- Drain existing connections
- Reject new connections
- Clean multipart cleanup on shutdown
- SIGTERM/SIGINT handling
**Files to Create/Modify:**
- `ts/classes/smarts3-server.ts` (add graceful shutdown logic)
- `ts/index.ts` (add signal handlers)
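A sketch of the signal wiring; the 10-second deadline is an arbitrary example value:
```typescript
import * as http from 'http';

// Stop accepting new connections, let in-flight requests drain, then
// force-exit after a hard deadline.
function installShutdownHandlers(server: http.Server): void {
  const shutdown = (signal: string) => {
    console.log(`${signal} received, draining connections...`);
    server.close(() => process.exit(0)); // fires once existing sockets finish
    setTimeout(() => process.exit(1), 10_000).unref(); // hard deadline: 10s
  };
  process.on('SIGTERM', () => shutdown('SIGTERM'));
  process.on('SIGINT', () => shutdown('SIGINT'));
}
```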
---
## 💡 Nice-to-Have Features
### 12. Advanced Features
- Bucket versioning support
- Object tagging
- Lifecycle policies (auto-delete old objects)
- Storage class simulation (STANDARD, GLACIER, etc.)
- Server-side encryption simulation
- Presigned URL support (for time-limited access)
### 13. Performance Optimizations
- Stream optimization for large files
- Optional in-memory caching for small objects
- Parallel upload/download support
- Compression support (gzip)
### 14. Developer Experience
- Docker image for easy deployment
- Docker Compose examples
- Kubernetes manifests
- CLI for server management
- Admin API for bucket management
---
## 📐 Implementation Phases
### Phase 1: Critical Production Features (Priority 1)
**Estimated Effort:** 2-3 weeks
1. ✅ Multipart uploads (biggest technical lift)
2. ✅ Configurable authentication
3. ✅ CORS middleware
4. ✅ Production configuration system
5. ✅ Production logging
**Outcome:** smarts3 can handle real production workloads
---
### Phase 2: Reliability & Operations (Priority 2)
**Estimated Effort:** 1-2 weeks
6. ✅ SSL/TLS support
7. ✅ Health checks & metrics
8. ✅ Request validation & limits
9. ✅ Graceful shutdown
10. ✅ Batch operations
**Outcome:** smarts3 is operationally mature
---
### Phase 3: S3 Compatibility (Priority 3)
**Estimated Effort:** 1-2 weeks
11. ✅ Conditional requests
12. ✅ Additional S3 features as needed
13. ✅ Comprehensive test suite
14. ✅ Documentation updates
**Outcome:** smarts3 has broad S3 API compatibility
---
### Phase 4: Polish (Priority 4)
**Estimated Effort:** As needed
15. ✅ Docker packaging
16. ✅ Performance optimization
17. ✅ Advanced features based on user feedback
**Outcome:** smarts3 is a complete MinIO alternative
---
## 🤔 Open Questions
1. **Authentication:** Do you want full AWS Signature V4 validation, or simpler static credential checking?
2. **Configuration:** Prefer JSON, YAML, or .env file format?
3. **Logging:** Do you have a preferred logging library, or shall I use @push.rocks/smartlog?
4. **Scope:** Should we tackle all of Phase 1, or start with a subset (e.g., just multipart + auth)?
5. **Testing:** Should we add comprehensive tests as we go, or batch them at the end?
6. **Breaking changes:** Can I modify the constructor options interface, or must it remain backward compatible?
---
## 🎯 Target Use Cases
**With this plan implemented, smarts3 will be a solid MinIO alternative for:**
- **Local S3 development** - Fast, simple, no Docker required
- **Testing S3 integrations** - Reliable, repeatable tests
- **Microservices using S3 API** with filesystem backend
- **CI/CD pipelines** - Lightweight S3 for testing
- **Small-to-medium production deployments** where MinIO is overkill
- **Edge computing** - S3 API for local file storage
- **Embedded systems** - Minimal dependencies, small footprint
---
## 📊 Current vs. Production Comparison
| Feature | Current | After Phase 1 | After Phase 2 | Production Ready |
|---------|---------|---------------|---------------|------------------|
| Basic S3 ops | ✅ | ✅ | ✅ | ✅ |
| Multipart upload | ❌ | ✅ | ✅ | ✅ |
| Authentication | ⚠️ (hardcoded) | ✅ | ✅ | ✅ |
| CORS | ❌ | ✅ | ✅ | ✅ |
| SSL/TLS | ❌ | ❌ | ✅ | ✅ |
| Config files | ❌ | ✅ | ✅ | ✅ |
| Production logging | ⚠️ (console) | ✅ | ✅ | ✅ |
| Health checks | ❌ | ❌ | ✅ | ✅ |
| Request limits | ❌ | ❌ | ✅ | ✅ |
| Graceful shutdown | ❌ | ❌ | ✅ | ✅ |
| Conditional requests | ❌ | ❌ | ❌ | ✅ |
| Batch operations | ❌ | ❌ | ✅ | ✅ |
---
## 📝 Notes
- All features should maintain backward compatibility where possible
- Each feature should include comprehensive tests
- Documentation (readme.md) should be updated as features are added
- Consider adding a migration guide for users upgrading from testing to production use
- Performance benchmarks should be established and maintained
---
**Last Updated:** 2025-11-23
**Status:** Planning Phase
**Next Step:** Get approval and prioritize implementation order

readme.hints.md

@@ -1 +1,69 @@
# Project Hints for smarts3
## Current State (v6.0.0-dev)
- **Rust-powered S3 server** via `@push.rocks/smartrust` IPC bridge
- High-performance: streaming I/O, zero-copy, backpressure, range seek
- TypeScript is thin IPC wrapper; all HTTP/storage/routing in Rust binary `rusts3`
- Full S3 compatibility: PUT, GET, HEAD, DELETE for objects and buckets
- Multipart upload support (streaming, no OOM)
- **Real AWS SigV4 authentication** (cryptographic signature verification)
- **Bucket policies** (AWS/MinIO-compatible JSON policies, public access support)
- CORS support
- ListBuckets, ListObjects (v1/v2), CopyObject
## Architecture
### Rust Binary (`rust/src/`)
- `main.rs` - Clap CLI, management mode entry
- `config.rs` - Serde config structs matching TS interfaces (includes `region`)
- `management.rs` - IPC loop (newline-delimited JSON over stdin/stdout)
- `server.rs` - hyper 1.x HTTP server, routing, CORS, auth+policy pipeline, all S3 handlers
- `storage.rs` - FileStore: filesystem-backed storage, multipart manager, `.policies/` dir
- `xml_response.rs` - S3 XML response builders
- `s3_error.rs` - S3 error codes with HTTP status mapping
- `auth.rs` - AWS SigV4 signature verification (HMAC-SHA256, clock skew, constant-time compare)
- `action.rs` - S3Action enum + request-to-IAM-action resolver + RequestContext
- `policy.rs` - BucketPolicy model, evaluation engine (Deny > Allow > NoOpinion), PolicyStore (RwLock cache + disk)
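The evaluation order in `policy.rs` (explicit Deny beats Allow, which beats no opinion) reduces to a short fold over the matching statements; a TypeScript sketch of just that rule, with statement matching elided:
```typescript
// Sketch of the Deny > Allow > NoOpinion ordering; statement matching
// (principal/action/resource wildcards) is elided.
type Decision = 'Allow' | 'Deny' | 'NoOpinion';

function evaluate(matchingEffects: Array<'Allow' | 'Deny'>): Decision {
  if (matchingEffects.includes('Deny')) return 'Deny'; // explicit deny always wins
  if (matchingEffects.includes('Allow')) return 'Allow';
  return 'NoOpinion'; // caller applies the default (anonymous requests are denied)
}
```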
### TypeScript Bridge (`ts/`)
- `ts/index.ts` - Smarts3 class with RustBridge<TRustS3Commands>
- `ts/plugins.ts` - path, smartpath, RustBridge, tsclass
- `ts/paths.ts` - packageDir, bucketsDir defaults
### IPC Commands
| Command | Params | Action |
|---------|--------|--------|
| `start` | `{ config: ISmarts3Config }` | Init storage + HTTP server |
| `stop` | `{}` | Graceful shutdown |
| `createBucket` | `{ name: string }` | Create bucket directory |
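For orientation, `start` and `createBucket` calls on the wire could look like the lines below. The newline-delimited JSON transport is documented above, but the exact envelope field names are defined by `@push.rocks/smartrust` and are assumed here, not verified:
```typescript
import { spawn } from 'child_process';

// Hypothetical frames; the 'command'/'params' field names are an assumption.
const child = spawn('./dist_rust/rusts3', []);
const startFrame = JSON.stringify({
  command: 'start',
  params: { config: { server: { port: 3000 }, storage: { cleanSlate: true } } },
});
const createBucketFrame = JSON.stringify({
  command: 'createBucket',
  params: { name: 'my-bucket' },
});
// One JSON document per line on the child's stdin; responses arrive the
// same way on its stdout.
child.stdin.write(startFrame + '\n');
child.stdin.write(createBucketFrame + '\n');
```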
### Storage Layout (backward-compatible)
- Objects: `{root}/{bucket}/{key}._S3_object`
- Metadata: `{root}/{bucket}/{key}._S3_object.metadata.json`
- MD5: `{root}/{bucket}/{key}._S3_object.md5`
- Multipart: `{root}/.multipart/{upload_id}/part-{N}`
- Policies: `{root}/.policies/{bucket}.policy.json`
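As a concrete example, a policy persisted at `{root}/.policies/my-bucket.policy.json` that grants anonymous read access would use the standard AWS/MinIO policy shape (illustrative document, not copied from the repo):
```typescript
// Illustrative bucket policy document in the standard AWS policy shape.
const publicReadPolicy = {
  Version: '2012-10-17',
  Statement: [
    {
      Effect: 'Allow',
      Principal: '*',
      Action: ['s3:GetObject'],
      Resource: ['arn:aws:s3:::my-bucket/*'],
    },
  ],
};
```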
## Build
- `pnpm build` runs `tsrust && tsbuild --web --allowimplicitany`
- `tsrust` compiles Rust to `dist_rust/rusts3`
- Targets: linux_amd64, linux_arm64 (configured in npmextra.json)
## Dependencies
- `@push.rocks/smartrust` - RustBridge IPC bridge
- `@push.rocks/smartpath` - Path utilities
- `@tsclass/tsclass` - IS3Descriptor type
- `@git.zone/tsrust` (devDep) - Rust cross-compilation
## Testing
- `test/test.aws-sdk.node.ts` - AWS SDK v3 compatibility (10 tests, auth disabled, port 3337)
- `test/test.auth.node.ts` - Auth + bucket policy integration (20 tests, auth enabled, port 3344)
- `test/test.policy-crud.node.ts` - Policy API CRUD + validation edge cases (17 tests, port 3345)
- `test/test.policy-eval.node.ts` - Policy evaluation: principals, actions, resources, deny-vs-allow (22 tests, port 3346)
- `test/test.policy-actions.node.ts` - Per-action policy enforcement (15 tests, port 3347)
- `test/test.ts` - SmartBucket integration (3 tests)
- Run: `pnpm test` or `tstest test/test.aws-sdk.node.ts --verbose`

readme.md (514 lines changed)

@@ -1,219 +1,451 @@
````markdown # @push.rocks/smarts3 🚀
# @push.rocks/smarts3
A Node.js TypeScript package to create a local S3 endpoint for development and testing using mapped local directories, simulating AWS S3. A high-performance, S3-compatible local server powered by a **Rust core** with a clean TypeScript API. Drop-in replacement for AWS S3 during development and testing — no cloud, no Docker, no MinIO. Just `npm install` and go.
## Install ## Issue Reporting and Security
To integrate `@push.rocks/smarts3` with your project, you need to install it via npm. Execute the following command within your project's root directory: For reporting bugs, issues, or security vulnerabilities, please visit [community.foss.global/](https://community.foss.global/). This is the central community hub for all issue reporting. Developers who sign and comply with our contribution agreement and go through identification can also get a [code.foss.global/](https://code.foss.global/) account to submit Pull Requests directly.
```sh ## 🌟 Why smarts3?
npm install @push.rocks/smarts3 --save
```
````
This command will add `@push.rocks/smarts3` as a dependency in your project's `package.json` file and download the package into the `node_modules` directory. | Feature | smarts3 | MinIO | s3rver |
|---------|---------|-------|--------|
| Install | `pnpm add` | Docker / binary | `npm install` |
| Startup time | ~20ms | seconds | ~200ms |
| Large file uploads | ✅ Streaming, zero-copy | ✅ | ❌ OOM risk |
| Range requests | ✅ Seek-based | ✅ | ❌ Full read |
| Language | Rust + TypeScript | Go | JavaScript |
| Multipart uploads | ✅ Full support | ✅ | ❌ |
| Auth | ✅ AWS SigV4 (full verification) | Full IAM | Basic |
| Bucket policies | ✅ IAM-style evaluation | ✅ | ❌ |
## Usage ### Core Features
### Overview -**Rust-powered HTTP server** — hyper 1.x with streaming I/O, zero-copy, backpressure
- 🔄 **Full S3 API compatibility** — works with AWS SDK v3, SmartBucket, any S3 client
- 📂 **Filesystem-backed storage** — buckets map to directories, objects to files
- 📤 **Streaming multipart uploads** — large files without memory pressure
- 🎯 **Byte-range requests**`seek()` directly to the requested byte offset
- 🔐 **AWS SigV4 authentication** — full signature verification with constant-time comparison and 15-min clock skew enforcement
- 📜 **Bucket policies** — IAM-style JSON policies with Allow/Deny evaluation, wildcard matching, and anonymous access support
- 🌐 **CORS middleware** — configurable cross-origin support
- 📊 **Structured logging** — tracing-based, error through debug levels
- 🧹 **Clean slate mode** — wipe storage on startup for test isolation
- 🧪 **Test-first design** — start/stop in milliseconds, no port conflicts
The `@push.rocks/smarts3` module allows users to create a mock S3 endpoint that maps to a local directory using `s3rver`. This simulation of AWS S3 operations facilitates development and testing by enabling file uploads, bucket creation, and other interactions locally. This local setup is ideal for developers looking to test cloud file storage operations without requiring access to a real AWS S3 instance. ## 📦 Installation
In this comprehensive guide, we will explore setting up a local S3 server, performing operations like creating buckets and uploading files, and how to effectively integrate this into your development workflow. ```bash
pnpm add @push.rocks/smarts3 -D
### Setting Up the Environment
To begin any operations, your environment must be configured correctly. Heres a simple setup procedure:
```typescript
import * as path from 'path';
import { promises as fs } from 'fs';
async function setupEnvironment() {
const packageDir = path.resolve();
const nogitDir = path.join(packageDir, './.nogit');
const bucketsDir = path.join(nogitDir, 'bucketsDir');
try {
await fs.mkdir(bucketsDir, { recursive: true });
} catch (error) {
console.error('Failed to create buckets directory!', error);
throw error;
}
console.log('Environment setup complete.');
}
setupEnvironment().catch(console.error);
``` ```
This script sets up a directory structure required for the `smarts3` server, ensuring that the directories needed for bucket storage exist before starting the server. > **Note:** The package ships with precompiled Rust binaries for `linux_amd64` and `linux_arm64`. No Rust toolchain needed on your machine.
### Starting the S3 Server ## 🚀 Quick Start
Once your environment is set up, start an instance of the `smarts3` server. This acts as your local mock S3 endpoint:
```typescript ```typescript
import { Smarts3 } from '@push.rocks/smarts3'; import { Smarts3 } from '@push.rocks/smarts3';
async function startServer() { // Start a local S3 server
const smarts3Instance = await Smarts3.createAndStart({ const s3 = await Smarts3.createAndStart({
port: 3000, server: { port: 3000 },
cleanSlate: true, storage: { cleanSlate: true },
}); });
console.log('S3 server is up and running at http://localhost:3000'); // Create a bucket
return smarts3Instance; await s3.createBucket('my-bucket');
}
startServer().catch(console.error); // Get connection details for any S3 client
const descriptor = await s3.getS3Descriptor();
// → { endpoint: 'localhost', port: 3000, accessKey: 'S3RVER', accessSecret: 'S3RVER', useSsl: false }
// When done
await s3.stop();
``` ```
**Parameters:** ## 📖 Configuration
- **Port**: Specify the port for the local S3 server. Defaults to `3000`. All config fields are optional sensible defaults are applied automatically.
- **CleanSlate**: If `true`, clears the storage directory each time the server starts, providing a fresh test state.
### Creating and Managing Buckets
With your server running, create buckets for storing files. A bucket in S3 acts similarly to a root directory.
```typescript ```typescript
async function createBucket(smarts3Instance: Smarts3, bucketName: string) { import { Smarts3, ISmarts3Config } from '@push.rocks/smarts3';
const bucket = await smarts3Instance.createBucket(bucketName);
console.log(`Bucket created: ${bucket.name}`);
}
startServer() const config: ISmarts3Config = {
.then((smarts3Instance) => createBucket(smarts3Instance, 'my-awesome-bucket')) server: {
.catch(console.error); port: 3000, // Default: 3000
address: '0.0.0.0', // Default: '0.0.0.0'
silent: false, // Default: false
region: 'us-east-1', // Default: 'us-east-1' — used for SigV4 signing
},
storage: {
directory: './my-data', // Default: .nogit/bucketsDir
cleanSlate: false, // Default: false — set true to wipe on start
},
auth: {
enabled: false, // Default: false
credentials: [{
accessKeyId: 'MY_KEY',
secretAccessKey: 'MY_SECRET',
}],
},
cors: {
enabled: false, // Default: false
allowedOrigins: ['*'],
allowedMethods: ['GET', 'POST', 'PUT', 'DELETE', 'HEAD', 'OPTIONS'],
allowedHeaders: ['*'],
exposedHeaders: ['ETag', 'x-amz-request-id', 'x-amz-version-id'],
maxAge: 86400,
allowCredentials: false,
},
logging: {
level: 'info', // 'error' | 'warn' | 'info' | 'debug'
format: 'text', // 'text' | 'json'
enabled: true,
},
limits: {
maxObjectSize: 5 * 1024 * 1024 * 1024, // 5 GB
maxMetadataSize: 2048,
requestTimeout: 300000, // 5 minutes
},
multipart: {
expirationDays: 7,
cleanupIntervalMinutes: 60,
},
};
const s3 = await Smarts3.createAndStart(config);
``` ```
### Uploading and Managing Files ### Common Configurations
Uploading files to a bucket uses the `SmartBucket` module, part of the `@push.rocks/smartbucket` ecosystem: **CI/CD testing** — silent, clean, fast:
```typescript
const s3 = await Smarts3.createAndStart({
server: { port: 9999, silent: true },
storage: { cleanSlate: true },
});
```
**Auth enabled:**
```typescript
const s3 = await Smarts3.createAndStart({
auth: {
enabled: true,
credentials: [{ accessKeyId: 'test', secretAccessKey: 'test123' }],
},
});
```
**CORS for local web dev:**
```typescript
const s3 = await Smarts3.createAndStart({
cors: {
enabled: true,
allowedOrigins: ['http://localhost:5173'],
allowCredentials: true,
},
});
```
## 📤 Usage with AWS SDK v3
```typescript
import { S3Client, PutObjectCommand, GetObjectCommand, DeleteObjectCommand } from '@aws-sdk/client-s3';
const descriptor = await s3.getS3Descriptor();
const client = new S3Client({
endpoint: `http://${descriptor.endpoint}:${descriptor.port}`,
region: 'us-east-1',
credentials: {
accessKeyId: descriptor.accessKey,
secretAccessKey: descriptor.accessSecret,
},
forcePathStyle: true, // Required for path-style S3
});
// Upload
await client.send(new PutObjectCommand({
Bucket: 'my-bucket',
Key: 'hello.txt',
Body: 'Hello, S3!',
ContentType: 'text/plain',
}));
// Download
const { Body } = await client.send(new GetObjectCommand({
Bucket: 'my-bucket',
Key: 'hello.txt',
}));
const content = await Body.transformToString(); // "Hello, S3!"
// Delete
await client.send(new DeleteObjectCommand({
Bucket: 'my-bucket',
Key: 'hello.txt',
}));
```
## 🪣 Usage with SmartBucket
```typescript
import { SmartBucket } from '@push.rocks/smartbucket';

const smartbucket = new SmartBucket(await s3.getS3Descriptor());
const bucket = await smartbucket.createBucket('my-bucket');
const dir = await bucket.getBaseDirectory();

// Upload
await dir.fastPut({ path: 'docs/readme.txt', contents: 'Hello!' });

// Download
const content = await dir.fastGet('docs/readme.txt');

// List
const files = await dir.listFiles();
```
## 📤 Multipart Uploads

For files larger than 5 MB, use multipart uploads. smarts3 handles them with **streaming I/O** — parts are written directly to disk, never buffered in memory.
```typescript
import {
  CreateMultipartUploadCommand,
  UploadPartCommand,
  CompleteMultipartUploadCommand,
} from '@aws-sdk/client-s3';

// 1. Initiate
const { UploadId } = await client.send(new CreateMultipartUploadCommand({
  Bucket: 'my-bucket',
  Key: 'large-file.bin',
}));

// 2. Upload parts (chunks is your application's array of part buffers)
const parts = [];
for (let i = 0; i < chunks.length; i++) {
  const { ETag } = await client.send(new UploadPartCommand({
    Bucket: 'my-bucket',
    Key: 'large-file.bin',
    UploadId,
    PartNumber: i + 1,
    Body: chunks[i],
  }));
  parts.push({ PartNumber: i + 1, ETag });
}

// 3. Complete
await client.send(new CompleteMultipartUploadCommand({
  Bucket: 'my-bucket',
  Key: 'large-file.bin',
  UploadId,
  MultipartUpload: { Parts: parts },
}));
```
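If an upload fails midway, abort it so its parts are reclaimed immediately instead of waiting for the periodic cleanup configured under `multipart`. A minimal sketch using the same client and names as above:

```typescript
import { AbortMultipartUploadCommand } from '@aws-sdk/client-s3';

// Frees the parts stored under .multipart/{upload-id} on disk
await client.send(new AbortMultipartUploadCommand({
  Bucket: 'my-bucket',
  Key: 'large-file.bin',
  UploadId,
}));
```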
## 📜 Bucket Policies

smarts3 supports AWS-style bucket policies for fine-grained access control. Policies use the same IAM JSON format as real S3 — so you can develop and test your policy logic locally before deploying.
When `auth.enabled` is `true`, the auth pipeline works as follows:
1. **Authenticate** — verify the AWS SigV4 signature (anonymous requests skip this step)
2. **Authorize** — evaluate bucket policies against the request action, resource, and caller identity
3. **Default** — authenticated users get full access; anonymous requests are denied unless a policy explicitly allows them
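For example, with auth enabled and no policy attached to the bucket, an anonymous request is denied while a signed one succeeds. A minimal sketch (bucket and key are illustrative):

```typescript
// Anonymous GET: no Authorization header, so no identity is established and,
// absent a policy that allows it, the default in step 3 denies the request.
const anon = await fetch('http://localhost:3000/my-bucket/hello.txt');
console.log(anon.status); // 403 (AccessDenied) until a policy allows anonymous reads

// The same object fetched through a SigV4-signing client (e.g. the AWS SDK
// client configured in the section above) authenticates in step 1 and, by
// the default in step 3, gets full access.
```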
### Setting a Bucket Policy
Use the S3 `PutBucketPolicy` API (or any S3 client that supports it):
```typescript
import { PutBucketPolicyCommand } from '@aws-sdk/client-s3';

// Allow anonymous read access to all objects in a bucket
await client.send(new PutBucketPolicyCommand({
  Bucket: 'public-assets',
  Policy: JSON.stringify({
    Version: '2012-10-17',
    Statement: [{
      Sid: 'PublicRead',
      Effect: 'Allow',
      Principal: '*',
      Action: ['s3:GetObject'],
      Resource: ['arn:aws:s3:::public-assets/*'],
    }],
  }),
}));
```
### Policy Features

- **Effect**: `Allow` and `Deny` (explicit Deny always wins)
- **Principal**: `"*"` (everyone) or `{ "AWS": ["arn:..."] }` for specific identities
- **Action**: IAM-style actions like `s3:GetObject`, `s3:PutObject`, `s3:*`, or prefix wildcards like `s3:Get*`
- **Resource**: ARN patterns with `*` and `?` wildcards (e.g. `arn:aws:s3:::my-bucket/*`)
- **Persistence**: Policies survive server restarts — stored as JSON on disk alongside your data
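For instance, a policy that grants one access key read and write access while explicitly denying deletes for everyone might look like this (a sketch; `MY_KEY` is illustrative, and principals are matched against the caller's access key ID):

```typescript
const policy = {
  Version: '2012-10-17',
  Statement: [
    {
      Sid: 'WriterOnly',
      Effect: 'Allow',
      Principal: { AWS: ['MY_KEY'] }, // matched against the caller's access key ID
      Action: ['s3:PutObject', 's3:Get*'],
      Resource: ['arn:aws:s3:::my-bucket/*'],
    },
    {
      Sid: 'NoDeletes',
      Effect: 'Deny', // explicit Deny wins over any Allow
      Principal: '*',
      Action: ['s3:DeleteObject'],
      Resource: ['arn:aws:s3:::my-bucket/*'],
    },
  ],
};
```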
### Policy CRUD Operations

| Operation | AWS SDK Command | HTTP |
|-----------|----------------|------|
| Get policy | `GetBucketPolicyCommand` | `GET /{bucket}?policy` |
| Set policy | `PutBucketPolicyCommand` | `PUT /{bucket}?policy` |
| Delete policy | `DeleteBucketPolicyCommand` | `DELETE /{bucket}?policy` |

Deleting a bucket automatically removes its associated policy.
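The other two calls, sketched with the AWS SDK client from earlier (bucket name as in the example above):

```typescript
import { GetBucketPolicyCommand, DeleteBucketPolicyCommand } from '@aws-sdk/client-s3';

// Read the policy back; the SDK returns it as a JSON string
const { Policy } = await client.send(
  new GetBucketPolicyCommand({ Bucket: 'public-assets' }),
);
console.log(JSON.parse(Policy!).Statement);

// Delete it; subsequent anonymous requests fall back to the default deny
await client.send(new DeleteBucketPolicyCommand({ Bucket: 'public-assets' }));
```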
## 🧪 Testing Integration

```typescript
import { Smarts3 } from '@push.rocks/smarts3';
import { tap, expect } from '@git.zone/tstest/tapbundle';

let s3: Smarts3;

tap.test('setup', async () => {
  s3 = await Smarts3.createAndStart({
    server: { port: 4567, silent: true },
    storage: { cleanSlate: true },
  });
});

tap.test('should store and retrieve objects', async () => {
  await s3.createBucket('test');
  // ... your test logic using AWS SDK or SmartBucket
});

tap.test('teardown', async () => {
  await s3.stop();
});

export default tap.start();
```
## 🔧 API Reference
### `Smarts3` Class
#### `static createAndStart(config?: ISmarts3Config): Promise<Smarts3>`
Create and start a server in one call.
#### `start(): Promise<void>`
Spawn the Rust binary and start the HTTP server.
#### `stop(): Promise<void>`
Gracefully stop the server and kill the Rust process.
#### `createBucket(name: string): Promise<{ name: string }>`
Create an S3 bucket.
#### `getS3Descriptor(options?): Promise<IS3Descriptor>`
Get connection details for S3 clients. Returns:
| Field | Type | Description |
|-------|------|-------------|
| `endpoint` | `string` | Server hostname (`localhost` by default) |
| `port` | `number` | Server port |
| `accessKey` | `string` | Access key from first configured credential |
| `accessSecret` | `string` | Secret key from first configured credential |
| `useSsl` | `boolean` | Always `false` (plain HTTP) |
## 🏗️ Architecture
smarts3 uses a **hybrid Rust + TypeScript** architecture:
```
┌─────────────────────────────────┐
│ Your Code (AWS SDK, etc.) │
│ ↕ HTTP (localhost:3000) │
├─────────────────────────────────┤
│ rusts3 binary (Rust) │
│ ├─ hyper 1.x HTTP server │
│ ├─ S3 path-style routing │
│ ├─ Streaming storage layer │
│ ├─ Multipart manager │
│ ├─ SigV4 auth + policy engine │
│ ├─ CORS middleware │
│ └─ S3 XML response builder │
├─────────────────────────────────┤
│ TypeScript (thin IPC wrapper) │
│ ├─ Smarts3 class │
│ ├─ RustBridge (stdin/stdout) │
│ └─ Config & S3 descriptor │
└─────────────────────────────────┘
```
**Why Rust?** The TypeScript implementation had critical perf issues: OOM on multipart uploads (parts buffered in memory), double stream copying, file descriptor leaks on HEAD requests, full-file reads for range requests, and no backpressure. The Rust binary solves all of these with streaming I/O, zero-copy, and direct `seek()` for range requests.
**IPC Protocol:** TypeScript spawns the `rusts3` binary with `--management` and communicates via newline-delimited JSON over stdin/stdout. Commands: `start`, `stop`, `createBucket`.
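For illustration, a management session over that protocol might look like the following (requests flow to the binary's stdin, responses and events come back on stdout; the `id` values are arbitrary, the `start` config payload is elided, and the field names match the `IpcRequest`/`IpcResponse` structs in `rust/src/management.rs`):

```
← {"event":"ready","data":{}}
→ {"id":"1","method":"start","params":{"config":{ ... }}}
← {"id":"1","success":true,"result":{}}
→ {"id":"2","method":"createBucket","params":{"name":"my-bucket"}}
← {"id":"2","success":true,"result":{}}
→ {"id":"3","method":"stop","params":{}}
← {"id":"3","success":true,"result":{}}
```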
### S3 Operations Supported
| Operation | Method | Path |
|-----------|--------|------|
| ListBuckets | `GET /` | |
| CreateBucket | `PUT /{bucket}` | |
| DeleteBucket | `DELETE /{bucket}` | |
| HeadBucket | `HEAD /{bucket}` | |
| ListObjects (v1/v2) | `GET /{bucket}` | `?list-type=2` for v2 |
| PutObject | `PUT /{bucket}/{key}` | |
| GetObject | `GET /{bucket}/{key}` | Supports `Range` header |
| HeadObject | `HEAD /{bucket}/{key}` | |
| DeleteObject | `DELETE /{bucket}/{key}` | |
| CopyObject | `PUT /{bucket}/{key}` | `x-amz-copy-source` header |
| InitiateMultipartUpload | `POST /{bucket}/{key}?uploads` | |
| UploadPart | `PUT /{bucket}/{key}?partNumber&uploadId` | |
| CompleteMultipartUpload | `POST /{bucket}/{key}?uploadId` | |
| AbortMultipartUpload | `DELETE /{bucket}/{key}?uploadId` | |
| ListMultipartUploads | `GET /{bucket}?uploads` | |
| GetBucketPolicy | `GET /{bucket}?policy` | |
| PutBucketPolicy | `PUT /{bucket}?policy` | |
| DeleteBucketPolicy | `DELETE /{bucket}?policy` | |
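Two of the less obvious rows, sketched with the AWS SDK client configured earlier (object names are illustrative):

```typescript
import { CopyObjectCommand, GetObjectCommand } from '@aws-sdk/client-s3';

// CopyObject is a PUT carrying the x-amz-copy-source header;
// the SDK sets that header from CopySource.
await client.send(new CopyObjectCommand({
  Bucket: 'my-bucket',
  Key: 'hello-copy.txt',
  CopySource: 'my-bucket/hello.txt', // source "bucket/key"
}));

// Ranged GetObject: the server answers with partial content
const { Body } = await client.send(new GetObjectCommand({
  Bucket: 'my-bucket',
  Key: 'hello.txt',
  Range: 'bytes=0-4',
}));
console.log(await Body.transformToString()); // "Hello" (first five bytes)
```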
### On-Disk Format
```
{storage.directory}/
{bucket}/
{key}._S3_object # Object data
{key}._S3_object.metadata.json # Metadata (content-type, x-amz-meta-*, etc.)
{key}._S3_object.md5 # Cached MD5 hash
.multipart/
{upload-id}/
metadata.json # Upload metadata (bucket, key, parts)
part-1 # Part data files
part-2
...
.policies/
{bucket}.policy.json # Bucket policy (IAM JSON format)
```
## 🔗 Related Packages
- [`@push.rocks/smartbucket`](https://code.foss.global/push.rocks/smartbucket) — High-level S3 abstraction layer
- [`@push.rocks/smartrust`](https://code.foss.global/push.rocks/smartrust) — TypeScript ↔ Rust IPC bridge
- [`@git.zone/tsrust`](https://code.foss.global/git.zone/tsrust) — Rust cross-compilation for npm packages
## License and Legal Information

This repository contains open-source code licensed under the MIT License. A copy of the license can be found in the [LICENSE](./LICENSE) file.

**Please note:** The MIT License does not grant permission to use the trade names, trademarks, service marks, or product names of the project, except as required for reasonable and customary use in describing the origin of the work and reproducing the content of the NOTICE file.

### Trademarks

This project is owned and maintained by Task Venture Capital GmbH. The names and logos associated with Task Venture Capital GmbH and any related products or services are trademarks of Task Venture Capital GmbH or third parties, and are not included within the scope of the MIT license granted herein.

Use of these trademarks must comply with Task Venture Capital GmbH's Trademark Guidelines or the guidelines of the respective third-party owners, and any usage must be approved in writing. Third-party trademarks used herein are the property of their respective owners and are used only in a descriptive manner, e.g. for an implementation of an API or similar.

### Company Information

Task Venture Capital GmbH
Registered at District Court Bremen HRB 35230 HB, Germany

For any legal inquiries or further information, please contact us via email at hello@task.vc.

By using this repository, you acknowledge that you have read this section, agree to comply with its terms, and understand that the licensing of the code does not imply endorsement by Task Venture Capital GmbH of any derivative works.
rust/.cargo/config.toml Normal file

@@ -0,0 +1,2 @@
[target.aarch64-unknown-linux-gnu]
linker = "aarch64-linux-gnu-gcc"

rust/Cargo.lock generated Normal file

File diff suppressed because it is too large.

rust/Cargo.toml Normal file

@@ -0,0 +1,33 @@
[package]
name = "rusts3"
version = "0.1.0"
edition = "2021"
[[bin]]
name = "rusts3"
path = "src/main.rs"
[dependencies]
tokio = { version = "1", features = ["full"] }
hyper = { version = "1", features = ["http1", "server"] }
hyper-util = { version = "0.1", features = ["tokio", "http1"] }
http-body-util = "0.1"
serde = { version = "1", features = ["derive"] }
serde_json = "1"
quick-xml = { version = "0.37", features = ["serialize"] }
md-5 = "0.10"
tokio-util = { version = "0.7", features = ["io"] }
bytes = "1"
uuid = { version = "1", features = ["v4"] }
clap = { version = "4", features = ["derive"] }
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
thiserror = "2"
anyhow = "1"
percent-encoding = "2"
url = "2"
chrono = { version = "0.4", features = ["serde"] }
futures-core = "0.3"
hmac = "0.12"
sha2 = "0.10"
hex = "0.4"

rust/src/action.rs Normal file

@@ -0,0 +1,172 @@
use hyper::body::Incoming;
use hyper::{Method, Request};
use std::collections::HashMap;
/// S3 actions that map to IAM permission strings.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum S3Action {
ListAllMyBuckets,
CreateBucket,
DeleteBucket,
HeadBucket,
ListBucket,
GetObject,
HeadObject,
PutObject,
DeleteObject,
CopyObject,
ListBucketMultipartUploads,
AbortMultipartUpload,
InitiateMultipartUpload,
UploadPart,
CompleteMultipartUpload,
GetBucketPolicy,
PutBucketPolicy,
DeleteBucketPolicy,
}
impl S3Action {
/// Return the IAM-style action string (e.g. "s3:GetObject").
pub fn iam_action(&self) -> &'static str {
match self {
S3Action::ListAllMyBuckets => "s3:ListAllMyBuckets",
S3Action::CreateBucket => "s3:CreateBucket",
S3Action::DeleteBucket => "s3:DeleteBucket",
S3Action::HeadBucket => "s3:ListBucket",
S3Action::ListBucket => "s3:ListBucket",
S3Action::GetObject => "s3:GetObject",
S3Action::HeadObject => "s3:GetObject",
S3Action::PutObject => "s3:PutObject",
S3Action::DeleteObject => "s3:DeleteObject",
S3Action::CopyObject => "s3:PutObject",
S3Action::ListBucketMultipartUploads => "s3:ListBucketMultipartUploads",
S3Action::AbortMultipartUpload => "s3:AbortMultipartUpload",
S3Action::InitiateMultipartUpload => "s3:PutObject",
S3Action::UploadPart => "s3:PutObject",
S3Action::CompleteMultipartUpload => "s3:PutObject",
S3Action::GetBucketPolicy => "s3:GetBucketPolicy",
S3Action::PutBucketPolicy => "s3:PutBucketPolicy",
S3Action::DeleteBucketPolicy => "s3:DeleteBucketPolicy",
}
}
}
/// Context extracted from a request, used for policy evaluation.
#[derive(Debug, Clone)]
pub struct RequestContext {
pub action: S3Action,
pub bucket: Option<String>,
pub key: Option<String>,
}
impl RequestContext {
/// Build the ARN for this request's resource.
pub fn resource_arn(&self) -> String {
match (&self.bucket, &self.key) {
(Some(bucket), Some(key)) => format!("arn:aws:s3:::{}/{}", bucket, key),
(Some(bucket), None) => format!("arn:aws:s3:::{}", bucket),
_ => "arn:aws:s3:::*".to_string(),
}
}
}
/// Resolve the S3 action from an incoming HTTP request.
pub fn resolve_action(req: &Request<Incoming>) -> RequestContext {
let method = req.method().clone();
let path = req.uri().path().to_string();
let query_string = req.uri().query().unwrap_or("").to_string();
let query = parse_query_simple(&query_string);
let segments: Vec<&str> = path
.trim_start_matches('/')
.splitn(2, '/')
.filter(|s| !s.is_empty())
.collect();
match segments.len() {
0 => {
// Root: GET / -> ListBuckets
RequestContext {
action: S3Action::ListAllMyBuckets,
bucket: None,
key: None,
}
}
1 => {
let bucket = percent_decode(segments[0]);
let has_policy = query.contains_key("policy");
let has_uploads = query.contains_key("uploads");
let action = match (&method, has_policy, has_uploads) {
(&Method::GET, true, _) => S3Action::GetBucketPolicy,
(&Method::PUT, true, _) => S3Action::PutBucketPolicy,
(&Method::DELETE, true, _) => S3Action::DeleteBucketPolicy,
(&Method::GET, _, true) => S3Action::ListBucketMultipartUploads,
(&Method::GET, _, _) => S3Action::ListBucket,
(&Method::PUT, _, _) => S3Action::CreateBucket,
(&Method::DELETE, _, _) => S3Action::DeleteBucket,
(&Method::HEAD, _, _) => S3Action::HeadBucket,
_ => S3Action::ListBucket,
};
RequestContext {
action,
bucket: Some(bucket),
key: None,
}
}
2 => {
let bucket = percent_decode(segments[0]);
let key = percent_decode(segments[1]);
let has_copy_source = req.headers().contains_key("x-amz-copy-source");
let has_part_number = query.contains_key("partNumber");
let has_upload_id = query.contains_key("uploadId");
let has_uploads = query.contains_key("uploads");
let action = match &method {
&Method::PUT if has_part_number && has_upload_id => S3Action::UploadPart,
&Method::PUT if has_copy_source => S3Action::CopyObject,
&Method::PUT => S3Action::PutObject,
&Method::GET => S3Action::GetObject,
&Method::HEAD => S3Action::HeadObject,
&Method::DELETE if has_upload_id => S3Action::AbortMultipartUpload,
&Method::DELETE => S3Action::DeleteObject,
&Method::POST if has_uploads => S3Action::InitiateMultipartUpload,
&Method::POST if has_upload_id => S3Action::CompleteMultipartUpload,
_ => S3Action::GetObject,
};
RequestContext {
action,
bucket: Some(bucket),
key: Some(key),
}
}
_ => RequestContext {
action: S3Action::ListAllMyBuckets,
bucket: None,
key: None,
},
}
}
fn parse_query_simple(query_string: &str) -> HashMap<String, String> {
let mut map = HashMap::new();
if query_string.is_empty() {
return map;
}
for pair in query_string.split('&') {
let mut parts = pair.splitn(2, '=');
let key = parts.next().unwrap_or("");
let value = parts.next().unwrap_or("");
map.insert(key.to_string(), value.to_string());
}
map
}
fn percent_decode(s: &str) -> String {
percent_encoding::percent_decode_str(s)
.decode_utf8_lossy()
.to_string()
}

rust/src/auth.rs Normal file

@@ -0,0 +1,310 @@
use hmac::{Hmac, Mac};
use hyper::body::Incoming;
use hyper::Request;
use sha2::{Digest, Sha256};
use std::collections::HashMap;
use crate::config::{Credential, S3Config};
use crate::s3_error::S3Error;
type HmacSha256 = Hmac<Sha256>;
/// The identity of an authenticated caller.
#[derive(Debug, Clone)]
pub struct AuthenticatedIdentity {
pub access_key_id: String,
}
/// Parsed components of an AWS4-HMAC-SHA256 Authorization header.
struct SigV4Header {
access_key_id: String,
date_stamp: String,
region: String,
signed_headers: Vec<String>,
signature: String,
}
/// Verify the request's SigV4 signature. Returns the caller identity on success.
pub fn verify_request(
req: &Request<Incoming>,
config: &S3Config,
) -> Result<AuthenticatedIdentity, S3Error> {
let auth_header = req
.headers()
.get("authorization")
.and_then(|v| v.to_str().ok())
.unwrap_or("");
// Reject SigV2
if auth_header.starts_with("AWS ") {
return Err(S3Error::authorization_header_malformed());
}
if !auth_header.starts_with("AWS4-HMAC-SHA256") {
return Err(S3Error::authorization_header_malformed());
}
let parsed = parse_auth_header(auth_header)?;
// Look up credential
let credential = find_credential(&parsed.access_key_id, config)
.ok_or_else(S3Error::invalid_access_key_id)?;
// Get x-amz-date
let amz_date = req
.headers()
.get("x-amz-date")
.and_then(|v| v.to_str().ok())
.or_else(|| {
req.headers()
.get("date")
.and_then(|v| v.to_str().ok())
})
.ok_or_else(|| S3Error::missing_security_header("Missing x-amz-date header"))?;
// Enforce 15-min clock skew
check_clock_skew(amz_date)?;
// Get payload hash
let content_sha256 = req
.headers()
.get("x-amz-content-sha256")
.and_then(|v| v.to_str().ok())
.unwrap_or("UNSIGNED-PAYLOAD");
// Build canonical request
let canonical_request = build_canonical_request(req, &parsed.signed_headers, content_sha256);
// Build string to sign
let scope = format!(
"{}/{}/s3/aws4_request",
parsed.date_stamp, parsed.region
);
let canonical_hash = hex::encode(Sha256::digest(canonical_request.as_bytes()));
let string_to_sign = format!(
"AWS4-HMAC-SHA256\n{}\n{}\n{}",
amz_date, scope, canonical_hash
);
// Derive signing key
let signing_key = derive_signing_key(
&credential.secret_access_key,
&parsed.date_stamp,
&parsed.region,
);
// Compute signature
let computed = hmac_sha256(&signing_key, string_to_sign.as_bytes());
let computed_hex = hex::encode(&computed);
// Constant-time comparison
if !constant_time_eq(computed_hex.as_bytes(), parsed.signature.as_bytes()) {
return Err(S3Error::signature_does_not_match());
}
Ok(AuthenticatedIdentity {
access_key_id: parsed.access_key_id,
})
}
/// Parse the Authorization header into its components.
fn parse_auth_header(header: &str) -> Result<SigV4Header, S3Error> {
// Format: AWS4-HMAC-SHA256 Credential=KEY/YYYYMMDD/region/s3/aws4_request, SignedHeaders=h1;h2, Signature=hex
let after_algo = header
.strip_prefix("AWS4-HMAC-SHA256")
.ok_or_else(S3Error::authorization_header_malformed)?
.trim();
let mut credential_str = None;
let mut signed_headers_str = None;
let mut signature_str = None;
for part in after_algo.split(',') {
let part = part.trim();
if let Some(val) = part.strip_prefix("Credential=") {
credential_str = Some(val.trim());
} else if let Some(val) = part.strip_prefix("SignedHeaders=") {
signed_headers_str = Some(val.trim());
} else if let Some(val) = part.strip_prefix("Signature=") {
signature_str = Some(val.trim());
}
}
let credential_str = credential_str
.ok_or_else(S3Error::authorization_header_malformed)?;
let signed_headers_str = signed_headers_str
.ok_or_else(S3Error::authorization_header_malformed)?;
let signature = signature_str
.ok_or_else(S3Error::authorization_header_malformed)?
.to_string();
// Parse credential: KEY/YYYYMMDD/region/s3/aws4_request
let cred_parts: Vec<&str> = credential_str.splitn(5, '/').collect();
if cred_parts.len() < 5 {
return Err(S3Error::authorization_header_malformed());
}
let access_key_id = cred_parts[0].to_string();
let date_stamp = cred_parts[1].to_string();
let region = cred_parts[2].to_string();
let signed_headers: Vec<String> = signed_headers_str
.split(';')
.map(|s| s.trim().to_lowercase())
.collect();
Ok(SigV4Header {
access_key_id,
date_stamp,
region,
signed_headers,
signature,
})
}
/// Find a credential by access key ID.
fn find_credential<'a>(access_key_id: &str, config: &'a S3Config) -> Option<&'a Credential> {
config
.auth
.credentials
.iter()
.find(|c| c.access_key_id == access_key_id)
}
/// Check clock skew (15 minutes max).
fn check_clock_skew(amz_date: &str) -> Result<(), S3Error> {
// Parse ISO 8601 basic format: YYYYMMDDTHHMMSSZ
let parsed = chrono::NaiveDateTime::parse_from_str(amz_date, "%Y%m%dT%H%M%SZ")
.map_err(|_| S3Error::authorization_header_malformed())?;
let request_time = chrono::DateTime::<chrono::Utc>::from_naive_utc_and_offset(parsed, chrono::Utc);
let now = chrono::Utc::now();
let diff = (now - request_time).num_seconds().unsigned_abs();
if diff > 15 * 60 {
return Err(S3Error::request_time_too_skewed());
}
Ok(())
}
/// Build the canonical request string.
fn build_canonical_request(
req: &Request<Incoming>,
signed_headers: &[String],
payload_hash: &str,
) -> String {
let method = req.method().as_str();
let uri_path = req.uri().path();
// Canonical URI: the path, already percent-encoded by the client
let canonical_uri = if uri_path.is_empty() { "/" } else { uri_path };
// Canonical query string: sorted key=value pairs
let canonical_query = build_canonical_query(req.uri().query().unwrap_or(""));
// Canonical headers: sorted by lowercase header name
let canonical_headers = build_canonical_headers(req, signed_headers);
// Signed headers string
let signed_headers_str = signed_headers.join(";");
// Payload hash — UNSIGNED-PAYLOAD, STREAMING-AWS4-HMAC-SHA256-PAYLOAD, and
// precomputed SHA-256 hex digests are all used verbatim in the canonical request.
let effective_payload_hash = payload_hash.to_string();
format!(
"{}\n{}\n{}\n{}\n{}\n{}",
method,
canonical_uri,
canonical_query,
canonical_headers,
signed_headers_str,
effective_payload_hash
)
}
/// Build canonical query string (sorted key=value pairs).
fn build_canonical_query(query: &str) -> String {
if query.is_empty() {
return String::new();
}
let mut pairs: Vec<(String, String)> = Vec::new();
for pair in query.split('&') {
let mut parts = pair.splitn(2, '=');
let key = parts.next().unwrap_or("");
let value = parts.next().unwrap_or("");
pairs.push((key.to_string(), value.to_string()));
}
pairs.sort();
pairs
.iter()
.map(|(k, v)| format!("{}={}", k, v))
.collect::<Vec<_>>()
.join("&")
}
/// Build canonical headers string.
fn build_canonical_headers(req: &Request<Incoming>, signed_headers: &[String]) -> String {
let mut header_map: HashMap<String, Vec<String>> = HashMap::new();
for (name, value) in req.headers() {
let name_lower = name.as_str().to_lowercase();
if signed_headers.contains(&name_lower) {
if let Ok(val) = value.to_str() {
header_map
.entry(name_lower)
.or_default()
.push(val.trim().to_string());
}
}
}
let mut result = String::new();
for header_name in signed_headers {
let values = header_map
.get(header_name)
.map(|v| v.join(","))
.unwrap_or_default();
result.push_str(header_name);
result.push(':');
result.push_str(&values);
result.push('\n');
}
result
}
/// Derive the signing key via 4-step HMAC chain.
fn derive_signing_key(secret_key: &str, date_stamp: &str, region: &str) -> Vec<u8> {
let k_secret = format!("AWS4{}", secret_key);
let k_date = hmac_sha256(k_secret.as_bytes(), date_stamp.as_bytes());
let k_region = hmac_sha256(&k_date, region.as_bytes());
let k_service = hmac_sha256(&k_region, b"s3");
hmac_sha256(&k_service, b"aws4_request")
}
/// Compute HMAC-SHA256.
fn hmac_sha256(key: &[u8], data: &[u8]) -> Vec<u8> {
let mut mac = HmacSha256::new_from_slice(key).expect("HMAC key length is always valid");
mac.update(data);
mac.finalize().into_bytes().to_vec()
}
/// Constant-time byte comparison.
fn constant_time_eq(a: &[u8], b: &[u8]) -> bool {
if a.len() != b.len() {
return false;
}
let mut diff = 0u8;
for (x, y) in a.iter().zip(b.iter()) {
diff |= x ^ y;
}
diff == 0
}

rust/src/config.rs Normal file

@@ -0,0 +1,84 @@
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct S3Config {
pub server: ServerConfig,
pub storage: StorageConfig,
pub auth: AuthConfig,
pub cors: CorsConfig,
pub logging: LoggingConfig,
pub limits: LimitsConfig,
pub multipart: MultipartConfig,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ServerConfig {
pub port: u16,
pub address: String,
pub silent: bool,
#[serde(default = "default_region")]
pub region: String,
}
fn default_region() -> String {
"us-east-1".to_string()
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct StorageConfig {
pub directory: String,
pub clean_slate: bool,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct AuthConfig {
pub enabled: bool,
pub credentials: Vec<Credential>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Credential {
#[serde(rename = "accessKeyId")]
pub access_key_id: String,
#[serde(rename = "secretAccessKey")]
pub secret_access_key: String,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CorsConfig {
pub enabled: bool,
pub allowed_origins: Option<Vec<String>>,
pub allowed_methods: Option<Vec<String>>,
pub allowed_headers: Option<Vec<String>>,
pub exposed_headers: Option<Vec<String>>,
pub max_age: Option<u64>,
pub allow_credentials: Option<bool>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct LoggingConfig {
pub level: Option<String>,
pub format: Option<String>,
pub enabled: Option<bool>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct LimitsConfig {
pub max_object_size: Option<u64>,
pub max_metadata_size: Option<u64>,
pub request_timeout: Option<u64>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct MultipartConfig {
pub expiration_days: Option<u64>,
pub cleanup_interval_minutes: Option<u64>,
}

rust/src/main.rs Normal file

@@ -0,0 +1,46 @@
mod action;
mod auth;
mod config;
mod management;
mod policy;
mod s3_error;
mod server;
mod storage;
mod xml_response;
use clap::Parser;
#[derive(Parser)]
#[command(name = "rusts3", about = "High-performance S3-compatible server")]
struct Cli {
/// Run in management mode (IPC via stdin/stdout)
#[arg(long)]
management: bool,
/// Log level
#[arg(long, default_value = "info")]
log_level: String,
}
#[tokio::main]
async fn main() -> anyhow::Result<()> {
let cli = Cli::parse();
if cli.management {
// Init tracing to stderr only (stdout reserved for IPC)
tracing_subscriber::fmt()
.with_writer(std::io::stderr)
.with_env_filter(
tracing_subscriber::EnvFilter::try_new(&cli.log_level)
.unwrap_or_else(|_| tracing_subscriber::EnvFilter::new("info")),
)
.init();
management::management_loop().await?;
} else {
eprintln!("rusts3: use --management flag for IPC mode");
std::process::exit(1);
}
Ok(())
}

rust/src/management.rs Normal file

@@ -0,0 +1,155 @@
use anyhow::Result;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::io::Write;
use tokio::io::{AsyncBufReadExt, BufReader};
use crate::config::S3Config;
use crate::server::S3Server;
#[derive(Deserialize)]
struct IpcRequest {
id: String,
method: String,
params: Value,
}
#[derive(Serialize)]
struct IpcResponse {
id: String,
success: bool,
#[serde(skip_serializing_if = "Option::is_none")]
result: Option<Value>,
#[serde(skip_serializing_if = "Option::is_none")]
error: Option<String>,
}
#[derive(Serialize)]
struct IpcEvent {
event: String,
data: Value,
}
fn send_line(value: &impl Serialize) {
let mut stdout = std::io::stdout().lock();
serde_json::to_writer(&mut stdout, value).ok();
stdout.write_all(b"\n").ok();
stdout.flush().ok();
}
fn send_response(id: String, result: Value) {
send_line(&IpcResponse {
id,
success: true,
result: Some(result),
error: None,
});
}
fn send_error(id: String, message: String) {
send_line(&IpcResponse {
id,
success: false,
result: None,
error: Some(message),
});
}
pub async fn management_loop() -> Result<()> {
// Emit ready event
send_line(&IpcEvent {
event: "ready".to_string(),
data: serde_json::json!({}),
});
let mut server: Option<S3Server> = None;
let stdin = BufReader::new(tokio::io::stdin());
let mut lines = stdin.lines();
while let Ok(Some(line)) = lines.next_line().await {
let line = line.trim().to_string();
if line.is_empty() {
continue;
}
let req: IpcRequest = match serde_json::from_str(&line) {
Ok(r) => r,
Err(e) => {
tracing::warn!("Invalid IPC request: {}", e);
continue;
}
};
let id = req.id.clone();
let method = req.method.as_str();
match method {
"start" => {
#[derive(Deserialize)]
struct StartParams {
config: S3Config,
}
match serde_json::from_value::<StartParams>(req.params) {
Ok(params) => {
match S3Server::start(params.config).await {
Ok(s) => {
server = Some(s);
send_response(id, serde_json::json!({}));
}
Err(e) => {
send_error(id, format!("Failed to start server: {}", e));
}
}
}
Err(e) => {
send_error(id, format!("Invalid start params: {}", e));
}
}
}
"stop" => {
if let Some(s) = server.take() {
s.stop().await;
}
send_response(id, serde_json::json!({}));
}
"createBucket" => {
#[derive(Deserialize)]
struct CreateBucketParams {
name: String,
}
match serde_json::from_value::<CreateBucketParams>(req.params) {
Ok(params) => {
if let Some(ref s) = server {
match s.store().create_bucket(&params.name).await {
Ok(()) => {
send_response(id, serde_json::json!({}));
}
Err(e) => {
send_error(
id,
format!("Failed to create bucket: {}", e),
);
}
}
} else {
send_error(id, "Server not started".to_string());
}
}
Err(e) => {
send_error(id, format!("Invalid createBucket params: {}", e));
}
}
}
_ => {
send_error(id, format!("Unknown method: {}", method));
}
}
}
// Clean shutdown
if let Some(s) = server.take() {
s.stop().await;
}
Ok(())
}

rust/src/policy.rs Normal file

@@ -0,0 +1,429 @@
use serde::{Deserialize, Deserializer, Serialize};
use std::collections::HashMap;
use std::path::PathBuf;
use tokio::fs;
use tokio::sync::RwLock;
use crate::action::RequestContext;
use crate::auth::AuthenticatedIdentity;
use crate::s3_error::S3Error;
// ============================
// Policy data model
// ============================
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BucketPolicy {
#[serde(rename = "Version")]
pub version: String,
#[serde(rename = "Statement")]
pub statements: Vec<PolicyStatement>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PolicyStatement {
#[serde(rename = "Sid", default, skip_serializing_if = "Option::is_none")]
pub sid: Option<String>,
#[serde(rename = "Effect")]
pub effect: PolicyEffect,
#[serde(rename = "Principal", deserialize_with = "deserialize_principal")]
pub principal: Principal,
#[serde(rename = "Action", deserialize_with = "deserialize_string_or_vec")]
pub action: Vec<String>,
#[serde(rename = "Resource", deserialize_with = "deserialize_string_or_vec")]
pub resource: Vec<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum PolicyEffect {
Allow,
Deny,
}
#[derive(Debug, Clone)]
pub enum Principal {
Wildcard,
Aws(Vec<String>),
}
impl Serialize for Principal {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
match self {
Principal::Wildcard => serializer.serialize_str("*"),
Principal::Aws(ids) => {
use serde::ser::SerializeMap;
let mut map = serializer.serialize_map(Some(1))?;
if ids.len() == 1 {
map.serialize_entry("AWS", &ids[0])?;
} else {
map.serialize_entry("AWS", ids)?;
}
map.end()
}
}
}
}
fn deserialize_principal<'de, D>(deserializer: D) -> Result<Principal, D::Error>
where
D: Deserializer<'de>,
{
#[derive(Deserialize)]
#[serde(untagged)]
enum PrincipalRaw {
Star(String),
Map(HashMap<String, StringOrVec>),
}
let raw = PrincipalRaw::deserialize(deserializer)?;
match raw {
PrincipalRaw::Star(s) if s == "*" => Ok(Principal::Wildcard),
PrincipalRaw::Star(_) => Err(serde::de::Error::custom(
"Principal string must be \"*\"",
)),
PrincipalRaw::Map(map) => {
if let Some(aws) = map.get("AWS") {
Ok(Principal::Aws(aws.clone().into_vec()))
} else {
Err(serde::de::Error::custom("Principal map must contain \"AWS\" key"))
}
}
}
}
#[derive(Debug, Clone, Deserialize)]
#[serde(untagged)]
enum StringOrVec {
Single(String),
Multiple(Vec<String>),
}
impl StringOrVec {
fn into_vec(self) -> Vec<String> {
match self {
StringOrVec::Single(s) => vec![s],
StringOrVec::Multiple(v) => v,
}
}
}
fn deserialize_string_or_vec<'de, D>(deserializer: D) -> Result<Vec<String>, D::Error>
where
D: Deserializer<'de>,
{
let raw = StringOrVec::deserialize(deserializer)?;
Ok(raw.into_vec())
}
// ============================
// Policy evaluation
// ============================
#[derive(Debug, Clone, PartialEq)]
pub enum PolicyDecision {
Allow,
Deny,
NoOpinion,
}
/// Evaluate a bucket policy against a request context and caller identity.
pub fn evaluate_policy(
policy: &BucketPolicy,
ctx: &RequestContext,
identity: Option<&AuthenticatedIdentity>,
) -> PolicyDecision {
let resource_arn = ctx.resource_arn();
let iam_action = ctx.action.iam_action();
let mut has_allow = false;
for stmt in &policy.statements {
// Check principal match
if !principal_matches(&stmt.principal, identity) {
continue;
}
// Check action match
if !action_matches(&stmt.action, iam_action) {
continue;
}
// Check resource match
if !resource_matches(&stmt.resource, &resource_arn, ctx.bucket.as_deref()) {
continue;
}
// Statement matches — apply effect
match stmt.effect {
PolicyEffect::Deny => return PolicyDecision::Deny,
PolicyEffect::Allow => has_allow = true,
}
}
if has_allow {
PolicyDecision::Allow
} else {
PolicyDecision::NoOpinion
}
}
/// Check if the principal matches the caller.
fn principal_matches(principal: &Principal, identity: Option<&AuthenticatedIdentity>) -> bool {
match principal {
Principal::Wildcard => true,
Principal::Aws(ids) => {
if let Some(id) = identity {
ids.iter().any(|arn| {
// Match against full ARN or just the access key ID
arn == "*" || arn.ends_with(&id.access_key_id)
})
} else {
false
}
}
}
}
/// Check if the action matches. Supports wildcard `s3:*` and `*`.
fn action_matches(policy_actions: &[String], request_action: &str) -> bool {
for pa in policy_actions {
if pa == "*" || pa == "s3:*" {
return true;
}
if pa.eq_ignore_ascii_case(request_action) {
return true;
}
// Simple prefix wildcard: "s3:Get*" matches "s3:GetObject"
if let Some(prefix) = pa.strip_suffix('*') {
if request_action
.to_lowercase()
.starts_with(&prefix.to_lowercase())
{
return true;
}
}
}
false
}
/// Check if the resource matches. Supports wildcard patterns.
fn resource_matches(policy_resources: &[String], request_arn: &str, bucket: Option<&str>) -> bool {
for pr in policy_resources {
if pr == "*" {
return true;
}
if arn_pattern_matches(pr, request_arn) {
return true;
}
// Also check bucket-level ARN if the request is for an object
if let Some(b) = bucket {
let bucket_arn = format!("arn:aws:s3:::{}", b);
if arn_pattern_matches(pr, &bucket_arn) {
return true;
}
}
}
false
}
/// Simple ARN pattern matching with `*` and `?` wildcards.
fn arn_pattern_matches(pattern: &str, value: &str) -> bool {
// Handle trailing /* specifically: arn:aws:s3:::bucket/* matches arn:aws:s3:::bucket/anything
if pattern.ends_with("/*") {
let prefix = &pattern[..pattern.len() - 1]; // Remove trailing *
if value.starts_with(prefix) {
return true;
}
// Also match exact bucket without trailing /
let bucket_only = &pattern[..pattern.len() - 2];
if value == bucket_only {
return true;
}
}
simple_wildcard_match(pattern, value)
}
fn simple_wildcard_match(pattern: &str, value: &str) -> bool {
let pat_bytes = pattern.as_bytes();
let val_bytes = value.as_bytes();
let mut pi = 0;
let mut vi = 0;
let mut star_pi = usize::MAX;
let mut star_vi = 0;
while vi < val_bytes.len() {
if pi < pat_bytes.len() && (pat_bytes[pi] == b'?' || pat_bytes[pi] == val_bytes[vi]) {
pi += 1;
vi += 1;
} else if pi < pat_bytes.len() && pat_bytes[pi] == b'*' {
star_pi = pi;
star_vi = vi;
pi += 1;
} else if star_pi != usize::MAX {
pi = star_pi + 1;
star_vi += 1;
vi = star_vi;
} else {
return false;
}
}
while pi < pat_bytes.len() && pat_bytes[pi] == b'*' {
pi += 1;
}
pi == pat_bytes.len()
}
// ============================
// Policy validation
// ============================
const MAX_POLICY_SIZE: usize = 20 * 1024; // 20 KB
pub fn validate_policy(json: &str) -> Result<BucketPolicy, S3Error> {
if json.len() > MAX_POLICY_SIZE {
return Err(S3Error::malformed_policy("Policy exceeds maximum size of 20KB"));
}
let policy: BucketPolicy =
serde_json::from_str(json).map_err(|e| S3Error::malformed_policy(&e.to_string()))?;
if policy.version != "2012-10-17" {
return Err(S3Error::malformed_policy(
"Policy version must be \"2012-10-17\"",
));
}
if policy.statements.is_empty() {
return Err(S3Error::malformed_policy(
"Policy must contain at least one statement",
));
}
for (i, stmt) in policy.statements.iter().enumerate() {
if stmt.action.is_empty() {
return Err(S3Error::malformed_policy(&format!(
"Statement {} has no actions",
i
)));
}
for action in &stmt.action {
if action != "*" && !action.starts_with("s3:") {
return Err(S3Error::malformed_policy(&format!(
"Action \"{}\" must start with \"s3:\"",
action
)));
}
}
if stmt.resource.is_empty() {
return Err(S3Error::malformed_policy(&format!(
"Statement {} has no resources",
i
)));
}
for resource in &stmt.resource {
if resource != "*" && !resource.starts_with("arn:aws:s3:::") {
return Err(S3Error::malformed_policy(&format!(
"Resource \"{}\" must start with \"arn:aws:s3:::\"",
resource
)));
}
}
}
Ok(policy)
}
// ============================
// PolicyStore — in-memory cache + disk
// ============================
pub struct PolicyStore {
policies: RwLock<HashMap<String, BucketPolicy>>,
policies_dir: PathBuf,
}
impl PolicyStore {
pub fn new(policies_dir: PathBuf) -> Self {
Self {
policies: RwLock::new(HashMap::new()),
policies_dir,
}
}
/// Load all policies from disk into cache.
pub async fn load_from_disk(&self) -> anyhow::Result<()> {
let dir = &self.policies_dir;
if !dir.exists() {
return Ok(());
}
let mut entries = fs::read_dir(dir).await?;
let mut policies = HashMap::new();
while let Some(entry) = entries.next_entry().await? {
let name = entry.file_name().to_string_lossy().to_string();
if let Some(bucket) = name.strip_suffix(".policy.json") {
match fs::read_to_string(entry.path()).await {
Ok(json) => match serde_json::from_str::<BucketPolicy>(&json) {
Ok(policy) => {
tracing::info!("Loaded policy for bucket: {}", bucket);
policies.insert(bucket.to_string(), policy);
}
Err(e) => {
tracing::warn!("Failed to parse policy for {}: {}", bucket, e);
}
},
Err(e) => {
tracing::warn!("Failed to read policy file {}: {}", name, e);
}
}
}
}
let mut cache = self.policies.write().await;
*cache = policies;
Ok(())
}
/// Get a policy for a bucket.
pub async fn get_policy(&self, bucket: &str) -> Option<BucketPolicy> {
let cache = self.policies.read().await;
cache.get(bucket).cloned()
}
/// Store a policy for a bucket (atomic write + cache update).
pub async fn put_policy(&self, bucket: &str, policy: BucketPolicy) -> anyhow::Result<()> {
let json = serde_json::to_string_pretty(&policy)?;
// Atomic write: temp file + rename
let policy_path = self.policies_dir.join(format!("{}.policy.json", bucket));
let temp_path = self
.policies_dir
.join(format!("{}.policy.json.tmp", bucket));
fs::write(&temp_path, &json).await?;
fs::rename(&temp_path, &policy_path).await?;
// Update cache
let mut cache = self.policies.write().await;
cache.insert(bucket.to_string(), policy);
Ok(())
}
/// Delete a policy for a bucket.
pub async fn delete_policy(&self, bucket: &str) -> anyhow::Result<()> {
let policy_path = self.policies_dir.join(format!("{}.policy.json", bucket));
let _ = fs::remove_file(&policy_path).await;
let mut cache = self.policies.write().await;
cache.remove(bucket);
Ok(())
}
}

rust/src/s3_error.rs Normal file

@@ -0,0 +1,106 @@
use hyper::StatusCode;
#[derive(Debug, thiserror::Error)]
#[error("S3Error({code}): {message}")]
pub struct S3Error {
pub code: String,
pub message: String,
pub status: StatusCode,
}
impl S3Error {
pub fn new(code: &str, message: &str, status: StatusCode) -> Self {
Self {
code: code.to_string(),
message: message.to_string(),
status,
}
}
pub fn no_such_key() -> Self {
Self::new("NoSuchKey", "The specified key does not exist.", StatusCode::NOT_FOUND)
}
pub fn no_such_bucket() -> Self {
Self::new("NoSuchBucket", "The specified bucket does not exist", StatusCode::NOT_FOUND)
}
pub fn bucket_not_empty() -> Self {
Self::new("BucketNotEmpty", "The bucket you tried to delete is not empty", StatusCode::CONFLICT)
}
pub fn access_denied() -> Self {
Self::new("AccessDenied", "Access Denied", StatusCode::FORBIDDEN)
}
pub fn no_such_upload() -> Self {
Self::new("NoSuchUpload", "The specified upload does not exist", StatusCode::NOT_FOUND)
}
pub fn invalid_part_number() -> Self {
Self::new("InvalidPartNumber", "Part number must be between 1 and 10000", StatusCode::BAD_REQUEST)
}
pub fn internal_error(msg: &str) -> Self {
Self::new("InternalError", msg, StatusCode::INTERNAL_SERVER_ERROR)
}
pub fn invalid_request(msg: &str) -> Self {
Self::new("InvalidRequest", msg, StatusCode::BAD_REQUEST)
}
pub fn signature_does_not_match() -> Self {
Self::new(
"SignatureDoesNotMatch",
"The request signature we calculated does not match the signature you provided.",
StatusCode::FORBIDDEN,
)
}
pub fn invalid_access_key_id() -> Self {
Self::new(
"InvalidAccessKeyId",
"The AWS Access Key Id you provided does not exist in our records.",
StatusCode::FORBIDDEN,
)
}
pub fn request_time_too_skewed() -> Self {
Self::new(
"RequestTimeTooSkewed",
"The difference between the request time and the current time is too large.",
StatusCode::FORBIDDEN,
)
}
pub fn authorization_header_malformed() -> Self {
Self::new(
"AuthorizationHeaderMalformed",
"The authorization header is malformed.",
StatusCode::BAD_REQUEST,
)
}
pub fn missing_security_header(msg: &str) -> Self {
Self::new("MissingSecurityHeader", msg, StatusCode::BAD_REQUEST)
}
pub fn no_such_bucket_policy() -> Self {
Self::new(
"NoSuchBucketPolicy",
"The bucket policy does not exist.",
StatusCode::NOT_FOUND,
)
}
pub fn malformed_policy(msg: &str) -> Self {
Self::new("MalformedPolicy", msg, StatusCode::BAD_REQUEST)
}
pub fn to_xml(&self) -> String {
format!(
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<Error><Code>{}</Code><Message>{}</Message></Error>",
self.code, self.message
)
}
}

rust/src/server.rs Normal file

@@ -0,0 +1,971 @@
use anyhow::Result;
use bytes::Bytes;
use futures_core::Stream;
use http_body_util::BodyExt;
use hyper::body::Incoming;
use hyper::server::conn::http1;
use hyper::service::service_fn;
use hyper::{Method, Request, Response, StatusCode};
use hyper_util::rt::TokioIo;
use std::collections::HashMap;
use std::net::SocketAddr;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};
use tokio::io::AsyncReadExt;
use tokio::net::TcpListener;
use tokio::sync::watch;
use tokio_util::io::ReaderStream;
use uuid::Uuid;
use crate::action::{self, RequestContext, S3Action};
use crate::auth::{self, AuthenticatedIdentity};
use crate::config::S3Config;
use crate::policy::{self, PolicyDecision, PolicyStore};
use crate::s3_error::S3Error;
use crate::storage::FileStore;
use crate::xml_response;
pub struct S3Server {
store: Arc<FileStore>,
shutdown_tx: watch::Sender<bool>,
server_handle: tokio::task::JoinHandle<()>,
}
impl S3Server {
pub async fn start(config: S3Config) -> Result<Self> {
let store = Arc::new(FileStore::new(config.storage.directory.clone().into()));
// Initialize or reset storage
if config.storage.clean_slate {
store.reset().await?;
} else {
store.initialize().await?;
}
// Initialize policy store
let policy_store = Arc::new(PolicyStore::new(store.policies_dir()));
policy_store.load_from_disk().await?;
let addr: SocketAddr = format!("{}:{}", config.address(), config.server.port)
.parse()?;
let listener = TcpListener::bind(addr).await?;
let (shutdown_tx, shutdown_rx) = watch::channel(false);
let server_store = store.clone();
let server_config = config.clone();
let server_policy_store = policy_store.clone();
let server_handle = tokio::spawn(async move {
loop {
let mut rx = shutdown_rx.clone();
tokio::select! {
result = listener.accept() => {
match result {
Ok((stream, _remote_addr)) => {
let io = TokioIo::new(stream);
let store = server_store.clone();
let cfg = server_config.clone();
let ps = server_policy_store.clone();
tokio::spawn(async move {
let svc = service_fn(move |req: Request<Incoming>| {
let store = store.clone();
let cfg = cfg.clone();
let ps = ps.clone();
async move {
handle_request(req, store, cfg, ps).await
}
});
if let Err(e) = http1::Builder::new()
.keep_alive(true)
.serve_connection(io, svc)
.await
{
if !e.is_incomplete_message() {
tracing::error!("Connection error: {}", e);
}
}
});
}
Err(e) => {
tracing::error!("Accept error: {}", e);
}
}
}
_ = rx.changed() => {
break;
}
}
}
});
if !config.server.silent {
tracing::info!("S3 server listening on {}", addr);
}
Ok(Self {
store,
shutdown_tx,
server_handle,
})
}
pub async fn stop(self) {
let _ = self.shutdown_tx.send(true);
let _ = self.server_handle.await;
}
pub fn store(&self) -> &FileStore {
&self.store
}
}
impl S3Config {
fn address(&self) -> &str {
&self.server.address
}
}
// ============================
// Request handling
// ============================
type BoxBody = http_body_util::combinators::BoxBody<Bytes, Box<dyn std::error::Error + Send + Sync>>;
fn full_body(data: impl Into<Bytes>) -> BoxBody {
http_body_util::Full::new(data.into())
.map_err(|never: std::convert::Infallible| -> Box<dyn std::error::Error + Send + Sync> { match never {} })
.boxed()
}
fn empty_body() -> BoxBody {
http_body_util::Empty::new()
.map_err(|never: std::convert::Infallible| -> Box<dyn std::error::Error + Send + Sync> { match never {} })
.boxed()
}
fn stream_body(reader: tokio::fs::File, content_length: u64) -> BoxBody {
let stream = ReaderStream::with_capacity(reader.take(content_length), 64 * 1024);
let mapped = FrameStream { inner: stream };
http_body_util::StreamBody::new(mapped).boxed()
}
/// Adapter that converts ReaderStream into a Stream of Frame<Bytes>
struct FrameStream {
inner: ReaderStream<tokio::io::Take<tokio::fs::File>>,
}
impl Stream for FrameStream {
type Item = Result<hyper::body::Frame<Bytes>, Box<dyn std::error::Error + Send + Sync>>;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let inner = unsafe { self.map_unchecked_mut(|s| &mut s.inner) };
match inner.poll_next(cx) {
Poll::Ready(Some(Ok(bytes))) => {
Poll::Ready(Some(Ok(hyper::body::Frame::data(bytes))))
}
Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(Box::new(e) as Box<dyn std::error::Error + Send + Sync>))),
Poll::Ready(None) => Poll::Ready(None),
Poll::Pending => Poll::Pending,
}
}
}
fn xml_response(status: StatusCode, xml: String, request_id: &str) -> Response<BoxBody> {
Response::builder()
.status(status)
.header("content-type", "application/xml")
.header("x-amz-request-id", request_id)
.body(full_body(xml))
.unwrap()
}
fn empty_response(status: StatusCode, request_id: &str) -> Response<BoxBody> {
Response::builder()
.status(status)
.header("x-amz-request-id", request_id)
.body(empty_body())
.unwrap()
}
fn s3_error_response(err: &S3Error, request_id: &str) -> Response<BoxBody> {
let xml = err.to_xml();
Response::builder()
.status(err.status)
.header("content-type", "application/xml")
.header("x-amz-request-id", request_id)
.body(full_body(xml))
.unwrap()
}
async fn handle_request(
req: Request<Incoming>,
store: Arc<FileStore>,
config: S3Config,
policy_store: Arc<PolicyStore>,
) -> Result<Response<BoxBody>, std::convert::Infallible> {
let request_id = Uuid::new_v4().to_string();
let method = req.method().clone();
let uri = req.uri().clone();
let start = std::time::Instant::now();
// Handle CORS preflight
if config.cors.enabled && method == Method::OPTIONS {
let resp = build_cors_preflight(&config, &request_id);
return Ok(resp);
}
// Step 1: Resolve S3 action from request
let request_ctx = action::resolve_action(&req);
// Step 2: Auth + policy pipeline
if config.auth.enabled {
// Attempt authentication
let identity = {
let has_auth_header = req
.headers()
.get("authorization")
.and_then(|v| v.to_str().ok())
.map(|s| !s.is_empty())
.unwrap_or(false);
if has_auth_header {
match auth::verify_request(&req, &config) {
Ok(id) => Some(id),
Err(e) => {
tracing::warn!("Auth failed: {}", e.message);
return Ok(s3_error_response(&e, &request_id));
}
}
} else {
None // Anonymous request
}
};
// Step 3: Authorization (policy evaluation)
if let Err(e) = authorize_request(&request_ctx, identity.as_ref(), &policy_store).await {
return Ok(s3_error_response(&e, &request_id));
}
}
// Route and handle
let mut response = match route_request(req, store, &config, &request_id, &policy_store).await {
Ok(resp) => resp,
Err(err) => {
if let Some(s3err) = err.downcast_ref::<S3Error>() {
s3_error_response(s3err, &request_id)
} else {
tracing::error!("Internal error: {}", err);
let s3err = S3Error::internal_error(&err.to_string());
s3_error_response(&s3err, &request_id)
}
}
};
// Add CORS headers if enabled
if config.cors.enabled {
add_cors_headers(response.headers_mut(), &config);
}
let duration = start.elapsed();
tracing::info!(
method = %method,
path = %uri.path(),
status = %response.status().as_u16(),
duration_ms = %duration.as_millis(),
"request"
);
Ok(response)
}
/// Authorize a request based on bucket policies and authentication state.
async fn authorize_request(
ctx: &RequestContext,
identity: Option<&AuthenticatedIdentity>,
policy_store: &PolicyStore,
) -> Result<(), S3Error> {
// ListAllMyBuckets requires authentication (no bucket to apply policy to)
if ctx.action == S3Action::ListAllMyBuckets {
if identity.is_none() {
return Err(S3Error::access_denied());
}
return Ok(());
}
// If there's a bucket, check its policy
if let Some(ref bucket) = ctx.bucket {
if let Some(bucket_policy) = policy_store.get_policy(bucket).await {
let decision = policy::evaluate_policy(&bucket_policy, ctx, identity);
match decision {
PolicyDecision::Deny => return Err(S3Error::access_denied()),
PolicyDecision::Allow => return Ok(()),
PolicyDecision::NoOpinion => {
// Fall through to default behavior
}
}
}
}
// Default: authenticated users get full access, anonymous denied
if identity.is_none() {
return Err(S3Error::access_denied());
}
Ok(())
}
// ============================
// Routing
// ============================
async fn route_request(
req: Request<Incoming>,
store: Arc<FileStore>,
_config: &S3Config,
request_id: &str,
policy_store: &Arc<PolicyStore>,
) -> Result<Response<BoxBody>> {
let method = req.method().clone();
let path = req.uri().path().to_string();
let query_string = req.uri().query().unwrap_or("").to_string();
let query = parse_query(&query_string);
// Parse path: /, /{bucket}, /{bucket}/{key...}
let segments: Vec<&str> = path
.trim_start_matches('/')
.splitn(2, '/')
.filter(|s| !s.is_empty())
.collect();
match segments.len() {
0 => {
// Root: GET / -> ListBuckets
match method {
Method::GET => handle_list_buckets(store, request_id).await,
_ => Ok(empty_response(StatusCode::METHOD_NOT_ALLOWED, request_id)),
}
}
1 => {
// Bucket level: /{bucket}
let bucket = percent_decode(segments[0]);
// Check for ?policy query parameter
if query.contains_key("policy") {
return match method {
Method::GET => handle_get_bucket_policy(policy_store, &bucket, request_id).await,
Method::PUT => handle_put_bucket_policy(req, &store, policy_store, &bucket, request_id).await,
Method::DELETE => handle_delete_bucket_policy(policy_store, &bucket, request_id).await,
_ => Ok(empty_response(StatusCode::METHOD_NOT_ALLOWED, request_id)),
};
}
match method {
Method::GET => {
if query.contains_key("uploads") {
handle_list_multipart_uploads(store, &bucket, request_id).await
} else {
handle_list_objects(store, &bucket, &query, request_id).await
}
}
Method::PUT => handle_create_bucket(store, &bucket, request_id).await,
Method::DELETE => handle_delete_bucket(store, &bucket, request_id, policy_store).await,
Method::HEAD => handle_head_bucket(store, &bucket, request_id).await,
_ => Ok(empty_response(StatusCode::METHOD_NOT_ALLOWED, request_id)),
}
}
2 => {
// Object level: /{bucket}/{key...}
let bucket = percent_decode(segments[0]);
let key = percent_decode(segments[1]);
match method {
Method::PUT => {
if query.contains_key("partNumber") && query.contains_key("uploadId") {
handle_upload_part(req, store, &query, request_id).await
} else if req.headers().contains_key("x-amz-copy-source") {
handle_copy_object(req, store, &bucket, &key, request_id).await
} else {
handle_put_object(req, store, &bucket, &key, request_id).await
}
}
Method::GET => {
handle_get_object(req, store, &bucket, &key, request_id).await
}
Method::HEAD => {
handle_head_object(store, &bucket, &key, request_id).await
}
Method::DELETE => {
if query.contains_key("uploadId") {
let upload_id = query.get("uploadId").unwrap();
handle_abort_multipart(store, upload_id, request_id).await
} else {
handle_delete_object(store, &bucket, &key, request_id).await
}
}
Method::POST => {
if query.contains_key("uploads") {
handle_initiate_multipart(req, store, &bucket, &key, request_id).await
} else if query.contains_key("uploadId") {
let upload_id = query.get("uploadId").unwrap().clone();
handle_complete_multipart(req, store, &bucket, &key, &upload_id, request_id).await
} else {
let err = S3Error::invalid_request("Invalid POST request");
Ok(s3_error_response(&err, request_id))
}
}
_ => Ok(empty_response(StatusCode::METHOD_NOT_ALLOWED, request_id)),
}
}
_ => Ok(empty_response(StatusCode::BAD_REQUEST, request_id)),
}
}
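// Routing summary (for orientation; derived from route_request above):
//   GET    /                                  -> ListBuckets
//   GET    /{bucket}                          -> ListObjects (v1/v2 via list-type)
//   GET    /{bucket}?uploads                  -> ListMultipartUploads
//   GET|PUT|DELETE /{bucket}?policy           -> bucket policy get/put/delete
//   PUT    /{bucket}                          -> CreateBucket
//   DELETE /{bucket}                          -> DeleteBucket (+ policy cleanup)
//   HEAD   /{bucket}                          -> HeadBucket
//   PUT    /{bucket}/{key}                    -> PutObject / CopyObject / UploadPart
//   GET    /{bucket}/{key}                    -> GetObject (Range supported)
//   HEAD   /{bucket}/{key}                    -> HeadObject
//   DELETE /{bucket}/{key}[?uploadId]         -> DeleteObject / AbortMultipart
//   POST   /{bucket}/{key}?uploads|uploadId   -> Initiate/CompleteMultipart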
// ============================
// Handlers
// ============================
async fn handle_list_buckets(
store: Arc<FileStore>,
request_id: &str,
) -> Result<Response<BoxBody>> {
let buckets = store.list_buckets().await?;
let xml = xml_response::list_buckets_xml(&buckets);
Ok(xml_response(StatusCode::OK, xml, request_id))
}
async fn handle_create_bucket(
store: Arc<FileStore>,
bucket: &str,
request_id: &str,
) -> Result<Response<BoxBody>> {
store.create_bucket(bucket).await?;
Ok(empty_response(StatusCode::OK, request_id))
}
async fn handle_delete_bucket(
store: Arc<FileStore>,
bucket: &str,
request_id: &str,
policy_store: &Arc<PolicyStore>,
) -> Result<Response<BoxBody>> {
store.delete_bucket(bucket).await?;
// Clean up bucket policy on deletion
let _ = policy_store.delete_policy(bucket).await;
Ok(empty_response(StatusCode::NO_CONTENT, request_id))
}
async fn handle_head_bucket(
store: Arc<FileStore>,
bucket: &str,
request_id: &str,
) -> Result<Response<BoxBody>> {
if store.bucket_exists(bucket).await {
Ok(empty_response(StatusCode::OK, request_id))
} else {
Err(S3Error::no_such_bucket().into())
}
}
async fn handle_list_objects(
store: Arc<FileStore>,
bucket: &str,
query: &HashMap<String, String>,
request_id: &str,
) -> Result<Response<BoxBody>> {
let prefix = query.get("prefix").map(|s| s.as_str()).unwrap_or("");
let delimiter = query.get("delimiter").map(|s| s.as_str()).unwrap_or("");
let max_keys = query
.get("max-keys")
.and_then(|s| s.parse().ok())
.unwrap_or(1000usize);
let continuation_token = query.get("continuation-token").map(|s| s.as_str());
let is_v2 = query.get("list-type").map(|s| s.as_str()) == Some("2");
let result = store
.list_objects(bucket, prefix, delimiter, max_keys, continuation_token)
.await?;
let xml = if is_v2 {
xml_response::list_objects_v2_xml(bucket, &result)
} else {
xml_response::list_objects_v1_xml(bucket, &result)
};
Ok(xml_response(StatusCode::OK, xml, request_id))
}
async fn handle_put_object(
req: Request<Incoming>,
store: Arc<FileStore>,
bucket: &str,
key: &str,
request_id: &str,
) -> Result<Response<BoxBody>> {
let metadata = extract_metadata(req.headers());
let body = req.into_body();
let result = store.put_object(bucket, key, body, metadata).await?;
let resp = Response::builder()
.status(StatusCode::OK)
.header("ETag", format!("\"{}\"", result.md5))
.header("x-amz-request-id", request_id)
.body(empty_body())
.unwrap();
Ok(resp)
}
async fn handle_get_object(
req: Request<Incoming>,
store: Arc<FileStore>,
bucket: &str,
key: &str,
request_id: &str,
) -> Result<Response<BoxBody>> {
// Parse Range header
let range = parse_range_header(req.headers());
let result = store.get_object(bucket, key, range).await?;
let content_type = result
.metadata
.get("content-type")
.cloned()
.unwrap_or_else(|| "binary/octet-stream".to_string());
let mut builder = Response::builder()
.header("ETag", format!("\"{}\"", result.md5))
.header("Last-Modified", result.last_modified.format("%a, %d %b %Y %H:%M:%S GMT").to_string())
.header("Content-Type", &content_type)
.header("Accept-Ranges", "bytes")
.header("x-amz-request-id", request_id);
// Add custom metadata headers
for (k, v) in &result.metadata {
if k.starts_with("x-amz-meta-") {
builder = builder.header(k.as_str(), v.as_str());
}
}
if let Some((start, _)) = range {
// The storage layer clamps open-ended ranges (u64::MAX sentinel) to the
// object size, so use the content length it computed rather than
// re-deriving it from the raw header value.
let content_length = result.content_length;
let end = start + content_length - 1;
let resp = builder
.status(StatusCode::PARTIAL_CONTENT)
.header("Content-Length", content_length.to_string())
.header(
"Content-Range",
format!("bytes {}-{}/{}", start, end, result.size),
)
.body(stream_body(result.body, content_length))
.unwrap();
Ok(resp)
} else {
let resp = builder
.status(StatusCode::OK)
.header("Content-Length", result.size.to_string())
.body(stream_body(result.body, result.content_length))
.unwrap();
Ok(resp)
}
}
async fn handle_head_object(
store: Arc<FileStore>,
bucket: &str,
key: &str,
request_id: &str,
) -> Result<Response<BoxBody>> {
let result = store.head_object(bucket, key).await?;
let content_type = result
.metadata
.get("content-type")
.cloned()
.unwrap_or_else(|| "binary/octet-stream".to_string());
let mut builder = Response::builder()
.status(StatusCode::OK)
.header("ETag", format!("\"{}\"", result.md5))
.header("Last-Modified", result.last_modified.format("%a, %d %b %Y %H:%M:%S GMT").to_string())
.header("Content-Type", &content_type)
.header("Content-Length", result.size.to_string())
.header("Accept-Ranges", "bytes")
.header("x-amz-request-id", request_id);
for (k, v) in &result.metadata {
if k.starts_with("x-amz-meta-") {
builder = builder.header(k.as_str(), v.as_str());
}
}
Ok(builder.body(empty_body()).unwrap())
}
async fn handle_delete_object(
store: Arc<FileStore>,
bucket: &str,
key: &str,
request_id: &str,
) -> Result<Response<BoxBody>> {
store.delete_object(bucket, key).await?;
Ok(empty_response(StatusCode::NO_CONTENT, request_id))
}
async fn handle_copy_object(
req: Request<Incoming>,
store: Arc<FileStore>,
dest_bucket: &str,
dest_key: &str,
request_id: &str,
) -> Result<Response<BoxBody>> {
let copy_source = req
.headers()
.get("x-amz-copy-source")
.and_then(|v| v.to_str().ok())
.unwrap_or("")
.to_string();
let metadata_directive = req
.headers()
.get("x-amz-metadata-directive")
.and_then(|v| v.to_str().ok())
.unwrap_or("COPY")
.to_uppercase();
// Parse source: /bucket/key or bucket/key
let source = copy_source.trim_start_matches('/');
let first_slash = source.find('/').unwrap_or(source.len());
let src_bucket = percent_decode(&source[..first_slash]);
let src_key = if first_slash < source.len() {
percent_decode(&source[first_slash + 1..])
} else {
String::new()
};
let new_metadata = if metadata_directive == "REPLACE" {
Some(extract_metadata(req.headers()))
} else {
None
};
let result = store
.copy_object(&src_bucket, &src_key, dest_bucket, dest_key, &metadata_directive, new_metadata)
.await?;
let xml = xml_response::copy_object_result_xml(&result.md5, &result.last_modified.to_rfc3339());
Ok(xml_response(StatusCode::OK, xml, request_id))
}
// ============================
// Policy handlers
// ============================
async fn handle_get_bucket_policy(
policy_store: &Arc<PolicyStore>,
bucket: &str,
request_id: &str,
) -> Result<Response<BoxBody>> {
match policy_store.get_policy(bucket).await {
Some(p) => {
let json = serde_json::to_string_pretty(&p)?;
let resp = Response::builder()
.status(StatusCode::OK)
.header("content-type", "application/json")
.header("x-amz-request-id", request_id)
.body(full_body(json))
.unwrap();
Ok(resp)
}
None => Err(S3Error::no_such_bucket_policy().into()),
}
}
async fn handle_put_bucket_policy(
req: Request<Incoming>,
store: &Arc<FileStore>,
policy_store: &Arc<PolicyStore>,
bucket: &str,
request_id: &str,
) -> Result<Response<BoxBody>> {
// Verify bucket exists
if !store.bucket_exists(bucket).await {
return Err(S3Error::no_such_bucket().into());
}
// Read body
let body_bytes = req.collect().await.map_err(|e| anyhow::anyhow!("Body error: {}", e))?.to_bytes();
let body_str = String::from_utf8_lossy(&body_bytes);
// Validate and parse
let validated_policy = policy::validate_policy(&body_str)?;
// Store
policy_store
.put_policy(bucket, validated_policy)
.await
.map_err(|e| S3Error::internal_error(&e.to_string()))?;
Ok(empty_response(StatusCode::NO_CONTENT, request_id))
}
async fn handle_delete_bucket_policy(
policy_store: &Arc<PolicyStore>,
bucket: &str,
request_id: &str,
) -> Result<Response<BoxBody>> {
policy_store
.delete_policy(bucket)
.await
.map_err(|e| S3Error::internal_error(&e.to_string()))?;
Ok(empty_response(StatusCode::NO_CONTENT, request_id))
}
// ============================
// Multipart handlers
// ============================
async fn handle_initiate_multipart(
req: Request<Incoming>,
store: Arc<FileStore>,
bucket: &str,
key: &str,
request_id: &str,
) -> Result<Response<BoxBody>> {
let metadata = extract_metadata(req.headers());
let upload_id = store.initiate_multipart(bucket, key, metadata).await?;
let xml = xml_response::initiate_multipart_xml(bucket, key, &upload_id);
Ok(xml_response(StatusCode::OK, xml, request_id))
}
async fn handle_upload_part(
req: Request<Incoming>,
store: Arc<FileStore>,
query: &HashMap<String, String>,
request_id: &str,
) -> Result<Response<BoxBody>> {
let upload_id = query.get("uploadId").unwrap();
let part_number: u32 = query
.get("partNumber")
.and_then(|s| s.parse().ok())
.unwrap_or(0);
if part_number < 1 || part_number > 10000 {
return Err(S3Error::invalid_part_number().into());
}
let body = req.into_body();
let (etag, _size) = store.upload_part(upload_id, part_number, body).await?;
let resp = Response::builder()
.status(StatusCode::OK)
.header("ETag", format!("\"{}\"", etag))
.header("x-amz-request-id", request_id)
.body(empty_body())
.unwrap();
Ok(resp)
}
async fn handle_complete_multipart(
req: Request<Incoming>,
store: Arc<FileStore>,
bucket: &str,
key: &str,
upload_id: &str,
request_id: &str,
) -> Result<Response<BoxBody>> {
// Read request body (XML)
let body_bytes = req.collect().await.map_err(|e| anyhow::anyhow!("Body error: {}", e))?.to_bytes();
let body_str = String::from_utf8_lossy(&body_bytes);
// Parse parts from the XML body via lightweight string scanning
let parts = parse_complete_multipart_xml(&body_str);
let result = store.complete_multipart(upload_id, &parts).await?;
let xml = xml_response::complete_multipart_xml(bucket, key, &result.etag);
Ok(xml_response(StatusCode::OK, xml, request_id))
}
async fn handle_abort_multipart(
store: Arc<FileStore>,
upload_id: &str,
request_id: &str,
) -> Result<Response<BoxBody>> {
store.abort_multipart(upload_id).await?;
Ok(empty_response(StatusCode::NO_CONTENT, request_id))
}
async fn handle_list_multipart_uploads(
store: Arc<FileStore>,
bucket: &str,
request_id: &str,
) -> Result<Response<BoxBody>> {
let uploads = store.list_multipart_uploads(bucket).await?;
let xml = xml_response::list_multipart_uploads_xml(bucket, &uploads);
Ok(xml_response(StatusCode::OK, xml, request_id))
}
// ============================
// Helpers
// ============================
fn parse_query(query_string: &str) -> HashMap<String, String> {
let mut map = HashMap::new();
if query_string.is_empty() {
return map;
}
for pair in query_string.split('&') {
let mut parts = pair.splitn(2, '=');
let key = parts.next().unwrap_or("");
let value = parts.next().unwrap_or("");
let key = percent_decode(key);
let value = percent_decode(value);
map.insert(key, value);
}
map
}
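// Illustrative sketch: parse_query percent-decodes both sides of each pair,
// and a bare flag such as "?uploads" becomes a key with an empty value, which
// is exactly what the router's contains_key() checks rely on.
#[cfg(test)]
mod parse_query_tests {
use super::parse_query;
#[test]
fn decodes_pairs_and_bare_flags() {
let q = parse_query("prefix=a%2Fb&uploads&max-keys=10");
assert_eq!(q.get("prefix").map(String::as_str), Some("a/b"));
assert_eq!(q.get("uploads").map(String::as_str), Some(""));
assert_eq!(q.get("max-keys").map(String::as_str), Some("10"));
}
}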
fn percent_decode(s: &str) -> String {
percent_encoding::percent_decode_str(s)
.decode_utf8_lossy()
.to_string()
}
fn extract_metadata(headers: &hyper::HeaderMap) -> HashMap<String, String> {
let mut metadata = HashMap::new();
for (name, value) in headers {
let name_str = name.as_str().to_lowercase();
if let Ok(val) = value.to_str() {
match name_str.as_str() {
"content-type" | "cache-control" | "content-disposition"
| "content-encoding" | "content-language" | "expires" => {
metadata.insert(name_str, val.to_string());
}
_ if name_str.starts_with("x-amz-meta-") => {
metadata.insert(name_str, val.to_string());
}
_ => {}
}
}
}
// Default content-type
if !metadata.contains_key("content-type") {
metadata.insert("content-type".to_string(), "binary/octet-stream".to_string());
}
metadata
}
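// Illustrative sketch: only the well-known representation headers and
// x-amz-meta-* keys survive into the metadata sidecar; everything else
// (auth headers, transport headers) is dropped, and content-type gets a
// default when absent.
#[cfg(test)]
mod extract_metadata_tests {
use super::extract_metadata;
#[test]
fn keeps_known_headers_and_user_metadata() {
let mut headers = hyper::HeaderMap::new();
headers.insert("content-type", "text/plain".parse().unwrap());
headers.insert("x-amz-meta-owner", "alice".parse().unwrap());
headers.insert("authorization", "AWS4-HMAC-SHA256 ...".parse().unwrap());
let meta = extract_metadata(&headers);
assert_eq!(meta.get("content-type").map(String::as_str), Some("text/plain"));
assert_eq!(meta.get("x-amz-meta-owner").map(String::as_str), Some("alice"));
assert!(!meta.contains_key("authorization"));
}
}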
fn parse_range_header(headers: &hyper::HeaderMap) -> Option<(u64, u64)> {
let range_val = headers.get("range")?.to_str().ok()?;
let bytes_prefix = "bytes=";
if !range_val.starts_with(bytes_prefix) {
return None;
}
let range_spec = &range_val[bytes_prefix.len()..];
let mut parts = range_spec.splitn(2, '-');
let start: u64 = parts.next()?.parse().ok()?;
let end_str = parts.next()?;
let end: u64 = if end_str.is_empty() {
// If no end specified, we'll handle this later based on file size
u64::MAX
} else {
end_str.parse().ok()?
};
Some((start, end))
}
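// Illustrative sketch: closed ranges parse to (start, end); open-ended ranges
// ("bytes=100-") use u64::MAX as a sentinel that the storage layer clamps to
// the object size; non-byte units are ignored.
#[cfg(test)]
mod parse_range_tests {
use super::parse_range_header;
#[test]
fn parses_closed_and_open_ranges() {
let mut headers = hyper::HeaderMap::new();
headers.insert("range", "bytes=0-99".parse().unwrap());
assert_eq!(parse_range_header(&headers), Some((0, 99)));
headers.insert("range", "bytes=100-".parse().unwrap());
assert_eq!(parse_range_header(&headers), Some((100, u64::MAX)));
headers.insert("range", "items=0-1".parse().unwrap());
assert_eq!(parse_range_header(&headers), None);
}
}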
fn parse_complete_multipart_xml(xml: &str) -> Vec<(u32, String)> {
let mut parts = Vec::new();
// Simple XML parsing for <Part><PartNumber>N</PartNumber><ETag>...</ETag></Part>
let mut remaining = xml;
while let Some(part_start) = remaining.find("<Part>") {
let after_part = &remaining[part_start + 6..];
if let Some(part_end) = after_part.find("</Part>") {
let part_content = &after_part[..part_end];
let part_number = extract_xml_value(part_content, "PartNumber")
.and_then(|s| s.parse::<u32>().ok());
let etag = extract_xml_value(part_content, "ETag")
.map(|s| s.replace('"', ""));
if let (Some(pn), Some(et)) = (part_number, etag) {
parts.push((pn, et));
}
remaining = &after_part[part_end + 7..];
} else {
break;
}
}
parts.sort_by_key(|(pn, _)| *pn);
parts
}
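// Illustrative sketch: the scanner pulls out (PartNumber, ETag) pairs, strips
// quotes from ETags, and sorts by part number regardless of client ordering.
#[cfg(test)]
mod complete_multipart_xml_tests {
use super::parse_complete_multipart_xml;
#[test]
fn extracts_and_sorts_parts() {
let xml = "<CompleteMultipartUpload>\
<Part><PartNumber>2</PartNumber><ETag>\"bbb\"</ETag></Part>\
<Part><PartNumber>1</PartNumber><ETag>\"aaa\"</ETag></Part>\
</CompleteMultipartUpload>";
let parts = parse_complete_multipart_xml(xml);
assert_eq!(parts, vec![(1, "aaa".to_string()), (2, "bbb".to_string())]);
}
}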
fn extract_xml_value(xml: &str, tag: &str) -> Option<String> {
let open = format!("<{}>", tag);
let close = format!("</{}>", tag);
let start = xml.find(&open)? + open.len();
let end = xml.find(&close)?;
Some(xml[start..end].to_string())
}
// ============================
// CORS
// ============================
fn build_cors_preflight(config: &S3Config, request_id: &str) -> Response<BoxBody> {
let mut builder = Response::builder()
.status(StatusCode::NO_CONTENT)
.header("x-amz-request-id", request_id);
if let Some(ref origins) = config.cors.allowed_origins {
builder = builder.header("Access-Control-Allow-Origin", origins.join(", "));
}
if let Some(ref methods) = config.cors.allowed_methods {
builder = builder.header("Access-Control-Allow-Methods", methods.join(", "));
}
if let Some(ref headers) = config.cors.allowed_headers {
builder = builder.header("Access-Control-Allow-Headers", headers.join(", "));
}
if let Some(max_age) = config.cors.max_age {
builder = builder.header("Access-Control-Max-Age", max_age.to_string());
}
if config.cors.allow_credentials == Some(true) {
builder = builder.header("Access-Control-Allow-Credentials", "true");
}
builder.body(empty_body()).unwrap()
}
fn add_cors_headers(headers: &mut hyper::HeaderMap, config: &S3Config) {
if let Some(ref origins) = config.cors.allowed_origins {
headers.insert(
"access-control-allow-origin",
origins.join(", ").parse().unwrap(),
);
}
if let Some(ref exposed) = config.cors.exposed_headers {
headers.insert(
"access-control-expose-headers",
exposed.join(", ").parse().unwrap(),
);
}
if config.cors.allow_credentials == Some(true) {
headers.insert(
"access-control-allow-credentials",
"true".parse().unwrap(),
);
}
}

rust/src/storage.rs Normal file

@@ -0,0 +1,838 @@
use anyhow::Result;
use chrono::{DateTime, Utc};
use http_body_util::BodyExt;
use hyper::body::Incoming;
use md5::{Digest, Md5};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use tokio::fs;
use tokio::io::{AsyncReadExt, AsyncSeekExt, AsyncWriteExt, BufWriter};
use uuid::Uuid;
use crate::s3_error::S3Error;
// ============================
// Result types
// ============================
pub struct PutResult {
pub md5: String,
}
pub struct GetResult {
pub size: u64,
pub last_modified: DateTime<Utc>,
pub md5: String,
pub metadata: HashMap<String, String>,
pub body: tokio::fs::File,
pub content_length: u64,
}
pub struct HeadResult {
pub size: u64,
pub last_modified: DateTime<Utc>,
pub md5: String,
pub metadata: HashMap<String, String>,
}
pub struct CopyResult {
pub md5: String,
pub last_modified: DateTime<Utc>,
}
pub struct ListObjectEntry {
pub key: String,
pub size: u64,
pub last_modified: DateTime<Utc>,
pub md5: String,
}
pub struct ListObjectsResult {
pub contents: Vec<ListObjectEntry>,
pub common_prefixes: Vec<String>,
pub is_truncated: bool,
pub next_continuation_token: Option<String>,
pub prefix: String,
pub delimiter: String,
pub max_keys: usize,
}
pub struct BucketInfo {
pub name: String,
pub creation_date: DateTime<Utc>,
}
pub struct MultipartUploadInfo {
pub upload_id: String,
pub key: String,
pub initiated: DateTime<Utc>,
}
pub struct CompleteMultipartResult {
pub etag: String,
}
// ============================
// Multipart metadata (disk format, compatible with TS)
// ============================
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct MultipartMetadata {
upload_id: String,
bucket: String,
key: String,
initiated: String,
metadata: HashMap<String, String>,
parts: Vec<PartMetadata>,
}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct PartMetadata {
part_number: u32,
etag: String,
size: u64,
last_modified: String,
}
// ============================
// FileStore
// ============================
pub struct FileStore {
root_dir: PathBuf,
}
impl FileStore {
pub fn new(root_dir: PathBuf) -> Self {
Self { root_dir }
}
pub async fn initialize(&self) -> Result<()> {
fs::create_dir_all(&self.root_dir).await?;
fs::create_dir_all(self.policies_dir()).await?;
Ok(())
}
pub fn policies_dir(&self) -> PathBuf {
self.root_dir.join(".policies")
}
pub async fn reset(&self) -> Result<()> {
if self.root_dir.exists() {
fs::remove_dir_all(&self.root_dir).await?;
}
fs::create_dir_all(&self.root_dir).await?;
fs::create_dir_all(self.policies_dir()).await?;
Ok(())
}
// ============================
// Bucket operations
// ============================
pub async fn list_buckets(&self) -> Result<Vec<BucketInfo>> {
let mut buckets = Vec::new();
let mut entries = fs::read_dir(&self.root_dir).await?;
while let Some(entry) = entries.next_entry().await? {
let meta = entry.metadata().await?;
if meta.is_dir() {
let name = entry.file_name().to_string_lossy().to_string();
// Skip hidden dirs like .multipart
if name.starts_with('.') {
continue;
}
let creation_date: DateTime<Utc> = meta
.created()
.unwrap_or(meta.modified().unwrap_or(std::time::SystemTime::UNIX_EPOCH))
.into();
buckets.push(BucketInfo {
name,
creation_date,
});
}
}
buckets.sort_by(|a, b| a.name.cmp(&b.name));
Ok(buckets)
}
pub async fn bucket_exists(&self, bucket: &str) -> bool {
self.root_dir.join(bucket).is_dir()
}
pub async fn create_bucket(&self, bucket: &str) -> Result<()> {
let bucket_path = self.root_dir.join(bucket);
fs::create_dir_all(&bucket_path).await?;
Ok(())
}
pub async fn delete_bucket(&self, bucket: &str) -> Result<()> {
let bucket_path = self.root_dir.join(bucket);
if !bucket_path.is_dir() {
return Err(S3Error::no_such_bucket().into());
}
// Refuse to delete a non-empty bucket (any entry counts, including sidecar files)
let mut entries = fs::read_dir(&bucket_path).await?;
while let Some(_entry) = entries.next_entry().await? {
return Err(S3Error::bucket_not_empty().into());
}
fs::remove_dir_all(&bucket_path).await?;
Ok(())
}
// ============================
// Object operations
// ============================
pub async fn put_object(
&self,
bucket: &str,
key: &str,
body: Incoming,
metadata: HashMap<String, String>,
) -> Result<PutResult> {
if !self.bucket_exists(bucket).await {
return Err(S3Error::no_such_bucket().into());
}
let object_path = self.object_path(bucket, key);
if let Some(parent) = object_path.parent() {
fs::create_dir_all(parent).await?;
}
let file = fs::File::create(&object_path).await?;
let mut writer = BufWriter::new(file);
let mut hasher = Md5::new();
// Stream body frames directly to file
let mut body = body;
loop {
match body.frame().await {
Some(Ok(frame)) => {
if let Ok(data) = frame.into_data() {
hasher.update(&data);
writer.write_all(&data).await?;
}
}
Some(Err(e)) => {
return Err(anyhow::anyhow!("Body read error: {}", e));
}
None => break,
}
}
writer.flush().await?;
drop(writer);
let md5_hex = format!("{:x}", hasher.finalize());
// Write MD5 sidecar
let md5_path = format!("{}.md5", object_path.display());
fs::write(&md5_path, &md5_hex).await?;
// Write metadata sidecar
let metadata_path = format!("{}.metadata.json", object_path.display());
let metadata_json = serde_json::to_string_pretty(&metadata)?;
fs::write(&metadata_path, metadata_json).await?;
Ok(PutResult {
md5: md5_hex,
})
}
pub async fn get_object(
&self,
bucket: &str,
key: &str,
range: Option<(u64, u64)>,
) -> Result<GetResult> {
let object_path = self.object_path(bucket, key);
if !object_path.exists() {
return Err(S3Error::no_such_key().into());
}
let file_meta = fs::metadata(&object_path).await?;
let size = file_meta.len();
let last_modified: DateTime<Utc> = file_meta.modified()?.into();
let md5 = self.read_md5(&object_path).await;
let metadata = self.read_metadata(&object_path).await;
let mut file = fs::File::open(&object_path).await?;
let content_length = if let Some((start, end)) = range {
// Clamp the open-ended sentinel (u64::MAX) to the last byte, and reject
// ranges that start beyond the end of the object.
let end = end.min(size.saturating_sub(1));
if start > end {
return Err(S3Error::invalid_request("Requested range not satisfiable").into());
}
file.seek(std::io::SeekFrom::Start(start)).await?;
end - start + 1
} else {
size
};
Ok(GetResult {
size,
last_modified,
md5,
metadata,
body: file,
content_length,
})
}
pub async fn head_object(&self, bucket: &str, key: &str) -> Result<HeadResult> {
let object_path = self.object_path(bucket, key);
if !object_path.exists() {
return Err(S3Error::no_such_key().into());
}
// Only stat the file, don't open it
let file_meta = fs::metadata(&object_path).await?;
let size = file_meta.len();
let last_modified: DateTime<Utc> = file_meta.modified()?.into();
let md5 = self.read_md5(&object_path).await;
let metadata = self.read_metadata(&object_path).await;
Ok(HeadResult {
size,
last_modified,
md5,
metadata,
})
}
pub async fn delete_object(&self, bucket: &str, key: &str) -> Result<()> {
let object_path = self.object_path(bucket, key);
let md5_path = format!("{}.md5", object_path.display());
let metadata_path = format!("{}.metadata.json", object_path.display());
// S3 doesn't error if object doesn't exist
let _ = fs::remove_file(&object_path).await;
let _ = fs::remove_file(&md5_path).await;
let _ = fs::remove_file(&metadata_path).await;
// Clean up empty parent directories up to bucket level
let bucket_path = self.root_dir.join(bucket);
let mut current = object_path.parent().map(|p| p.to_path_buf());
while let Some(dir) = current {
if dir == bucket_path {
break;
}
if let Ok(mut entries) = fs::read_dir(&dir).await {
if entries.next_entry().await?.is_none() {
let _ = fs::remove_dir(&dir).await;
} else {
break;
}
}
current = dir.parent().map(|p| p.to_path_buf());
}
Ok(())
}
pub async fn copy_object(
&self,
src_bucket: &str,
src_key: &str,
dest_bucket: &str,
dest_key: &str,
metadata_directive: &str,
new_metadata: Option<HashMap<String, String>>,
) -> Result<CopyResult> {
let src_path = self.object_path(src_bucket, src_key);
let dest_path = self.object_path(dest_bucket, dest_key);
if !src_path.exists() {
return Err(S3Error::no_such_key().into());
}
if !self.bucket_exists(dest_bucket).await {
return Err(S3Error::no_such_bucket().into());
}
if let Some(parent) = dest_path.parent() {
fs::create_dir_all(parent).await?;
}
// Copy object file
fs::copy(&src_path, &dest_path).await?;
// Handle metadata
if metadata_directive == "COPY" {
let src_meta_path = format!("{}.metadata.json", src_path.display());
let dest_meta_path = format!("{}.metadata.json", dest_path.display());
let _ = fs::copy(&src_meta_path, &dest_meta_path).await;
} else if let Some(meta) = new_metadata {
let dest_meta_path = format!("{}.metadata.json", dest_path.display());
let json = serde_json::to_string_pretty(&meta)?;
fs::write(&dest_meta_path, json).await?;
}
// Copy MD5
let src_md5_path = format!("{}.md5", src_path.display());
let dest_md5_path = format!("{}.md5", dest_path.display());
let _ = fs::copy(&src_md5_path, &dest_md5_path).await;
let file_meta = fs::metadata(&dest_path).await?;
let md5 = self.read_md5(&dest_path).await;
let last_modified: DateTime<Utc> = file_meta.modified()?.into();
Ok(CopyResult {
md5,
last_modified,
})
}
pub async fn list_objects(
&self,
bucket: &str,
prefix: &str,
delimiter: &str,
max_keys: usize,
continuation_token: Option<&str>,
) -> Result<ListObjectsResult> {
let bucket_path = self.root_dir.join(bucket);
if !bucket_path.is_dir() {
return Err(S3Error::no_such_bucket().into());
}
// Collect all object keys recursively
let mut keys = Vec::new();
self.collect_keys(&bucket_path, &bucket_path, &mut keys)
.await?;
// Apply prefix filter
if !prefix.is_empty() {
keys.retain(|k| k.starts_with(prefix));
}
keys.sort();
// Handle continuation token
if let Some(token) = continuation_token {
if let Some(pos) = keys.iter().position(|k| k.as_str() > token) {
keys = keys[pos..].to_vec();
} else {
keys.clear();
}
}
// Handle delimiter and pagination
let mut common_prefixes: Vec<String> = Vec::new();
let mut common_prefix_set = std::collections::HashSet::new();
let mut contents: Vec<ListObjectEntry> = Vec::new();
let mut is_truncated = false;
for key in &keys {
if !delimiter.is_empty() {
let remaining = &key[prefix.len()..];
if let Some(delim_idx) = remaining.find(delimiter) {
let cp = format!(
"{}{}",
prefix,
&remaining[..delim_idx + delimiter.len()]
);
if common_prefix_set.insert(cp.clone()) {
common_prefixes.push(cp);
}
continue;
}
}
if contents.len() >= max_keys {
is_truncated = true;
break;
}
let object_path = self.object_path(bucket, key);
if let Ok(meta) = fs::metadata(&object_path).await {
let md5 = self.read_md5(&object_path).await;
let last_modified: DateTime<Utc> = meta.modified().unwrap_or(std::time::SystemTime::UNIX_EPOCH).into();
contents.push(ListObjectEntry {
key: key.clone(),
size: meta.len(),
last_modified,
md5,
});
}
}
let next_continuation_token = if is_truncated {
contents.last().map(|e| e.key.clone())
} else {
None
};
common_prefixes.sort();
Ok(ListObjectsResult {
contents,
common_prefixes,
is_truncated,
next_continuation_token,
prefix: prefix.to_string(),
delimiter: delimiter.to_string(),
max_keys,
})
}
// ============================
// Multipart operations
// ============================
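// On-disk layout (for orientation; derived from the code below):
//   <root>/.multipart/<uploadId>/metadata.json  - MultipartMetadata (camelCase JSON)
//   <root>/.multipart/<uploadId>/part-<N>       - raw bytes of part N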
fn multipart_dir(&self) -> PathBuf {
self.root_dir.join(".multipart")
}
pub async fn initiate_multipart(
&self,
bucket: &str,
key: &str,
metadata: HashMap<String, String>,
) -> Result<String> {
let upload_id = Uuid::new_v4().to_string().replace('-', "");
let upload_dir = self.multipart_dir().join(&upload_id);
fs::create_dir_all(&upload_dir).await?;
let meta = MultipartMetadata {
upload_id: upload_id.clone(),
bucket: bucket.to_string(),
key: key.to_string(),
initiated: Utc::now().to_rfc3339(),
metadata,
parts: Vec::new(),
};
let meta_path = upload_dir.join("metadata.json");
let json = serde_json::to_string_pretty(&meta)?;
fs::write(&meta_path, json).await?;
Ok(upload_id)
}
pub async fn upload_part(
&self,
upload_id: &str,
part_number: u32,
body: Incoming,
) -> Result<(String, u64)> {
let upload_dir = self.multipart_dir().join(upload_id);
if !upload_dir.is_dir() {
return Err(S3Error::no_such_upload().into());
}
let part_path = upload_dir.join(format!("part-{}", part_number));
let file = fs::File::create(&part_path).await?;
let mut writer = BufWriter::new(file);
let mut hasher = Md5::new();
let mut size: u64 = 0;
let mut body = body;
loop {
match body.frame().await {
Some(Ok(frame)) => {
if let Ok(data) = frame.into_data() {
hasher.update(&data);
size += data.len() as u64;
writer.write_all(&data).await?;
}
}
Some(Err(e)) => {
return Err(anyhow::anyhow!("Body read error: {}", e));
}
None => break,
}
}
writer.flush().await?;
drop(writer);
let etag = format!("{:x}", hasher.finalize());
// Update metadata
self.update_multipart_metadata(upload_id, part_number, &etag, size)
.await?;
Ok((etag, size))
}
async fn update_multipart_metadata(
&self,
upload_id: &str,
part_number: u32,
etag: &str,
size: u64,
) -> Result<()> {
let meta_path = self.multipart_dir().join(upload_id).join("metadata.json");
let content = fs::read_to_string(&meta_path).await?;
let mut meta: MultipartMetadata = serde_json::from_str(&content)?;
// Remove existing part with same number
meta.parts.retain(|p| p.part_number != part_number);
meta.parts.push(PartMetadata {
part_number,
etag: etag.to_string(),
size,
last_modified: Utc::now().to_rfc3339(),
});
meta.parts.sort_by_key(|p| p.part_number);
let json = serde_json::to_string_pretty(&meta)?;
fs::write(&meta_path, json).await?;
Ok(())
}
pub async fn complete_multipart(
&self,
upload_id: &str,
parts: &[(u32, String)],
) -> Result<CompleteMultipartResult> {
let upload_dir = self.multipart_dir().join(upload_id);
if !upload_dir.is_dir() {
return Err(S3Error::no_such_upload().into());
}
// Read metadata to get bucket/key
let meta_path = upload_dir.join("metadata.json");
let content = fs::read_to_string(&meta_path).await?;
let meta: MultipartMetadata = serde_json::from_str(&content)?;
let object_path = self.object_path(&meta.bucket, &meta.key);
if let Some(parent) = object_path.parent() {
fs::create_dir_all(parent).await?;
}
// Concatenate parts into final object, stream each part
let dest_file = fs::File::create(&object_path).await?;
let mut writer = BufWriter::new(dest_file);
let mut hasher = Md5::new();
for (part_number, _etag) in parts {
let part_path = upload_dir.join(format!("part-{}", part_number));
if !part_path.exists() {
return Err(anyhow::anyhow!("Part {} not found", part_number));
}
let mut part_file = fs::File::open(&part_path).await?;
let mut buf = vec![0u8; 64 * 1024]; // 64KB buffer
loop {
let n = part_file.read(&mut buf).await?;
if n == 0 {
break;
}
hasher.update(&buf[..n]);
writer.write_all(&buf[..n]).await?;
}
}
writer.flush().await?;
drop(writer);
let etag = format!("{:x}", hasher.finalize());
// Write MD5 sidecar
let md5_path = format!("{}.md5", object_path.display());
fs::write(&md5_path, &etag).await?;
// Write metadata sidecar
let metadata_path = format!("{}.metadata.json", object_path.display());
let metadata_json = serde_json::to_string_pretty(&meta.metadata)?;
fs::write(&metadata_path, metadata_json).await?;
// Clean up multipart directory
let _ = fs::remove_dir_all(&upload_dir).await;
Ok(CompleteMultipartResult {
etag,
})
}
pub async fn abort_multipart(&self, upload_id: &str) -> Result<()> {
let upload_dir = self.multipart_dir().join(upload_id);
if !upload_dir.is_dir() {
return Err(S3Error::no_such_upload().into());
}
fs::remove_dir_all(&upload_dir).await?;
Ok(())
}
pub async fn list_multipart_uploads(
&self,
bucket: &str,
) -> Result<Vec<MultipartUploadInfo>> {
let multipart_dir = self.multipart_dir();
if !multipart_dir.is_dir() {
return Ok(Vec::new());
}
let mut uploads = Vec::new();
let mut entries = fs::read_dir(&multipart_dir).await?;
while let Some(entry) = entries.next_entry().await? {
if !entry.metadata().await?.is_dir() {
continue;
}
let meta_path = entry.path().join("metadata.json");
if let Ok(content) = fs::read_to_string(&meta_path).await {
if let Ok(meta) = serde_json::from_str::<MultipartMetadata>(&content) {
if meta.bucket == bucket {
let initiated = DateTime::parse_from_rfc3339(&meta.initiated)
.map(|dt| dt.with_timezone(&Utc))
.unwrap_or_else(|_| Utc::now());
uploads.push(MultipartUploadInfo {
upload_id: meta.upload_id,
key: meta.key,
initiated,
});
}
}
}
}
Ok(uploads)
}
// ============================
// Helpers
// ============================
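// On-disk layout (for orientation; derived from the code below): each object
// key is stored as three sibling files under its bucket directory:
//   <key>._S3_object                - the object bytes
//   <key>._S3_object.md5            - hex MD5 sidecar
//   <key>._S3_object.metadata.json  - metadata sidecar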
fn object_path(&self, bucket: &str, key: &str) -> PathBuf {
let encoded = encode_key(key);
self.root_dir
.join(bucket)
.join(format!("{}._S3_object", encoded))
}
async fn read_md5(&self, object_path: &Path) -> String {
let md5_path = format!("{}.md5", object_path.display());
match fs::read_to_string(&md5_path).await {
Ok(s) => s.trim().to_string(),
Err(_) => {
// Calculate MD5 if sidecar missing
match self.calculate_md5(object_path).await {
Ok(hash) => {
let _ = fs::write(&md5_path, &hash).await;
hash
}
Err(_) => String::new(),
}
}
}
}
async fn calculate_md5(&self, path: &Path) -> Result<String> {
let mut file = fs::File::open(path).await?;
let mut hasher = Md5::new();
let mut buf = vec![0u8; 64 * 1024];
loop {
let n = file.read(&mut buf).await?;
if n == 0 {
break;
}
hasher.update(&buf[..n]);
}
Ok(format!("{:x}", hasher.finalize()))
}
async fn read_metadata(&self, object_path: &Path) -> HashMap<String, String> {
let meta_path = format!("{}.metadata.json", object_path.display());
match fs::read_to_string(&meta_path).await {
Ok(s) => serde_json::from_str(&s).unwrap_or_default(),
Err(_) => HashMap::new(),
}
}
fn collect_keys<'a>(
&'a self,
bucket_path: &'a Path,
dir: &'a Path,
keys: &'a mut Vec<String>,
) -> std::pin::Pin<Box<dyn std::future::Future<Output = Result<()>> + Send + 'a>> {
Box::pin(async move {
let mut entries = match fs::read_dir(dir).await {
Ok(e) => e,
Err(_) => return Ok(()),
};
while let Some(entry) = entries.next_entry().await? {
let meta = entry.metadata().await?;
let name = entry.file_name().to_string_lossy().to_string();
if meta.is_dir() {
self.collect_keys(bucket_path, &entry.path(), keys).await?;
} else if name.ends_with("._S3_object")
&& !name.ends_with(".metadata.json")
&& !name.ends_with(".md5")
{
let relative = entry
.path()
.strip_prefix(bucket_path)
.unwrap_or(Path::new(""))
.to_string_lossy()
.to_string();
let key = decode_key(relative.trim_end_matches("._S3_object"));
keys.push(key);
}
}
Ok(())
})
}
}
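// Illustrative sketch: the delimiter grouping used by list_objects, isolated
// into a self-contained test. Keys containing the delimiter past the prefix
// collapse into one CommonPrefixes entry; the rest are listed as Contents.
#[cfg(test)]
mod delimiter_grouping_tests {
#[test]
fn groups_keys_under_common_prefixes() {
let keys = ["a/b.txt", "a/c.txt", "d.txt"];
let (prefix, delimiter) = ("", "/");
let mut prefixes = std::collections::BTreeSet::new();
let mut contents = Vec::new();
for key in keys {
let remaining = &key[prefix.len()..];
if let Some(i) = remaining.find(delimiter) {
prefixes.insert(format!("{}{}", prefix, &remaining[..i + delimiter.len()]));
} else {
contents.push(key);
}
}
assert_eq!(prefixes.into_iter().collect::<Vec<_>>(), vec!["a/"]);
assert_eq!(contents, vec!["d.txt"]);
}
}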
// ============================
// Key encoding (identity on Linux)
// ============================
fn encode_key(key: &str) -> String {
if cfg!(windows) {
key.chars()
.map(|c| match c {
'<' | '>' | ':' | '"' | '\\' | '|' | '?' | '*' => {
format!("&{:02x}", c as u32)
}
_ => c.to_string(),
})
.collect()
} else {
key.to_string()
}
}
fn decode_key(encoded: &str) -> String {
if cfg!(windows) {
let mut result = String::new();
let mut chars = encoded.chars();
while let Some(c) = chars.next() {
if c == '&' {
let hex: String = chars.by_ref().take(2).collect();
if let Ok(byte) = u8::from_str_radix(&hex, 16) {
result.push(byte as char);
} else {
result.push('&');
result.push_str(&hex);
}
} else {
result.push(c);
}
}
result
} else {
encoded.to_string()
}
}
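// Illustrative sketch: on Windows, reserved filename characters round-trip
// through the &hh hex escape; on other platforms both functions are the
// identity.
#[cfg(test)]
mod key_encoding_tests {
use super::{decode_key, encode_key};
#[test]
fn round_trips_reserved_characters() {
let key = "reports/2026?draft*.txt";
assert_eq!(decode_key(&encode_key(key)), key);
if !cfg!(windows) {
assert_eq!(encode_key(key), key); // identity outside Windows
}
}
}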

rust/src/xml_response.rs Normal file

@@ -0,0 +1,211 @@
use crate::storage::{BucketInfo, ListObjectsResult, MultipartUploadInfo};
const XML_DECL: &str = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>";
const S3_NS: &str = "http://s3.amazonaws.com/doc/2006-03-01/";
fn xml_escape(s: &str) -> String {
s.replace('&', "&amp;")
.replace('<', "&lt;")
.replace('>', "&gt;")
.replace('"', "&quot;")
.replace('\'', "&apos;")
}
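// Illustrative sketch: '&' must be escaped first so the later replacements do
// not double-escape entities the first pass already produced.
#[cfg(test)]
mod xml_escape_tests {
use super::xml_escape;
#[test]
fn escapes_all_five_predefined_entities() {
assert_eq!(xml_escape("a<b&\"c'"), "a&lt;b&amp;&quot;c&apos;");
}
}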
pub fn list_buckets_xml(buckets: &[BucketInfo]) -> String {
let mut xml = format!(
"{}\n<ListAllMyBucketsResult xmlns=\"{}\">\
<Owner><ID>123456789000</ID><DisplayName>S3rver</DisplayName></Owner>\
<Buckets>",
XML_DECL, S3_NS
);
for b in buckets {
xml.push_str(&format!(
"<Bucket><Name>{}</Name><CreationDate>{}</CreationDate></Bucket>",
xml_escape(&b.name),
b.creation_date.to_rfc3339()
));
}
xml.push_str("</Buckets></ListAllMyBucketsResult>");
xml
}
pub fn list_objects_v1_xml(bucket: &str, result: &ListObjectsResult) -> String {
let mut xml = format!(
"{}\n<ListBucketResult xmlns=\"{}\">\
<Name>{}</Name>\
<Prefix>{}</Prefix>\
<MaxKeys>{}</MaxKeys>\
<IsTruncated>{}</IsTruncated>",
XML_DECL,
S3_NS,
xml_escape(bucket),
xml_escape(&result.prefix),
result.max_keys,
result.is_truncated
);
if !result.delimiter.is_empty() {
xml.push_str(&format!("<Delimiter>{}</Delimiter>", xml_escape(&result.delimiter)));
}
for entry in &result.contents {
xml.push_str(&format!(
"<Contents>\
<Key>{}</Key>\
<LastModified>{}</LastModified>\
<ETag>\"{}\"</ETag>\
<Size>{}</Size>\
<StorageClass>STANDARD</StorageClass>\
</Contents>",
xml_escape(&entry.key),
entry.last_modified.to_rfc3339(),
xml_escape(&entry.md5),
entry.size
));
}
for cp in &result.common_prefixes {
xml.push_str(&format!(
"<CommonPrefixes><Prefix>{}</Prefix></CommonPrefixes>",
xml_escape(cp)
));
}
xml.push_str("</ListBucketResult>");
xml
}
pub fn list_objects_v2_xml(bucket: &str, result: &ListObjectsResult) -> String {
let mut xml = format!(
"{}\n<ListBucketResult xmlns=\"{}\">\
<Name>{}</Name>\
<Prefix>{}</Prefix>\
<MaxKeys>{}</MaxKeys>\
<KeyCount>{}</KeyCount>\
<IsTruncated>{}</IsTruncated>",
XML_DECL,
S3_NS,
xml_escape(bucket),
xml_escape(&result.prefix),
result.max_keys,
result.contents.len(),
result.is_truncated
);
if !result.delimiter.is_empty() {
xml.push_str(&format!("<Delimiter>{}</Delimiter>", xml_escape(&result.delimiter)));
}
if let Some(ref token) = result.next_continuation_token {
xml.push_str(&format!(
"<NextContinuationToken>{}</NextContinuationToken>",
xml_escape(token)
));
}
for entry in &result.contents {
xml.push_str(&format!(
"<Contents>\
<Key>{}</Key>\
<LastModified>{}</LastModified>\
<ETag>\"{}\"</ETag>\
<Size>{}</Size>\
<StorageClass>STANDARD</StorageClass>\
</Contents>",
xml_escape(&entry.key),
entry.last_modified.to_rfc3339(),
xml_escape(&entry.md5),
entry.size
));
}
for cp in &result.common_prefixes {
xml.push_str(&format!(
"<CommonPrefixes><Prefix>{}</Prefix></CommonPrefixes>",
xml_escape(cp)
));
}
xml.push_str("</ListBucketResult>");
xml
}
pub fn copy_object_result_xml(etag: &str, last_modified: &str) -> String {
format!(
"{}\n<CopyObjectResult>\
<LastModified>{}</LastModified>\
<ETag>\"{}\"</ETag>\
</CopyObjectResult>",
XML_DECL,
xml_escape(last_modified),
xml_escape(etag)
)
}
pub fn initiate_multipart_xml(bucket: &str, key: &str, upload_id: &str) -> String {
format!(
"{}\n<InitiateMultipartUploadResult xmlns=\"{}\">\
<Bucket>{}</Bucket>\
<Key>{}</Key>\
<UploadId>{}</UploadId>\
</InitiateMultipartUploadResult>",
XML_DECL,
S3_NS,
xml_escape(bucket),
xml_escape(key),
xml_escape(upload_id)
)
}
pub fn complete_multipart_xml(bucket: &str, key: &str, etag: &str) -> String {
format!(
"{}\n<CompleteMultipartUploadResult xmlns=\"{}\">\
<Location>/{}/{}</Location>\
<Bucket>{}</Bucket>\
<Key>{}</Key>\
<ETag>\"{}\"</ETag>\
</CompleteMultipartUploadResult>",
XML_DECL,
S3_NS,
xml_escape(bucket),
xml_escape(key),
xml_escape(bucket),
xml_escape(key),
xml_escape(etag)
)
}
pub fn list_multipart_uploads_xml(bucket: &str, uploads: &[MultipartUploadInfo]) -> String {
let mut xml = format!(
"{}\n<ListMultipartUploadsResult xmlns=\"{}\">\
<Bucket>{}</Bucket>\
<KeyMarker></KeyMarker>\
<UploadIdMarker></UploadIdMarker>\
<MaxUploads>1000</MaxUploads>\
<IsTruncated>false</IsTruncated>",
XML_DECL,
S3_NS,
xml_escape(bucket)
);
for u in uploads {
xml.push_str(&format!(
"<Upload>\
<Key>{}</Key>\
<UploadId>{}</UploadId>\
<Initiator><ID>S3RVER</ID><DisplayName>S3RVER</DisplayName></Initiator>\
<Owner><ID>S3RVER</ID><DisplayName>S3RVER</DisplayName></Owner>\
<StorageClass>STANDARD</StorageClass>\
<Initiated>{}</Initiated>\
</Upload>",
xml_escape(&u.key),
xml_escape(&u.upload_id),
u.initiated.to_rfc3339()
));
}
xml.push_str("</ListMultipartUploadsResult>");
xml
}

test/test.auth.node.ts Normal file

@@ -0,0 +1,301 @@
import { expect, tap } from '@git.zone/tstest/tapbundle';
import {
S3Client,
CreateBucketCommand,
ListBucketsCommand,
PutObjectCommand,
GetObjectCommand,
DeleteObjectCommand,
DeleteBucketCommand,
PutBucketPolicyCommand,
GetBucketPolicyCommand,
DeleteBucketPolicyCommand,
} from '@aws-sdk/client-s3';
import { Readable } from 'stream';
import * as smarts3 from '../ts/index.js';
let testSmarts3Instance: smarts3.Smarts3;
let authClient: S3Client;
let wrongClient: S3Client;
const TEST_PORT = 3344;
const ACCESS_KEY = 'TESTAKID';
const SECRET_KEY = 'TESTSECRETKEY123';
async function streamToString(stream: Readable): Promise<string> {
const chunks: Buffer[] = [];
return new Promise((resolve, reject) => {
stream.on('data', (chunk) => chunks.push(Buffer.from(chunk)));
stream.on('error', reject);
stream.on('end', () => resolve(Buffer.concat(chunks).toString('utf8')));
});
}
// ============================
// Server setup
// ============================
tap.test('should start S3 server with auth enabled', async () => {
testSmarts3Instance = await smarts3.Smarts3.createAndStart({
server: {
port: TEST_PORT,
silent: true,
region: 'us-east-1',
},
storage: {
cleanSlate: true,
},
auth: {
enabled: true,
credentials: [
{
accessKeyId: ACCESS_KEY,
secretAccessKey: SECRET_KEY,
},
],
},
});
// Authenticated client with correct credentials
authClient = new S3Client({
endpoint: `http://localhost:${TEST_PORT}`,
region: 'us-east-1',
credentials: {
accessKeyId: ACCESS_KEY,
secretAccessKey: SECRET_KEY,
},
forcePathStyle: true,
});
// Client with wrong credentials
wrongClient = new S3Client({
endpoint: `http://localhost:${TEST_PORT}`,
region: 'us-east-1',
credentials: {
accessKeyId: 'WRONGKEY',
secretAccessKey: 'WRONGSECRET',
},
forcePathStyle: true,
});
});
// ============================
// Authenticated CRUD
// ============================
tap.test('authenticated: should list buckets', async () => {
const response = await authClient.send(new ListBucketsCommand({}));
expect(response.$metadata.httpStatusCode).toEqual(200);
expect(Array.isArray(response.Buckets)).toEqual(true);
});
tap.test('authenticated: should create a bucket', async () => {
const response = await authClient.send(new CreateBucketCommand({ Bucket: 'auth-test-bucket' }));
expect(response.$metadata.httpStatusCode).toEqual(200);
});
tap.test('authenticated: should upload an object', async () => {
const response = await authClient.send(
new PutObjectCommand({
Bucket: 'auth-test-bucket',
Key: 'hello.txt',
Body: 'Hello authenticated world!',
ContentType: 'text/plain',
}),
);
expect(response.$metadata.httpStatusCode).toEqual(200);
});
tap.test('authenticated: should download the object', async () => {
const response = await authClient.send(
new GetObjectCommand({
Bucket: 'auth-test-bucket',
Key: 'hello.txt',
}),
);
expect(response.$metadata.httpStatusCode).toEqual(200);
const content = await streamToString(response.Body as Readable);
expect(content).toEqual('Hello authenticated world!');
});
// ============================
// Wrong credentials → 403
// ============================
tap.test('wrong credentials: should fail to list buckets', async () => {
await expect(wrongClient.send(new ListBucketsCommand({}))).rejects.toThrow();
});
tap.test('wrong credentials: should fail to get object', async () => {
await expect(
wrongClient.send(
new GetObjectCommand({
Bucket: 'auth-test-bucket',
Key: 'hello.txt',
}),
),
).rejects.toThrow();
});
// ============================
// Anonymous → 403 (no policy yet)
// ============================
tap.test('anonymous: should fail to list buckets', async () => {
const resp = await fetch(`http://localhost:${TEST_PORT}/`);
expect(resp.status).toEqual(403);
});
tap.test('anonymous: should fail to get object (no policy)', async () => {
const resp = await fetch(`http://localhost:${TEST_PORT}/auth-test-bucket/hello.txt`);
expect(resp.status).toEqual(403);
});
// ============================
// Bucket policy: public read
// ============================
tap.test('should PUT a public-read bucket policy', async () => {
const policy = {
Version: '2012-10-17',
Statement: [
{
Sid: 'PublicRead',
Effect: 'Allow',
Principal: '*',
Action: ['s3:GetObject'],
Resource: [`arn:aws:s3:::auth-test-bucket/*`],
},
],
};
const response = await authClient.send(
new PutBucketPolicyCommand({
Bucket: 'auth-test-bucket',
Policy: JSON.stringify(policy),
}),
);
expect(response.$metadata.httpStatusCode).toEqual(204);
});
tap.test('should GET the bucket policy', async () => {
const response = await authClient.send(
new GetBucketPolicyCommand({
Bucket: 'auth-test-bucket',
}),
);
expect(response.$metadata.httpStatusCode).toEqual(200);
const policy = JSON.parse(response.Policy!);
expect(policy.Statement[0].Sid).toEqual('PublicRead');
});
tap.test('anonymous: should GET object after public-read policy', async () => {
const resp = await fetch(`http://localhost:${TEST_PORT}/auth-test-bucket/hello.txt`);
expect(resp.status).toEqual(200);
const content = await resp.text();
expect(content).toEqual('Hello authenticated world!');
});
tap.test('anonymous: should still fail to PUT object (policy only allows GET)', async () => {
const resp = await fetch(`http://localhost:${TEST_PORT}/auth-test-bucket/anon-file.txt`, {
method: 'PUT',
body: 'should fail',
});
expect(resp.status).toEqual(403);
});
// ============================
// Deny policy
// ============================
tap.test('should PUT a deny policy that blocks authenticated delete', async () => {
const policy = {
Version: '2012-10-17',
Statement: [
{
Sid: 'PublicRead',
Effect: 'Allow',
Principal: '*',
Action: ['s3:GetObject'],
Resource: [`arn:aws:s3:::auth-test-bucket/*`],
},
{
Sid: 'DenyDelete',
Effect: 'Deny',
Principal: '*',
Action: ['s3:DeleteObject'],
Resource: [`arn:aws:s3:::auth-test-bucket/*`],
},
],
};
const response = await authClient.send(
new PutBucketPolicyCommand({
Bucket: 'auth-test-bucket',
Policy: JSON.stringify(policy),
}),
);
expect(response.$metadata.httpStatusCode).toEqual(204);
});
tap.test('authenticated: should be denied delete by policy', async () => {
await expect(
authClient.send(
new DeleteObjectCommand({
Bucket: 'auth-test-bucket',
Key: 'hello.txt',
}),
),
).rejects.toThrow();
});
// ============================
// DELETE bucket policy
// ============================
tap.test('should DELETE the bucket policy', async () => {
const response = await authClient.send(
new DeleteBucketPolicyCommand({
Bucket: 'auth-test-bucket',
}),
);
expect(response.$metadata.httpStatusCode).toEqual(204);
});
tap.test('should GET policy → 404 after deletion', async () => {
await expect(
authClient.send(
new GetBucketPolicyCommand({
Bucket: 'auth-test-bucket',
}),
),
).rejects.toThrow();
});
// ============================
// Cleanup
// ============================
tap.test('authenticated: delete object after policy removed', async () => {
const response = await authClient.send(
new DeleteObjectCommand({
Bucket: 'auth-test-bucket',
Key: 'hello.txt',
}),
);
expect(response.$metadata.httpStatusCode).toEqual(204);
});
tap.test('authenticated: delete the bucket', async () => {
const response = await authClient.send(
new DeleteBucketCommand({ Bucket: 'auth-test-bucket' }),
);
expect(response.$metadata.httpStatusCode).toEqual(204);
});
tap.test('should stop the S3 server', async () => {
await testSmarts3Instance.stop();
});
export default tap.start();

test/test.aws-sdk.node.ts Normal file

@@ -0,0 +1,108 @@
import { expect, tap } from '@git.zone/tstest/tapbundle';
import { S3Client, CreateBucketCommand, ListBucketsCommand, PutObjectCommand, GetObjectCommand, DeleteObjectCommand, DeleteBucketCommand } from '@aws-sdk/client-s3';
import { Readable } from 'stream';
import * as smarts3 from '../ts/index.js';
let testSmarts3Instance: smarts3.Smarts3;
let s3Client: S3Client;
// Helper to convert stream to string
async function streamToString(stream: Readable): Promise<string> {
const chunks: Buffer[] = [];
return new Promise((resolve, reject) => {
stream.on('data', (chunk) => chunks.push(Buffer.from(chunk)));
stream.on('error', reject);
stream.on('end', () => resolve(Buffer.concat(chunks).toString('utf8')));
});
}
tap.test('should start the S3 server and configure client', async () => {
testSmarts3Instance = await smarts3.Smarts3.createAndStart({
server: {
port: 3337,
silent: true,
},
storage: {
cleanSlate: true,
},
});
const descriptor = await testSmarts3Instance.getS3Descriptor();
s3Client = new S3Client({
endpoint: `http://${descriptor.endpoint}:${descriptor.port}`,
region: 'us-east-1',
credentials: {
accessKeyId: descriptor.accessKey,
secretAccessKey: descriptor.accessSecret,
},
forcePathStyle: true,
});
});
tap.test('should list buckets (empty)', async () => {
const response = await s3Client.send(new ListBucketsCommand({}));
expect(Array.isArray(response.Buckets)).toEqual(true);
expect(response.Buckets!.length).toEqual(0);
});
tap.test('should create a bucket', async () => {
const response = await s3Client.send(new CreateBucketCommand({ Bucket: 'test-bucket' }));
expect(response.$metadata.httpStatusCode).toEqual(200);
});
tap.test('should list buckets (showing created bucket)', async () => {
const response = await s3Client.send(new ListBucketsCommand({}));
expect(response.Buckets!.length).toEqual(1);
expect(response.Buckets![0].Name).toEqual('test-bucket');
});
tap.test('should upload an object', async () => {
const response = await s3Client.send(new PutObjectCommand({
Bucket: 'test-bucket',
Key: 'test-file.txt',
Body: 'Hello from AWS SDK!',
ContentType: 'text/plain',
}));
expect(response.$metadata.httpStatusCode).toEqual(200);
expect(response.ETag).toBeTypeofString();
});
tap.test('should download the object', async () => {
const response = await s3Client.send(new GetObjectCommand({
Bucket: 'test-bucket',
Key: 'test-file.txt',
}));
expect(response.$metadata.httpStatusCode).toEqual(200);
const content = await streamToString(response.Body as Readable);
expect(content).toEqual('Hello from AWS SDK!');
});
tap.test('should delete the object', async () => {
const response = await s3Client.send(new DeleteObjectCommand({
Bucket: 'test-bucket',
Key: 'test-file.txt',
}));
expect(response.$metadata.httpStatusCode).toEqual(204);
});
tap.test('should fail to get deleted object', async () => {
await expect(
s3Client.send(new GetObjectCommand({
Bucket: 'test-bucket',
Key: 'test-file.txt',
}))
).rejects.toThrow();
});
tap.test('should delete the bucket', async () => {
const response = await s3Client.send(new DeleteBucketCommand({ Bucket: 'test-bucket' }));
expect(response.$metadata.httpStatusCode).toEqual(204);
});
tap.test('should stop the S3 server', async () => {
await testSmarts3Instance.stop();
});
export default tap.start();


@@ -0,0 +1,335 @@
import { expect, tap } from '@git.zone/tstest/tapbundle';
import {
S3Client,
CreateBucketCommand,
DeleteBucketCommand,
ListBucketsCommand,
ListObjectsV2Command,
PutObjectCommand,
GetObjectCommand,
DeleteObjectCommand,
CopyObjectCommand,
HeadBucketCommand,
PutBucketPolicyCommand,
GetBucketPolicyCommand,
DeleteBucketPolicyCommand,
} from '@aws-sdk/client-s3';
import * as smarts3 from '../ts/index.js';
let testSmarts3Instance: smarts3.Smarts3;
let authClient: S3Client;
const TEST_PORT = 3347;
const ACCESS_KEY = 'TESTAKID';
const SECRET_KEY = 'TESTSECRETKEY123';
const BUCKET = 'actions-bucket';
const BASE_URL = `http://localhost:${TEST_PORT}`;
async function putPolicy(statements: any[]) {
await authClient.send(
new PutBucketPolicyCommand({
Bucket: BUCKET,
Policy: JSON.stringify({ Version: '2012-10-17', Statement: statements }),
})
);
}
async function clearPolicy() {
await authClient.send(new DeleteBucketPolicyCommand({ Bucket: BUCKET }));
}
function denyStatement(action: string) {
return {
Sid: `Deny_${action.replace(':', '_')}`,
Effect: 'Deny' as const,
Principal: '*',
Action: action,
Resource: [
`arn:aws:s3:::${BUCKET}`,
`arn:aws:s3:::${BUCKET}/*`,
],
};
}
// ============================
// Server setup
// ============================
tap.test('setup: start server, create bucket, upload object', async () => {
testSmarts3Instance = await smarts3.Smarts3.createAndStart({
server: { port: TEST_PORT, silent: true, region: 'us-east-1' },
storage: { cleanSlate: true },
auth: {
enabled: true,
credentials: [{ accessKeyId: ACCESS_KEY, secretAccessKey: SECRET_KEY }],
},
});
authClient = new S3Client({
endpoint: BASE_URL,
region: 'us-east-1',
credentials: { accessKeyId: ACCESS_KEY, secretAccessKey: SECRET_KEY },
forcePathStyle: true,
});
await authClient.send(new CreateBucketCommand({ Bucket: BUCKET }));
await authClient.send(
new PutObjectCommand({
Bucket: BUCKET,
Key: 'obj.txt',
Body: 'test content for actions',
ContentType: 'text/plain',
})
);
});
// ============================
// Per-action deny enforcement
// ============================
tap.test('Deny s3:ListBucket → authenticated ListObjects fails', async () => {
await putPolicy([denyStatement('s3:ListBucket')]);
await expect(
authClient.send(new ListObjectsV2Command({ Bucket: BUCKET }))
).rejects.toThrow();
await clearPolicy();
});
tap.test('Deny s3:CreateBucket → authenticated CreateBucket on new bucket fails', async () => {
// CreateBucket cannot be denied via a bucket policy: the target bucket does
// not exist yet, so there is no policy to load and evaluate. This matches AWS
// behavior, so instead verify that CreateBucket still succeeds.
await authClient.send(new CreateBucketCommand({ Bucket: 'new-test-bucket' }));
await authClient.send(new DeleteBucketCommand({ Bucket: 'new-test-bucket' }));
});
tap.test('Deny s3:DeleteBucket → authenticated DeleteBucket fails', async () => {
await putPolicy([denyStatement('s3:DeleteBucket')]);
await expect(
authClient.send(new DeleteBucketCommand({ Bucket: BUCKET }))
).rejects.toThrow();
await clearPolicy();
});
tap.test('Deny s3:GetObject → authenticated GetObject fails', async () => {
await putPolicy([denyStatement('s3:GetObject')]);
await expect(
authClient.send(new GetObjectCommand({ Bucket: BUCKET, Key: 'obj.txt' }))
).rejects.toThrow();
await clearPolicy();
});
tap.test('Deny s3:PutObject → authenticated PutObject fails', async () => {
await putPolicy([denyStatement('s3:PutObject')]);
await expect(
authClient.send(
new PutObjectCommand({
Bucket: BUCKET,
Key: 'new-obj.txt',
Body: 'should fail',
})
)
).rejects.toThrow();
await clearPolicy();
});
tap.test('Deny s3:DeleteObject → authenticated DeleteObject fails', async () => {
await putPolicy([denyStatement('s3:DeleteObject')]);
await expect(
authClient.send(new DeleteObjectCommand({ Bucket: BUCKET, Key: 'obj.txt' }))
).rejects.toThrow();
await clearPolicy();
});
tap.test('Deny s3:PutObject → authenticated CopyObject fails (maps to s3:PutObject)', async () => {
await putPolicy([denyStatement('s3:PutObject')]);
await expect(
authClient.send(
new CopyObjectCommand({
Bucket: BUCKET,
Key: 'obj-copy.txt',
CopySource: `${BUCKET}/obj.txt`,
})
)
).rejects.toThrow();
await clearPolicy();
});
tap.test('Deny s3:GetBucketPolicy → authenticated GetBucketPolicy fails', async () => {
// Put a policy that denies GetBucketPolicy, then attempt to read it back.
await putPolicy([denyStatement('s3:GetBucketPolicy')]);
await expect(
authClient.send(new GetBucketPolicyCommand({ Bucket: BUCKET }))
).rejects.toThrow();
// Clear using direct delete (which isn't denied)
await clearPolicy();
});
tap.test('Deny s3:PutBucketPolicy → authenticated PutBucketPolicy fails (for second policy)', async () => {
// First put a policy that denies PutBucketPolicy
await putPolicy([denyStatement('s3:PutBucketPolicy')]);
// Now try to put another policy — should fail
await expect(
authClient.send(
new PutBucketPolicyCommand({
Bucket: BUCKET,
Policy: JSON.stringify({
Version: '2012-10-17',
Statement: [
{
Sid: 'SomeOtherPolicy',
Effect: 'Allow',
Principal: '*',
Action: 's3:GetObject',
Resource: `arn:aws:s3:::${BUCKET}/*`,
},
],
}),
})
)
).rejects.toThrow();
await clearPolicy();
});
tap.test('Deny s3:DeleteBucketPolicy → authenticated DeleteBucketPolicy fails', async () => {
await putPolicy([denyStatement('s3:DeleteBucketPolicy')]);
await expect(
authClient.send(new DeleteBucketPolicyCommand({ Bucket: BUCKET }))
).rejects.toThrow();
// Cleanup is intentionally skipped: the deny applies to every principal, so no
// SDK call can delete this policy. See the note after this test for the ways out.
});
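// Ways out of a policy that denies s3:DeleteBucketPolicy for every principal
// (illustrative note, not executed here):
// 1. Overwrite it: s3:PutBucketPolicy is not denied, and the next test does exactly that.
// 2. Restart with a clean slate, which wipes all buckets and their policies:
//      await smarts3.Smarts3.createAndStart({ storage: { cleanSlate: true } });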
tap.test('Recovery: remove deny policy → authenticated operations resume working', async () => {
// The previous test left a policy denying s3:DeleteBucketPolicy but not
// s3:PutBucketPolicy, so overwrite it with an allow-all policy first, then delete it.
await authClient.send(
new PutBucketPolicyCommand({
Bucket: BUCKET,
Policy: JSON.stringify({
Version: '2012-10-17',
Statement: [
{
Sid: 'AllowAll',
Effect: 'Allow',
Principal: '*',
Action: 's3:*',
Resource: [`arn:aws:s3:::${BUCKET}`, `arn:aws:s3:::${BUCKET}/*`],
},
],
}),
})
);
// Now all operations should work again
const getResp = await authClient.send(
new GetObjectCommand({ Bucket: BUCKET, Key: 'obj.txt' })
);
expect(getResp.$metadata.httpStatusCode).toEqual(200);
const listResp = await authClient.send(
new ListObjectsV2Command({ Bucket: BUCKET })
);
expect(listResp.$metadata.httpStatusCode).toEqual(200);
await clearPolicy();
});
// ============================
// Special cases
// ============================
tap.test('ListAllMyBuckets always requires auth → anonymous fetch to / returns 403', async () => {
const resp = await fetch(`${BASE_URL}/`);
expect(resp.status).toEqual(403);
});
tap.test('Auth disabled mode → anonymous full access works', async () => {
// Start a second server with auth disabled
const noAuthInstance = await smarts3.Smarts3.createAndStart({
server: { port: 3348, silent: true, region: 'us-east-1' },
storage: { cleanSlate: true },
auth: { enabled: false, credentials: [] },
});
// Anonymous operations should all work
const listResp = await fetch('http://localhost:3348/');
expect(listResp.status).toEqual(200);
// Create bucket via fetch
const createResp = await fetch('http://localhost:3348/anon-bucket', { method: 'PUT' });
expect(createResp.status).toEqual(200);
// Put object
const putResp = await fetch('http://localhost:3348/anon-bucket/file.txt', {
method: 'PUT',
body: 'hello anon',
});
expect(putResp.status).toEqual(200);
// Get object
const getResp = await fetch('http://localhost:3348/anon-bucket/file.txt');
expect(getResp.status).toEqual(200);
const text = await getResp.text();
expect(text).toEqual('hello anon');
// Delete object
const delObjResp = await fetch('http://localhost:3348/anon-bucket/file.txt', { method: 'DELETE' });
expect(delObjResp.status).toEqual(204);
// Delete bucket
const delBucketResp = await fetch('http://localhost:3348/anon-bucket', { method: 'DELETE' });
expect(delBucketResp.status).toEqual(204);
await noAuthInstance.stop();
});
// ============================
// Teardown
// ============================
tap.test('teardown: clean up and stop server', async () => {
// Clean up any remaining objects
try {
await authClient.send(new DeleteObjectCommand({ Bucket: BUCKET, Key: 'obj.txt' }));
} catch {
// May already be deleted
}
try {
await authClient.send(new DeleteBucketCommand({ Bucket: BUCKET }));
} catch {
// May already be deleted
}
await testSmarts3Instance.stop();
});
export default tap.start();

View File

@@ -0,0 +1,252 @@
import { expect, tap } from '@git.zone/tstest/tapbundle';
import {
S3Client,
CreateBucketCommand,
DeleteBucketCommand,
PutBucketPolicyCommand,
GetBucketPolicyCommand,
DeleteBucketPolicyCommand,
} from '@aws-sdk/client-s3';
import * as smarts3 from '../ts/index.js';
let testSmarts3Instance: smarts3.Smarts3;
let authClient: S3Client;
const TEST_PORT = 3345;
const ACCESS_KEY = 'TESTAKID';
const SECRET_KEY = 'TESTSECRETKEY123';
const BUCKET = 'policy-crud-bucket';
function makePolicy(statements: any[]) {
return JSON.stringify({ Version: '2012-10-17', Statement: statements });
}
const validStatement = {
Sid: 'Test1',
Effect: 'Allow',
Principal: '*',
Action: ['s3:GetObject'],
Resource: [`arn:aws:s3:::${BUCKET}/*`],
};
// ============================
// Server setup
// ============================
tap.test('setup: start S3 server with auth enabled', async () => {
testSmarts3Instance = await smarts3.Smarts3.createAndStart({
server: { port: TEST_PORT, silent: true, region: 'us-east-1' },
storage: { cleanSlate: true },
auth: {
enabled: true,
credentials: [{ accessKeyId: ACCESS_KEY, secretAccessKey: SECRET_KEY }],
},
});
authClient = new S3Client({
endpoint: `http://localhost:${TEST_PORT}`,
region: 'us-east-1',
credentials: { accessKeyId: ACCESS_KEY, secretAccessKey: SECRET_KEY },
forcePathStyle: true,
});
});
tap.test('setup: create bucket', async () => {
await authClient.send(new CreateBucketCommand({ Bucket: BUCKET }));
});
// ============================
// CRUD tests
// ============================
tap.test('GET policy on bucket with no policy → throws (NoSuchBucketPolicy)', async () => {
await expect(
authClient.send(new GetBucketPolicyCommand({ Bucket: BUCKET }))
).rejects.toThrow();
});
tap.test('PUT valid policy → 204', async () => {
const response = await authClient.send(
new PutBucketPolicyCommand({
Bucket: BUCKET,
Policy: makePolicy([validStatement]),
})
);
expect(response.$metadata.httpStatusCode).toEqual(204);
});
tap.test('GET policy back → returns matching JSON', async () => {
const response = await authClient.send(
new GetBucketPolicyCommand({ Bucket: BUCKET })
);
expect(response.$metadata.httpStatusCode).toEqual(200);
const policy = JSON.parse(response.Policy!);
expect(policy.Version).toEqual('2012-10-17');
expect(policy.Statement[0].Sid).toEqual('Test1');
expect(policy.Statement[0].Effect).toEqual('Allow');
});
tap.test('PUT updated policy (overwrite) → 204, GET returns new version', async () => {
const updatedStatement = {
Sid: 'Updated',
Effect: 'Deny',
Principal: '*',
Action: ['s3:DeleteObject'],
Resource: [`arn:aws:s3:::${BUCKET}/*`],
};
const putResp = await authClient.send(
new PutBucketPolicyCommand({
Bucket: BUCKET,
Policy: makePolicy([updatedStatement]),
})
);
expect(putResp.$metadata.httpStatusCode).toEqual(204);
const getResp = await authClient.send(
new GetBucketPolicyCommand({ Bucket: BUCKET })
);
const policy = JSON.parse(getResp.Policy!);
expect(policy.Statement[0].Sid).toEqual('Updated');
expect(policy.Statement[0].Effect).toEqual('Deny');
});
tap.test('DELETE policy → 204', async () => {
const response = await authClient.send(
new DeleteBucketPolicyCommand({ Bucket: BUCKET })
);
expect(response.$metadata.httpStatusCode).toEqual(204);
});
tap.test('DELETE policy again (idempotent) → 204', async () => {
const response = await authClient.send(
new DeleteBucketPolicyCommand({ Bucket: BUCKET })
);
expect(response.$metadata.httpStatusCode).toEqual(204);
});
tap.test('GET policy after delete → throws', async () => {
await expect(
authClient.send(new GetBucketPolicyCommand({ Bucket: BUCKET }))
).rejects.toThrow();
});
tap.test('PUT policy on non-existent bucket → throws (NoSuchBucket)', async () => {
await expect(
authClient.send(
new PutBucketPolicyCommand({
Bucket: 'nonexistent-bucket-xyz',
Policy: makePolicy([validStatement]),
})
)
).rejects.toThrow();
});
tap.test('PUT invalid JSON → throws (MalformedPolicy)', async () => {
await expect(
authClient.send(
new PutBucketPolicyCommand({
Bucket: BUCKET,
Policy: '{not valid json!!!',
})
)
).rejects.toThrow();
});
tap.test('PUT policy with wrong version → throws (MalformedPolicy)', async () => {
await expect(
authClient.send(
new PutBucketPolicyCommand({
Bucket: BUCKET,
Policy: JSON.stringify({
Version: '2023-01-01',
Statement: [validStatement],
}),
})
)
).rejects.toThrow();
});
tap.test('PUT policy with empty statements array → throws (MalformedPolicy)', async () => {
await expect(
authClient.send(
new PutBucketPolicyCommand({
Bucket: BUCKET,
Policy: JSON.stringify({
Version: '2012-10-17',
Statement: [],
}),
})
)
).rejects.toThrow();
});
tap.test('PUT policy with action missing s3: prefix → throws (MalformedPolicy)', async () => {
await expect(
authClient.send(
new PutBucketPolicyCommand({
Bucket: BUCKET,
Policy: makePolicy([
{
Sid: 'BadAction',
Effect: 'Allow',
Principal: '*',
Action: ['GetObject'],
Resource: [`arn:aws:s3:::${BUCKET}/*`],
},
]),
})
)
).rejects.toThrow();
});
tap.test('PUT policy with resource missing arn:aws:s3::: prefix → throws (MalformedPolicy)', async () => {
await expect(
authClient.send(
new PutBucketPolicyCommand({
Bucket: BUCKET,
Policy: makePolicy([
{
Sid: 'BadResource',
Effect: 'Allow',
Principal: '*',
Action: ['s3:GetObject'],
Resource: ['policy-crud-bucket/*'],
},
]),
})
)
).rejects.toThrow();
});
tap.test('Bucket deletion cleans up associated policy', async () => {
// PUT a policy
await authClient.send(
new PutBucketPolicyCommand({
Bucket: BUCKET,
Policy: makePolicy([validStatement]),
})
);
// Delete the bucket
await authClient.send(new DeleteBucketCommand({ Bucket: BUCKET }));
// Re-create the bucket
await authClient.send(new CreateBucketCommand({ Bucket: BUCKET }));
// GET policy should now be gone
await expect(
authClient.send(new GetBucketPolicyCommand({ Bucket: BUCKET }))
).rejects.toThrow();
});
// ============================
// Teardown
// ============================
tap.test('teardown: delete bucket and stop server', async () => {
await authClient.send(new DeleteBucketCommand({ Bucket: BUCKET }));
await testSmarts3Instance.stop();
});
export default tap.start();

View File

@@ -0,0 +1,517 @@
import { expect, tap } from '@git.zone/tstest/tapbundle';
import {
S3Client,
CreateBucketCommand,
DeleteBucketCommand,
PutObjectCommand,
GetObjectCommand,
DeleteObjectCommand,
PutBucketPolicyCommand,
DeleteBucketPolicyCommand,
} from '@aws-sdk/client-s3';
import { Readable } from 'stream';
import * as smarts3 from '../ts/index.js';
let testSmarts3Instance: smarts3.Smarts3;
let authClient: S3Client;
const TEST_PORT = 3346;
const ACCESS_KEY = 'TESTAKID';
const SECRET_KEY = 'TESTSECRETKEY123';
const BUCKET = 'eval-bucket';
const BASE_URL = `http://localhost:${TEST_PORT}`;
async function streamToString(stream: Readable): Promise<string> {
const chunks: Buffer[] = [];
return new Promise((resolve, reject) => {
stream.on('data', (chunk) => chunks.push(Buffer.from(chunk)));
stream.on('error', reject);
stream.on('end', () => resolve(Buffer.concat(chunks).toString('utf8')));
});
}
async function putPolicy(statements: any[]) {
await authClient.send(
new PutBucketPolicyCommand({
Bucket: BUCKET,
Policy: JSON.stringify({ Version: '2012-10-17', Statement: statements }),
})
);
}
async function clearPolicy() {
await authClient.send(new DeleteBucketPolicyCommand({ Bucket: BUCKET }));
}
// ============================
// Server setup
// ============================
tap.test('setup: start server, create bucket, upload object', async () => {
testSmarts3Instance = await smarts3.Smarts3.createAndStart({
server: { port: TEST_PORT, silent: true, region: 'us-east-1' },
storage: { cleanSlate: true },
auth: {
enabled: true,
credentials: [{ accessKeyId: ACCESS_KEY, secretAccessKey: SECRET_KEY }],
},
});
authClient = new S3Client({
endpoint: BASE_URL,
region: 'us-east-1',
credentials: { accessKeyId: ACCESS_KEY, secretAccessKey: SECRET_KEY },
forcePathStyle: true,
});
await authClient.send(new CreateBucketCommand({ Bucket: BUCKET }));
await authClient.send(
new PutObjectCommand({
Bucket: BUCKET,
Key: 'test-obj.txt',
Body: 'hello policy eval',
ContentType: 'text/plain',
})
);
});
// ============================
// Principal matching
// ============================
tap.test('Principal: "*" → anonymous fetch GET succeeds', async () => {
await putPolicy([
{
Sid: 'PrincipalWildcard',
Effect: 'Allow',
Principal: '*',
Action: 's3:GetObject',
Resource: `arn:aws:s3:::${BUCKET}/*`,
},
]);
const resp = await fetch(`${BASE_URL}/${BUCKET}/test-obj.txt`);
expect(resp.status).toEqual(200);
const text = await resp.text();
expect(text).toEqual('hello policy eval');
await clearPolicy();
});
tap.test('Principal: {"AWS": "*"} → anonymous GET fails, authenticated GET succeeds', async () => {
await putPolicy([
{
Sid: 'AwsWildcard',
Effect: 'Allow',
Principal: { AWS: '*' },
Action: 's3:GetObject',
Resource: `arn:aws:s3:::${BUCKET}/*`,
},
]);
// Anonymous → no identity → Principal AWS:* doesn't match anonymous → NoOpinion → denied
const anonResp = await fetch(`${BASE_URL}/${BUCKET}/test-obj.txt`);
expect(anonResp.status).toEqual(403);
// Authenticated → has identity → Principal AWS:* matches → Allow
const authResp = await authClient.send(
new GetObjectCommand({ Bucket: BUCKET, Key: 'test-obj.txt' })
);
expect(authResp.$metadata.httpStatusCode).toEqual(200);
await clearPolicy();
});
tap.test('Principal: {"AWS": "arn:aws:iam::TESTAKID"} → authenticated GET succeeds', async () => {
await putPolicy([
{
Sid: 'SpecificPrincipal',
Effect: 'Allow',
Principal: { AWS: `arn:aws:iam::${ACCESS_KEY}` },
Action: 's3:GetObject',
Resource: `arn:aws:s3:::${BUCKET}/*`,
},
]);
const resp = await authClient.send(
new GetObjectCommand({ Bucket: BUCKET, Key: 'test-obj.txt' })
);
expect(resp.$metadata.httpStatusCode).toEqual(200);
await clearPolicy();
});
tap.test('Principal: {"AWS": "arn:aws:iam::WRONGKEY"} → authenticated GET still succeeds (default allow)', async () => {
await putPolicy([
{
Sid: 'WrongPrincipal',
Effect: 'Allow',
Principal: { AWS: 'arn:aws:iam::WRONGKEY' },
Action: 's3:GetObject',
Resource: `arn:aws:s3:::${BUCKET}/*`,
},
]);
// Principal doesn't match our key → NoOpinion → default allow for authenticated
const resp = await authClient.send(
new GetObjectCommand({ Bucket: BUCKET, Key: 'test-obj.txt' })
);
expect(resp.$metadata.httpStatusCode).toEqual(200);
await clearPolicy();
});
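// Illustrative sketch (hypothetical, not part of this test file) of the principal
// matching the four tests above exercise: '*' matches every caller including
// anonymous ones, { AWS: '*' } matches any authenticated identity, and a specific
// ARN has to equal the caller's identity exactly; no match means NoOpinion.
type TPrincipal = '*' | { AWS: string };
function principalMatches(principal: TPrincipal, identityArn: string | null): boolean {
  if (principal === '*') return true; // anonymous and authenticated alike
  if (identityArn === null) return false; // AWS principals never match anonymous callers
  return principal.AWS === '*' || principal.AWS === identityArn;
}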
// ============================
// Action matching
// ============================
tap.test('Action: "s3:*" → anonymous can GET and PUT (wildcard matches all)', async () => {
await putPolicy([
{
Sid: 'S3Wildcard',
Effect: 'Allow',
Principal: '*',
Action: 's3:*',
Resource: `arn:aws:s3:::${BUCKET}/*`,
},
]);
const getResp = await fetch(`${BASE_URL}/${BUCKET}/test-obj.txt`);
expect(getResp.status).toEqual(200);
const putResp = await fetch(`${BASE_URL}/${BUCKET}/anon-wildcard.txt`, {
method: 'PUT',
body: 'wildcard put',
});
expect(putResp.status).toEqual(200);
// Clean up the object we created
await authClient.send(new DeleteObjectCommand({ Bucket: BUCKET, Key: 'anon-wildcard.txt' }));
await clearPolicy();
});
tap.test('Action: "*" → global wildcard matches all actions', async () => {
await putPolicy([
{
Sid: 'GlobalWildcard',
Effect: 'Allow',
Principal: '*',
Action: '*',
Resource: `arn:aws:s3:::${BUCKET}/*`,
},
]);
const getResp = await fetch(`${BASE_URL}/${BUCKET}/test-obj.txt`);
expect(getResp.status).toEqual(200);
const putResp = await fetch(`${BASE_URL}/${BUCKET}/anon-global.txt`, {
method: 'PUT',
body: 'global wildcard',
});
expect(putResp.status).toEqual(200);
await authClient.send(new DeleteObjectCommand({ Bucket: BUCKET, Key: 'anon-global.txt' }));
await clearPolicy();
});
tap.test('Action: "s3:Get*" → anonymous can GET but not PUT (prefix wildcard)', async () => {
await putPolicy([
{
Sid: 'PrefixWildcard',
Effect: 'Allow',
Principal: '*',
Action: 's3:Get*',
Resource: `arn:aws:s3:::${BUCKET}/*`,
},
]);
const getResp = await fetch(`${BASE_URL}/${BUCKET}/test-obj.txt`);
expect(getResp.status).toEqual(200);
const putResp = await fetch(`${BASE_URL}/${BUCKET}/anon-prefix.txt`, {
method: 'PUT',
body: 'should fail',
});
expect(putResp.status).toEqual(403);
await clearPolicy();
});
tap.test('Action: ["s3:GetObject", "s3:PutObject"] → anonymous can GET and PUT but not DELETE', async () => {
await putPolicy([
{
Sid: 'MultiAction',
Effect: 'Allow',
Principal: '*',
Action: ['s3:GetObject', 's3:PutObject'],
Resource: `arn:aws:s3:::${BUCKET}/*`,
},
]);
const getResp = await fetch(`${BASE_URL}/${BUCKET}/test-obj.txt`);
expect(getResp.status).toEqual(200);
const putResp = await fetch(`${BASE_URL}/${BUCKET}/anon-multi.txt`, {
method: 'PUT',
body: 'multi action',
});
expect(putResp.status).toEqual(200);
const delResp = await fetch(`${BASE_URL}/${BUCKET}/anon-multi.txt`, {
method: 'DELETE',
});
expect(delResp.status).toEqual(403);
// Clean up
await authClient.send(new DeleteObjectCommand({ Bucket: BUCKET, Key: 'anon-multi.txt' }));
await clearPolicy();
});
// ============================
// Resource ARN matching
// ============================
tap.test('Resource: "arn:aws:s3:::eval-bucket/*" → anonymous GET of object succeeds', async () => {
await putPolicy([
{
Sid: 'ResourceWildcard',
Effect: 'Allow',
Principal: '*',
Action: 's3:GetObject',
Resource: `arn:aws:s3:::${BUCKET}/*`,
},
]);
const resp = await fetch(`${BASE_URL}/${BUCKET}/test-obj.txt`);
expect(resp.status).toEqual(200);
await clearPolicy();
});
tap.test('Resource: exact key → anonymous GET of that key succeeds, other key fails', async () => {
await putPolicy([
{
Sid: 'ExactResource',
Effect: 'Allow',
Principal: '*',
Action: 's3:GetObject',
Resource: `arn:aws:s3:::${BUCKET}/test-obj.txt`,
},
]);
const goodResp = await fetch(`${BASE_URL}/${BUCKET}/test-obj.txt`);
expect(goodResp.status).toEqual(200);
// Other key → resource doesn't match → NoOpinion → denied for anonymous
const badResp = await fetch(`${BASE_URL}/${BUCKET}/nonexistent.txt`);
expect(badResp.status).toEqual(403);
await clearPolicy();
});
tap.test('Resource: wrong bucket ARN → NoOpinion → anonymous GET denied', async () => {
await putPolicy([
{
Sid: 'WrongBucket',
Effect: 'Allow',
Principal: '*',
Action: 's3:GetObject',
Resource: 'arn:aws:s3:::other-bucket/*',
},
]);
const resp = await fetch(`${BASE_URL}/${BUCKET}/test-obj.txt`);
expect(resp.status).toEqual(403);
await clearPolicy();
});
tap.test('Resource: "*" → matches everything, anonymous GET succeeds', async () => {
await putPolicy([
{
Sid: 'StarResource',
Effect: 'Allow',
Principal: '*',
Action: 's3:GetObject',
Resource: '*',
},
]);
const resp = await fetch(`${BASE_URL}/${BUCKET}/test-obj.txt`);
expect(resp.status).toEqual(200);
await clearPolicy();
});
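// Illustrative sketch (hypothetical, not part of this test file) of the resource
// matching behind the four tests above: '*' matches anything, a pattern ending in
// '*' is assumed to match by prefix (so 'arn:aws:s3:::bucket/*' covers every key
// in the bucket), and any other pattern must equal the request ARN exactly.
function resourceMatches(pattern: string, requestArn: string): boolean {
  if (pattern === '*') return true;
  if (pattern.endsWith('*')) return requestArn.startsWith(pattern.slice(0, -1));
  return pattern === requestArn;
}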
// ============================
// Deny-over-Allow priority
// ============================
tap.test('Allow + Deny same action → anonymous GET denied', async () => {
await putPolicy([
{
Sid: 'AllowGet',
Effect: 'Allow',
Principal: '*',
Action: 's3:GetObject',
Resource: `arn:aws:s3:::${BUCKET}/*`,
},
{
Sid: 'DenyGet',
Effect: 'Deny',
Principal: '*',
Action: 's3:GetObject',
Resource: `arn:aws:s3:::${BUCKET}/*`,
},
]);
const resp = await fetch(`${BASE_URL}/${BUCKET}/test-obj.txt`);
expect(resp.status).toEqual(403);
await clearPolicy();
});
tap.test('Allow s3:* + Deny s3:DeleteObject → anonymous GET succeeds, DELETE denied', async () => {
await putPolicy([
{
Sid: 'AllowAll',
Effect: 'Allow',
Principal: '*',
Action: 's3:*',
Resource: `arn:aws:s3:::${BUCKET}/*`,
},
{
Sid: 'DenyDelete',
Effect: 'Deny',
Principal: '*',
Action: 's3:DeleteObject',
Resource: `arn:aws:s3:::${BUCKET}/*`,
},
]);
const getResp = await fetch(`${BASE_URL}/${BUCKET}/test-obj.txt`);
expect(getResp.status).toEqual(200);
const delResp = await fetch(`${BASE_URL}/${BUCKET}/test-obj.txt`, { method: 'DELETE' });
expect(delResp.status).toEqual(403);
await clearPolicy();
});
tap.test('Statement order does not matter: Deny first, Allow second → still denied', async () => {
await putPolicy([
{
Sid: 'DenyFirst',
Effect: 'Deny',
Principal: '*',
Action: 's3:GetObject',
Resource: `arn:aws:s3:::${BUCKET}/*`,
},
{
Sid: 'AllowSecond',
Effect: 'Allow',
Principal: '*',
Action: 's3:GetObject',
Resource: `arn:aws:s3:::${BUCKET}/*`,
},
]);
const resp = await fetch(`${BASE_URL}/${BUCKET}/test-obj.txt`);
expect(resp.status).toEqual(403);
await clearPolicy();
});
// ============================
// NoOpinion fallback
// ============================
tap.test('NoOpinion: policy allows PutObject only → authenticated GET falls through (default allow)', async () => {
await putPolicy([
{
Sid: 'AllowPutOnly',
Effect: 'Allow',
Principal: '*',
Action: 's3:PutObject',
Resource: `arn:aws:s3:::${BUCKET}/*`,
},
]);
// Authenticated → NoOpinion → default allow
const resp = await authClient.send(
new GetObjectCommand({ Bucket: BUCKET, Key: 'test-obj.txt' })
);
expect(resp.$metadata.httpStatusCode).toEqual(200);
await clearPolicy();
});
tap.test('NoOpinion: same policy → anonymous GET falls through → default deny (403)', async () => {
await putPolicy([
{
Sid: 'AllowPutOnly',
Effect: 'Allow',
Principal: '*',
Action: 's3:PutObject',
Resource: `arn:aws:s3:::${BUCKET}/*`,
},
]);
// Anonymous → NoOpinion for GetObject → default deny
const resp = await fetch(`${BASE_URL}/${BUCKET}/test-obj.txt`);
expect(resp.status).toEqual(403);
await clearPolicy();
});
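// Illustrative sketch (hypothetical, not part of this test file) of the combining
// rule the deny-priority and NoOpinion tests above rely on: any matching Deny wins
// regardless of statement order, a matching Allow comes next, and when nothing
// matches the request falls back to the caller's default decision.
function combineDecision(matchingEffects: Array<'Allow' | 'Deny'>, isAuthenticated: boolean): 'Allow' | 'Deny' {
  if (matchingEffects.includes('Deny')) return 'Deny'; // explicit deny always wins
  if (matchingEffects.includes('Allow')) return 'Allow'; // then explicit allow
  return isAuthenticated ? 'Allow' : 'Deny'; // NoOpinion → default decision
}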
// ============================
// IAM action mapping
// ============================
tap.test('Policy allows s3:GetObject → anonymous HEAD object succeeds (HeadObject maps to s3:GetObject)', async () => {
await putPolicy([
{
Sid: 'AllowGet',
Effect: 'Allow',
Principal: '*',
Action: 's3:GetObject',
Resource: `arn:aws:s3:::${BUCKET}/*`,
},
]);
const resp = await fetch(`${BASE_URL}/${BUCKET}/test-obj.txt`, { method: 'HEAD' });
expect(resp.status).toEqual(200);
await clearPolicy();
});
tap.test('Policy allows s3:ListBucket → anonymous HEAD bucket succeeds', async () => {
await putPolicy([
{
Sid: 'AllowList',
Effect: 'Allow',
Principal: '*',
Action: 's3:ListBucket',
Resource: `arn:aws:s3:::${BUCKET}`,
},
]);
const resp = await fetch(`${BASE_URL}/${BUCKET}`, { method: 'HEAD' });
expect(resp.status).toEqual(200);
await clearPolicy();
});
tap.test('Policy allows s3:ListBucket → anonymous GET bucket (list objects) succeeds', async () => {
await putPolicy([
{
Sid: 'AllowList',
Effect: 'Allow',
Principal: '*',
Action: 's3:ListBucket',
Resource: `arn:aws:s3:::${BUCKET}`,
},
]);
const resp = await fetch(`${BASE_URL}/${BUCKET}`);
expect(resp.status).toEqual(200);
const text = await resp.text();
expect(text).toInclude('ListBucketResult');
await clearPolicy();
});
// ============================
// Teardown
// ============================
tap.test('teardown: clean up and stop server', async () => {
await authClient.send(new DeleteObjectCommand({ Bucket: BUCKET, Key: 'test-obj.txt' }));
await authClient.send(new DeleteBucketCommand({ Bucket: BUCKET }));
await testSmarts3Instance.stop();
});
export default tap.start();

View File

@@ -1,4 +1,4 @@
-import { expect, tap } from '@push.rocks/tapbundle';
+import { expect, tap } from '@git.zone/tstest/tapbundle';
 import * as plugins from './plugins.js';
 import * as smarts3 from '../ts/index.js';
@@ -7,8 +7,12 @@ let testSmarts3Instance: smarts3.Smarts3;
 tap.test('should create a smarts3 instance and run it', async (toolsArg) => {
   testSmarts3Instance = await smarts3.Smarts3.createAndStart({
-    port: 3000,
-    cleanSlate: true,
+    server: {
+      port: 3333,
+    },
+    storage: {
+      cleanSlate: true,
+    },
   });
   console.log(`Let the instance run for 2 seconds`);
   await toolsArg.delayFor(2000);

View File

@@ -3,6 +3,6 @@
  */
 export const commitinfo = {
   name: '@push.rocks/smarts3',
-  version: '2.2.4',
+  version: '5.3.0',
   description: 'A Node.js TypeScript package to create a local S3 endpoint for simulating AWS S3 operations using mapped local directories for development and testing purposes.'
 }

View File

@@ -1,64 +1,251 @@
 import * as plugins from './plugins.js';
 import * as paths from './paths.js';

-export interface ISmarts3ContructorOptions {
+/**
+ * Authentication configuration
+ */
+export interface IAuthConfig {
+  enabled: boolean;
+  credentials: Array<{
+    accessKeyId: string;
+    secretAccessKey: string;
+  }>;
+}
+
+/**
+ * CORS configuration
+ */
+export interface ICorsConfig {
+  enabled: boolean;
+  allowedOrigins?: string[];
+  allowedMethods?: string[];
+  allowedHeaders?: string[];
+  exposedHeaders?: string[];
+  maxAge?: number;
+  allowCredentials?: boolean;
+}
+
+/**
+ * Logging configuration
+ */
+export interface ILoggingConfig {
+  level?: 'error' | 'warn' | 'info' | 'debug';
+  format?: 'text' | 'json';
+  enabled?: boolean;
+}
+
+/**
+ * Request limits configuration
+ */
+export interface ILimitsConfig {
+  maxObjectSize?: number;
+  maxMetadataSize?: number;
+  requestTimeout?: number;
+}
+
+/**
+ * Multipart upload configuration
+ */
+export interface IMultipartConfig {
+  expirationDays?: number;
+  cleanupIntervalMinutes?: number;
+}
+
+/**
+ * Server configuration
+ */
+export interface IServerConfig {
   port?: number;
+  address?: string;
+  silent?: boolean;
+  region?: string;
+}
+
+/**
+ * Storage configuration
+ */
+export interface IStorageConfig {
+  directory?: string;
   cleanSlate?: boolean;
 }

+/**
+ * Complete smarts3 configuration
+ */
+export interface ISmarts3Config {
+  server?: IServerConfig;
+  storage?: IStorageConfig;
+  auth?: IAuthConfig;
+  cors?: ICorsConfig;
+  logging?: ILoggingConfig;
+  limits?: ILimitsConfig;
+  multipart?: IMultipartConfig;
+}
+
+/**
+ * Default configuration values
+ */
+const DEFAULT_CONFIG: ISmarts3Config = {
+  server: {
+    port: 3000,
+    address: '0.0.0.0',
+    silent: false,
+    region: 'us-east-1',
+  },
+  storage: {
+    directory: paths.bucketsDir,
+    cleanSlate: false,
+  },
+  auth: {
+    enabled: false,
+    credentials: [
+      {
+        accessKeyId: 'S3RVER',
+        secretAccessKey: 'S3RVER',
+      },
+    ],
+  },
+  cors: {
+    enabled: false,
+    allowedOrigins: ['*'],
+    allowedMethods: ['GET', 'POST', 'PUT', 'DELETE', 'HEAD', 'OPTIONS'],
+    allowedHeaders: ['*'],
+    exposedHeaders: ['ETag', 'x-amz-request-id', 'x-amz-version-id'],
+    maxAge: 86400,
+    allowCredentials: false,
+  },
+  logging: {
+    level: 'info',
+    format: 'text',
+    enabled: true,
+  },
+  limits: {
+    maxObjectSize: 5 * 1024 * 1024 * 1024, // 5GB
+    maxMetadataSize: 2048,
+    requestTimeout: 300000, // 5 minutes
+  },
+  multipart: {
+    expirationDays: 7,
+    cleanupIntervalMinutes: 60,
+  },
+};
+
+/**
+ * Merge user config with defaults (deep merge)
+ */
+function mergeConfig(userConfig: ISmarts3Config): Required<ISmarts3Config> {
+  return {
+    server: {
+      ...DEFAULT_CONFIG.server!,
+      ...(userConfig.server || {}),
+    },
+    storage: {
+      ...DEFAULT_CONFIG.storage!,
+      ...(userConfig.storage || {}),
+    },
+    auth: {
+      ...DEFAULT_CONFIG.auth!,
+      ...(userConfig.auth || {}),
+    },
+    cors: {
+      ...DEFAULT_CONFIG.cors!,
+      ...(userConfig.cors || {}),
+    },
+    logging: {
+      ...DEFAULT_CONFIG.logging!,
+      ...(userConfig.logging || {}),
+    },
+    limits: {
+      ...DEFAULT_CONFIG.limits!,
+      ...(userConfig.limits || {}),
+    },
+    multipart: {
+      ...DEFAULT_CONFIG.multipart!,
+      ...(userConfig.multipart || {}),
+    },
+  };
+}
+
+/**
+ * IPC command type map for RustBridge
+ */
+type TRustS3Commands = {
+  start: { params: { config: Required<ISmarts3Config> }; result: {} };
+  stop: { params: {}; result: {} };
+  createBucket: { params: { name: string }; result: {} };
+};
+
+/**
+ * Main Smarts3 class - production-ready S3-compatible server
+ */
 export class Smarts3 {
   // STATIC
-  public static async createAndStart(optionsArg: ConstructorParameters<typeof Smarts3>[0]) {
-    const smartS3Instance = new Smarts3(optionsArg);
+  public static async createAndStart(configArg: ISmarts3Config = {}) {
+    const smartS3Instance = new Smarts3(configArg);
     await smartS3Instance.start();
     return smartS3Instance;
   }

   // INSTANCE
-  public options: ISmarts3ContructorOptions;
-  public s3Instance: plugins.s3rver;
+  public config: Required<ISmarts3Config>;
+  private bridge: InstanceType<typeof plugins.RustBridge<TRustS3Commands>>;

-  constructor(optionsArg: ISmarts3ContructorOptions) {
-    this.options = optionsArg;
-    this.options = {
-      ...this.options,
-      ...optionsArg,
-    };
+  constructor(configArg: ISmarts3Config = {}) {
+    this.config = mergeConfig(configArg);
+    this.bridge = new plugins.RustBridge<TRustS3Commands>({
+      binaryName: 'rusts3',
+      localPaths: [
+        plugins.path.join(paths.packageDir, 'dist_rust', 'rusts3'),
+        plugins.path.join(paths.packageDir, 'rust', 'target', 'release', 'rusts3'),
+        plugins.path.join(paths.packageDir, 'rust', 'target', 'debug', 'rusts3'),
+      ],
+      readyTimeoutMs: 30000,
+      requestTimeoutMs: 300000,
+    });
   }

   public async start() {
-    if (this.options.cleanSlate) {
-      await plugins.smartfile.fs.ensureEmptyDir(paths.bucketsDir);
-    } else {
-      await plugins.smartfile.fs.ensureDir(paths.bucketsDir);
+    const spawned = await this.bridge.spawn();
+    if (!spawned) {
+      throw new Error('Failed to spawn rusts3 binary. Make sure it is compiled (pnpm build).');
     }
-    this.s3Instance = new plugins.s3rver({
-      port: this.options.port || 3000,
-      address: '0.0.0.0',
-      silent: false,
-      directory: paths.bucketsDir,
-    });
-    await this.s3Instance.run();
-    console.log('s3 server is running');
+    await this.bridge.sendCommand('start', { config: this.config });
+
+    if (!this.config.server.silent) {
+      console.log('s3 server is running');
+    }
   }

-  public async getS3Descriptor(): Promise<plugins.tsclass.storage.IS3Descriptor> {
-    return {
-      accessKey: 'S3RVER',
-      accessSecret: 'S3RVER',
-      endpoint: '127.0.0.1',
-      port: this.options.port,
+  public async getS3Descriptor(
+    optionsArg?: Partial<plugins.tsclass.storage.IS3Descriptor>,
+  ): Promise<plugins.tsclass.storage.IS3Descriptor> {
+    const cred = this.config.auth.credentials[0] || {
+      accessKeyId: 'S3RVER',
+      secretAccessKey: 'S3RVER',
+    };
+    const descriptor: plugins.tsclass.storage.IS3Descriptor = {
+      endpoint: this.config.server.address === '0.0.0.0' ? 'localhost' : this.config.server.address!,
+      port: this.config.server.port!,
       useSsl: false,
+      accessKey: cred.accessKeyId,
+      accessSecret: cred.secretAccessKey,
+      bucketName: '',
+    };
+    return {
+      ...descriptor,
+      ...(optionsArg ? optionsArg : {}),
     };
   }

   public async createBucket(bucketNameArg: string) {
-    const smartbucketInstance = new plugins.smartbucket.SmartBucket(await this.getS3Descriptor());
-    const bucket = await smartbucketInstance.createBucket(bucketNameArg);
-    return bucket;
+    await this.bridge.sendCommand('createBucket', { name: bucketNameArg });
+    return { name: bucketNameArg };
   }

   public async stop() {
-    await this.s3Instance.close();
+    await this.bridge.sendCommand('stop', {});
+    this.bridge.kill();
   }
 }
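A minimal usage sketch (not part of the diff above, with placeholder credentials): mergeConfig shallow-merges each top-level section with its defaults, so a caller that sets only server.port keeps the default address, region, and every untouched section.

const instance = await Smarts3.createAndStart({
  server: { port: 3333 }, // address stays '0.0.0.0', region stays 'us-east-1'
  auth: { enabled: true, credentials: [{ accessKeyId: 'AKID', secretAccessKey: 'SECRET' }] },
});
// storage, cors, logging, limits, and multipart all keep their DEFAULT_CONFIG values
await instance.stop();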

View File

@@ -4,18 +4,12 @@ import * as path from 'path';
 export { path };

 // @push.rocks scope
-import * as smartbucket from '@push.rocks/smartbucket';
-import * as smartfile from '@push.rocks/smartfile';
 import * as smartpath from '@push.rocks/smartpath';
+import { RustBridge } from '@push.rocks/smartrust';

-export { smartbucket, smartfile, smartpath };
+export { smartpath, RustBridge };

 // @tsclass scope
 import * as tsclass from '@tsclass/tsclass';
 export { tsclass };
-
-// thirdparty scope
-import s3rver from 's3rver';
-export { s3rver };

View File

@@ -1,7 +1,5 @@
 {
   "compilerOptions": {
-    "experimentalDecorators": true,
-    "useDefineForClassFields": false,
     "target": "ES2022",
     "module": "NodeNext",
     "moduleResolution": "NodeNext",
@@ -10,7 +8,5 @@
     "baseUrl": ".",
     "paths": {}
   },
-  "exclude": [
-    "dist_*/**/*.d.ts"
-  ]
+  "exclude": ["dist_*/**/*.d.ts"]
 }