Compare commits
46 Commits
| SHA1 |
|---|
| e79fe339aa |
| 96ae76e70c |
| ed2c02bcf9 |
| 2f3031cfc7 |
| 459adc077a |
| 19f18ef480 |
| 6148b28cba |
| 012632111e |
| b9a59a8649 |
| f8a8c9fdff |
| d37b444dd5 |
| 02ad9a29a7 |
| 24c504518d |
| 92f07ef3d7 |
| 22e010c554 |
| 8ebc1bb9e1 |
| 3fc21dcd99 |
| ad5e0e8a72 |
| c384df20ce |
| 4e944f3d05 |
| e0455daa2e |
| f3f1afe9af |
| 94dc9cfc3f |
| a9c0ced1ca |
| c8626a9afd |
| 55a1f66e57 |
| 5b5f35821f |
| e8161e6417 |
| 1a10c32b12 |
| cb8cb87d9f |
| 96117d54b9 |
| 53f58e45c3 |
| 34d708be7e |
| 418e8dc052 |
| b8567ebe08 |
| 827bfa6370 |
| ceba64e34a |
| 8646d58f06 |
| 8ce6ff11c3 |
| 5c7aaebaba |
| be7d086c0b |
| 91a7b69f1d |
| 4e078b35d4 |
| d8a8259c73 |
| 9e7ce25b45 |
| b634ee50d1 |
@@ -13,5 +13,8 @@ rust/target/
package-lock.json
yarn.lock

# generated bundle (rebuilt on every build, embeds version)
ts_debugserver/bundled.ts

# playwright
.playwright-mcp/
+126
@@ -1,5 +1,131 @@
# Changelog

## 2026-04-29 - 2.7.1 - fix(repo)
no changes to commit

## 2026-04-14 - 2.7.0 - feat(update)
add aggregation pipeline updates and enforce immutable _id handling

- support aggregation pipeline syntax in update and findOneAndUpdate operations, including upserts
- add $unset stage support for aggregation-based document transformations
- return an ImmutableField error when updates attempt to change _id and preserve _id when omitted from replacements
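For illustration, a minimal driver-level sketch of the pipeline-update syntax this entry describes; the collection, filter, and field names are hypothetical, not taken from the diff:

```typescript
import { MongoClient } from 'mongodb';

const client = new MongoClient('mongodb://127.0.0.1:27017', { directConnection: true });
await client.connect();
const users = client.db('myapp').collection('users');

// An update expressed as an aggregation pipeline: derive one field, remove another.
await users.updateOne(
  { email: 'ada@example.com' },
  [
    { $set: { fullName: { $concat: ['$firstName', ' ', '$lastName'] } } },
    { $unset: ['legacyField'] },
  ],
  { upsert: true }, // the entry above says pipeline updates also cover upserts
);
```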
## 2026-04-05 - 2.6.2 - fix(readme)
align architecture diagram formatting in the documentation

- Adjusts spacing and box alignment in the README architecture diagram for clearer presentation.

## 2026-04-05 - 2.6.1 - fix(readme)
correct ASCII diagram spacing in architecture overview

- Adjusts alignment in the README architecture diagram for clearer visual formatting.

## 2026-04-05 - 2.6.0 - feat(readme)
document index enforcement, storage reliability, and data integrity validation features

- Add documentation for engine-level unique index enforcement and duplicate key behavior
- Describe storage engine reliability features including WAL, CRC32 checks, compaction, hint file staleness detection, and stale socket cleanup
- Add usage documentation for the offline data integrity validation CLI

## 2026-04-05 - 2.5.9 - fix(rustdb-storage)
run collection compaction during file storage initialization after crashes

- Triggers compaction for all loaded collections before starting the periodic background compaction task.
- Helps clean up dead weight left from before a crash during startup.

## 2026-04-05 - 2.5.8 - fix(rustdb-storage)
detect stale hint files using data file size metadata and add restart persistence regression tests

- Store the current data.rdb size in hint file headers and validate it on load to rebuild KeyDir when hints are stale or written in the old format.
- Persist updated hint metadata after compaction and shutdown to avoid missing appended tombstones after restart.
- Add validation reporting for stale hint files based on recorded versus actual data file size.
- Add regression tests covering delete persistence across restarts, missing hint recovery, stale socket cleanup, and unique index enforcement persistence.

## 2026-04-05 - 2.5.7 - fix(repo)
no changes to commit

## 2026-04-05 - 2.5.6 - fix(repo)
no changes to commit

## 2026-04-05 - 2.5.5 - fix(repo)
no changes to commit

## 2026-04-05 - 2.5.4 - fix(package)
bump package version to 2.5.3

- Updates the package metadata version by one patch release.

## 2026-04-05 - 2.5.3 - fix(rustdb-commands)
restore persisted index initialization before writes to enforce unique constraints after restart

- load stored index specifications from storage when creating command context index engines
- rebuild index data from existing documents so custom indexes are active before insert, update, and upsert operations
- add @push.rocks/smartdata as a runtime dependency

## 2026-04-05 - 2.5.2 - fix(rustdb-indexes)
persist created indexes and restore them on server startup

- Save index specifications to storage when indexes are created.
- Remove persisted index metadata when indexes are dropped by name, key spec, or wildcard.
- Rebuild in-memory index engines from stored definitions and existing documents during startup.

## 2026-04-05 - 2.5.1 - fix(docs)
update project documentation

- Modifies a single documentation-related file with a minimal text change.
- No source code, API, or package metadata changes are indicated in the diff summary.

## 2026-04-05 - 2.5.0 - feat(storage)
add offline data validation and strengthen storage/index integrity checks

- adds a `--validate-data <PATH>` CLI mode to run offline integrity checks on storage directories
- introduces storage validation reporting for headers, checksums, duplicate ids, tombstones, and stale or orphaned hint entries
- pre-checks unique index constraints before insert, update, upsert, and findAndModify writes to prevent duplicate-key violations before storage changes
- validates hint files against data files during collection load and rebuilds indexes from data when hints are stale
- ensures new data files always receive a SMARTDB header and persists fresh hint files after successful compaction
- cleans up stale local Unix socket files before starting the TypeScript local server

## 2026-04-05 - 2.4.1 - fix(package)
update package metadata

- Adjusts package manifest content with a minimal one-line change.

## 2026-04-05 - 2.4.0 - feat(rustdb)
add restore and periodic persistence support for in-memory storage

- Restore previously persisted state during startup when a persist path is configured.
- Spawn a background task to periodically persist in-memory data using the configured interval.
- Warn when running purely in-memory without durable persistence configured.

## 2026-04-04 - 2.3.1 - fix(package)
update package metadata

- Adjusts a single package-level metadata entry in the project configuration.

## 2026-04-04 - 2.3.0 - feat(test)
add integration coverage for file storage, compaction, migration, and LocalSmartDb workflows

- adds end-to-end tests for file-backed storage creation, CRUD operations, bulk updates, persistence, and index file generation
- adds compaction stress tests covering repeated updates, tombstones, file shrinking behavior, and restart integrity
- adds migration tests for automatic v0 JSON layout detection, v1 conversion, restart persistence, and post-migration writes
- adds LocalSmartDb lifecycle and unix socket tests, including restart persistence, custom socket paths, and database isolation

## 2026-04-04 - 2.2.0 - feat(storage)
add Bitcask storage migration, binary WAL, and data compaction support

- add TypeScript storage migration from legacy JSON collections to the v1 Bitcask binary format before starting the Rust engine
- replace the legacy JSON WAL with a binary write-ahead log plus shared binary record and KeyDir infrastructure in rustdb-storage
- introduce data file compaction with dead-record reclamation and tests, and add the bson dependency for BSON serialization during migration

## 2026-04-02 - 2.1.1 - fix(package)
update package metadata

- Adjusts a single package metadata entry in package.json.

## 2026-04-02 - 2.1.0 - feat(smartdb)
add operation log APIs, point-in-time revert support, and a web-based debug dashboard
+4 -2
@@ -1,6 +1,6 @@
 {
   "name": "@push.rocks/smartdb",
-  "version": "2.1.0",
+  "version": "2.7.1",
   "private": false,
   "description": "A MongoDB-compatible embedded database server with wire protocol support, backed by a high-performance Rust engine.",
   "exports": {
@@ -29,7 +29,9 @@
   "dependencies": {
     "@api.global/typedserver": "^8.0.0",
     "@design.estate/dees-element": "^2.0.0",
-    "@push.rocks/smartrust": "^1.3.2"
+    "@push.rocks/smartdata": "7.1.5",
+    "@push.rocks/smartrust": "^1.3.2",
+    "bson": "^7.2.0"
   },
   "browserslist": [
     "last 1 chrome versions"
Generated
+10 -4
@@ -14,9 +14,15 @@ importers:
       '@design.estate/dees-element':
         specifier: ^2.0.0
         version: 2.2.3
+      '@push.rocks/smartdata':
+        specifier: 7.1.5
+        version: 7.1.5(socks@2.8.7)
       '@push.rocks/smartrust':
         specifier: ^1.3.2
         version: 1.3.2
+      bson:
+        specifier: ^7.2.0
+        version: 7.2.0
     devDependencies:
       '@git.zone/tsbuild':
         specifier: ^4.4.0
@@ -1023,8 +1029,8 @@ packages:
   '@push.rocks/smartcrypto@2.0.4':
     resolution: {integrity: sha512-1+/5bsjyataf5uUkUNnnVXGRAt+gHVk1KDzozjTqgqJxHvQk1d9fVDohL6CxUhUucTPtu5VR5xNBiV8YCDuGyw==}
 
-  '@push.rocks/smartdata@7.1.3':
-    resolution: {integrity: sha512-7vQJ9pdRk450yn2m9tmGPdSRlQVmxFPZjHD4sGYsfqCQPg+GLFusu+H16zpf+jKzAq4F2ZBMPaYymJHXvXiVcw==}
+  '@push.rocks/smartdata@7.1.5':
+    resolution: {integrity: sha512-7x7VedEg6RocWndqUPuTbY2Bh85Q/x0LOVHL4o/NVXyh3IGNtiVQ8ple4WR0qYqlHRAojX4eDSBPMiYzIasqAg==}
 
   '@push.rocks/smartdelay@3.0.5':
     resolution: {integrity: sha512-mUuI7kj2f7ztjpic96FvRIlf2RsKBa5arw81AHNsndbxO6asRcxuWL8dTVxouEIK8YsBUlj0AsrCkHhMbLQdHw==}
@@ -5662,7 +5668,7 @@ snapshots:
     '@types/node-forge': 1.3.14
     node-forge: 1.4.0
 
-  '@push.rocks/smartdata@7.1.3(socks@2.8.7)':
+  '@push.rocks/smartdata@7.1.5(socks@2.8.7)':
     dependencies:
       '@push.rocks/lik': 6.4.0
       '@push.rocks/smartdelay': 3.0.5
@@ -5896,7 +5902,7 @@ snapshots:
   '@push.rocks/smartmongo@5.1.1(socks@2.8.7)':
     dependencies:
       '@push.rocks/mongodump': 1.1.0(socks@2.8.7)
-      '@push.rocks/smartdata': 7.1.3(socks@2.8.7)
+      '@push.rocks/smartdata': 7.1.5(socks@2.8.7)
       '@push.rocks/smartfs': 1.5.0
       '@push.rocks/smartpath': 6.0.0
       '@push.rocks/smartpromise': 4.2.3
@@ -44,38 +44,38 @@ SmartDB uses a **sidecar binary** pattern — TypeScript handles lifecycle, Rust

```
┌──────────────────────────────────────────────────────────────┐
│                       Your Application                        │
│                    (TypeScript / Node.js)                     │
│   ┌──────────────────┐      ┌───────────────────────────┐    │
│   │  SmartdbServer   │─────▶│   RustDbBridge (IPC)      │    │
│   │ or LocalSmartDb  │      │   @push.rocks/smartrust   │    │
│   └──────────────────┘      └───────────┬───────────────┘    │
└─────────────────────────────────────────┼────────────────────┘
                                          │ spawn + JSON IPC
                                          ▼
┌──────────────────────────────────────────────────────────────┐
│                        rustdb binary                          │
│                                                               │
│  ┌──────────────┐  ┌──────────────┐  ┌───────────────┐       │
│  │ Wire Protocol│→ │Command Router│→ │   Handlers    │       │
│  │   (OP_MSG)   │  │  (40+ cmds)  │  │ Find,Insert.. │       │
│  └──────────────┘  └──────────────┘  └───────┬───────┘       │
│                                              │               │
│  ┌─────────┐ ┌────────┐ ┌───────────┐ ┌──────┴──────┐        │
│  │  Query  │ │ Update │ │Aggregation│ │    Index    │        │
│  │ Matcher │ │ Engine │ │  Engine   │ │   Engine    │        │
│  └─────────┘ └────────┘ └───────────┘ └─────────────┘        │
│                                                               │
│  ┌──────────────────┐  ┌──────────────────┐  ┌──────────┐    │
│  │  MemoryStorage   │  │   FileStorage    │  │  OpLog   │    │
│  └──────────────────┘  └──────────────────┘  └──────────┘    │
└──────────────────────────────────────────────────────────────┘
              ▲
              │ TCP / Unix Socket (wire protocol)
              │
┌─────────────┴────────────────────────────────────────────────┐
│              MongoClient (mongodb npm driver)                 │
│              Connects directly to Rust binary                 │
└──────────────────────────────────────────────────────────────┘
```
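To make the spawn-plus-IPC flow above concrete, a minimal sketch follows. The binary path matches the docs further down, but the newline-delimited JSON message shape is purely illustrative; it is not SmartDB's actual IPC contract (that lives in @push.rocks/smartrust):

```typescript
import { spawn } from 'node:child_process';

// Launch the Rust engine as a sidecar child process.
const rustdb = spawn('./dist_rust/rustdb_linux_amd64', [], {
  stdio: ['pipe', 'pipe', 'inherit'],
});

// Hypothetical newline-delimited JSON exchange over stdio.
rustdb.stdin?.write(JSON.stringify({ cmd: 'ping' }) + '\n');
rustdb.stdout?.on('data', (chunk) => {
  console.log('engine says:', chunk.toString().trim());
});
```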

@@ -159,7 +159,7 @@ The debug dashboard gives you:
 
 ## 📝 Operation Log & Point-in-Time Revert
 
-Every write operation (insert, update, delete) is automatically recorded in an in-memory **operation log (OpLog)** with full before/after document snapshots. This enables:
+Every write operation (insert, update, delete) is automatically recorded in an in-memory **operation log (OpLog)** with full before/after document snapshots. The OpLog lives in RAM and resets on restart — it covers the current session only. This enables:
 
 - **Change tracking** — see exactly what changed, when, and in which collection
 - **Field-level diffs** — compare previous and new document states
@@ -248,6 +248,62 @@ const server = new SmartdbServer({
  persistPath: './data/snapshot.json',
  persistIntervalMs: 30000, // Save every 30s
});

// TLS transport for TCP mode
const tlsServer = new SmartdbServer({
  port: 27017,
  tls: {
    enabled: true,
    certPath: './certs/server.pem',
    keyPath: './certs/server.key',
    // caPath: './certs/client-ca.pem',
    // requireClientCert: true, // Enables mTLS client certificate checks
  },
});

// SCRAM-SHA-256 authentication
const secureServer = new SmartdbServer({
  port: 27017,
  auth: {
    enabled: true,
    usersPath: './data/smartdb-users.json', // Optional: persists derived SCRAM credentials
    users: [
      {
        username: 'root',
        password: 'change-me',
        database: 'admin',
        roles: ['root'],
      },
    ],
  },
});
```

When `auth.enabled` is true, protected commands require successful SCRAM-SHA-256 authentication through the official MongoDB driver:

```typescript
const client = new MongoClient('mongodb://root:change-me@127.0.0.1:27017/admin?authSource=admin', {
  directConnection: true,
});
await client.connect();
```

TLS is available for TCP listeners. `getConnectionUri()` includes `?tls=true` when TLS is enabled; pass the trusted CA to the MongoDB driver with `tlsCAFile`, `ca`, or `secureContext`.
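As a usage sketch of that driver-side TLS setup (the CA file path is an example, not a path from this repository):

```typescript
import { MongoClient } from 'mongodb';

// Trust the CA that signed the server certificate.
const client = new MongoClient('mongodb://127.0.0.1:27017/?tls=true', {
  directConnection: true,
  tlsCAFile: './certs/ca.pem',
});
await client.connect();
```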

Authentication verifies SCRAM credentials, denies unauthenticated commands, and enforces command-level built-in roles for supported operations.

Supported built-in role names are `root`, `read`, `readWrite`, `dbAdmin`, `userAdmin`, `clusterMonitor`, plus `readAnyDatabase`, `readWriteAnyDatabase`, `dbAdminAnyDatabase`, and `userAdminAnyDatabase`. When `usersPath` is set, SmartDB persists SCRAM credential material atomically and does not store plaintext passwords.

Basic user management commands are available for authenticated users with `root` or `userAdmin` privileges:

```typescript
await client.db('admin').command({
  createUser: 'reader',
  pwd: 'readpass',
  roles: [{ role: 'read', db: 'myapp' }],
});

await client.db('admin').command({ usersInfo: 'reader' });
```
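The rustdb-auth crate added in this diff also implements dropping users and changing roles; a hedged sketch of how that could look through the driver, assuming the standard MongoDB command spellings map onto those handlers:

```typescript
// Assumption: grantRolesToUser / dropUser are wired to the grant_roles /
// drop_user functions implemented in the rustdb-auth crate further below.
await client.db('admin').command({
  grantRolesToUser: 'reader',
  roles: [{ role: 'readWrite', db: 'myapp' }],
});

await client.db('admin').command({ dropUser: 'reader' });
```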

#### Methods & Properties

@@ -429,6 +485,8 @@ await collection.dropIndex('email_1');
await collection.dropIndexes(); // drop all except _id
```

> 🛡️ **Unique indexes are enforced at the engine level.** Duplicate values are rejected with a `DuplicateKey` error (code 11000) *before* the document is written to disk — on `insertOne`, `updateOne`, `findAndModify`, and upserts. Index definitions are persisted to `indexes.json` and automatically restored on restart.
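A small sketch of catching that duplicate-key rejection from the driver, reusing the hypothetical `users` collection handle from the earlier sketch:

```typescript
import { MongoServerError } from 'mongodb';

try {
  await users.insertOne({ email: 'ada@example.com' });
} catch (err) {
  // Engine-level unique-index enforcement surfaces as duplicate key code 11000.
  if (err instanceof MongoServerError && err.code === 11000) {
    console.warn('duplicate email rejected before any disk write');
  } else {
    throw err;
  }
}
```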

### Database & Admin

```typescript
@@ -497,6 +555,39 @@ The Rust engine is organized as a Cargo workspace with 8 focused crates:

Cross-compiled for `linux_amd64` and `linux_arm64` via [@git.zone/tsrust](https://www.npmjs.com/package/@git.zone/tsrust).

### Storage Engine Reliability 🔒

The Bitcask-style file storage engine includes several reliability features:

- **Write-ahead log (WAL)** — every write is logged before being applied, with crash recovery on restart
- **CRC32 checksums** — every record is integrity-checked on read
- **Automatic compaction** — dead records are reclaimed when they exceed 50% of file size; compaction runs on startup and after every write
- **Hint file staleness detection** — the hint file records the data file size at write time; if data.rdb has changed since (e.g. a crash after a delete), the engine falls back to a full scan so tombstones are not lost (see the sketch after this list)
- **Stale socket cleanup** — orphaned `/tmp/smartdb-*.sock` files from crashed instances are automatically cleaned up on startup
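A minimal TypeScript sketch of the staleness check described in the hint-file bullet above; the header shape and field name are hypothetical, since the real hint header is a binary format inside the Rust engine:

```typescript
import { statSync } from 'node:fs';

// Hypothetical decoded hint-file header; the real format is binary.
interface HintHeader {
  dataFileSizeAtWrite: number;
}

function hintIsStale(header: HintHeader, dataFilePath: string): boolean {
  // If data.rdb grew or shrank after the hint was written (for example, a
  // crash right after an appended tombstone), the hint cannot be trusted
  // and the KeyDir must be rebuilt from a full scan of the data file.
  return statSync(dataFilePath).size !== header.dataFileSizeAtWrite;
}
```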

### Data Integrity CLI 🔍

The Rust binary includes an offline integrity checker:

```bash
# Check all collections in a data directory
./dist_rust/rustdb_linux_amd64 --validate-data /path/to/data

# Output:
# === SmartDB Data Integrity Report ===
#
# Database: mydb
# Collection: users
# Header: OK
# Records: 1,234 (1,200 live, 34 tombstones)
# Data size: 2.1 MB
# Duplicates: 0
# CRC errors: 0
# Hint file: OK
```

Checks file headers, record CRC32 checksums, duplicate `_id` entries, and hint file consistency. Exit code 1 if any errors are found.
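If you want to gate startup or CI on that documented exit code, a small Node-side sketch (the data path is an example):

```typescript
import { spawnSync } from 'node:child_process';

// Fail fast on a corrupted data directory before starting the server.
const check = spawnSync('./dist_rust/rustdb_linux_amd64', ['--validate-data', './data'], {
  stdio: 'inherit',
});
if (check.status !== 0) {
  throw new Error('SmartDB data integrity check reported errors');
}
```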

---

## Testing Example

@@ -541,7 +632,7 @@ export default tap.start();
 
 ## License and Legal Information
 
-This repository contains open-source code licensed under the MIT License. A copy of the license can be found in the [LICENSE](./LICENSE) file.
+This repository contains open-source code licensed under the MIT License. A copy of the license can be found in the [license](./license) file.
 
 **Please note:** The MIT License does not grant permission to use the trade names, trademarks, service marks, or product names of the project, except as required for reasonable and customary use in describing the origin of the work and reproducing the content of the NOTICE file.
Generated
+344 -11
@@ -60,7 +60,7 @@ version = "1.1.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "40c48f72fd53cd289104fc64099abca73db4166ad86ea0b4341abe65af83dadc"
 dependencies = [
- "windows-sys",
+ "windows-sys 0.61.2",
 ]
 
 [[package]]
@@ -71,7 +71,7 @@ checksum = "291e6a250ff86cd4a820112fb8898808a366d8f9f58ce16d1f538353ad55747d"
 dependencies = [
  "anstyle",
  "once_cell_polyfill",
- "windows-sys",
+ "windows-sys 0.61.2",
 ]
 
 [[package]]
@@ -124,6 +124,15 @@ dependencies = [
  "wyz",
 ]
 
+[[package]]
+name = "block-buffer"
+version = "0.10.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71"
+dependencies = [
+ "generic-array",
+]
+
 [[package]]
 name = "bson"
 version = "2.15.0"
@@ -139,7 +148,7 @@
  "indexmap",
  "js-sys",
  "once_cell",
- "rand",
+ "rand 0.9.2",
  "serde",
  "serde_bytes",
  "serde_json",
@@ -221,6 +230,15 @@ version = "1.0.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "1d07550c9036bf2ae0c684c4297d503f838287c83c53686d05370d0e139ae570"
 
+[[package]]
+name = "cpufeatures"
+version = "0.2.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280"
+dependencies = [
+ "libc",
+]
+
 [[package]]
 name = "crc32fast"
 version = "1.5.0"
@@ -236,6 +254,16 @@ version = "0.8.21"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28"
 
+[[package]]
+name = "crypto-common"
+version = "0.1.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "78c8292055d1c1df0cce5d180393dc8cce0abec0a7102adb6c7b1eef6016d60a"
+dependencies = [
+ "generic-array",
+ "typenum",
+]
+
 [[package]]
 name = "dashmap"
 version = "6.1.0"
@@ -259,6 +287,17 @@ dependencies = [
  "powerfmt",
 ]
 
+[[package]]
+name = "digest"
+version = "0.10.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292"
+dependencies = [
+ "block-buffer",
+ "crypto-common",
+ "subtle",
+]
+
 [[package]]
 name = "equivalent"
 version = "1.0.2"
@@ -272,9 +311,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb"
 dependencies = [
  "libc",
- "windows-sys",
+ "windows-sys 0.61.2",
 ]
 
+[[package]]
+name = "fastrand"
+version = "2.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a043dc74da1e37d6afe657061213aa6f425f855399a11d3463c6ecccc4dfda1f"
+
 [[package]]
 name = "find-msvc-tools"
 version = "0.1.9"
@@ -336,6 +381,16 @@ dependencies = [
  "slab",
 ]
 
+[[package]]
+name = "generic-array"
+version = "0.14.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a"
+dependencies = [
+ "typenum",
+ "version_check",
+]
+
 [[package]]
 name = "getrandom"
 version = "0.2.17"
@@ -409,6 +464,15 @@ version = "0.4.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
 
+[[package]]
+name = "hmac"
+version = "0.12.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e"
+dependencies = [
+ "digest",
+]
+
 [[package]]
 name = "id-arena"
 version = "2.3.0"
@@ -477,6 +541,12 @@ dependencies = [
  "libc",
 ]
 
+[[package]]
+name = "linux-raw-sys"
+version = "0.12.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "32a66949e030da00e8c7d4434b251670a91556f4144941d37452769c25d58a53"
+
 [[package]]
 name = "lock_api"
 version = "0.4.14"
@@ -524,7 +594,7 @@ checksum = "a69bcab0ad47271a0234d9422b131806bf3968021e5dc9328caf2d4cd58557fc"
 dependencies = [
  "libc",
  "wasi",
- "windows-sys",
+ "windows-sys 0.61.2",
 ]
 
 [[package]]
@@ -533,7 +603,7 @@ version = "0.50.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5"
 dependencies = [
- "windows-sys",
+ "windows-sys 0.61.2",
 ]
 
 [[package]]
@@ -577,6 +647,16 @@ dependencies = [
  "windows-link",
 ]
 
+[[package]]
+name = "pbkdf2"
+version = "0.12.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f8ed6a7761f76e3b9f92dfb0a60a6a6477c61024b775147ff0973a02653abaf2"
+dependencies = [
+ "digest",
+ "hmac",
+]
+
 [[package]]
 name = "pin-project-lite"
 version = "0.2.17"
@@ -644,14 +724,35 @@ version = "0.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09"
 
+[[package]]
+name = "rand"
+version = "0.8.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5ca0ecfa931c29007047d1bc58e623ab12e5590e8c7cc53200d5202b69266d8a"
+dependencies = [
+ "libc",
+ "rand_chacha 0.3.1",
+ "rand_core 0.6.4",
+]
+
 [[package]]
 name = "rand"
 version = "0.9.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1"
 dependencies = [
- "rand_chacha",
- "rand_core",
+ "rand_chacha 0.9.0",
+ "rand_core 0.9.5",
 ]
 
+[[package]]
+name = "rand_chacha"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
+dependencies = [
+ "ppv-lite86",
+ "rand_core 0.6.4",
+]
+
 [[package]]
@@ -661,7 +762,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb"
 dependencies = [
  "ppv-lite86",
- "rand_core",
+ "rand_core 0.9.5",
 ]
 
+[[package]]
+name = "rand_core"
+version = "0.6.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
+dependencies = [
+ "getrandom 0.2.17",
+]
+
 [[package]]
@@ -711,6 +821,20 @@ version = "0.8.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "dc897dd8d9e8bd1ed8cdad82b5966c3e0ecae09fb1907d58efaa013543185d0a"
 
+[[package]]
+name = "ring"
+version = "0.17.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7"
+dependencies = [
+ "cc",
+ "cfg-if",
+ "getrandom 0.2.17",
+ "libc",
+ "untrusted",
+ "windows-sys 0.52.0",
+]
+
 [[package]]
 name = "rustdb"
 version = "0.1.0"
@@ -723,6 +847,7 @@ dependencies = [
  "dashmap",
  "futures-util",
  "mimalloc",
+ "rustdb-auth",
  "rustdb-commands",
  "rustdb-config",
  "rustdb-index",
@@ -730,14 +855,33 @@ dependencies = [
  "rustdb-storage",
  "rustdb-txn",
  "rustdb-wire",
+ "rustls-pemfile",
  "serde",
  "serde_json",
  "tokio",
+ "tokio-rustls",
  "tokio-util",
  "tracing",
  "tracing-subscriber",
 ]
 
+[[package]]
+name = "rustdb-auth"
+version = "0.1.0"
+dependencies = [
+ "base64",
+ "bson",
+ "hmac",
+ "pbkdf2",
+ "rand 0.8.6",
+ "rustdb-config",
+ "serde",
+ "serde_json",
+ "sha2",
+ "subtle",
+ "thiserror",
+]
+
 [[package]]
 name = "rustdb-commands"
 version = "0.1.0"
@@ -745,6 +889,7 @@ dependencies = [
  "async-trait",
  "bson",
  "dashmap",
+ "rustdb-auth",
  "rustdb-config",
  "rustdb-index",
  "rustdb-query",
@@ -802,6 +947,7 @@ dependencies = [
  "dashmap",
  "serde",
  "serde_json",
+ "tempfile",
  "thiserror",
  "tokio",
  "tracing",
@@ -835,6 +981,62 @@ dependencies = [
  "tracing",
 ]
 
+[[package]]
+name = "rustix"
+version = "1.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b6fe4565b9518b83ef4f91bb47ce29620ca828bd32cb7e408f0062e9930ba190"
+dependencies = [
+ "bitflags",
+ "errno",
+ "libc",
+ "linux-raw-sys",
+ "windows-sys 0.61.2",
+]
+
+[[package]]
+name = "rustls"
+version = "0.23.40"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ef86cd5876211988985292b91c96a8f2d298df24e75989a43a3c73f2d4d8168b"
+dependencies = [
+ "once_cell",
+ "ring",
+ "rustls-pki-types",
+ "rustls-webpki",
+ "subtle",
+ "zeroize",
+]
+
+[[package]]
+name = "rustls-pemfile"
+version = "2.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50"
+dependencies = [
+ "rustls-pki-types",
+]
+
+[[package]]
+name = "rustls-pki-types"
+version = "1.14.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "30a7197ae7eb376e574fe940d068c30fe0462554a3ddbe4eca7838e049c937a9"
+dependencies = [
+ "zeroize",
+]
+
+[[package]]
+name = "rustls-webpki"
+version = "0.103.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "61c429a8649f110dddef65e2a5ad240f747e85f7758a6bccc7e5777bd33f756e"
+dependencies = [
+ "ring",
+ "rustls-pki-types",
+ "untrusted",
+]
+
 [[package]]
 name = "rustversion"
 version = "1.0.22"
@@ -907,6 +1109,17 @@ dependencies = [
  "zmij",
 ]
 
+[[package]]
+name = "sha2"
+version = "0.10.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283"
+dependencies = [
+ "cfg-if",
+ "cpufeatures",
+ "digest",
+]
+
 [[package]]
 name = "sharded-slab"
 version = "0.1.7"
@@ -951,7 +1164,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "3a766e1110788c36f4fa1c2b71b387a7815aa65f88ce0229841826633d93723e"
 dependencies = [
  "libc",
- "windows-sys",
+ "windows-sys 0.61.2",
 ]
 
 [[package]]
@@ -960,6 +1173,12 @@ version = "0.11.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f"
 
+[[package]]
+name = "subtle"
+version = "2.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292"
+
 [[package]]
 name = "syn"
 version = "2.0.117"
@@ -977,6 +1196,19 @@ version = "1.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369"
 
+[[package]]
+name = "tempfile"
+version = "3.27.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "32497e9a4c7b38532efcdebeef879707aa9f794296a4f0244f6f69e9bc8574bd"
+dependencies = [
+ "fastrand",
+ "getrandom 0.4.2",
+ "once_cell",
+ "rustix",
+ "windows-sys 0.61.2",
+]
+
 [[package]]
 name = "thiserror"
 version = "2.0.18"
@@ -1051,7 +1283,7 @@ dependencies = [
  "signal-hook-registry",
  "socket2",
  "tokio-macros",
- "windows-sys",
+ "windows-sys 0.61.2",
 ]
 
 [[package]]
@@ -1065,6 +1297,16 @@ dependencies = [
  "syn",
 ]
 
+[[package]]
+name = "tokio-rustls"
+version = "0.26.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61"
+dependencies = [
+ "rustls",
+ "tokio",
+]
+
 [[package]]
 name = "tokio-util"
 version = "0.7.18"
@@ -1139,6 +1381,12 @@ dependencies = [
  "tracing-log",
 ]
 
+[[package]]
+name = "typenum"
+version = "1.20.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "40ce102ab67701b8526c123c1bab5cbe42d7040ccfd0f64af1a385808d2f43de"
+
 [[package]]
 name = "unicode-ident"
 version = "1.0.24"
@@ -1151,6 +1399,12 @@ version = "0.2.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853"
 
+[[package]]
+name = "untrusted"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1"
+
 [[package]]
 name = "utf8parse"
 version = "0.2.2"
@@ -1290,6 +1544,15 @@ version = "0.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5"
 
+[[package]]
+name = "windows-sys"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
+dependencies = [
+ "windows-targets",
+]
+
 [[package]]
 name = "windows-sys"
 version = "0.61.2"
@@ -1299,6 +1562,70 @@ dependencies = [
  "windows-link",
 ]
 
+[[package]]
+name = "windows-targets"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
+dependencies = [
+ "windows_aarch64_gnullvm",
+ "windows_aarch64_msvc",
+ "windows_i686_gnu",
+ "windows_i686_gnullvm",
+ "windows_i686_msvc",
+ "windows_x86_64_gnu",
+ "windows_x86_64_gnullvm",
+ "windows_x86_64_msvc",
+]
+
+[[package]]
+name = "windows_aarch64_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
+
+[[package]]
+name = "windows_aarch64_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
+
+[[package]]
+name = "windows_i686_gnu"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
+
+[[package]]
+name = "windows_i686_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
+
+[[package]]
+name = "windows_i686_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
+
+[[package]]
+name = "windows_x86_64_gnu"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
+
+[[package]]
+name = "windows_x86_64_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
+
+[[package]]
+name = "windows_x86_64_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
+
 [[package]]
 name = "wit-bindgen"
 version = "0.51.0"
@@ -1416,6 +1743,12 @@ dependencies = [
  "syn",
 ]
 
+[[package]]
+name = "zeroize"
+version = "1.8.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0"
+
 [[package]]
 name = "zmij"
 version = "1.0.21"
@@ -8,6 +8,7 @@ members = [
   "crates/rustdb-storage",
   "crates/rustdb-index",
   "crates/rustdb-txn",
+  "crates/rustdb-auth",
   "crates/rustdb-commands",
 ]
 
@@ -51,6 +52,10 @@ dashmap = "6"
 # Cancellation / utility
 tokio-util = { version = "0.7", features = ["codec"] }
 
+# TLS transport
+tokio-rustls = { version = "0.26", default-features = false, features = ["ring", "tls12"] }
+rustls-pemfile = "2"
+
 # mimalloc allocator
 mimalloc = "0.1"
 
@@ -60,12 +65,23 @@ crc32fast = "1"
 # Regex for $regex operator
 regex = "1"
 
+# Auth crypto
+base64 = "0.22"
+hmac = "0.12"
+pbkdf2 = { version = "0.12", features = ["hmac"] }
+rand = "0.8"
+sha2 = "0.10"
+subtle = "2"
+
 # UUID for sessions
 uuid = { version = "1", features = ["v4", "serde"] }
 
 # Async traits
 async-trait = "0.1"
 
+# Test utilities
+tempfile = "3"
+
 # Internal crates
 rustdb-config = { path = "crates/rustdb-config" }
 rustdb-wire = { path = "crates/rustdb-wire" }
@@ -73,4 +89,5 @@ rustdb-query = { path = "crates/rustdb-query" }
 rustdb-storage = { path = "crates/rustdb-storage" }
 rustdb-index = { path = "crates/rustdb-index" }
 rustdb-txn = { path = "crates/rustdb-txn" }
+rustdb-auth = { path = "crates/rustdb-auth" }
 rustdb-commands = { path = "crates/rustdb-commands" }
@@ -0,0 +1,20 @@
[package]
name = "rustdb-auth"
version.workspace = true
edition.workspace = true
license.workspace = true
authors.workspace = true
description = "Authentication primitives for RustDb"

[dependencies]
base64 = { workspace = true }
bson = { workspace = true }
hmac = { workspace = true }
pbkdf2 = { workspace = true }
rand = { workspace = true }
rustdb-config = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
sha2 = { workspace = true }
subtle = { workspace = true }
thiserror = { workspace = true }
@@ -0,0 +1,565 @@
|
||||
use std::collections::HashMap;
|
||||
use std::io::Write;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::RwLock;
|
||||
|
||||
use base64::{engine::general_purpose::STANDARD as BASE64_STANDARD, Engine as _};
|
||||
use hmac::{Hmac, Mac};
|
||||
use pbkdf2::pbkdf2_hmac;
|
||||
use rand::{rngs::OsRng, RngCore};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sha2::{Digest, Sha256};
|
||||
use subtle::ConstantTimeEq;
|
||||
|
||||
use rustdb_config::{AuthOptions, AuthUserOptions};
|
||||
|
||||
type HmacSha256 = Hmac<Sha256>;
|
||||
|
||||
const SCRAM_SHA_256: &str = "SCRAM-SHA-256";
|
||||
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub enum AuthError {
|
||||
#[error("authentication is disabled")]
|
||||
Disabled,
|
||||
#[error("unsupported authentication mechanism: {0}")]
|
||||
UnsupportedMechanism(String),
|
||||
#[error("invalid SCRAM payload: {0}")]
|
||||
InvalidPayload(String),
|
||||
#[error("authentication failed")]
|
||||
AuthenticationFailed,
|
||||
#[error("unknown SASL conversation")]
|
||||
UnknownConversation,
|
||||
#[error("user already exists: {0}")]
|
||||
UserAlreadyExists(String),
|
||||
#[error("user not found: {0}")]
|
||||
UserNotFound(String),
|
||||
#[error("auth metadata persistence failed: {0}")]
|
||||
Persistence(String),
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub enum AuthAction {
|
||||
Read,
|
||||
Write,
|
||||
DbAdmin,
|
||||
UserAdmin,
|
||||
ClusterMonitor,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct AuthenticatedUser {
|
||||
pub username: String,
|
||||
pub database: String,
|
||||
pub roles: Vec<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
struct ScramCredential {
|
||||
salt: Vec<u8>,
|
||||
iterations: u32,
|
||||
stored_key: Vec<u8>,
|
||||
server_key: Vec<u8>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
struct AuthUser {
|
||||
username: String,
|
||||
database: String,
|
||||
roles: Vec<String>,
|
||||
scram_sha256: ScramCredential,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
|
||||
struct PersistedAuthState {
|
||||
users: Vec<AuthUser>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct ScramConversation {
|
||||
user: AuthenticatedUser,
|
||||
client_first_bare: String,
|
||||
server_first: String,
|
||||
nonce: String,
|
||||
stored_key: Vec<u8>,
|
||||
server_key: Vec<u8>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct ScramStartResult {
|
||||
pub payload: Vec<u8>,
|
||||
pub conversation: ScramConversation,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct ScramContinueResult {
|
||||
pub payload: Vec<u8>,
|
||||
pub user: AuthenticatedUser,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct AuthEngine {
|
||||
enabled: bool,
|
||||
users: RwLock<HashMap<String, AuthUser>>,
|
||||
users_path: Option<PathBuf>,
|
||||
scram_iterations: u32,
|
||||
}
|
||||
|
||||
impl AuthEngine {
|
||||
pub fn from_options(options: &AuthOptions) -> Result<Self, AuthError> {
|
||||
let users_path = options.users_path.as_ref().map(PathBuf::from);
|
||||
let mut users = if let Some(ref path) = users_path {
|
||||
load_users(path)?
|
||||
} else {
|
||||
HashMap::new()
|
||||
};
|
||||
|
||||
let mut changed = false;
|
||||
for user_options in &options.users {
|
||||
let key = user_key(&user_options.database, &user_options.username);
|
||||
if !users.contains_key(&key) {
|
||||
let user = AuthUser::from_options(user_options, options.scram_iterations);
|
||||
users.insert(key, user);
|
||||
changed = true;
|
||||
}
|
||||
}
|
||||
|
||||
if changed {
|
||||
if let Some(ref path) = users_path {
|
||||
persist_users(path, &users)?;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(Self {
|
||||
enabled: options.enabled,
|
||||
users: RwLock::new(users),
|
||||
users_path,
|
||||
scram_iterations: options.scram_iterations,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn disabled() -> Self {
|
||||
Self {
|
||||
enabled: false,
|
||||
users: RwLock::new(HashMap::new()),
|
||||
users_path: None,
|
||||
scram_iterations: 15000,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn enabled(&self) -> bool {
|
||||
self.enabled
|
||||
}
|
||||
|
||||
pub fn supported_mechanisms(&self, namespace_user: &str) -> Vec<String> {
|
||||
let Some((database, username)) = namespace_user.split_once('.') else {
|
||||
return Vec::new();
|
||||
};
|
||||
let users = self.users.read().unwrap_or_else(|poisoned| poisoned.into_inner());
|
||||
if users.contains_key(&user_key(database, username)) {
|
||||
vec![SCRAM_SHA_256.to_string()]
|
||||
} else {
|
||||
Vec::new()
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_authorized(
|
||||
&self,
|
||||
authenticated_users: &[AuthenticatedUser],
|
||||
target_db: &str,
|
||||
action: AuthAction,
|
||||
) -> bool {
|
||||
authenticated_users
|
||||
.iter()
|
||||
.any(|user| user.roles.iter().any(|role| role_allows(role, user, target_db, action)))
|
||||
}
|
||||
|
||||
pub fn create_user(
|
||||
&self,
|
||||
database: &str,
|
||||
username: &str,
|
||||
password: &str,
|
||||
roles: Vec<String>,
|
||||
) -> Result<(), AuthError> {
|
||||
let key = user_key(database, username);
|
||||
let mut users = self.users.write().unwrap_or_else(|poisoned| poisoned.into_inner());
|
||||
if users.contains_key(&key) {
|
||||
return Err(AuthError::UserAlreadyExists(format!("{database}.{username}")));
|
||||
}
|
||||
let options = AuthUserOptions {
|
||||
username: username.to_string(),
|
||||
password: password.to_string(),
|
||||
database: database.to_string(),
|
||||
roles,
|
||||
};
|
||||
users.insert(key, AuthUser::from_options(&options, self.scram_iterations));
|
||||
self.persist_locked(&users)
|
||||
}
|
||||
|
||||
pub fn drop_user(&self, database: &str, username: &str) -> Result<(), AuthError> {
|
||||
let key = user_key(database, username);
|
||||
let mut users = self.users.write().unwrap_or_else(|poisoned| poisoned.into_inner());
|
||||
if users.remove(&key).is_none() {
|
||||
return Err(AuthError::UserNotFound(format!("{database}.{username}")));
|
||||
}
|
||||
self.persist_locked(&users)
|
||||
}
|
||||
|
||||
pub fn update_user(
|
||||
&self,
|
||||
database: &str,
|
||||
username: &str,
|
||||
password: Option<&str>,
|
||||
roles: Option<Vec<String>>,
|
||||
) -> Result<(), AuthError> {
|
||||
let key = user_key(database, username);
|
||||
let mut users = self.users.write().unwrap_or_else(|poisoned| poisoned.into_inner());
|
||||
let user = users
|
||||
.get_mut(&key)
|
||||
.ok_or_else(|| AuthError::UserNotFound(format!("{database}.{username}")))?;
|
||||
if let Some(new_roles) = roles {
|
||||
user.roles = new_roles;
|
||||
}
|
||||
if let Some(new_password) = password {
|
||||
let options = AuthUserOptions {
|
||||
username: username.to_string(),
|
||||
password: new_password.to_string(),
|
||||
database: database.to_string(),
|
||||
roles: user.roles.clone(),
|
||||
};
|
||||
user.scram_sha256 = AuthUser::from_options(&options, self.scram_iterations).scram_sha256;
|
||||
}
|
||||
self.persist_locked(&users)
|
||||
}
|
||||
|
||||
pub fn grant_roles(
|
||||
&self,
|
||||
database: &str,
|
||||
username: &str,
|
||||
roles: Vec<String>,
|
||||
) -> Result<(), AuthError> {
|
||||
let key = user_key(database, username);
|
||||
let mut users = self.users.write().unwrap_or_else(|poisoned| poisoned.into_inner());
|
||||
let user = users
|
||||
.get_mut(&key)
|
||||
.ok_or_else(|| AuthError::UserNotFound(format!("{database}.{username}")))?;
|
||||
for role in roles {
|
||||
if !user.roles.contains(&role) {
|
||||
user.roles.push(role);
|
||||
}
|
||||
}
|
||||
self.persist_locked(&users)
|
||||
}
|
||||
|
||||
pub fn revoke_roles(
|
||||
&self,
|
||||
database: &str,
|
||||
username: &str,
|
||||
roles: Vec<String>,
|
||||
) -> Result<(), AuthError> {
|
||||
let key = user_key(database, username);
|
||||
let mut users = self.users.write().unwrap_or_else(|poisoned| poisoned.into_inner());
|
||||
let user = users
|
||||
.get_mut(&key)
|
||||
.ok_or_else(|| AuthError::UserNotFound(format!("{database}.{username}")))?;
|
||||
user.roles.retain(|role| !roles.contains(role));
|
||||
self.persist_locked(&users)
|
||||
}
|
||||
|
||||
pub fn users_info(&self, database: &str, username: Option<&str>) -> Vec<AuthenticatedUser> {
|
||||
let users = self.users.read().unwrap_or_else(|poisoned| poisoned.into_inner());
|
||||
users
|
||||
.values()
|
||||
.filter(|user| user.database == database)
|
||||
.filter(|user| username.map(|name| user.username == name).unwrap_or(true))
|
||||
.map(AuthUser::to_authenticated_user)
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub fn start_scram_sha256(
|
||||
&self,
|
||||
database: &str,
|
||||
payload: &[u8],
|
||||
) -> Result<ScramStartResult, AuthError> {
|
||||
if !self.enabled {
|
||||
return Err(AuthError::Disabled);
|
||||
}
|
||||
|
||||
let message = std::str::from_utf8(payload)
|
||||
.map_err(|_| AuthError::InvalidPayload("payload is not valid UTF-8".to_string()))?;
|
||||
let client_first_bare = message
|
||||
.strip_prefix("n,,")
|
||||
.ok_or_else(|| AuthError::InvalidPayload("expected SCRAM gs2 header 'n,,'".to_string()))?;
|
||||
let attrs = parse_scram_attrs(client_first_bare);
|
||||
let raw_username = attrs
|
||||
.get("n")
|
||||
.ok_or_else(|| AuthError::InvalidPayload("missing username".to_string()))?;
|
||||
let username = decode_scram_name(raw_username);
|
||||
let client_nonce = attrs
|
||||
.get("r")
|
||||
.ok_or_else(|| AuthError::InvalidPayload("missing client nonce".to_string()))?;
|
||||
|
||||
let users = self.users.read().unwrap_or_else(|poisoned| poisoned.into_inner());
|
||||
let user = users
|
||||
.get(&user_key(database, &username))
|
||||
.ok_or(AuthError::AuthenticationFailed)?;
|
||||
|
||||
let nonce = format!("{}{}", client_nonce, secure_base64(18));
|
||||
let server_first = format!(
|
||||
"r={},s={},i={}",
|
||||
nonce,
|
||||
BASE64_STANDARD.encode(&user.scram_sha256.salt),
|
||||
user.scram_sha256.iterations,
|
||||
);
|
||||
|
||||
Ok(ScramStartResult {
|
||||
payload: server_first.as_bytes().to_vec(),
|
||||
conversation: ScramConversation {
|
||||
user: user.to_authenticated_user(),
|
||||
client_first_bare: client_first_bare.to_string(),
|
||||
server_first: server_first.clone(),
|
||||
nonce,
|
||||
stored_key: user.scram_sha256.stored_key.clone(),
|
||||
server_key: user.scram_sha256.server_key.clone(),
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
pub fn continue_scram_sha256(
|
||||
&self,
|
||||
conversation: ScramConversation,
|
||||
payload: &[u8],
|
||||
) -> Result<ScramContinueResult, AuthError> {
|
||||
let message = std::str::from_utf8(payload)
|
||||
.map_err(|_| AuthError::InvalidPayload("payload is not valid UTF-8".to_string()))?;
|
||||
let proof_marker = ",p=";
|
||||
let proof_pos = message
|
||||
.rfind(proof_marker)
|
||||
.ok_or_else(|| AuthError::InvalidPayload("missing client proof".to_string()))?;
|
||||
let client_final_without_proof = &message[..proof_pos];
|
||||
let proof_b64 = &message[proof_pos + proof_marker.len()..];
|
||||
let attrs = parse_scram_attrs(client_final_without_proof);
|
||||
let nonce = attrs
|
||||
.get("r")
|
||||
.ok_or_else(|| AuthError::InvalidPayload("missing nonce".to_string()))?;
|
||||
if nonce != &conversation.nonce {
|
||||
return Err(AuthError::AuthenticationFailed);
|
||||
}
|
||||
|
||||
let client_proof = BASE64_STANDARD
|
||||
.decode(proof_b64.as_bytes())
|
||||
.map_err(|_| AuthError::InvalidPayload("invalid client proof encoding".to_string()))?;
|
||||
if client_proof.len() != 32 || conversation.stored_key.len() != 32 {
|
||||
return Err(AuthError::AuthenticationFailed);
|
||||
}
|
||||
|
||||
let auth_message = format!(
|
||||
"{},{},{}",
|
||||
conversation.client_first_bare,
|
||||
conversation.server_first,
|
||||
client_final_without_proof,
|
||||
);
|
||||
let client_signature = hmac_sha256(&conversation.stored_key, auth_message.as_bytes());
|
||||
let client_key: Vec<u8> = client_proof
|
||||
.iter()
|
||||
.zip(client_signature.iter())
|
||||
.map(|(proof_byte, signature_byte)| proof_byte ^ signature_byte)
|
||||
.collect();
|
||||
let computed_stored_key = Sha256::digest(&client_key).to_vec();
|
||||
|
||||
if computed_stored_key.ct_eq(&conversation.stored_key).unwrap_u8() != 1 {
|
            return Err(AuthError::AuthenticationFailed);
        }

        let server_signature = hmac_sha256(&conversation.server_key, auth_message.as_bytes());
        let server_final = format!("v={}", BASE64_STANDARD.encode(server_signature));

        Ok(ScramContinueResult {
            payload: server_final.as_bytes().to_vec(),
            user: conversation.user,
        })
    }

    fn persist_locked(&self, users: &HashMap<String, AuthUser>) -> Result<(), AuthError> {
        if let Some(ref path) = self.users_path {
            persist_users(path, users)?;
        }
        Ok(())
    }
}

impl Default for AuthEngine {
    fn default() -> Self {
        Self::disabled()
    }
}

impl AuthUser {
    fn from_options(options: &AuthUserOptions, iterations: u32) -> Self {
        let salt = secure_random(24);
        let salted_password = salted_password(options.password.as_bytes(), &salt, iterations);
        let client_key = hmac_sha256(&salted_password, b"Client Key");
        let stored_key = Sha256::digest(&client_key).to_vec();
        let server_key = hmac_sha256(&salted_password, b"Server Key");

        Self {
            username: options.username.clone(),
            database: options.database.clone(),
            roles: options.roles.clone(),
            scram_sha256: ScramCredential {
                salt,
                iterations,
                stored_key,
                server_key,
            },
        }
    }

    fn to_authenticated_user(&self) -> AuthenticatedUser {
        AuthenticatedUser {
            username: self.username.clone(),
            database: self.database.clone(),
            roles: self.roles.clone(),
        }
    }
}

fn role_allows(role: &str, user: &AuthenticatedUser, target_db: &str, action: AuthAction) -> bool {
    let (role_db, role_name) = role.split_once('.').unwrap_or(("", role));
    if role_name == "root" {
        return true;
    }

    let any_database = role_name.ends_with("AnyDatabase");
    let scoped_db = if role_db.is_empty() { &user.database } else { role_db };
    if !any_database && scoped_db != target_db {
        return false;
    }

    match role_name {
        "read" | "readAnyDatabase" => action == AuthAction::Read,
        "readWrite" | "readWriteAnyDatabase" => {
            matches!(action, AuthAction::Read | AuthAction::Write)
        }
        "dbAdmin" | "dbAdminAnyDatabase" => action == AuthAction::DbAdmin,
        "userAdmin" | "userAdminAnyDatabase" => action == AuthAction::UserAdmin,
        "clusterMonitor" => action == AuthAction::ClusterMonitor,
        _ => false,
    }
}

fn load_users(path: &Path) -> Result<HashMap<String, AuthUser>, AuthError> {
    if !path.exists() {
        return Ok(HashMap::new());
    }
    let data = std::fs::read_to_string(path).map_err(|e| AuthError::Persistence(e.to_string()))?;
    let persisted: PersistedAuthState = serde_json::from_str(&data)
        .map_err(|e| AuthError::Persistence(format!("failed to parse users file: {e}")))?;
    Ok(persisted
        .users
        .into_iter()
        .map(|user| (user_key(&user.database, &user.username), user))
        .collect())
}

fn persist_users(path: &Path, users: &HashMap<String, AuthUser>) -> Result<(), AuthError> {
    if let Some(parent) = path.parent() {
        std::fs::create_dir_all(parent).map_err(|e| AuthError::Persistence(e.to_string()))?;
    }

    let mut user_list: Vec<AuthUser> = users.values().cloned().collect();
    user_list.sort_by(|a, b| a.database.cmp(&b.database).then(a.username.cmp(&b.username)));
    let payload = serde_json::to_vec_pretty(&PersistedAuthState { users: user_list })
        .map_err(|e| AuthError::Persistence(e.to_string()))?;

    let tmp_path = path.with_extension("tmp");
    {
        let mut file = std::fs::File::create(&tmp_path)
            .map_err(|e| AuthError::Persistence(e.to_string()))?;
        file.write_all(&payload)
            .map_err(|e| AuthError::Persistence(e.to_string()))?;
        file.sync_all()
            .map_err(|e| AuthError::Persistence(e.to_string()))?;
    }
    std::fs::rename(&tmp_path, path).map_err(|e| AuthError::Persistence(e.to_string()))?;
    if let Some(parent) = path.parent() {
        if let Ok(dir) = std::fs::File::open(parent) {
            let _ = dir.sync_all();
        }
    }
    Ok(())
}

fn user_key(database: &str, username: &str) -> String {
    format!("{}\0{}", database, username)
}

fn salted_password(password: &[u8], salt: &[u8], iterations: u32) -> Vec<u8> {
    let mut output = [0u8; 32];
    pbkdf2_hmac::<Sha256>(password, salt, iterations, &mut output);
    output.to_vec()
}

fn hmac_sha256(key: &[u8], message: &[u8]) -> Vec<u8> {
    let mut mac = HmacSha256::new_from_slice(key).expect("HMAC-SHA256 accepts keys of any size");
    mac.update(message);
    mac.finalize().into_bytes().to_vec()
}

fn secure_random(len: usize) -> Vec<u8> {
    let mut bytes = vec![0u8; len];
    OsRng.fill_bytes(&mut bytes);
    bytes
}

fn secure_base64(len: usize) -> String {
    BASE64_STANDARD.encode(secure_random(len))
}

fn parse_scram_attrs(input: &str) -> HashMap<String, String> {
    let mut result = HashMap::new();
    for part in input.split(',') {
        if let Some((key, value)) = part.split_once('=') {
            result.insert(key.to_string(), value.to_string());
        }
    }
    result
}

fn decode_scram_name(input: &str) -> String {
    input.replace("=2C", ",").replace("=3D", "=")
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn mechanism_lookup_returns_scram_sha256() {
        let options = AuthOptions {
            enabled: true,
            users: vec![AuthUserOptions {
                username: "root".to_string(),
                password: "secret".to_string(),
                database: "admin".to_string(),
                roles: vec!["root".to_string()],
            }],
            users_path: None,
            scram_iterations: 4096,
        };
        let engine = AuthEngine::from_options(&options).unwrap();
        assert_eq!(engine.supported_mechanisms("admin.root"), vec![SCRAM_SHA_256.to_string()]);
    }

    #[test]
    fn read_write_role_allows_read_and_write_only_on_own_db() {
        let user = AuthenticatedUser {
            username: "app".to_string(),
            database: "appdb".to_string(),
            roles: vec!["readWrite".to_string()],
        };
        assert!(role_allows("readWrite", &user, "appdb", AuthAction::Read));
        assert!(role_allows("readWrite", &user, "appdb", AuthAction::Write));
        assert!(!role_allows("readWrite", &user, "other", AuthAction::Read));
        assert!(!role_allows("readWrite", &user, "appdb", AuthAction::DbAdmin));
    }
}
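The credential derivation above follows RFC 5802: StoredKey = H(HMAC(SaltedPassword, "Client Key")). As a quick illustration of how the stored key gets used at login time, here is a minimal sketch of client-proof verification built only from the helpers defined in this file plus `sha2`; the real check lives in `continue_scram_sha256` and may differ in details.

```rust
use sha2::{Digest, Sha256};

// Sketch only: a SCRAM client proof is valid iff
// H(ClientProof XOR HMAC(StoredKey, AuthMessage)) == StoredKey.
fn verify_client_proof(stored_key: &[u8], auth_message: &[u8], client_proof: &[u8]) -> bool {
    // ClientSignature := HMAC(StoredKey, AuthMessage)
    let client_signature = hmac_sha256(stored_key, auth_message);
    // Recover ClientKey := ClientProof XOR ClientSignature
    let client_key: Vec<u8> = client_proof
        .iter()
        .zip(client_signature.iter())
        .map(|(p, s)| p ^ s)
        .collect();
    // Hash it and compare against the stored credential.
    Sha256::digest(&client_key).as_slice() == stored_key
}
```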
@@ -22,3 +22,4 @@ rustdb-query = { workspace = true }
rustdb-storage = { workspace = true }
rustdb-index = { workspace = true }
rustdb-txn = { workspace = true }
rustdb-auth = { workspace = true }
@@ -1,8 +1,9 @@
use std::sync::Arc;

use bson::Document;
use bson::{Bson, Document};
use dashmap::DashMap;
use rustdb_index::IndexEngine;
use rustdb_auth::{AuthEngine, AuthenticatedUser, ScramConversation};
use rustdb_index::{IndexEngine, IndexOptions};
use rustdb_storage::{OpLog, StorageAdapter};
use rustdb_txn::{SessionEngine, TransactionEngine};

@@ -22,6 +23,106 @@ pub struct CommandContext {
    pub start_time: std::time::Instant,
    /// Operation log for point-in-time replay.
    pub oplog: Arc<OpLog>,
    /// Authentication engine and user store.
    pub auth: Arc<AuthEngine>,
}

impl CommandContext {
    /// Get or lazily initialize an IndexEngine for a namespace.
    ///
    /// If no IndexEngine exists yet for this namespace, loads persisted index
    /// specs from `indexes.json` via the storage adapter, creates the engine
    /// with those specs, and rebuilds index data from existing documents.
    /// This ensures unique indexes are enforced even on the very first write
    /// after a restart.
    pub async fn get_or_init_index_engine(&self, db: &str, coll: &str) -> dashmap::mapref::one::RefMut<'_, String, IndexEngine> {
        let ns_key = format!("{}.{}", db, coll);

        // Fast path: engine already exists.
        if self.indexes.contains_key(&ns_key) {
            return self.indexes.entry(ns_key).or_insert_with(IndexEngine::new);
        }

        // Slow path: load from persisted specs.
        let mut engine = IndexEngine::new();
        let mut has_custom = false;

        if let Ok(specs) = self.storage.get_indexes(db, coll).await {
            for spec in &specs {
                let name = spec.get_str("name").unwrap_or("").to_string();
                if name == "_id_" || name.is_empty() {
                    continue;
                }
                let key = match spec.get("key") {
                    Some(Bson::Document(k)) => k.clone(),
                    _ => continue,
                };
                let unique = matches!(spec.get("unique"), Some(Bson::Boolean(true)));
                let sparse = matches!(spec.get("sparse"), Some(Bson::Boolean(true)));
                let expire_after_seconds = match spec.get("expireAfterSeconds") {
                    Some(Bson::Int32(n)) => Some(*n as u64),
                    Some(Bson::Int64(n)) => Some(*n as u64),
                    _ => None,
                };
                let options = IndexOptions {
                    name: Some(name),
                    unique,
                    sparse,
                    expire_after_seconds,
                };
                let _ = engine.create_index(key, options);
                has_custom = true;
            }
        }

        if has_custom {
            // Rebuild index data from existing documents.
            if let Ok(docs) = self.storage.find_all(db, coll).await {
                if !docs.is_empty() {
                    engine.rebuild_from_documents(&docs);
                }
            }
        }

        self.indexes.entry(ns_key).or_insert(engine)
    }
}

/// Per-client connection state. Authentication is socket-scoped in MongoDB.
pub struct ConnectionState {
    pub authenticated_users: Vec<AuthenticatedUser>,
    pub sasl_conversations: std::collections::HashMap<i32, ScramConversation>,
    next_conversation_id: i32,
}

impl ConnectionState {
    pub fn new() -> Self {
        Self {
            authenticated_users: Vec::new(),
            sasl_conversations: std::collections::HashMap::new(),
            next_conversation_id: 1,
        }
    }

    pub fn is_authenticated(&self) -> bool {
        !self.authenticated_users.is_empty()
    }

    pub fn next_conversation_id(&mut self) -> i32 {
        let id = self.next_conversation_id;
        self.next_conversation_id += 1;
        id
    }

    pub fn authenticate(&mut self, user: AuthenticatedUser) {
        self.authenticated_users.push(user);
    }
}

impl Default for ConnectionState {
    fn default() -> Self {
        Self::new()
    }
}

/// State of an open cursor from a find or aggregate command.
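A minimal sketch of how the router drives this state across one SASL exchange (conversation ids are per-connection and start at 1); the actual handler calls appear in auth_handler.rs below.

```rust
let mut conn = ConnectionState::new();
assert!(!conn.is_authenticated());

// saslStart: allocate a conversation id and stash the server-side state.
let id = conn.next_conversation_id(); // 1, then 2, ...
// conn.sasl_conversations.insert(id, conversation);

// saslContinue: remove the conversation, verify the client proof, then:
// conn.authenticate(user);
// conn.is_authenticated() is now true for the rest of the socket's life.
```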
@@ -27,6 +27,18 @@ pub enum CommandError {
    #[error("duplicate key: {0}")]
    DuplicateKey(String),

    #[error("immutable field: {0}")]
    ImmutableField(String),

    #[error("unauthorized: {0}")]
    Unauthorized(String),

    #[error("authentication failed")]
    AuthenticationFailed,

    #[error("illegal operation: {0}")]
    IllegalOperation(String),

    #[error("internal error: {0}")]
    InternalError(String),
}

@@ -43,6 +55,10 @@ impl CommandError {
            CommandError::NamespaceNotFound(_) => (26, "NamespaceNotFound"),
            CommandError::NamespaceExists(_) => (48, "NamespaceExists"),
            CommandError::DuplicateKey(_) => (11000, "DuplicateKey"),
            CommandError::ImmutableField(_) => (66, "ImmutableField"),
            CommandError::Unauthorized(_) => (13, "Unauthorized"),
            CommandError::AuthenticationFailed => (18, "AuthenticationFailed"),
            CommandError::IllegalOperation(_) => (20, "IllegalOperation"),
            CommandError::InternalError(_) => (1, "InternalError"),
        };
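For reference, these (code, codeName) pairs surface to drivers in a standard MongoDB-style error document. The `to_error_doc` body is outside this hunk, so the exact shape below is an assumption based on the wire convention and the mapping above.

```rust
// Hypothetical reply for a CommandError::Unauthorized(..) given the
// (13, "Unauthorized") mapping; field names follow the MongoDB convention.
let reply = bson::doc! {
    "ok": 0.0,
    "errmsg": "unauthorized: command 'find' is not authorized for database 'appdb'",
    "code": 13_i32,
    "codeName": "Unauthorized",
};
```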
@@ -98,6 +98,18 @@ pub async fn handle(
            "ok": 1.0,
        }),

        "createUser" => handle_create_user(cmd, db, ctx).await,

        "updateUser" => handle_update_user(cmd, db, ctx).await,

        "dropUser" => handle_drop_user(cmd, db, ctx).await,

        "usersInfo" => handle_users_info(cmd, db, ctx).await,

        "grantRolesToUser" => handle_grant_roles_to_user(cmd, db, ctx).await,

        "revokeRolesFromUser" => handle_revoke_roles_from_user(cmd, db, ctx).await,

        "listDatabases" => handle_list_databases(cmd, ctx).await,

        "listCollections" => handle_list_collections(cmd, db, ctx).await,

@@ -144,15 +156,9 @@ pub async fn handle(
            Ok(doc! { "ok": 1.0 })
        }

        "commitTransaction" => {
            // Stub: acknowledge.
            Ok(doc! { "ok": 1.0 })
        }

        "abortTransaction" => {
            // Stub: acknowledge.
            Ok(doc! { "ok": 1.0 })
        }
        "commitTransaction" | "abortTransaction" => Err(CommandError::IllegalOperation(
            "Transaction numbers are only allowed on a replica set member or mongos".into(),
        )),

        // Auth stubs - accept silently.
        "saslStart" => Ok(doc! {

@@ -189,6 +195,166 @@ pub async fn handle(
    }
}

async fn handle_create_user(
    cmd: &Document,
    db: &str,
    ctx: &CommandContext,
) -> CommandResult<Document> {
    let username = cmd
        .get_str("createUser")
        .map_err(|_| CommandError::InvalidArgument("missing 'createUser' field".into()))?;
    let password = cmd
        .get_str("pwd")
        .map_err(|_| CommandError::InvalidArgument("missing 'pwd' field".into()))?;
    let roles = parse_roles(cmd, db, "roles")?;
    ctx.auth
        .create_user(db, username, password, roles)
        .map_err(auth_error_to_command_error)?;
    Ok(doc! { "ok": 1.0 })
}

async fn handle_update_user(
    cmd: &Document,
    db: &str,
    ctx: &CommandContext,
) -> CommandResult<Document> {
    let username = cmd
        .get_str("updateUser")
        .map_err(|_| CommandError::InvalidArgument("missing 'updateUser' field".into()))?;
    let password = cmd.get_str("pwd").ok();
    let roles = if cmd.contains_key("roles") {
        Some(parse_roles(cmd, db, "roles")?)
    } else {
        None
    };
    ctx.auth
        .update_user(db, username, password, roles)
        .map_err(auth_error_to_command_error)?;
    Ok(doc! { "ok": 1.0 })
}

async fn handle_drop_user(
    cmd: &Document,
    db: &str,
    ctx: &CommandContext,
) -> CommandResult<Document> {
    let username = cmd
        .get_str("dropUser")
        .map_err(|_| CommandError::InvalidArgument("missing 'dropUser' field".into()))?;
    ctx.auth
        .drop_user(db, username)
        .map_err(auth_error_to_command_error)?;
    Ok(doc! { "ok": 1.0 })
}

async fn handle_users_info(
    cmd: &Document,
    db: &str,
    ctx: &CommandContext,
) -> CommandResult<Document> {
    let username = match cmd.get("usersInfo") {
        Some(Bson::String(name)) => Some(name.as_str()),
        Some(Bson::Document(user_doc)) => user_doc.get_str("user").ok(),
        _ => None,
    };
    let users = ctx.auth.users_info(db, username);
    let user_docs: Vec<Bson> = users
        .into_iter()
        .map(|user| {
            let roles: Vec<Bson> = user
                .roles
                .iter()
                .map(|role| Bson::Document(role_to_document(&user.database, role)))
                .collect();
            Bson::Document(doc! {
                "user": user.username,
                "db": user.database,
                "roles": roles,
                "mechanisms": ["SCRAM-SHA-256"],
            })
        })
        .collect();
    Ok(doc! { "users": user_docs, "ok": 1.0 })
}

async fn handle_grant_roles_to_user(
    cmd: &Document,
    db: &str,
    ctx: &CommandContext,
) -> CommandResult<Document> {
    let username = cmd
        .get_str("grantRolesToUser")
        .map_err(|_| CommandError::InvalidArgument("missing 'grantRolesToUser' field".into()))?;
    let roles = parse_roles(cmd, db, "roles")?;
    ctx.auth
        .grant_roles(db, username, roles)
        .map_err(auth_error_to_command_error)?;
    Ok(doc! { "ok": 1.0 })
}

async fn handle_revoke_roles_from_user(
    cmd: &Document,
    db: &str,
    ctx: &CommandContext,
) -> CommandResult<Document> {
    let username = cmd
        .get_str("revokeRolesFromUser")
        .map_err(|_| CommandError::InvalidArgument("missing 'revokeRolesFromUser' field".into()))?;
    let roles = parse_roles(cmd, db, "roles")?;
    ctx.auth
        .revoke_roles(db, username, roles)
        .map_err(auth_error_to_command_error)?;
    Ok(doc! { "ok": 1.0 })
}

fn parse_roles(cmd: &Document, db: &str, key: &str) -> CommandResult<Vec<String>> {
    let role_values = cmd
        .get_array(key)
        .map_err(|_| CommandError::InvalidArgument(format!("missing '{key}' array")))?;
    let mut roles = Vec::with_capacity(role_values.len());
    for role_value in role_values {
        match role_value {
            Bson::String(role) => roles.push(role.clone()),
            Bson::Document(role_doc) => {
                let role = role_doc
                    .get_str("role")
                    .map_err(|_| CommandError::InvalidArgument("role document missing 'role'".into()))?;
                let role_db = role_doc.get_str("db").unwrap_or(db);
                if role_db == db {
                    roles.push(role.to_string());
                } else {
                    roles.push(format!("{role_db}.{role}"));
                }
            }
            _ => return Err(CommandError::InvalidArgument("roles must be strings or documents".into())),
        }
    }
    Ok(roles)
}

fn role_to_document(default_db: &str, role: &str) -> Document {
    if let Some((role_db, role_name)) = role.split_once('.') {
        doc! { "role": role_name, "db": role_db }
    } else {
        doc! { "role": role, "db": default_db }
    }
}

fn auth_error_to_command_error(error: rustdb_auth::AuthError) -> CommandError {
    match error {
        rustdb_auth::AuthError::UserAlreadyExists(message) => CommandError::DuplicateKey(message),
        rustdb_auth::AuthError::UserNotFound(message) => CommandError::NamespaceNotFound(message),
        rustdb_auth::AuthError::Persistence(message) => CommandError::InternalError(message),
        rustdb_auth::AuthError::AuthenticationFailed => CommandError::AuthenticationFailed,
        rustdb_auth::AuthError::InvalidPayload(message) => CommandError::InvalidArgument(message),
        rustdb_auth::AuthError::UnsupportedMechanism(message) => CommandError::InvalidArgument(message),
        rustdb_auth::AuthError::Disabled => CommandError::Unauthorized("authentication is disabled".into()),
        rustdb_auth::AuthError::UnknownConversation => {
            CommandError::InvalidArgument("unknown SASL conversation".into())
        }
    }
}

/// Handle `listDatabases` command.
async fn handle_list_databases(
    cmd: &Document,
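An example of the command document these handlers accept. Per `parse_roles` above, roles may be bare strings (scoped to the command's database) or `{ role, db }` documents; a cross-database role is stored internally as `db.role`.

```rust
let cmd = bson::doc! {
    "createUser": "app",
    "pwd": "s3cret",
    // "readWrite" binds to the command's db; the document form targets another db.
    "roles": ["readWrite", { "role": "read", "db": "analytics" }],
};
// handle_create_user(&cmd, "appdb", &ctx).await then replies { "ok": 1.0 }.
```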
@@ -0,0 +1,87 @@
use bson::{doc, Binary, Bson, Document};

use crate::context::{CommandContext, ConnectionState};
use crate::error::{CommandError, CommandResult};

pub async fn handle_sasl_start(
    cmd: &Document,
    db: &str,
    ctx: &CommandContext,
    connection: &mut ConnectionState,
) -> CommandResult<Document> {
    let mechanism = cmd
        .get_str("mechanism")
        .map_err(|_| CommandError::InvalidArgument("missing SASL mechanism".into()))?;
    if mechanism != "SCRAM-SHA-256" {
        return Err(CommandError::InvalidArgument(format!(
            "unsupported SASL mechanism: {mechanism}"
        )));
    }

    let payload = payload_bytes(cmd)?;
    let result = ctx
        .auth
        .start_scram_sha256(db, &payload)
        .map_err(map_auth_error)?;
    let conversation_id = connection.next_conversation_id();
    connection
        .sasl_conversations
        .insert(conversation_id, result.conversation);

    Ok(doc! {
        "conversationId": conversation_id,
        "done": false,
        "payload": Binary { subtype: bson::spec::BinarySubtype::Generic, bytes: result.payload },
        "ok": 1.0,
    })
}

pub async fn handle_sasl_continue(
    cmd: &Document,
    ctx: &CommandContext,
    connection: &mut ConnectionState,
) -> CommandResult<Document> {
    let conversation_id = cmd
        .get_i32("conversationId")
        .map_err(|_| CommandError::InvalidArgument("missing SASL conversationId".into()))?;
    let payload = payload_bytes(cmd)?;
    let conversation = connection
        .sasl_conversations
        .remove(&conversation_id)
        .ok_or_else(|| CommandError::InvalidArgument("unknown SASL conversation".into()))?;
    let result = ctx
        .auth
        .continue_scram_sha256(conversation, &payload)
        .map_err(map_auth_error)?;
    connection.authenticate(result.user);

    Ok(doc! {
        "conversationId": conversation_id,
        "done": true,
        "payload": Binary { subtype: bson::spec::BinarySubtype::Generic, bytes: result.payload },
        "ok": 1.0,
    })
}

fn payload_bytes(cmd: &Document) -> CommandResult<Vec<u8>> {
    match cmd.get("payload") {
        Some(Bson::Binary(binary)) => Ok(binary.bytes.clone()),
        Some(Bson::String(value)) => Ok(value.as_bytes().to_vec()),
        _ => Err(CommandError::InvalidArgument("missing SASL payload".into())),
    }
}

fn map_auth_error(error: rustdb_auth::AuthError) -> CommandError {
    match error {
        rustdb_auth::AuthError::InvalidPayload(message) => CommandError::InvalidArgument(message),
        rustdb_auth::AuthError::UnsupportedMechanism(message) => CommandError::InvalidArgument(message),
        rustdb_auth::AuthError::Disabled => CommandError::Unauthorized("authentication is disabled".into()),
        rustdb_auth::AuthError::UnknownConversation => {
            CommandError::InvalidArgument("unknown SASL conversation".into())
        }
        rustdb_auth::AuthError::AuthenticationFailed => CommandError::AuthenticationFailed,
        rustdb_auth::AuthError::UserAlreadyExists(message) => CommandError::DuplicateKey(message),
        rustdb_auth::AuthError::UserNotFound(message) => CommandError::NamespaceNotFound(message),
        rustdb_auth::AuthError::Persistence(message) => CommandError::InternalError(message),
    }
}
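For context, a SCRAM-SHA-256 client-first payload as it typically arrives in `saslStart` (RFC 5802 GS2 header plus `n=`/`r=` attributes; the username and nonce here are illustrative). `payload_bytes` accepts it either as BSON Binary or as a plain string.

```rust
let client_first = "n,,n=app,r=rOprNGfwEbeRWgbNEkqO";
let cmd = bson::doc! {
    "saslStart": 1_i32,
    "mechanism": "SCRAM-SHA-256",
    "payload": client_first,
};
```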
@@ -1,4 +1,4 @@
use bson::{doc, Document};
use bson::{doc, Bson, Document};

use crate::context::CommandContext;
use crate::error::CommandResult;

@@ -7,12 +7,13 @@ use crate::error::CommandResult;
///
/// Returns server capabilities matching wire protocol expectations.
pub async fn handle(
    _cmd: &Document,
    cmd: &Document,
    _db: &str,
    _ctx: &CommandContext,
    ctx: &CommandContext,
) -> CommandResult<Document> {
    Ok(doc! {
    let mut response = doc! {
        "ismaster": true,
        "helloOk": true,
        "isWritablePrimary": true,
        "maxBsonObjectSize": 16_777_216_i32,
        "maxMessageSizeBytes": 48_000_000_i32,

@@ -24,5 +25,19 @@ pub async fn handle(
        "maxWireVersion": 21_i32,
        "readOnly": false,
        "ok": 1.0,
    })
    };

    if ctx.auth.enabled() {
        if let Ok(namespace_user) = cmd.get_str("saslSupportedMechs") {
            let mechanisms: Vec<Bson> = ctx
                .auth
                .supported_mechanisms(namespace_user)
                .into_iter()
                .map(Bson::String)
                .collect();
            response.insert("saslSupportedMechs", Bson::Array(mechanisms));
        }
    }

    Ok(response)
}
@@ -101,7 +101,15 @@ async fn handle_create_indexes(
        expire_after_seconds,
    };

    // Create the index.
    let options_for_persist = IndexOptions {
        name: options.name.clone(),
        unique: options.unique,
        sparse: options.sparse,
        expire_after_seconds: options.expire_after_seconds,
    };
    let key_for_persist = key.clone();

    // Create the index in-memory.
    let mut engine = ctx
        .indexes
        .entry(ns_key.clone())

@@ -110,6 +118,22 @@ async fn handle_create_indexes(
    match engine.create_index(key, options) {
        Ok(index_name) => {
            debug!(index_name = %index_name, "Created index");

            // Persist index spec to disk.
            let mut spec = doc! { "key": key_for_persist };
            if options_for_persist.unique {
                spec.insert("unique", true);
            }
            if options_for_persist.sparse {
                spec.insert("sparse", true);
            }
            if let Some(ttl) = options_for_persist.expire_after_seconds {
                spec.insert("expireAfterSeconds", ttl as i64);
            }
            if let Err(e) = ctx.storage.save_index(db, coll, &index_name, spec).await {
                tracing::warn!(index = %index_name, error = %e, "failed to persist index spec");
            }

            created_count += 1;
        }
        Err(e) => {

@@ -180,9 +204,21 @@ async fn handle_drop_indexes(
    match index_spec {
        Some(Bson::String(name)) if name == "*" => {
            // Drop all indexes except _id_.
            // Collect names to drop from storage first.
            let names_to_drop: Vec<String> = if let Some(engine) = ctx.indexes.get(&ns_key) {
                engine.list_indexes().iter()
                    .filter(|info| info.name != "_id_")
                    .map(|info| info.name.clone())
                    .collect()
            } else {
                Vec::new()
            };
            if let Some(mut engine) = ctx.indexes.get_mut(&ns_key) {
                engine.drop_all_indexes();
            }
            for idx_name in &names_to_drop {
                let _ = ctx.storage.drop_index(db, coll, idx_name).await;
            }
        }
        Some(Bson::String(name)) => {
            // Drop by name.

@@ -196,6 +232,7 @@ async fn handle_drop_indexes(
                    name
                )));
            }
            let _ = ctx.storage.drop_index(db, coll, name).await;
        }
        Some(Bson::Document(key_spec)) => {
            // Drop by key spec: find the index with matching key.

@@ -210,6 +247,7 @@ async fn handle_drop_indexes(
                engine.drop_index(&name).map_err(|e| {
                    CommandError::IndexError(e.to_string())
                })?;
                let _ = ctx.storage.drop_index(db, coll, &name).await;
            } else {
                return Err(CommandError::IndexError(
                    "index not found with specified key".into(),
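An example `createIndexes` command that exercises the new persistence path; `unique`, `sparse`, and `expireAfterSeconds` are exactly the options written through to `indexes.json` and reloaded by `get_or_init_index_engine` after a restart.

```rust
let cmd = bson::doc! {
    "createIndexes": "users",
    "indexes": [
        { "key": { "email": 1 }, "name": "email_1", "unique": true },
        { "key": { "createdAt": 1 }, "name": "createdAt_ttl", "expireAfterSeconds": 3600_i32 },
    ],
};
```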
@@ -1,9 +1,8 @@
use std::collections::HashMap;

use bson::{doc, oid::ObjectId, Bson, Document};
use rustdb_index::IndexEngine;
use rustdb_storage::OpType;
use tracing::{debug, warn};
use tracing::debug;

use crate::context::CommandContext;
use crate::error::{CommandError, CommandResult};

@@ -56,12 +55,35 @@ pub async fn handle(
    let mut inserted_count: i32 = 0;
    let mut write_errors: Vec<Document> = Vec::new();

    // Ensure the IndexEngine is loaded (with persisted specs from indexes.json).
    // This must happen BEFORE any writes, so unique constraints are enforced
    // even on the first write after a restart.
    drop(ctx.get_or_init_index_engine(db, coll).await);

    for (idx, mut doc) in docs.into_iter().enumerate() {
        // Auto-generate _id if not present.
        if !doc.contains_key("_id") {
            doc.insert("_id", ObjectId::new());
        }

        // Pre-check unique index constraints BEFORE storage write.
        // The engine is guaranteed to exist from the get_or_init call above.
        if let Some(engine) = ctx.indexes.get(&ns_key) {
            if let Err(e) = engine.check_unique_constraints(&doc) {
                let err_msg = e.to_string();
                write_errors.push(doc! {
                    "index": idx as i32,
                    "code": 11000_i32,
                    "codeName": "DuplicateKey",
                    "errmsg": &err_msg,
                });
                if ordered {
                    break;
                }
                continue;
            }
        }

        // Attempt storage insert.
        match ctx.storage.insert_one(db, coll, doc.clone()).await {
            Ok(id_str) => {

@@ -75,17 +97,15 @@ pub async fn handle(
                    None,
                );

                // Update index engine.
                let mut engine = ctx
                    .indexes
                    .entry(ns_key.clone())
                    .or_insert_with(IndexEngine::new);
                if let Err(e) = engine.on_insert(&doc) {
                    warn!(
                        namespace = %ns_key,
                        error = %e,
                        "index update failed after successful insert"
                    );
                // Update index engine (already initialized above).
                if let Some(mut engine) = ctx.indexes.get_mut(&ns_key) {
                    if let Err(e) = engine.on_insert(&doc) {
                        tracing::error!(
                            namespace = %ns_key,
                            error = %e,
                            "index update failed after successful insert"
                        );
                    }
                }
                inserted_count += 1;
            }
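With the pre-check in place, a duplicate in an ordered batch now fails before any storage write. A sketch of the resulting reply for a three-document batch whose second document collides (`n` counts only successful inserts; the actual errmsg text comes from the index error):

```rust
let reply = bson::doc! {
    "n": 1_i32,
    "writeErrors": [{
        "index": 1_i32,
        "code": 11000_i32,
        "codeName": "DuplicateKey",
        "errmsg": "duplicate key error on index email_1", // illustrative text
    }],
    "ok": 1.0,
};
```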
@@ -1,5 +1,6 @@
pub mod admin_handler;
pub mod aggregate_handler;
pub mod auth_handler;
pub mod delete_handler;
pub mod find_handler;
pub mod hello_handler;
@@ -1,7 +1,6 @@
use std::collections::HashSet;

use bson::{doc, oid::ObjectId, Bson, Document};
use rustdb_index::IndexEngine;
use rustdb_query::{QueryMatcher, UpdateEngine, sort_documents, apply_projection};
use rustdb_storage::OpType;
use tracing::debug;

@@ -22,6 +21,11 @@ pub async fn handle(
    }
}

enum TUpdateSpec {
    Document(Document),
    Pipeline(Vec<Document>),
}

/// Handle the `update` command.
async fn handle_update(
    cmd: &Document,

@@ -47,6 +51,10 @@ async fn handle_update(
    ensure_collection_exists(db, coll, ctx).await?;

    let ns_key = format!("{}.{}", db, coll);

    // Ensure the IndexEngine is loaded with persisted specs from indexes.json.
    drop(ctx.get_or_init_index_engine(db, coll).await);

    let mut total_n: i32 = 0;
    let mut total_n_modified: i32 = 0;
    let mut upserted_list: Vec<Document> = Vec::new();

@@ -75,21 +83,22 @@ async fn handle_update(
    };

    let update = match update_spec.get("u") {
        Some(Bson::Document(d)) => d.clone(),
        Some(Bson::Array(_pipeline)) => {
            // Aggregation pipeline updates are not yet supported; treat as error.
            write_errors.push(doc! {
                "index": idx as i32,
                "code": 14_i32,
                "codeName": "TypeMismatch",
                "errmsg": "aggregation pipeline updates not yet supported",
            });
            if ordered {
                break;
        Some(update_value) => match parse_update_spec(update_value) {
            Ok(parsed) => parsed,
            Err(err) => {
                write_errors.push(doc! {
                    "index": idx as i32,
                    "code": 14_i32,
                    "codeName": "TypeMismatch",
                    "errmsg": err,
                });
                if ordered {
                    break;
                }
                continue;
            }
            continue;
        }
        _ => {
        },
        None => {
            write_errors.push(doc! {
                "index": idx as i32,
                "code": 14_i32,

@@ -134,21 +143,28 @@ async fn handle_update(
    let new_doc = build_upsert_doc(&filter);

    // Apply update operators or replacement.
    match UpdateEngine::apply_update(&new_doc, &update, array_filters.as_deref()) {
    match apply_update_spec(&new_doc, &update, array_filters.as_deref()) {
        Ok(mut updated) => {
            // Apply $setOnInsert if present.
            if let Some(Bson::Document(soi)) = update.get("$setOnInsert") {
                UpdateEngine::apply_set_on_insert(&mut updated, soi);
            }
            apply_set_on_insert_if_present(&update, &mut updated);

            // Ensure _id exists.
            let new_id = if !updated.contains_key("_id") {
                let oid = ObjectId::new();
                updated.insert("_id", oid);
                Bson::ObjectId(oid)
            } else {
                updated.get("_id").unwrap().clone()
            };
            let new_id = ensure_document_id(&mut updated);

            // Pre-check unique index constraints before upsert insert.
            if let Some(engine) = ctx.indexes.get(&ns_key) {
                if let Err(e) = engine.check_unique_constraints(&updated) {
                    write_errors.push(doc! {
                        "index": idx as i32,
                        "code": 11000_i32,
                        "codeName": "DuplicateKey",
                        "errmsg": e.to_string(),
                    });
                    if ordered {
                        break;
                    }
                    continue;
                }
            }

            // Insert the new document.
            match ctx.storage.insert_one(db, coll, updated.clone()).await {

@@ -163,12 +179,12 @@ async fn handle_update(
                    None,
                );

                // Update index.
                let mut engine = ctx
                    .indexes
                    .entry(ns_key.clone())
                    .or_insert_with(IndexEngine::new);
                let _ = engine.on_insert(&updated);
                // Update index (engine already initialized above).
                if let Some(mut engine) = ctx.indexes.get_mut(&ns_key) {
                    if let Err(e) = engine.on_insert(&updated) {
                        tracing::error!(namespace = %ns_key, error = %e, "index update failed after upsert insert");
                    }
                }

                total_n += 1;
                upserted_list.push(doc! {

@@ -210,12 +226,37 @@ async fn handle_update(
    };

    for matched_doc in &docs_to_update {
        match UpdateEngine::apply_update(
            matched_doc,
            &update,
            array_filters.as_deref(),
        ) {
            Ok(updated_doc) => {
        match apply_update_spec(matched_doc, &update, array_filters.as_deref()) {
            Ok(mut updated_doc) => {
                if let Err(e) = ensure_immutable_id(matched_doc, &mut updated_doc) {
                    write_errors.push(doc! {
                        "index": idx as i32,
                        "code": 66_i32,
                        "codeName": "ImmutableField",
                        "errmsg": e.to_string(),
                    });
                    if ordered {
                        break;
                    }
                    continue;
                }

                // Pre-check unique index constraints before storage write.
                if let Some(engine) = ctx.indexes.get(&ns_key) {
                    if let Err(e) = engine.check_unique_constraints_for_update(matched_doc, &updated_doc) {
                        write_errors.push(doc! {
                            "index": idx as i32,
                            "code": 11000_i32,
                            "codeName": "DuplicateKey",
                            "errmsg": e.to_string(),
                        });
                        if ordered {
                            break;
                        }
                        continue;
                    }
                }

                let id_str = extract_id_string(matched_doc);
                match ctx
                    .storage

@@ -235,7 +276,9 @@ async fn handle_update(

                // Update index.
                if let Some(mut engine) = ctx.indexes.get_mut(&ns_key) {
                    let _ = engine.on_update(matched_doc, &updated_doc);
                    if let Err(e) = engine.on_update(matched_doc, &updated_doc) {
                        tracing::error!(namespace = %ns_key, error = %e, "index update failed after update");
                    }
                }
                total_n += 1;
                // Check if the document actually changed.

@@ -324,8 +367,11 @@ async fn handle_find_and_modify(
    };

    let update_doc = match cmd.get("update") {
        Some(Bson::Document(d)) => Some(d.clone()),
        _ => None,
        Some(update_value) => Some(
            parse_update_spec(update_value)
                .map_err(CommandError::InvalidArgument)?
        ),
        None => None,
    };

    let remove = match cmd.get("remove") {

@@ -366,6 +412,9 @@ async fn handle_find_and_modify(

    let ns_key = format!("{}.{}", db, coll);

    // Ensure the IndexEngine is loaded with persisted specs.
    drop(ctx.get_or_init_index_engine(db, coll).await);

    // Load and filter documents.
    let mut matched = load_filtered_docs(db, coll, &query, &ns_key, ctx).await?;

@@ -437,12 +486,21 @@ async fn handle_find_and_modify(

    if let Some(original_doc) = target {
        // Update the matched document.
        let updated_doc = UpdateEngine::apply_update(
        let mut updated_doc = apply_update_spec(
            &original_doc,
            &update,
            array_filters.as_deref(),
        )
        .map_err(|e| CommandError::InternalError(e.to_string()))?;
        .map_err(CommandError::InternalError)?;

        ensure_immutable_id(&original_doc, &mut updated_doc)?;

        // Pre-check unique index constraints before storage write.
        if let Some(engine) = ctx.indexes.get(&ns_key) {
            if let Err(e) = engine.check_unique_constraints_for_update(&original_doc, &updated_doc) {
                return Err(CommandError::StorageError(e.to_string()));
            }
        }

        let id_str = extract_id_string(&original_doc);
        ctx.storage

@@ -461,7 +519,9 @@ async fn handle_find_and_modify(

        // Update index.
        if let Some(mut engine) = ctx.indexes.get_mut(&ns_key) {
            let _ = engine.on_update(&original_doc, &updated_doc);
            if let Err(e) = engine.on_update(&original_doc, &updated_doc) {
                tracing::error!(namespace = %ns_key, error = %e, "index update failed after findAndModify update");
            }
        }

        let return_doc = if return_new {

@@ -484,26 +544,24 @@ async fn handle_find_and_modify(
        // Upsert: create a new document.
        let new_doc = build_upsert_doc(&query);

        let mut updated_doc = UpdateEngine::apply_update(
        let mut updated_doc = apply_update_spec(
            &new_doc,
            &update,
            array_filters.as_deref(),
        )
        .map_err(|e| CommandError::InternalError(e.to_string()))?;
        .map_err(CommandError::InternalError)?;

        // Apply $setOnInsert if present.
        if let Some(Bson::Document(soi)) = update.get("$setOnInsert") {
            UpdateEngine::apply_set_on_insert(&mut updated_doc, soi);
        }
        apply_set_on_insert_if_present(&update, &mut updated_doc);

        // Ensure _id.
        let upserted_id = if !updated_doc.contains_key("_id") {
            let oid = ObjectId::new();
            updated_doc.insert("_id", oid);
            Bson::ObjectId(oid)
        } else {
            updated_doc.get("_id").unwrap().clone()
        };
        let upserted_id = ensure_document_id(&mut updated_doc);

        // Pre-check unique index constraints before upsert insert.
        if let Some(engine) = ctx.indexes.get(&ns_key) {
            if let Err(e) = engine.check_unique_constraints(&updated_doc) {
                return Err(CommandError::StorageError(e.to_string()));
            }
        }

        let inserted_id_str = ctx.storage
            .insert_one(db, coll, updated_doc.clone())

@@ -521,11 +579,11 @@ async fn handle_find_and_modify(

        // Update index.
        {
            let mut engine = ctx
                .indexes
                .entry(ns_key.clone())
                .or_insert_with(IndexEngine::new);
            let _ = engine.on_insert(&updated_doc);
        if let Some(mut engine) = ctx.indexes.get_mut(&ns_key) {
            if let Err(e) = engine.on_insert(&updated_doc) {
                tracing::error!(namespace = %ns_key, error = %e, "index update failed after findAndModify upsert");
            }
        }
        }

        let value = if return_new {

@@ -611,6 +669,88 @@ fn build_upsert_doc(filter: &Document) -> Document {
    doc
}

fn parse_update_spec(update_value: &Bson) -> Result<TUpdateSpec, String> {
    match update_value {
        Bson::Document(d) => Ok(TUpdateSpec::Document(d.clone())),
        Bson::Array(stages) => {
            if stages.is_empty() {
                return Err("aggregation pipeline update cannot be empty".into());
            }

            let mut pipeline = Vec::with_capacity(stages.len());
            for stage in stages {
                match stage {
                    Bson::Document(d) => pipeline.push(d.clone()),
                    _ => {
                        return Err(
                            "aggregation pipeline update stages must be documents".into(),
                        );
                    }
                }
            }

            Ok(TUpdateSpec::Pipeline(pipeline))
        }
        _ => Err("missing or invalid 'u' field in update spec".into()),
    }
}

fn apply_update_spec(
    doc: &Document,
    update: &TUpdateSpec,
    array_filters: Option<&[Document]>,
) -> Result<Document, String> {
    match update {
        TUpdateSpec::Document(update_doc) => UpdateEngine::apply_update(doc, update_doc, array_filters)
            .map_err(|e| e.to_string()),
        TUpdateSpec::Pipeline(pipeline) => {
            if array_filters.is_some_and(|filters| !filters.is_empty()) {
                return Err(
                    "arrayFilters are not supported with aggregation pipeline updates"
                        .into(),
                );
            }

            UpdateEngine::apply_pipeline_update(doc, pipeline).map_err(|e| e.to_string())
        }
    }
}

fn apply_set_on_insert_if_present(update: &TUpdateSpec, doc: &mut Document) {
    if let TUpdateSpec::Document(update_doc) = update {
        if let Some(Bson::Document(soi)) = update_doc.get("$setOnInsert") {
            UpdateEngine::apply_set_on_insert(doc, soi);
        }
    }
}

fn ensure_document_id(doc: &mut Document) -> Bson {
    if let Some(id) = doc.get("_id") {
        id.clone()
    } else {
        let oid = ObjectId::new();
        doc.insert("_id", oid);
        Bson::ObjectId(oid)
    }
}

fn ensure_immutable_id(original_doc: &Document, updated_doc: &mut Document) -> CommandResult<()> {
    if let Some(original_id) = original_doc.get("_id") {
        match updated_doc.get("_id") {
            Some(updated_id) if updated_id == original_id => Ok(()),
            Some(_) => Err(CommandError::ImmutableField(
                "cannot modify immutable field '_id'".into(),
            )),
            None => {
                updated_doc.insert("_id", original_id.clone());
                Ok(())
            }
        }
    } else {
        Ok(())
    }
}

/// Extract _id as a string for storage operations.
fn extract_id_string(doc: &Document) -> String {
    match doc.get("_id") {
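An example update using the new aggregation-pipeline form of `u`. `parse_update_spec` turns the array into `TUpdateSpec::Pipeline`, and `apply_update_spec` rejects the combination with non-empty `arrayFilters`.

```rust
let cmd = bson::doc! {
    "update": "users",
    "updates": [{
        "q": { "status": "active" },
        "u": [
            { "$set": { "audited": true } },
            { "$unset": "tempScore" },
        ],
        "multi": true,
    }],
};
```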
@@ -3,6 +3,6 @@ pub mod error;
pub mod handlers;
mod router;

pub use context::{CommandContext, CursorState};
pub use context::{CommandContext, ConnectionState, CursorState};
pub use error::{CommandError, CommandResult};
pub use router::CommandRouter;
@@ -1,11 +1,12 @@
use std::sync::Arc;

use bson::Document;
use bson::{Bson, Document};
use tracing::{debug, warn};

use rustdb_wire::ParsedCommand;
use rustdb_auth::AuthAction;

use crate::context::CommandContext;
use crate::context::{CommandContext, ConnectionState};
use crate::error::CommandError;
use crate::handlers;

@@ -21,12 +22,46 @@ impl CommandRouter {
    }

    /// Route a parsed command to the appropriate handler, returning a BSON response document.
    pub async fn route(&self, cmd: &ParsedCommand) -> Document {
    pub async fn route(&self, cmd: &ParsedCommand, connection: &mut ConnectionState) -> Document {
        let db = &cmd.database;
        let command_name = cmd.command_name.as_str();

        debug!(command = %command_name, database = %db, "routing command");

        if self.ctx.auth.enabled()
            && !connection.is_authenticated()
            && !allows_unauthenticated(command_name)
        {
            return CommandError::Unauthorized(format!(
                "command '{}' requires authentication",
                command_name,
            ))
            .to_error_doc();
        }

        if self.ctx.auth.enabled() && connection.is_authenticated() {
            if let Some(action) = required_action(command_name, &cmd.command) {
                if !self
                    .ctx
                    .auth
                    .is_authorized(&connection.authenticated_users, db, action)
                {
                    return CommandError::Unauthorized(format!(
                        "command '{}' is not authorized for database '{}'",
                        command_name, db,
                    ))
                    .to_error_doc();
                }
            }
        }

        if transaction_command_unsupported(command_name, &cmd.command) {
            return CommandError::IllegalOperation(
                "Transaction numbers are only allowed on a replica set member or mongos".into(),
            )
            .to_error_doc();
        }

        // Extract session id if present, and touch the session.
        if let Some(lsid) = cmd.command.get("lsid") {
            if let Some(session_id) = rustdb_txn::SessionEngine::extract_session_id(lsid) {

@@ -40,6 +75,14 @@ impl CommandRouter {
                handlers::hello_handler::handle(&cmd.command, db, &self.ctx).await
            }

            // -- authentication --
            "saslStart" => {
                handlers::auth_handler::handle_sasl_start(&cmd.command, db, &self.ctx, connection).await
            }
            "saslContinue" => {
                handlers::auth_handler::handle_sasl_continue(&cmd.command, &self.ctx, connection).await
            }

            // -- query commands --
            "find" => {
                handlers::find_handler::handle(&cmd.command, db, &self.ctx).await

@@ -88,7 +131,9 @@ impl CommandRouter {
            | "dbStats" | "collStats" | "validate" | "explain"
            | "startSession" | "endSessions" | "killSessions"
            | "commitTransaction" | "abortTransaction"
            | "saslStart" | "saslContinue" | "authenticate" | "logout"
            | "authenticate" | "logout"
            | "createUser" | "updateUser" | "dropUser" | "usersInfo"
            | "grantRolesToUser" | "revokeRolesFromUser"
            | "currentOp" | "killOp" | "top" | "profile"
            | "compact" | "reIndex" | "fsync" | "connPoolSync" => {
                handlers::admin_handler::handle(&cmd.command, db, &self.ctx, command_name).await

@@ -107,3 +152,64 @@ impl CommandRouter {
        }
    }
}

fn allows_unauthenticated(command_name: &str) -> bool {
    matches!(
        command_name,
        "hello" | "ismaster" | "isMaster" | "saslStart" | "saslContinue" | "getnonce"
    )
}

fn required_action(command_name: &str, command: &Document) -> Option<AuthAction> {
    match command_name {
        "hello" | "ismaster" | "isMaster" | "saslStart" | "saslContinue" | "getnonce" => None,
        "ping" | "buildInfo" | "buildinfo" | "hostInfo" | "whatsmyuri" | "getLog"
        | "getCmdLineOpts" | "getParameter" | "getFreeMonitoringStatus" | "setFreeMonitoring"
        | "getShardMap" | "shardingState" | "atlasVersion" | "connectionStatus"
        | "startSession" | "endSessions" | "killSessions" | "authenticate" | "logout" => None,

        "find" | "getMore" | "killCursors" | "count" | "distinct" | "listIndexes"
        | "listCollections" | "collStats" | "dbStats" | "validate" | "explain" => {
            Some(AuthAction::Read)
        }

        "aggregate" => Some(if aggregate_writes(command) {
            AuthAction::Write
        } else {
            AuthAction::Read
        }),

        "insert" | "update" | "findAndModify" | "delete" | "commitTransaction"
        | "abortTransaction" => Some(AuthAction::Write),

        "createIndexes" | "dropIndexes" | "create" | "drop" | "dropDatabase"
        | "renameCollection" | "compact" | "reIndex" | "fsync" | "profile" => {
            Some(AuthAction::DbAdmin)
        }

        "createUser" | "updateUser" | "dropUser" | "usersInfo" | "grantRolesToUser"
        | "revokeRolesFromUser" => Some(AuthAction::UserAdmin),

        "serverStatus" | "listDatabases" | "currentOp" | "killOp" | "top" => {
            Some(AuthAction::ClusterMonitor)
        }

        _ => None,
    }
}

fn aggregate_writes(command: &Document) -> bool {
    let Ok(pipeline) = command.get_array("pipeline") else {
        return false;
    };
    pipeline.last().and_then(|stage| match stage {
        Bson::Document(doc) => Some(doc.contains_key("$out") || doc.contains_key("$merge")),
        _ => None,
    }).unwrap_or(false)
}

fn transaction_command_unsupported(command_name: &str, command: &Document) -> bool {
    matches!(command_name, "commitTransaction" | "abortTransaction")
        || matches!(command.get("startTransaction"), Some(Bson::Boolean(true)))
        || matches!(command.get("autocommit"), Some(Bson::Boolean(false)))
}
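A few spot checks of the gating functions above, in test form (same-module test so the private functions are reachable):

```rust
#[test]
fn command_gating_examples() {
    assert!(allows_unauthenticated("saslStart"));
    assert!(!allows_unauthenticated("find"));
    assert!(matches!(required_action("find", &bson::doc! {}), Some(AuthAction::Read)));
    assert!(matches!(required_action("createUser", &bson::doc! {}), Some(AuthAction::UserAdmin)));
    // $out or $merge as the final stage upgrades aggregate to a write.
    let agg = bson::doc! { "pipeline": [ { "$match": {} }, { "$out": "dst" } ] };
    assert!(aggregate_writes(&agg));
}
```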
@@ -46,6 +46,99 @@ pub struct RustDbOptions {
    /// Interval in ms for periodic persistence (default: 60000)
    #[serde(default = "default_persist_interval")]
    pub persist_interval_ms: u64,

    /// Authentication configuration.
    #[serde(default)]
    pub auth: AuthOptions,

    /// TLS transport configuration for TCP listeners.
    #[serde(default)]
    pub tls: TlsOptions,
}

/// Authentication configuration for the embedded server.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct AuthOptions {
    /// Whether clients must authenticate before issuing protected commands.
    #[serde(default)]
    pub enabled: bool,

    /// Bootstrap users loaded at startup. Passwords are converted into SCRAM credentials in memory.
    #[serde(default)]
    pub users: Vec<AuthUserOptions>,

    /// Optional path for persisted SCRAM user metadata. Stores derived credentials, never plaintext passwords.
    #[serde(default)]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub users_path: Option<String>,

    /// SCRAM iteration count used for bootstrap credentials.
    #[serde(default = "default_scram_iterations")]
    pub scram_iterations: u32,
}

impl Default for AuthOptions {
    fn default() -> Self {
        Self {
            enabled: false,
            users: Vec::new(),
            users_path: None,
            scram_iterations: default_scram_iterations(),
        }
    }
}

/// TLS transport configuration for the embedded server.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct TlsOptions {
    /// Whether TCP client connections must use TLS.
    #[serde(default)]
    pub enabled: bool,

    /// PEM-encoded server certificate chain.
    #[serde(default)]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub cert_path: Option<String>,

    /// PEM-encoded server private key.
    #[serde(default)]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub key_path: Option<String>,

    /// PEM-encoded client CA roots for mTLS verification.
    #[serde(default)]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ca_path: Option<String>,

    /// Require clients to present a certificate signed by caPath.
    #[serde(default)]
    pub require_client_cert: bool,
}

impl Default for TlsOptions {
    fn default() -> Self {
        Self {
            enabled: false,
            cert_path: None,
            key_path: None,
            ca_path: None,
            require_client_cert: false,
        }
    }
}

/// A bootstrap user for SCRAM authentication.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct AuthUserOptions {
    pub username: String,
    pub password: String,
    #[serde(default = "default_auth_database")]
    pub database: String,
    #[serde(default)]
    pub roles: Vec<String>,
}

fn default_port() -> u16 {

@@ -60,6 +153,14 @@ fn default_persist_interval() -> u64 {
    60000
}

fn default_scram_iterations() -> u32 {
    15000
}

fn default_auth_database() -> String {
    "admin".to_string()
}

impl Default for RustDbOptions {
    fn default() -> Self {
        Self {

@@ -70,6 +171,8 @@ impl Default for RustDbOptions {
            storage_path: None,
            persist_path: None,
            persist_interval_ms: default_persist_interval(),
            auth: AuthOptions::default(),
            tls: TlsOptions::default(),
        }
    }
}

@@ -92,6 +195,59 @@ impl RustDbOptions {
                "storagePath is required when storage is 'file'".to_string(),
            ));
        }
        if self.auth.enabled {
            if self.auth.users.is_empty() && self.auth.users_path.is_none() {
                return Err(ConfigError::ValidationError(
                    "auth.users or auth.usersPath must be set when auth.enabled is true".to_string(),
                ));
            }
            if self.auth.scram_iterations < 4096 {
                return Err(ConfigError::ValidationError(
                    "auth.scramIterations must be at least 4096".to_string(),
                ));
            }
            for user in &self.auth.users {
                if user.username.is_empty() {
                    return Err(ConfigError::ValidationError(
                        "auth.users[].username must not be empty".to_string(),
                    ));
                }
                if user.password.is_empty() {
                    return Err(ConfigError::ValidationError(
                        format!("auth user '{}' must have a non-empty password", user.username),
                    ));
                }
                if user.database.is_empty() {
                    return Err(ConfigError::ValidationError(
                        format!("auth user '{}' must have a non-empty database", user.username),
                    ));
                }
            }
        }
        if self.tls.enabled {
            if self.socket_path.is_some() {
                return Err(ConfigError::ValidationError(
                    "tls.enabled is only supported for TCP listeners".to_string(),
                ));
            }
            if self.tls.cert_path.as_deref().unwrap_or_default().is_empty() {
                return Err(ConfigError::ValidationError(
                    "tls.certPath is required when tls.enabled is true".to_string(),
                ));
            }
            if self.tls.key_path.as_deref().unwrap_or_default().is_empty() {
                return Err(ConfigError::ValidationError(
                    "tls.keyPath is required when tls.enabled is true".to_string(),
                ));
            }
            if self.tls.require_client_cert
                && self.tls.ca_path.as_deref().unwrap_or_default().is_empty()
            {
                return Err(ConfigError::ValidationError(
                    "tls.caPath is required when tls.requireClientCert is true".to_string(),
                ));
            }
        }
        Ok(())
    }

@@ -101,7 +257,12 @@ impl RustDbOptions {
            let encoded = urlencoding(socket_path);
            format!("mongodb://{}", encoded)
        } else {
            format!("mongodb://{}:{}", self.host, self.port)
            let base = format!("mongodb://{}:{}", self.host, self.port);
            if self.tls.enabled {
                format!("{}/?tls=true", base)
            } else {
                base
            }
        }
    }
}
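A sketch of wiring these options up from code. The validation method's name is not visible in this hunk, so `validate()` below is an assumption based on the checks shown above; the certificate paths are placeholders.

```rust
let mut options = RustDbOptions::default();
options.auth.enabled = true;
options.auth.users.push(AuthUserOptions {
    username: "root".into(),
    password: "change-me".into(),
    database: "admin".into(),
    roles: vec!["root".into()],
});
options.tls.enabled = true;
options.tls.cert_path = Some("server.pem".into()); // placeholder path
options.tls.key_path = Some("server.key".into());  // placeholder path
options.validate()?; // assumed name for the checks shown above
```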
@@ -153,6 +153,55 @@ impl IndexEngine {
        self.indexes.contains_key(name)
    }

    /// Check unique constraints for a document without modifying the index.
    /// Returns Ok(()) if no conflict, Err(DuplicateKey) if a unique constraint
    /// would be violated. This is a read-only check (immutable &self).
    pub fn check_unique_constraints(&self, doc: &Document) -> Result<(), IndexError> {
        for idx in self.indexes.values() {
            if idx.unique {
                let key_bytes = Self::extract_key_bytes(doc, &idx.key, idx.sparse);
                if let Some(ref kb) = key_bytes {
                    if let Some(existing_ids) = idx.hash.get(kb) {
                        if !existing_ids.is_empty() {
                            return Err(IndexError::DuplicateKey {
                                index: idx.name.clone(),
                                key: format!("{:?}", kb),
                            });
                        }
                    }
                }
            }
        }
        Ok(())
    }

    /// Check unique constraints for an update, excluding the document being updated.
    /// Returns Ok(()) if no conflict. This is a read-only check (immutable &self).
    pub fn check_unique_constraints_for_update(
        &self,
        old_doc: &Document,
        new_doc: &Document,
    ) -> Result<(), IndexError> {
        let doc_id = Self::extract_id(old_doc);
        for idx in self.indexes.values() {
            if idx.unique {
                let new_key_bytes = Self::extract_key_bytes(new_doc, &idx.key, idx.sparse);
                if let Some(ref kb) = new_key_bytes {
                    if let Some(existing_ids) = idx.hash.get(kb) {
                        let has_conflict = existing_ids.iter().any(|id| *id != doc_id);
                        if has_conflict {
                            return Err(IndexError::DuplicateKey {
                                index: idx.name.clone(),
                                key: format!("{:?}", kb),
                            });
                        }
                    }
                }
            }
        }
        Ok(())
    }

    /// Notify the engine that a document has been inserted.
    /// Checks unique constraints and updates all index structures.
    pub fn on_insert(&mut self, doc: &Document) -> Result<(), IndexError> {
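The command handlers shown earlier call these in a consistent pattern: check first through `&self`, perform the storage write, then mutate the index. A condensed sketch:

```rust
// Insert path: fail fast, before any storage write.
engine.check_unique_constraints(&new_doc)?;
// ... storage.insert_one(...) succeeds ...
engine.on_insert(&new_doc)?; // updates the index structures

// Update path: the old document's id is excluded from the conflict scan,
// so rewriting a document with its own unique key is not a false positive.
engine.check_unique_constraints_for_update(&old_doc, &new_doc)?;
```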
@@ -2,10 +2,10 @@ use bson::{Bson, Document};
use std::collections::HashMap;

use crate::error::QueryError;
use crate::field_path::{get_nested_value, remove_nested_value};
use crate::matcher::QueryMatcher;
use crate::sort::sort_documents;
use crate::projection::apply_projection;
use crate::field_path::get_nested_value;
use crate::sort::sort_documents;

/// Aggregation pipeline engine.
pub struct AggregationEngine;

@@ -42,6 +42,7 @@ impl AggregationEngine {
            "$count" => Self::stage_count(current, stage_spec)?,
            "$addFields" | "$set" => Self::stage_add_fields(current, stage_spec)?,
            "$replaceRoot" | "$replaceWith" => Self::stage_replace_root(current, stage_spec)?,
            "$unset" => Self::stage_unset(current, stage_spec)?,
            "$lookup" => Self::stage_lookup(current, stage_spec, resolver, db)?,
            "$facet" => Self::stage_facet(current, stage_spec, resolver, db)?,
            "$unionWith" => Self::stage_union_with(current, stage_spec, resolver, db)?,

@@ -60,7 +61,11 @@ impl AggregationEngine {
    fn stage_match(docs: Vec<Document>, spec: &Bson) -> Result<Vec<Document>, QueryError> {
        let filter = match spec {
            Bson::Document(d) => d,
            _ => return Err(QueryError::AggregationError("$match requires a document".into())),
            _ => {
                return Err(QueryError::AggregationError(
                    "$match requires a document".into(),
                ))
            }
        };
        Ok(QueryMatcher::filter(&docs, filter))
    }

@@ -68,15 +73,26 @@ impl AggregationEngine {
    fn stage_project(docs: Vec<Document>, spec: &Bson) -> Result<Vec<Document>, QueryError> {
        let projection = match spec {
            Bson::Document(d) => d,
            _ => return Err(QueryError::AggregationError("$project requires a document".into())),
            _ => {
                return Err(QueryError::AggregationError(
                    "$project requires a document".into(),
                ))
            }
        };
        Ok(docs.into_iter().map(|doc| apply_projection(&doc, projection)).collect())
        Ok(docs
            .into_iter()
            .map(|doc| apply_projection(&doc, projection))
            .collect())
    }

    fn stage_sort(mut docs: Vec<Document>, spec: &Bson) -> Result<Vec<Document>, QueryError> {
        let sort_spec = match spec {
            Bson::Document(d) => d,
            _ => return Err(QueryError::AggregationError("$sort requires a document".into())),
            _ => {
                return Err(QueryError::AggregationError(
                    "$sort requires a document".into(),
                ))
            }
        };
        sort_documents(&mut docs, sort_spec);
        Ok(docs)

@@ -97,7 +113,11 @@ impl AggregationEngine {
    fn stage_group(docs: Vec<Document>, spec: &Bson) -> Result<Vec<Document>, QueryError> {
        let group_spec = match spec {
            Bson::Document(d) => d,
            _ => return Err(QueryError::AggregationError("$group requires a document".into())),
            _ => {
                return Err(QueryError::AggregationError(
                    "$group requires a document".into(),
                ))
            }
        };

        let id_expr = group_spec.get("_id").cloned().unwrap_or(Bson::Null);

@@ -158,13 +178,18 @@ impl AggregationEngine {
        let (path, preserve_null) = match spec {
            Bson::String(s) => (s.trim_start_matches('$').to_string(), false),
            Bson::Document(d) => {
                let path = d.get_str("path")
                let path = d
                    .get_str("path")
                    .map(|s| s.trim_start_matches('$').to_string())
                    .map_err(|_| QueryError::AggregationError("$unwind requires 'path'".into()))?;
                let preserve = d.get_bool("preserveNullAndEmptyArrays").unwrap_or(false);
                (path, preserve)
            }
            _ => return Err(QueryError::AggregationError("$unwind requires a string or document".into())),
            _ => {
                return Err(QueryError::AggregationError(
                    "$unwind requires a string or document".into(),
                ))
            }
        };

        let mut result = Vec::new();

@@ -206,7 +231,11 @@ impl AggregationEngine {
    fn stage_count(docs: Vec<Document>, spec: &Bson) -> Result<Vec<Document>, QueryError> {
        let field = match spec {
            Bson::String(s) => s.clone(),
            _ => return Err(QueryError::AggregationError("$count requires a string".into())),
            _ => {
                return Err(QueryError::AggregationError(
                    "$count requires a string".into(),
                ))
            }
        };
        Ok(vec![bson::doc! { field: docs.len() as i64 }])
    }

@@ -214,7 +243,11 @@ impl AggregationEngine {
    fn stage_add_fields(docs: Vec<Document>, spec: &Bson) -> Result<Vec<Document>, QueryError> {
        let fields = match spec {
            Bson::Document(d) => d,
            _ => return Err(QueryError::AggregationError("$addFields requires a document".into())),
            _ => {
                return Err(QueryError::AggregationError(
                    "$addFields requires a document".into(),
                ))
            }
        };

        Ok(docs

@@ -231,9 +264,16 @@ impl AggregationEngine {

    fn stage_replace_root(docs: Vec<Document>, spec: &Bson) -> Result<Vec<Document>, QueryError> {
        let new_root_expr = match spec {
            Bson::Document(d) => d.get("newRoot").cloned().unwrap_or(Bson::Document(d.clone())),
            Bson::Document(d) => d
                .get("newRoot")
                .cloned()
                .unwrap_or(Bson::Document(d.clone())),
            Bson::String(s) => Bson::String(s.clone()),
            _ => return Err(QueryError::AggregationError("$replaceRoot requires a document".into())),
            _ => {
                return Err(QueryError::AggregationError(
                    "$replaceRoot requires a document".into(),
                ))
            }
        };

        let mut result = Vec::new();

@@ -246,6 +286,40 @@ impl AggregationEngine {
        Ok(result)
    }

    fn stage_unset(docs: Vec<Document>, spec: &Bson) -> Result<Vec<Document>, QueryError> {
        let fields: Vec<String> = match spec {
            Bson::String(s) => vec![s.clone()],
            Bson::Array(arr) => arr
                .iter()
                .map(|value| match value {
                    Bson::String(field) => Ok(field.clone()),
                    _ => Err(QueryError::AggregationError(
                        "$unset array entries must be strings".into(),
                    )),
                })
                .collect::<Result<Vec<_>, _>>()?,
            _ => {
                return Err(QueryError::AggregationError(
                    "$unset requires a string or array of strings".into(),
                ));
            }
        };

        Ok(docs
            .into_iter()
            .map(|mut doc| {
                for field in &fields {
                    if field.contains('.') {
                        remove_nested_value(&mut doc, field);
                    } else {
                        doc.remove(field);
                    }
                }
                doc
            })
            .collect())
    }
|
||||
|
||||
fn stage_lookup(
|
||||
docs: Vec<Document>,
|
||||
spec: &Bson,
|
||||
@@ -254,20 +328,29 @@ impl AggregationEngine {
|
||||
) -> Result<Vec<Document>, QueryError> {
|
||||
let lookup = match spec {
|
||||
Bson::Document(d) => d,
|
||||
_ => return Err(QueryError::AggregationError("$lookup requires a document".into())),
|
||||
_ => {
|
||||
return Err(QueryError::AggregationError(
|
||||
"$lookup requires a document".into(),
|
||||
))
|
||||
}
|
||||
};
|
||||
|
||||
let from = lookup.get_str("from")
|
||||
let from = lookup
|
||||
.get_str("from")
|
||||
.map_err(|_| QueryError::AggregationError("$lookup requires 'from'".into()))?;
|
||||
let local_field = lookup.get_str("localField")
|
||||
let local_field = lookup
|
||||
.get_str("localField")
|
||||
.map_err(|_| QueryError::AggregationError("$lookup requires 'localField'".into()))?;
|
||||
let foreign_field = lookup.get_str("foreignField")
|
||||
let foreign_field = lookup
|
||||
.get_str("foreignField")
|
||||
.map_err(|_| QueryError::AggregationError("$lookup requires 'foreignField'".into()))?;
|
||||
let as_field = lookup.get_str("as")
|
||||
let as_field = lookup
|
||||
.get_str("as")
|
||||
.map_err(|_| QueryError::AggregationError("$lookup requires 'as'".into()))?;
|
||||
|
||||
let resolver = resolver
|
||||
.ok_or_else(|| QueryError::AggregationError("$lookup requires a collection resolver".into()))?;
|
||||
let resolver = resolver.ok_or_else(|| {
|
||||
QueryError::AggregationError("$lookup requires a collection resolver".into())
|
||||
})?;
|
||||
let foreign_docs = resolver.resolve(db, from)?;
|
||||
|
||||
Ok(docs
|
||||
@@ -299,7 +382,11 @@ impl AggregationEngine {
|
||||
) -> Result<Vec<Document>, QueryError> {
|
||||
let facets = match spec {
|
||||
Bson::Document(d) => d,
|
||||
_ => return Err(QueryError::AggregationError("$facet requires a document".into())),
|
||||
_ => {
|
||||
return Err(QueryError::AggregationError(
|
||||
"$facet requires a document".into(),
|
||||
))
|
||||
}
|
||||
};
|
||||
|
||||
let mut result = Document::new();
|
||||
@@ -337,22 +424,32 @@ impl AggregationEngine {
|
||||
let (coll, pipeline) = match spec {
|
||||
Bson::String(s) => (s.as_str(), None),
|
||||
Bson::Document(d) => {
|
||||
let coll = d.get_str("coll")
|
||||
.map_err(|_| QueryError::AggregationError("$unionWith requires 'coll'".into()))?;
|
||||
let coll = d.get_str("coll").map_err(|_| {
|
||||
QueryError::AggregationError("$unionWith requires 'coll'".into())
|
||||
})?;
|
||||
let pipeline = d.get_array("pipeline").ok().map(|arr| {
|
||||
arr.iter()
|
||||
.filter_map(|s| {
|
||||
if let Bson::Document(d) = s { Some(d.clone()) } else { None }
|
||||
if let Bson::Document(d) = s {
|
||||
Some(d.clone())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect::<Vec<Document>>()
|
||||
});
|
||||
(coll, pipeline)
|
||||
}
|
||||
_ => return Err(QueryError::AggregationError("$unionWith requires a string or document".into())),
|
||||
_ => {
|
||||
return Err(QueryError::AggregationError(
|
||||
"$unionWith requires a string or document".into(),
|
||||
))
|
||||
}
|
||||
};
|
||||
|
||||
let resolver = resolver
|
||||
.ok_or_else(|| QueryError::AggregationError("$unionWith requires a collection resolver".into()))?;
|
||||
let resolver = resolver.ok_or_else(|| {
|
||||
QueryError::AggregationError("$unionWith requires a collection resolver".into())
|
||||
})?;
|
||||
let mut other_docs = resolver.resolve(db, coll)?;
|
||||
|
||||
if let Some(p) = pipeline {
|
||||
@@ -476,7 +573,11 @@ fn accumulate_min(docs: &[Document], expr: &Bson) -> Bson {
|
||||
None => val,
|
||||
Some(current) => {
|
||||
if let (Some(cv), Some(vv)) = (bson_to_f64(¤t), bson_to_f64(&val)) {
|
||||
if vv < cv { val } else { current }
|
||||
if vv < cv {
|
||||
val
|
||||
} else {
|
||||
current
|
||||
}
|
||||
} else {
|
||||
current
|
||||
}
|
||||
@@ -499,7 +600,11 @@ fn accumulate_max(docs: &[Document], expr: &Bson) -> Bson {
|
||||
None => val,
|
||||
Some(current) => {
|
||||
if let (Some(cv), Some(vv)) = (bson_to_f64(¤t), bson_to_f64(&val)) {
|
||||
if vv > cv { val } else { current }
|
||||
if vv > cv {
|
||||
val
|
||||
} else {
|
||||
current
|
||||
}
|
||||
} else {
|
||||
current
|
||||
}
|
||||
|
||||
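Aside: the `$unset` stage added above slots into the same `stage_name => stage_fn` dispatch as every other pipeline stage. As a standalone illustration of that dispatch shape (not the engine's actual API: `run_pipeline`, `stage_match_eq`, and `stage_unset_top_level` are hypothetical names, and the matcher here is equality-only), a minimal sketch using just the `bson` crate:

```rust
use bson::{doc, Bson, Document};

// Equality-only matcher: keep docs where every filter field matches exactly.
fn stage_match_eq(docs: Vec<Document>, filter: &Document) -> Vec<Document> {
    docs.into_iter()
        .filter(|d| filter.iter().all(|(k, v)| d.get(k) == Some(v)))
        .collect()
}

// Top-level-only field removal (the real stage also handles dotted paths).
fn stage_unset_top_level(docs: Vec<Document>, field: &str) -> Vec<Document> {
    docs.into_iter()
        .map(|mut d| {
            d.remove(field);
            d
        })
        .collect()
}

fn run_pipeline(mut docs: Vec<Document>, pipeline: &[Document]) -> Result<Vec<Document>, String> {
    for stage in pipeline {
        // Each stage document has exactly one key: the stage name.
        let (name, spec) = stage.iter().next().ok_or("empty stage")?;
        docs = match (name.as_str(), spec) {
            ("$match", Bson::Document(f)) => stage_match_eq(docs, f),
            ("$unset", Bson::String(field)) => stage_unset_top_level(docs, field),
            _ => return Err(format!("unsupported stage: {name}")),
        };
    }
    Ok(docs)
}

fn main() {
    let docs = vec![doc! { "name": "a", "tmp": 1 }, doc! { "name": "b" }];
    let pipeline = vec![doc! { "$match": { "name": "a" } }, doc! { "$unset": "tmp" }];
    println!("{:?}", run_pipeline(docs, &pipeline).unwrap());
}
```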
@@ -1,7 +1,8 @@
use bson::{Bson, Document, doc};
use bson::{doc, Bson, Document};

use crate::aggregation::AggregationEngine;
use crate::error::QueryError;
use crate::field_path::{get_nested_value, set_nested_value, remove_nested_value};
use crate::field_path::{get_nested_value, remove_nested_value, set_nested_value};
use crate::matcher::QueryMatcher;

/// Update engine — applies update operators to documents.
@@ -56,6 +57,46 @@ impl UpdateEngine {
        Ok(result)
    }

    /// Apply an aggregation pipeline update specification to a document.
    pub fn apply_pipeline_update(
        doc: &Document,
        pipeline: &[Document],
    ) -> Result<Document, QueryError> {
        if pipeline.is_empty() {
            return Err(QueryError::InvalidUpdate(
                "aggregation pipeline update cannot be empty".into(),
            ));
        }

        for stage in pipeline {
            let (stage_name, _) = stage.iter().next().ok_or_else(|| {
                QueryError::InvalidUpdate(
                    "aggregation pipeline update stages must not be empty".into(),
                )
            })?;

            if !matches!(
                stage_name.as_str(),
                "$addFields" | "$set" | "$project" | "$unset" | "$replaceRoot" | "$replaceWith"
            ) {
                return Err(QueryError::InvalidUpdate(format!(
                    "Unsupported aggregation pipeline update stage: {}",
                    stage_name
                )));
            }
        }

        let mut results = AggregationEngine::aggregate(vec![doc.clone()], pipeline, None, "")
            .map_err(|e| QueryError::InvalidUpdate(e.to_string()))?;

        match results.len() {
            1 => Ok(results.remove(0)),
            _ => Err(QueryError::InvalidUpdate(
                "aggregation pipeline update must produce exactly one document".into(),
            )),
        }
    }

    /// Apply $setOnInsert fields (used during upsert only).
    pub fn apply_set_on_insert(doc: &mut Document, fields: &Document) {
        for (key, value) in fields {
@@ -252,16 +293,14 @@ impl UpdateEngine {
        for (key, spec) in fields {
            let value = match spec {
                Bson::Boolean(true) => Bson::DateTime(now),
                Bson::Document(d) => {
                    match d.get_str("$type").unwrap_or("date") {
                        "date" => Bson::DateTime(now),
                        "timestamp" => Bson::Timestamp(bson::Timestamp {
                            time: (now.timestamp_millis() / 1000) as u32,
                            increment: 0,
                        }),
                        _ => Bson::DateTime(now),
                    }
                }
                Bson::Document(d) => match d.get_str("$type").unwrap_or("date") {
                    "date" => Bson::DateTime(now),
                    "timestamp" => Bson::Timestamp(bson::Timestamp {
                        time: (now.timestamp_millis() / 1000) as u32,
                        increment: 0,
                    }),
                    _ => Bson::DateTime(now),
                },
                _ => continue,
            };

@@ -282,7 +321,9 @@ impl UpdateEngine {
                Bson::Document(d) if d.contains_key("$each") => {
                    let each = match d.get("$each") {
                        Some(Bson::Array(a)) => a.clone(),
                        _ => return Err(QueryError::InvalidUpdate("$each must be an array".into())),
                        _ => {
                            return Err(QueryError::InvalidUpdate("$each must be an array".into()))
                        }
                    };

                    let position = d.get("$position").and_then(|v| match v {
@@ -325,11 +366,21 @@ impl UpdateEngine {
                    continue;
                }
                match direction {
                    Bson::Int32(-1) | Bson::Int64(-1) => { arr.remove(0); }
                    Bson::Int32(1) | Bson::Int64(1) => { arr.pop(); }
                    Bson::Double(f) if *f == 1.0 => { arr.pop(); }
                    Bson::Double(f) if *f == -1.0 => { arr.remove(0); }
                    _ => { arr.pop(); }
                    Bson::Int32(-1) | Bson::Int64(-1) => {
                        arr.remove(0);
                    }
                    Bson::Int32(1) | Bson::Int64(1) => {
                        arr.pop();
                    }
                    Bson::Double(f) if *f == 1.0 => {
                        arr.pop();
                    }
                    Bson::Double(f) if *f == -1.0 => {
                        arr.remove(0);
                    }
                    _ => {
                        arr.pop();
                    }
                }
            }
        }
@@ -455,7 +506,11 @@ impl UpdateEngine {
                    let ascending = *dir > 0;
                    arr.sort_by(|a, b| {
                        let ord = partial_cmp_bson(a, b);
                        if ascending { ord } else { ord.reverse() }
                        if ascending {
                            ord
                        } else {
                            ord.reverse()
                        }
                    });
                }
                Bson::Document(spec) => {
@@ -465,8 +520,16 @@ impl UpdateEngine {
                        Bson::Int32(n) => *n > 0,
                        _ => true,
                    };
                    let a_val = if let Bson::Document(d) = a { d.get(field) } else { None };
                    let b_val = if let Bson::Document(d) = b { d.get(field) } else { None };
                    let a_val = if let Bson::Document(d) = a {
                        d.get(field)
                    } else {
                        None
                    };
                    let b_val = if let Bson::Document(d) = b {
                        d.get(field)
                    } else {
                        None
                    };
                    let ord = match (a_val, b_val) {
                        (Some(av), Some(bv)) => partial_cmp_bson(av, bv),
                        (Some(_), None) => std::cmp::Ordering::Greater,
@@ -572,4 +635,27 @@ mod tests {
        let tags = result.get_array("tags").unwrap();
        assert_eq!(tags.len(), 2); // no duplicate
    }

    #[test]
    fn test_pipeline_update() {
        let doc = doc! { "_id": 1, "name": "Alice", "age": 30, "legacy": true };
        let pipeline = vec![
            doc! { "$set": { "displayName": "$name", "status": "updated" } },
            doc! { "$unset": ["legacy"] },
        ];

        let result = UpdateEngine::apply_pipeline_update(&doc, &pipeline).unwrap();
        assert_eq!(result.get_str("displayName").unwrap(), "Alice");
        assert_eq!(result.get_str("status").unwrap(), "updated");
        assert!(result.get("legacy").is_none());
    }

    #[test]
    fn test_pipeline_update_rejects_unsupported_stage() {
        let doc = doc! { "_id": 1, "name": "Alice" };
        let pipeline = vec![doc! { "$match": { "name": "Alice" } }];

        let result = UpdateEngine::apply_pipeline_update(&doc, &pipeline);
        assert!(matches!(result, Err(QueryError::InvalidUpdate(_))));
    }
}

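Aside: the `$currentDate` hunk above only reflows the match, but the behavior it preserves is easy to state in isolation: a bare `true` or `{ "$type": "date" }` yields a BSON datetime, while `{ "$type": "timestamp" }` yields a BSON timestamp derived from the same instant. A standalone sketch assuming only the `bson` crate (`current_date_value` is a hypothetical helper, not the engine's API):

```rust
use bson::{doc, Bson};

fn current_date_value(spec: &Bson) -> Option<Bson> {
    let now = bson::DateTime::now();
    match spec {
        // { field: true } means "set to the current date"
        Bson::Boolean(true) => Some(Bson::DateTime(now)),
        // { field: { "$type": "date" | "timestamp" } }
        Bson::Document(d) => Some(match d.get_str("$type").unwrap_or("date") {
            "timestamp" => Bson::Timestamp(bson::Timestamp {
                time: (now.timestamp_millis() / 1000) as u32,
                increment: 0,
            }),
            _ => Bson::DateTime(now),
        }),
        // mirrors the engine's `_ => continue`: unrecognized specs are skipped
        _ => None,
    }
}

fn main() {
    let date = current_date_value(&Bson::Boolean(true));
    let ts = current_date_value(&Bson::Document(doc! { "$type": "timestamp" }));
    println!("{date:?} {ts:?}");
}
```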
@@ -17,3 +17,6 @@ tracing = { workspace = true }
crc32fast = { workspace = true }
uuid = { workspace = true }
async-trait = { workspace = true }

[dev-dependencies]
tempfile = { workspace = true }

@@ -0,0 +1,499 @@
//! Binary Write-Ahead Log for crash recovery.
//!
//! # Protocol
//!
//! Every mutation follows this sequence:
//! 1. Append WAL record → fsync
//! 2. Perform the actual data write
//! 3. Append WAL commit marker → fsync
//!
//! On recovery, uncommitted entries (those without a matching commit marker)
//! are replayed or verified.
//!
//! # Record format
//!
//! ```text
//! ┌──────────┬──────────┬──────────┬──────────┬──────────┬──────────┬────────────┐
//! │ magic    │ seq      │ op       │ key_len  │ val_len  │ crc32    │ payload    │
//! │ u16 LE   │ u64 LE   │ u8       │ u32 LE   │ u32 LE   │ u32 LE   │ [key][val] │
//! │ 0xAA01   │          │          │          │          │          │            │
//! └──────────┴──────────┴──────────┴──────────┴──────────┴──────────┴────────────┘
//! ```
//!
//! # Commit marker
//!
//! ```text
//! ┌──────────┬──────────┬──────────┐
//! │ magic    │ seq      │ crc32    │
//! │ u16 LE   │ u64 LE   │ u32 LE   │
//! │ 0xCC01   │          │          │
//! └──────────┴──────────┴──────────┘
//! ```

use std::io::{self, BufReader, Read, Write};
use std::path::{Path, PathBuf};
use std::sync::atomic::{AtomicU64, Ordering};

use crate::error::{StorageError, StorageResult};
use crate::record::{FileHeader, FileType, FILE_HEADER_SIZE};

// ---------------------------------------------------------------------------
// Constants
// ---------------------------------------------------------------------------

const WAL_RECORD_MAGIC: u16 = 0xAA01;
const WAL_COMMIT_MAGIC: u16 = 0xCC01;

/// WAL record header: magic(2) + seq(8) + op(1) + key_len(4) + val_len(4) + crc(4) = 23
const WAL_RECORD_HEADER: usize = 23;

/// Commit marker size: magic(2) + seq(8) + crc(4) = 14
const WAL_COMMIT_SIZE: usize = 14;

// ---------------------------------------------------------------------------
// WAL operation type
// ---------------------------------------------------------------------------

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
pub enum WalOpType {
    Insert = 1,
    Update = 2,
    Delete = 3,
}

impl WalOpType {
    fn from_u8(v: u8) -> StorageResult<Self> {
        match v {
            1 => Ok(WalOpType::Insert),
            2 => Ok(WalOpType::Update),
            3 => Ok(WalOpType::Delete),
            _ => Err(StorageError::WalError(format!("unknown WAL op: {v}"))),
        }
    }
}

// ---------------------------------------------------------------------------
// WAL entry (parsed from file)
// ---------------------------------------------------------------------------

#[derive(Debug, Clone)]
pub struct WalEntry {
    pub seq: u64,
    pub op: WalOpType,
    pub key: Vec<u8>,
    pub value: Vec<u8>,
}

// ---------------------------------------------------------------------------
// Internal: what we read from the WAL file
// ---------------------------------------------------------------------------

#[derive(Debug)]
enum WalItem {
    Record(WalEntry),
    Commit(u64), // seq that was committed
}

// ---------------------------------------------------------------------------
// BinaryWal
// ---------------------------------------------------------------------------

/// Binary write-ahead log backed by a single file.
pub struct BinaryWal {
    path: PathBuf,
    next_seq: AtomicU64,
}

impl BinaryWal {
    /// Create a new WAL. Does not touch the filesystem until `initialize()`.
    pub fn new(path: PathBuf) -> Self {
        Self {
            path,
            next_seq: AtomicU64::new(1),
        }
    }

    /// Initialize: create parent dirs, recover sequence counter from existing file.
    pub fn initialize(&self) -> StorageResult<()> {
        if let Some(parent) = self.path.parent() {
            std::fs::create_dir_all(parent)?;
        }

        if self.path.exists() {
            // Scan to find highest seq
            let items = self.read_all_items()?;
            let max_seq = items
                .iter()
                .map(|item| match item {
                    WalItem::Record(e) => e.seq,
                    WalItem::Commit(s) => *s,
                })
                .max()
                .unwrap_or(0);
            self.next_seq.store(max_seq + 1, Ordering::SeqCst);
        } else {
            // Create the file with a header
            let mut f = std::fs::File::create(&self.path)?;
            let hdr = FileHeader::new(FileType::Wal);
            f.write_all(&hdr.encode())?;
            f.flush()?;
            f.sync_all()?;
        }

        Ok(())
    }

    /// Append a WAL record. Returns the sequence number. Fsyncs.
    pub fn append(
        &self,
        op: WalOpType,
        key: &[u8],
        value: &[u8],
    ) -> StorageResult<u64> {
        let seq = self.next_seq.fetch_add(1, Ordering::SeqCst);
        let key_len = key.len() as u32;
        let val_len = value.len() as u32;

        // Build header bytes (without CRC)
        let mut hdr = Vec::with_capacity(WAL_RECORD_HEADER);
        hdr.extend_from_slice(&WAL_RECORD_MAGIC.to_le_bytes());
        hdr.extend_from_slice(&seq.to_le_bytes());
        hdr.push(op as u8);
        hdr.extend_from_slice(&key_len.to_le_bytes());
        hdr.extend_from_slice(&val_len.to_le_bytes());
        // CRC placeholder
        hdr.extend_from_slice(&0u32.to_le_bytes());

        // Compute CRC over header (without crc field) + payload
        let mut hasher = crc32fast::Hasher::new();
        hasher.update(&hdr[0..19]); // magic + seq + op + key_len + val_len
        hasher.update(key);
        hasher.update(value);
        let crc = hasher.finalize();
        hdr[19..23].copy_from_slice(&crc.to_le_bytes());

        // Append to file
        let mut f = std::fs::OpenOptions::new()
            .create(true)
            .append(true)
            .open(&self.path)?;
        f.write_all(&hdr)?;
        f.write_all(key)?;
        f.write_all(value)?;
        f.sync_all()?;

        Ok(seq)
    }

    /// Append a commit marker for the given sequence. Fsyncs.
    pub fn append_commit(&self, seq: u64) -> StorageResult<()> {
        let mut buf = Vec::with_capacity(WAL_COMMIT_SIZE);
        buf.extend_from_slice(&WAL_COMMIT_MAGIC.to_le_bytes());
        buf.extend_from_slice(&seq.to_le_bytes());

        // CRC over magic + seq
        let mut hasher = crc32fast::Hasher::new();
        hasher.update(&buf[0..10]);
        let crc = hasher.finalize();
        buf.extend_from_slice(&crc.to_le_bytes());

        let mut f = std::fs::OpenOptions::new()
            .create(true)
            .append(true)
            .open(&self.path)?;
        f.write_all(&buf)?;
        f.sync_all()?;

        Ok(())
    }

    /// Recover: return all WAL entries that were NOT committed.
    pub fn recover(&self) -> StorageResult<Vec<WalEntry>> {
        let items = self.read_all_items()?;

        // Collect committed seq numbers
        let committed: std::collections::HashSet<u64> = items
            .iter()
            .filter_map(|item| {
                if let WalItem::Commit(s) = item {
                    Some(*s)
                } else {
                    None
                }
            })
            .collect();

        // Return records without a commit marker
        let uncommitted: Vec<WalEntry> = items
            .into_iter()
            .filter_map(|item| {
                if let WalItem::Record(entry) = item {
                    if !committed.contains(&entry.seq) {
                        return Some(entry);
                    }
                }
                None
            })
            .collect();

        Ok(uncommitted)
    }

    /// Truncate the WAL: rewrite with just the file header (clears all entries).
    pub fn truncate(&self) -> StorageResult<()> {
        let mut f = std::fs::File::create(&self.path)?;
        let hdr = FileHeader::new(FileType::Wal);
        f.write_all(&hdr.encode())?;
        f.flush()?;
        f.sync_all()?;
        // Don't reset next_seq — it should keep incrementing
        Ok(())
    }

    /// Path to the WAL file.
    pub fn path(&self) -> &Path {
        &self.path
    }

    // -----------------------------------------------------------------------
    // Internal: read all items from the WAL file
    // -----------------------------------------------------------------------

    fn read_all_items(&self) -> StorageResult<Vec<WalItem>> {
        if !self.path.exists() {
            return Ok(vec![]);
        }

        let file = std::fs::File::open(&self.path)?;
        let mut reader = BufReader::new(file);

        // Skip file header (if present)
        let file_len = std::fs::metadata(&self.path)?.len();
        if file_len >= FILE_HEADER_SIZE as u64 {
            let mut hdr_buf = [0u8; FILE_HEADER_SIZE];
            reader.read_exact(&mut hdr_buf)?;
            // Validate but don't fail hard — allow reading even slightly off headers
            let _ = FileHeader::decode(&hdr_buf);
        }

        let mut items = Vec::new();

        loop {
            // Peek at the magic to determine if this is a record or commit marker
            let mut magic_buf = [0u8; 2];
            match reader.read_exact(&mut magic_buf) {
                Ok(()) => {}
                Err(e) if e.kind() == io::ErrorKind::UnexpectedEof => break,
                Err(e) => return Err(e.into()),
            }
            let magic = u16::from_le_bytes(magic_buf);

            match magic {
                WAL_RECORD_MAGIC => {
                    // Read rest of header: seq(8) + op(1) + key_len(4) + val_len(4) + crc(4) = 21
                    let mut rest = [0u8; 21];
                    match reader.read_exact(&mut rest) {
                        Ok(()) => {}
                        Err(e) if e.kind() == io::ErrorKind::UnexpectedEof => break,
                        Err(e) => return Err(e.into()),
                    }

                    let seq = u64::from_le_bytes(rest[0..8].try_into().unwrap());
                    let op = WalOpType::from_u8(rest[8])?;
                    let key_len = u32::from_le_bytes(rest[9..13].try_into().unwrap()) as usize;
                    let val_len = u32::from_le_bytes(rest[13..17].try_into().unwrap()) as usize;
                    let stored_crc = u32::from_le_bytes(rest[17..21].try_into().unwrap());

                    let mut payload = vec![0u8; key_len + val_len];
                    match reader.read_exact(&mut payload) {
                        Ok(()) => {}
                        Err(e) if e.kind() == io::ErrorKind::UnexpectedEof => break,
                        Err(e) => return Err(e.into()),
                    }

                    // Verify CRC
                    let mut hasher = crc32fast::Hasher::new();
                    hasher.update(&magic_buf);
                    hasher.update(&rest[0..17]); // seq + op + key_len + val_len
                    hasher.update(&payload);
                    let computed = hasher.finalize();

                    if computed != stored_crc {
                        // Corrupt WAL entry — skip it (best-effort recovery)
                        tracing::warn!(
                            seq,
                            "skipping corrupt WAL record: CRC mismatch (expected 0x{stored_crc:08X}, got 0x{computed:08X})"
                        );
                        continue;
                    }

                    let key = payload[..key_len].to_vec();
                    let value = payload[key_len..].to_vec();
                    items.push(WalItem::Record(WalEntry {
                        seq,
                        op,
                        key,
                        value,
                    }));
                }
                WAL_COMMIT_MAGIC => {
                    // Read rest: seq(8) + crc(4) = 12
                    let mut rest = [0u8; 12];
                    match reader.read_exact(&mut rest) {
                        Ok(()) => {}
                        Err(e) if e.kind() == io::ErrorKind::UnexpectedEof => break,
                        Err(e) => return Err(e.into()),
                    }

                    let seq = u64::from_le_bytes(rest[0..8].try_into().unwrap());
                    let stored_crc = u32::from_le_bytes(rest[8..12].try_into().unwrap());

                    let mut hasher = crc32fast::Hasher::new();
                    hasher.update(&magic_buf);
                    hasher.update(&rest[0..8]);
                    let computed = hasher.finalize();

                    if computed != stored_crc {
                        tracing::warn!(
                            seq,
                            "skipping corrupt WAL commit marker: CRC mismatch"
                        );
                        continue;
                    }

                    items.push(WalItem::Commit(seq));
                }
                _ => {
                    // Unknown magic — file is corrupt past this point
                    tracing::warn!("unknown WAL magic 0x{magic:04X}, stopping scan");
                    break;
                }
            }
        }

        Ok(items)
    }
}

// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------

#[cfg(test)]
mod tests {
    use super::*;

    fn make_wal(dir: &tempfile::TempDir) -> BinaryWal {
        let path = dir.path().join("test.wal");
        let wal = BinaryWal::new(path);
        wal.initialize().unwrap();
        wal
    }

    #[test]
    fn append_and_commit() {
        let dir = tempfile::tempdir().unwrap();
        let wal = make_wal(&dir);

        let seq = wal
            .append(WalOpType::Insert, b"key1", b"value1")
            .unwrap();
        assert_eq!(seq, 1);

        wal.append_commit(seq).unwrap();

        // All committed — recover should return empty
        let uncommitted = wal.recover().unwrap();
        assert!(uncommitted.is_empty());
    }

    #[test]
    fn uncommitted_entries_recovered() {
        let dir = tempfile::tempdir().unwrap();
        let wal = make_wal(&dir);

        let s1 = wal
            .append(WalOpType::Insert, b"k1", b"v1")
            .unwrap();
        wal.append_commit(s1).unwrap();

        // s2 is NOT committed
        let s2 = wal
            .append(WalOpType::Update, b"k2", b"v2")
            .unwrap();

        let uncommitted = wal.recover().unwrap();
        assert_eq!(uncommitted.len(), 1);
        assert_eq!(uncommitted[0].seq, s2);
        assert_eq!(uncommitted[0].op, WalOpType::Update);
        assert_eq!(uncommitted[0].key, b"k2");
        assert_eq!(uncommitted[0].value, b"v2");
    }

    #[test]
    fn truncate_clears_wal() {
        let dir = tempfile::tempdir().unwrap();
        let wal = make_wal(&dir);

        wal.append(WalOpType::Insert, b"k", b"v").unwrap();
        wal.truncate().unwrap();

        let uncommitted = wal.recover().unwrap();
        assert!(uncommitted.is_empty());
    }

    #[test]
    fn multiple_operations() {
        let dir = tempfile::tempdir().unwrap();
        let wal = make_wal(&dir);

        let s1 = wal.append(WalOpType::Insert, b"a", b"1").unwrap();
        let s2 = wal.append(WalOpType::Update, b"b", b"2").unwrap();
        let s3 = wal.append(WalOpType::Delete, b"c", b"").unwrap();

        // Commit only s1 and s3
        wal.append_commit(s1).unwrap();
        wal.append_commit(s3).unwrap();

        let uncommitted = wal.recover().unwrap();
        assert_eq!(uncommitted.len(), 1);
        assert_eq!(uncommitted[0].seq, s2);
    }

    #[test]
    fn sequence_numbers_persist_across_reinit() {
        let dir = tempfile::tempdir().unwrap();
        let path = dir.path().join("persist.wal");

        {
            let wal = BinaryWal::new(path.clone());
            wal.initialize().unwrap();
            let s1 = wal.append(WalOpType::Insert, b"k", b"v").unwrap();
            assert_eq!(s1, 1);
            wal.append_commit(s1).unwrap();
        }

        // Re-open — seq should continue from 2+ (since max committed was 1)
        {
            let wal = BinaryWal::new(path);
            wal.initialize().unwrap();
            let s2 = wal.append(WalOpType::Insert, b"k2", b"v2").unwrap();
            assert!(s2 >= 2, "seq should continue: got {s2}");
        }
    }

    #[test]
    fn delete_has_empty_value() {
        let dir = tempfile::tempdir().unwrap();
        let wal = make_wal(&dir);

        let seq = wal.append(WalOpType::Delete, b"key", b"").unwrap();

        let uncommitted = wal.recover().unwrap();
        assert_eq!(uncommitted.len(), 1);
        assert_eq!(uncommitted[0].seq, seq);
        assert_eq!(uncommitted[0].op, WalOpType::Delete);
        assert!(uncommitted[0].value.is_empty());
    }
}
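Aside: to make the record layout above concrete, this standalone sketch encodes one WAL record exactly as documented (magic, seq, op, key_len, val_len, crc32, then payload; all integers little-endian, CRC computed over the first 19 header bytes plus the payload). It assumes only the `crc32fast` crate; `encode_record` is an illustrative helper, not the module's API:

```rust
fn encode_record(seq: u64, op: u8, key: &[u8], value: &[u8]) -> Vec<u8> {
    const MAGIC: u16 = 0xAA01;
    let mut buf = Vec::new();
    buf.extend_from_slice(&MAGIC.to_le_bytes());
    buf.extend_from_slice(&seq.to_le_bytes());
    buf.push(op);
    buf.extend_from_slice(&(key.len() as u32).to_le_bytes());
    buf.extend_from_slice(&(value.len() as u32).to_le_bytes());

    // CRC covers everything written so far (19 bytes) plus the payload.
    let mut hasher = crc32fast::Hasher::new();
    hasher.update(&buf);
    hasher.update(key);
    hasher.update(value);
    buf.extend_from_slice(&hasher.finalize().to_le_bytes());

    buf.extend_from_slice(key);
    buf.extend_from_slice(value);
    buf
}

fn main() {
    let rec = encode_record(1, 1, b"key1", b"value1");
    // header(23) + key(4) + value(6)
    assert_eq!(rec.len(), 23 + 4 + 6);
    println!("encoded {} bytes", rec.len());
}
```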
@@ -0,0 +1,270 @@
//! Compaction for the Bitcask-style storage engine.
//!
//! Over time, the data file accumulates dead records (superseded by updates,
//! tombstones from deletes). Compaction rewrites the data file with only live
//! records, reclaiming disk space.
//!
//! The process is:
//! 1. Create a new `data.rdb.compact` file with a fresh file header.
//! 2. Iterate all live entries from the KeyDir.
//! 3. Read each live document from the old data file, write to the new file.
//! 4. Atomically rename `data.rdb.compact` → `data.rdb`.
//! 5. Update KeyDir entries with new offsets.
//! 6. Reset dead_bytes counter.

use std::io::{Seek, SeekFrom, Write};
use std::path::Path;
use std::sync::atomic::Ordering;

use tracing::info;

use crate::error::StorageResult;
use crate::keydir::{KeyDir, KeyDirEntry};
use crate::record::{DataRecord, FileHeader, FileType, FILE_HEADER_SIZE};

/// Result of a compaction operation.
#[derive(Debug)]
pub struct CompactionResult {
    /// Number of live records written.
    pub records_written: u64,
    /// Bytes reclaimed (old file size - new file size).
    pub bytes_reclaimed: u64,
    /// New data file size.
    pub new_file_size: u64,
}

/// Compact a collection's data file.
///
/// This function:
/// - Reads all live documents (entries present in the KeyDir) from the old data file
/// - Writes them sequentially to a new file
/// - Atomically renames the new file over the old one
/// - Updates all KeyDir entries with their new offsets
///
/// The caller must hold the collection's write lock during this operation.
pub fn compact_data_file(
    data_path: &Path,
    keydir: &KeyDir,
    dead_bytes: &std::sync::atomic::AtomicU64,
    data_file_size: &std::sync::atomic::AtomicU64,
) -> StorageResult<CompactionResult> {
    let compact_path = data_path.with_extension("rdb.compact");

    let old_file_size = std::fs::metadata(data_path)
        .map(|m| m.len())
        .unwrap_or(0);

    // Collect all live entries with their keys
    let mut live_entries: Vec<(String, KeyDirEntry)> = Vec::with_capacity(keydir.len() as usize);
    keydir.for_each(|key, entry| {
        live_entries.push((key.to_string(), *entry));
    });

    // Sort by offset for sequential reads (cache-friendly)
    live_entries.sort_by_key(|(_, e)| e.offset);

    // Create compact file with header
    let mut compact_file = std::fs::File::create(&compact_path)?;
    let hdr = FileHeader::new(FileType::Data);
    compact_file.write_all(&hdr.encode())?;

    let mut current_offset = FILE_HEADER_SIZE as u64;
    let mut new_entries: Vec<(String, KeyDirEntry)> = Vec::with_capacity(live_entries.len());
    let mut old_data_file = std::fs::File::open(data_path)?;

    for (key, entry) in &live_entries {
        // Read the record from the old file
        old_data_file.seek(SeekFrom::Start(entry.offset))?;
        let (record, _disk_size) = DataRecord::decode_from(&mut old_data_file)?
            .ok_or_else(|| {
                crate::error::StorageError::CorruptRecord(format!(
                    "compaction: unexpected EOF reading doc '{key}' at offset {}",
                    entry.offset
                ))
            })?;

        // Write to compact file
        let encoded = record.encode();
        let new_disk_size = encoded.len() as u32;
        compact_file.write_all(&encoded)?;

        new_entries.push((
            key.clone(),
            KeyDirEntry {
                offset: current_offset,
                record_len: new_disk_size,
                value_len: entry.value_len,
                timestamp: entry.timestamp,
            },
        ));

        current_offset += new_disk_size as u64;
    }

    compact_file.sync_all()?;
    drop(compact_file);
    drop(old_data_file);

    // Atomic rename
    std::fs::rename(&compact_path, data_path)?;

    // Update KeyDir with new offsets
    for (key, new_entry) in new_entries {
        keydir.insert(key, new_entry);
    }

    // Reset counters
    dead_bytes.store(0, Ordering::Relaxed);
    data_file_size.store(current_offset, Ordering::Relaxed);

    let bytes_reclaimed = old_file_size.saturating_sub(current_offset);

    info!(
        records = live_entries.len(),
        old_size = old_file_size,
        new_size = current_offset,
        reclaimed = bytes_reclaimed,
        "compaction complete"
    );

    Ok(CompactionResult {
        records_written: live_entries.len() as u64,
        bytes_reclaimed,
        new_file_size: current_offset,
    })
}

/// Check if compaction is warranted for a collection.
/// Returns true if dead bytes exceed 50% of live data.
pub fn should_compact(dead_bytes: u64, data_file_size: u64) -> bool {
    if data_file_size <= FILE_HEADER_SIZE as u64 {
        return false;
    }
    let useful_bytes = data_file_size - FILE_HEADER_SIZE as u64;
    // Trigger when dead > 50% of total useful data
    dead_bytes > useful_bytes / 2
}

// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------

#[cfg(test)]
mod tests {
    use super::*;
    use crate::keydir::KeyDir;
    use crate::record::{now_ms, DataRecord, FileHeader, FileType};
    use std::io::Write;
    use std::sync::atomic::AtomicU64;

    #[test]
    fn compact_removes_dead_records() {
        let dir = tempfile::tempdir().unwrap();
        let data_path = dir.path().join("data.rdb");

        // Write a data file: insert A, update A (new version), insert B
        let mut f = std::fs::File::create(&data_path).unwrap();
        let hdr = FileHeader::new(FileType::Data);
        f.write_all(&hdr.encode()).unwrap();

        let ts = now_ms();

        // Record 1: A v1 (will be superseded)
        let r1 = DataRecord {
            timestamp: ts,
            key: b"aaa".to_vec(),
            value: b"old_value".to_vec(),
        };
        let r1_enc = r1.encode();
        let r1_offset = FILE_HEADER_SIZE as u64;
        let r1_size = r1_enc.len();
        f.write_all(&r1_enc).unwrap();

        // Record 2: A v2 (current)
        let r2 = DataRecord {
            timestamp: ts + 1,
            key: b"aaa".to_vec(),
            value: b"new_value".to_vec(),
        };
        let r2_enc = r2.encode();
        let r2_offset = r1_offset + r1_size as u64;
        let r2_size = r2_enc.len();
        f.write_all(&r2_enc).unwrap();

        // Record 3: B (live)
        let r3 = DataRecord {
            timestamp: ts + 2,
            key: b"bbb".to_vec(),
            value: b"bbb_value".to_vec(),
        };
        let r3_enc = r3.encode();
        let r3_offset = r2_offset + r2_size as u64;
        f.write_all(&r3_enc).unwrap();
        f.sync_all().unwrap();
        drop(f);

        let total_size = std::fs::metadata(&data_path).unwrap().len();

        // Build KeyDir — only points to latest versions
        let keydir = KeyDir::new();
        keydir.insert(
            "aaa".into(),
            KeyDirEntry {
                offset: r2_offset,
                record_len: r2_size as u32,
                value_len: r2.value.len() as u32,
                timestamp: ts + 1,
            },
        );
        keydir.insert(
            "bbb".into(),
            KeyDirEntry {
                offset: r3_offset,
                record_len: r3.encode().len() as u32,
                value_len: r3.value.len() as u32,
                timestamp: ts + 2,
            },
        );

        let dead_bytes_counter = AtomicU64::new(r1_size as u64);
        let data_file_size_counter = AtomicU64::new(total_size);

        let result = compact_data_file(
            &data_path,
            &keydir,
            &dead_bytes_counter,
            &data_file_size_counter,
        )
        .unwrap();

        assert_eq!(result.records_written, 2);
        assert!(result.bytes_reclaimed > 0);
        assert!(result.new_file_size < total_size);

        // Verify dead_bytes was reset
        assert_eq!(dead_bytes_counter.load(Ordering::Relaxed), 0);

        // Verify KeyDir was updated with new offsets
        let a_entry = keydir.get("aaa").unwrap();
        assert_eq!(a_entry.offset, FILE_HEADER_SIZE as u64); // first record after header
        assert_eq!(a_entry.value_len, b"new_value".len() as u32);

        let b_entry = keydir.get("bbb").unwrap();
        assert!(b_entry.offset > a_entry.offset);

        // Verify the compacted file can be used to rebuild KeyDir
        let (rebuilt, dead, _stats) = KeyDir::build_from_data_file(&data_path).unwrap();
        assert_eq!(rebuilt.len(), 2);
        assert_eq!(dead, 0); // no dead records in compacted file
    }

    #[test]
    fn should_compact_thresholds() {
        // Under threshold
        assert!(!should_compact(10, 100 + FILE_HEADER_SIZE as u64));
        // Over threshold (dead > 50% of useful)
        assert!(should_compact(60, 100 + FILE_HEADER_SIZE as u64));
        // Empty file
        assert!(!should_compact(0, FILE_HEADER_SIZE as u64));
    }
}
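Aside: the compaction trigger above is a single ratio test: compact once dead bytes exceed half of the useful (post-header) file size. A standalone restatement, with the 64-byte header size taken from the hint-file documentation later in this diff (the real constant lives in `record.rs`):

```rust
const FILE_HEADER_SIZE: u64 = 64; // assumed here for illustration

fn should_compact(dead_bytes: u64, data_file_size: u64) -> bool {
    if data_file_size <= FILE_HEADER_SIZE {
        return false; // nothing but a header: nothing to reclaim
    }
    // Trigger when dead bytes exceed 50% of the useful (post-header) data.
    dead_bytes > (data_file_size - FILE_HEADER_SIZE) / 2
}

fn main() {
    assert!(!should_compact(10, 100 + FILE_HEADER_SIZE)); // 10% dead: keep
    assert!(should_compact(60, 100 + FILE_HEADER_SIZE)); // 60% dead: compact
}
```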
@@ -17,6 +17,15 @@ pub enum StorageError {

    #[error("conflict detected: {0}")]
    ConflictError(String),

    #[error("corrupt record: {0}")]
    CorruptRecord(String),

    #[error("checksum mismatch: expected 0x{expected:08X}, got 0x{actual:08X}")]
    ChecksumMismatch { expected: u32, actual: u32 },

    #[error("WAL error: {0}")]
    WalError(String),
}

impl From<serde_json::Error> for StorageError {

File diff suppressed because it is too large
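Aside: the `#[error(...)]` attributes on the new `StorageError` variants follow the `thiserror` derive pattern, which generates the `Display` impl from the format strings. A minimal standalone equivalent (assuming the `thiserror` crate; `MiniStorageError` is a hypothetical name for illustration):

```rust
use thiserror::Error;

#[derive(Debug, Error)]
pub enum MiniStorageError {
    #[error("corrupt record: {0}")]
    CorruptRecord(String),

    #[error("checksum mismatch: expected 0x{expected:08X}, got 0x{actual:08X}")]
    ChecksumMismatch { expected: u32, actual: u32 },
}

fn main() {
    let e = MiniStorageError::ChecksumMismatch { expected: 0xDEADBEEF, actual: 0 };
    // Display comes straight from the #[error] format string.
    assert_eq!(
        e.to_string(),
        "checksum mismatch: expected 0xDEADBEEF, got 0x00000000"
    );
}
```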
@@ -0,0 +1,610 @@
|
||||
//! KeyDir — in-memory document location index for the Bitcask storage engine.
|
||||
//!
|
||||
//! Maps document `_id` (hex string) to its location in the append-only data file.
|
||||
//! Backed by `DashMap` for lock-free concurrent reads and fine-grained write locking.
|
||||
//!
|
||||
//! The KeyDir can be rebuilt from a data file scan, or loaded quickly from a
|
||||
//! persisted hint file for fast restart.
|
||||
|
||||
use std::io::{self, BufReader, BufWriter, Read, Seek, SeekFrom, Write};
|
||||
use std::path::Path;
|
||||
use std::sync::atomic::{AtomicU64, Ordering};
|
||||
|
||||
use dashmap::DashMap;
|
||||
|
||||
use crate::error::{StorageError, StorageResult};
|
||||
use crate::record::{
|
||||
DataRecord, FileHeader, FileType, FILE_HEADER_SIZE, FORMAT_VERSION,
|
||||
};
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// KeyDirEntry
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Location of a single document in the data file.
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub struct KeyDirEntry {
|
||||
/// Byte offset of the record in `data.rdb`.
|
||||
pub offset: u64,
|
||||
/// Total record size on disk (header + payload).
|
||||
pub record_len: u32,
|
||||
/// BSON value length. 0 means tombstone (used during compaction accounting).
|
||||
pub value_len: u32,
|
||||
/// Timestamp (epoch ms) from the record. Used for conflict detection.
|
||||
pub timestamp: u64,
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// BuildStats — statistics from building KeyDir from a data file scan
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Statistics collected while building a KeyDir from a data file scan.
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct BuildStats {
|
||||
/// Total records scanned (live + tombstones + superseded).
|
||||
pub total_records_scanned: u64,
|
||||
/// Number of live documents in the final KeyDir.
|
||||
pub live_documents: u64,
|
||||
/// Number of tombstone records encountered.
|
||||
pub tombstones: u64,
|
||||
/// Number of records superseded by a later write for the same key.
|
||||
pub superseded_records: u64,
|
||||
/// Byte offset immediately after the last valid record.
|
||||
pub valid_data_end: u64,
|
||||
/// Number of invalid tail bytes after the last valid record.
|
||||
pub invalid_tail_bytes: u64,
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// KeyDir
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// In-memory index mapping document ID → data file location.
|
||||
pub struct KeyDir {
|
||||
map: DashMap<String, KeyDirEntry>,
|
||||
/// Running count of live documents.
|
||||
doc_count: AtomicU64,
|
||||
}
|
||||
|
||||
impl KeyDir {
|
||||
/// Create an empty KeyDir.
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
map: DashMap::new(),
|
||||
doc_count: AtomicU64::new(0),
|
||||
}
|
||||
}
|
||||
|
||||
/// Insert or update an entry. Returns the previous entry if one existed.
|
||||
pub fn insert(&self, key: String, entry: KeyDirEntry) -> Option<KeyDirEntry> {
|
||||
let prev = self.map.insert(key, entry);
|
||||
if prev.is_none() {
|
||||
self.doc_count.fetch_add(1, Ordering::Relaxed);
|
||||
}
|
||||
prev
|
||||
}
|
||||
|
||||
/// Look up an entry by key.
|
||||
pub fn get(&self, key: &str) -> Option<KeyDirEntry> {
|
||||
self.map.get(key).map(|r| *r.value())
|
||||
}
|
||||
|
||||
/// Remove an entry. Returns the removed entry if it existed.
|
||||
pub fn remove(&self, key: &str) -> Option<KeyDirEntry> {
|
||||
let removed = self.map.remove(key).map(|(_, v)| v);
|
||||
if removed.is_some() {
|
||||
self.doc_count.fetch_sub(1, Ordering::Relaxed);
|
||||
}
|
||||
removed
|
||||
}
|
||||
|
||||
/// Number of live documents.
|
||||
pub fn len(&self) -> u64 {
|
||||
self.doc_count.load(Ordering::Relaxed)
|
||||
}
|
||||
|
||||
/// Whether the index is empty.
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.len() == 0
|
||||
}
|
||||
|
||||
/// Check if a key exists.
|
||||
pub fn contains(&self, key: &str) -> bool {
|
||||
self.map.contains_key(key)
|
||||
}
|
||||
|
||||
/// Iterate over all entries. The closure receives (key, entry).
|
||||
pub fn for_each(&self, mut f: impl FnMut(&str, &KeyDirEntry)) {
|
||||
for entry in self.map.iter() {
|
||||
f(entry.key(), entry.value());
|
||||
}
|
||||
}
|
||||
|
||||
/// Collect all keys.
|
||||
pub fn keys(&self) -> Vec<String> {
|
||||
self.map.iter().map(|e| e.key().clone()).collect()
|
||||
}
|
||||
|
||||
/// Clear all entries.
|
||||
pub fn clear(&self) {
|
||||
self.map.clear();
|
||||
self.doc_count.store(0, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// Build from data file
|
||||
// -----------------------------------------------------------------------
|
||||
|
||||
/// Rebuild the KeyDir by scanning an entire data file.
|
||||
/// The file must start with a valid `FileHeader`.
|
||||
/// Returns `(keydir, dead_bytes, stats)` where `dead_bytes` is the total size of
|
||||
/// stale records (superseded by later writes or tombstoned).
|
||||
pub fn build_from_data_file(path: &Path) -> StorageResult<(Self, u64, BuildStats)> {
|
||||
let file = std::fs::File::open(path)?;
|
||||
let file_len = file.metadata()?.len();
|
||||
let mut reader = BufReader::new(file);
|
||||
|
||||
// Read and validate file header
|
||||
let mut hdr_buf = [0u8; FILE_HEADER_SIZE];
|
||||
reader.read_exact(&mut hdr_buf)?;
|
||||
let hdr = FileHeader::decode(&hdr_buf)?;
|
||||
if hdr.file_type != FileType::Data {
|
||||
return Err(StorageError::CorruptRecord(format!(
|
||||
"expected data file (type 1), got type {:?}",
|
||||
hdr.file_type
|
||||
)));
|
||||
}
|
||||
|
||||
let keydir = KeyDir::new();
|
||||
let mut dead_bytes: u64 = 0;
|
||||
let mut stats = BuildStats {
|
||||
valid_data_end: FILE_HEADER_SIZE as u64,
|
||||
..BuildStats::default()
|
||||
};
|
||||
|
||||
loop {
|
||||
let record_offset = stats.valid_data_end;
|
||||
let (record, disk_size) = match DataRecord::decode_from(&mut reader) {
|
||||
Ok(Some((record, disk_size))) => (record, disk_size),
|
||||
Ok(None) => {
|
||||
if file_len > record_offset {
|
||||
stats.invalid_tail_bytes = file_len - record_offset;
|
||||
}
|
||||
break;
|
||||
}
|
||||
Err(StorageError::IoError(e)) if e.kind() == io::ErrorKind::UnexpectedEof => {
|
||||
stats.invalid_tail_bytes = file_len.saturating_sub(record_offset);
|
||||
break;
|
||||
}
|
||||
Err(StorageError::ChecksumMismatch { expected, actual }) => {
|
||||
tracing::warn!(
|
||||
path = %path.display(),
|
||||
offset = record_offset,
|
||||
"stopping data file scan at checksum mismatch: expected 0x{expected:08X}, got 0x{actual:08X}"
|
||||
);
|
||||
stats.invalid_tail_bytes = file_len.saturating_sub(record_offset);
|
||||
break;
|
||||
}
|
||||
Err(StorageError::CorruptRecord(message)) => {
|
||||
tracing::warn!(
|
||||
path = %path.display(),
|
||||
offset = record_offset,
|
||||
"stopping data file scan at corrupt record: {message}"
|
||||
);
|
||||
stats.invalid_tail_bytes = file_len.saturating_sub(record_offset);
|
||||
break;
|
||||
}
|
||||
Err(e) => return Err(e),
|
||||
};
|
||||
|
||||
stats.valid_data_end += disk_size as u64;
|
||||
let is_tombstone = record.is_tombstone();
|
||||
let disk_size = disk_size as u32;
|
||||
let value_len = record.value.len() as u32;
|
||||
let timestamp = record.timestamp;
|
||||
let key = String::from_utf8(record.key)
|
||||
.map_err(|e| StorageError::CorruptRecord(format!("invalid UTF-8 key: {e}")))?;
|
||||
|
||||
stats.total_records_scanned += 1;
|
||||
|
||||
if is_tombstone {
|
||||
stats.tombstones += 1;
|
||||
// Remove from index; the tombstone itself is dead weight
|
||||
if let Some(prev) = keydir.remove(&key) {
|
||||
dead_bytes += prev.record_len as u64;
|
||||
}
|
||||
dead_bytes += disk_size as u64;
|
||||
} else {
|
||||
let entry = KeyDirEntry {
|
||||
offset: record_offset,
|
||||
record_len: disk_size,
|
||||
value_len,
|
||||
timestamp,
|
||||
};
|
||||
if let Some(prev) = keydir.insert(key, entry) {
|
||||
// Previous version of same key is now dead
|
||||
dead_bytes += prev.record_len as u64;
|
||||
stats.superseded_records += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
stats.live_documents = keydir.len();
|
||||
Ok((keydir, dead_bytes, stats))
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// Hint file persistence (for fast startup)
|
||||
// -----------------------------------------------------------------------
|
||||
|
||||
/// Persist the KeyDir to a hint file for fast restart.
|
||||
///
|
||||
/// `data_file_size` is the current size of data.rdb — stored in the hint header
|
||||
/// so that on next load we can detect if data.rdb changed (stale hint).
|
||||
///
|
||||
/// Hint file format (after the 64-byte file header):
|
||||
/// For each entry: [key_len:u32 LE][key bytes][offset:u64 LE][record_len:u32 LE][value_len:u32 LE][timestamp:u64 LE]
|
||||
pub fn persist_to_hint_file(&self, path: &Path, data_file_size: u64) -> StorageResult<()> {
|
||||
let file = std::fs::File::create(path)?;
|
||||
let mut writer = BufWriter::new(file);
|
||||
|
||||
// Write file header with data_file_size for staleness detection
|
||||
let hdr = FileHeader::new_hint(data_file_size);
|
||||
writer.write_all(&hdr.encode())?;
|
||||
|
||||
// Write entries
|
||||
for entry in self.map.iter() {
|
||||
let key_bytes = entry.key().as_bytes();
|
||||
let key_len = key_bytes.len() as u32;
|
||||
writer.write_all(&key_len.to_le_bytes())?;
|
||||
writer.write_all(key_bytes)?;
|
||||
writer.write_all(&entry.value().offset.to_le_bytes())?;
|
||||
writer.write_all(&entry.value().record_len.to_le_bytes())?;
|
||||
writer.write_all(&entry.value().value_len.to_le_bytes())?;
|
||||
writer.write_all(&entry.value().timestamp.to_le_bytes())?;
|
||||
}
|
||||
|
||||
writer.flush()?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Load a KeyDir from a hint file. Returns None if the file doesn't exist.
|
||||
/// Returns `(keydir, stored_data_file_size)` where `stored_data_file_size` is the
|
||||
/// data.rdb size recorded when the hint was written (0 = old format, unknown).
|
||||
pub fn load_from_hint_file(path: &Path) -> StorageResult<Option<(Self, u64)>> {
|
||||
if !path.exists() {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
let file = std::fs::File::open(path)?;
|
||||
let mut reader = BufReader::new(file);
|
||||
|
||||
// Read and validate header
|
||||
let mut hdr_buf = [0u8; FILE_HEADER_SIZE];
|
||||
match reader.read_exact(&mut hdr_buf) {
|
||||
Ok(()) => {}
|
||||
Err(e) if e.kind() == io::ErrorKind::UnexpectedEof => return Ok(None),
|
||||
Err(e) => return Err(e.into()),
|
||||
}
|
||||
let hdr = FileHeader::decode(&hdr_buf)?;
|
||||
if hdr.file_type != FileType::Hint {
|
||||
return Err(StorageError::CorruptRecord(format!(
|
||||
"expected hint file (type 3), got type {:?}",
|
||||
hdr.file_type
|
||||
)));
|
||||
}
|
||||
if hdr.version > FORMAT_VERSION {
|
||||
return Err(StorageError::CorruptRecord(format!(
|
||||
"hint file version {} is newer than supported {}",
|
||||
hdr.version, FORMAT_VERSION
|
||||
)));
|
||||
}
|
||||
|
||||
let stored_data_file_size = hdr.data_file_size;
|
||||
let keydir = KeyDir::new();
|
||||
|
||||
loop {
|
||||
// Read key_len
|
||||
let mut key_len_buf = [0u8; 4];
|
||||
match reader.read_exact(&mut key_len_buf) {
|
||||
Ok(()) => {}
|
||||
Err(e) if e.kind() == io::ErrorKind::UnexpectedEof => break,
|
||||
Err(e) => return Err(e.into()),
|
||||
}
|
||||
let key_len = u32::from_le_bytes(key_len_buf) as usize;
|
||||
|
||||
// Read key
|
||||
let mut key_buf = vec![0u8; key_len];
|
||||
reader.read_exact(&mut key_buf)?;
|
||||
let key = String::from_utf8(key_buf)
|
||||
.map_err(|e| StorageError::CorruptRecord(format!("invalid UTF-8 key: {e}")))?;
|
||||
|
||||
// Read entry fields
|
||||
let mut fields = [0u8; 8 + 4 + 4 + 8]; // offset + record_len + value_len + timestamp = 24
|
||||
reader.read_exact(&mut fields)?;
|
||||
|
||||
let offset = u64::from_le_bytes(fields[0..8].try_into().unwrap());
|
||||
let record_len = u32::from_le_bytes(fields[8..12].try_into().unwrap());
|
||||
let value_len = u32::from_le_bytes(fields[12..16].try_into().unwrap());
|
||||
let timestamp = u64::from_le_bytes(fields[16..24].try_into().unwrap());
|
||||
|
||||
keydir.insert(
|
||||
key,
|
||||
KeyDirEntry {
|
||||
offset,
|
||||
record_len,
|
||||
value_len,
|
||||
timestamp,
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
Ok(Some((keydir, stored_data_file_size)))
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// Hint file validation
|
||||
// -----------------------------------------------------------------------
|
||||
|
||||
/// Validate this KeyDir (loaded from a hint file) against the actual data file.
|
||||
/// Returns `Ok(true)` if the hint appears consistent, `Ok(false)` if a rebuild
|
||||
/// from the data file is recommended.
|
||||
///
|
||||
/// Checks:
|
||||
/// 1. All entry offsets + record_len fit within the data file size.
|
||||
/// 2. All entry offsets are >= FILE_HEADER_SIZE.
|
||||
/// 3. A random sample of entries is spot-checked by reading the record at
|
||||
/// the offset and verifying the key matches.
|
||||
pub fn validate_against_data_file(&self, data_path: &Path, sample_size: usize) -> StorageResult<bool> {
|
||||
let file_size = std::fs::metadata(data_path)
|
||||
.map(|m| m.len())
|
||||
.unwrap_or(0);
|
||||
|
||||
if file_size < FILE_HEADER_SIZE as u64 {
|
||||
// Data file is too small to even contain a header
|
||||
return Ok(self.is_empty());
|
||||
}
|
||||
|
||||
// Pass 1: bounds check all entries
|
||||
let mut all_keys: Vec<(String, KeyDirEntry)> = Vec::with_capacity(self.len() as usize);
|
||||
let mut bounds_ok = true;
|
||||
self.for_each(|key, entry| {
|
||||
if entry.offset < FILE_HEADER_SIZE as u64
|
||||
|| entry.offset + entry.record_len as u64 > file_size
|
||||
{
|
||||
bounds_ok = false;
|
||||
}
|
||||
all_keys.push((key.to_string(), *entry));
|
||||
});
|
||||
|
||||
if !bounds_ok {
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
// Pass 2: spot-check a sample of entries by reading records from data.rdb
|
||||
if all_keys.is_empty() {
|
||||
return Ok(true);
|
||||
}
|
||||
|
||||
// Sort by offset for sequential I/O, take first `sample_size` entries
|
||||
all_keys.sort_by_key(|(_, e)| e.offset);
|
||||
let step = if all_keys.len() <= sample_size {
|
||||
1
|
||||
} else {
|
||||
all_keys.len() / sample_size
|
||||
};
|
||||
|
||||
let mut file = std::fs::File::open(data_path)?;
|
||||
let mut checked = 0usize;
|
||||
for (i, (expected_key, entry)) in all_keys.iter().enumerate() {
|
||||
if checked >= sample_size {
|
||||
break;
|
||||
}
|
||||
if i % step != 0 {
|
||||
continue;
|
||||
}
|
||||
// Seek to the entry's offset and try to decode the record
|
||||
file.seek(SeekFrom::Start(entry.offset))?;
|
||||
match DataRecord::decode_from(&mut file) {
|
||||
Ok(Some((record, _disk_size))) => {
|
||||
let record_key = String::from_utf8_lossy(&record.key);
|
||||
if record_key != *expected_key {
|
||||
return Ok(false);
|
||||
}
|
||||
}
|
||||
Ok(None) | Err(_) => {
|
||||
return Ok(false);
|
||||
}
|
||||
}
|
||||
checked += 1;
|
||||
}
|
||||
|
||||
Ok(true)
|
||||
}
|
||||
}

impl Default for KeyDir {
    fn default() -> Self {
        Self::new()
    }
}

// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------

#[cfg(test)]
mod tests {
    use super::*;
    use crate::record::DataRecord;
    use std::io::Write;

    #[test]
    fn basic_insert_get_remove() {
        let kd = KeyDir::new();
        assert!(kd.is_empty());

        let entry = KeyDirEntry {
            offset: 100,
            record_len: 50,
            value_len: 30,
            timestamp: 1700000000000,
        };

        assert!(kd.insert("abc".into(), entry).is_none());
        assert_eq!(kd.len(), 1);
        assert!(kd.contains("abc"));

        let got = kd.get("abc").unwrap();
        assert_eq!(got.offset, 100);
        assert_eq!(got.value_len, 30);

        let removed = kd.remove("abc").unwrap();
        assert_eq!(removed.offset, 100);
        assert_eq!(kd.len(), 0);
        assert!(!kd.contains("abc"));
    }

    #[test]
    fn insert_overwrites_returns_previous() {
        let kd = KeyDir::new();
        let e1 = KeyDirEntry {
            offset: 100,
            record_len: 50,
            value_len: 30,
            timestamp: 1,
        };
        let e2 = KeyDirEntry {
            offset: 200,
            record_len: 60,
            value_len: 40,
            timestamp: 2,
        };

        kd.insert("k".into(), e1);
        assert_eq!(kd.len(), 1);

        let prev = kd.insert("k".into(), e2).unwrap();
        assert_eq!(prev.offset, 100);
        // Count stays at 1 (overwrite, not new)
        assert_eq!(kd.len(), 1);
        assert_eq!(kd.get("k").unwrap().offset, 200);
    }

    #[test]
    fn build_from_data_file() {
        let dir = tempfile::tempdir().unwrap();
        let data_path = dir.path().join("data.rdb");

        // Write a data file with 3 records: insert A, insert B, delete A
        {
            let mut f = std::fs::File::create(&data_path).unwrap();
            let hdr = FileHeader::new(FileType::Data);
            f.write_all(&hdr.encode()).unwrap();

            let r1 = DataRecord {
                timestamp: 1,
                key: b"aaa".to_vec(),
                value: b"val_a".to_vec(),
            };
            let r2 = DataRecord {
                timestamp: 2,
                key: b"bbb".to_vec(),
                value: b"val_b".to_vec(),
            };
            let r3 = DataRecord {
                timestamp: 3,
                key: b"aaa".to_vec(),
                value: vec![], // tombstone
            };
            f.write_all(&r1.encode()).unwrap();
            f.write_all(&r2.encode()).unwrap();
            f.write_all(&r3.encode()).unwrap();
        }

        let (kd, dead_bytes, stats) = KeyDir::build_from_data_file(&data_path).unwrap();

        // Only B should be live
        assert_eq!(kd.len(), 1);
        assert!(kd.contains("bbb"));
        assert!(!kd.contains("aaa"));

        // Dead bytes: r1 (aaa live, then superseded by tombstone) + r3 (tombstone itself)
        assert!(dead_bytes > 0);

        // Stats
        assert_eq!(stats.total_records_scanned, 3);
        assert_eq!(stats.live_documents, 1);
        assert_eq!(stats.tombstones, 1);
        assert_eq!(stats.superseded_records, 0); // aaa was removed by tombstone, not superseded
    }

    #[test]
    fn hint_file_roundtrip() {
        let dir = tempfile::tempdir().unwrap();
        let hint_path = dir.path().join("keydir.hint");

        let kd = KeyDir::new();
        kd.insert(
            "doc1".into(),
            KeyDirEntry {
                offset: 64,
                record_len: 100,
                value_len: 80,
                timestamp: 1000,
            },
        );
        kd.insert(
            "doc2".into(),
            KeyDirEntry {
                offset: 164,
                record_len: 200,
                value_len: 150,
                timestamp: 2000,
            },
        );

        kd.persist_to_hint_file(&hint_path, 12345).unwrap();
        let (loaded, stored_size) = KeyDir::load_from_hint_file(&hint_path).unwrap().unwrap();

        assert_eq!(stored_size, 12345);
        assert_eq!(loaded.len(), 2);
        let e1 = loaded.get("doc1").unwrap();
        assert_eq!(e1.offset, 64);
        assert_eq!(e1.record_len, 100);
        assert_eq!(e1.value_len, 80);
        assert_eq!(e1.timestamp, 1000);

        let e2 = loaded.get("doc2").unwrap();
        assert_eq!(e2.offset, 164);
        assert_eq!(e2.timestamp, 2000);
    }

    #[test]
    fn hint_file_nonexistent_returns_none() {
        let result = KeyDir::load_from_hint_file(Path::new("/tmp/nonexistent_hint_file.hint"));
        assert!(result.unwrap().is_none());
    }

    #[test]
    fn for_each_and_keys() {
        let kd = KeyDir::new();
        let e = KeyDirEntry {
            offset: 0,
            record_len: 10,
            value_len: 5,
            timestamp: 1,
        };
        kd.insert("x".into(), e);
        kd.insert("y".into(), e);

        let mut collected = Vec::new();
        kd.for_each(|k, _| collected.push(k.to_string()));
        collected.sort();
        assert_eq!(collected, vec!["x", "y"]);

        let mut keys = kd.keys();
        keys.sort();
        assert_eq!(keys, vec!["x", "y"]);
    }
}

@@ -2,21 +2,31 @@
 //!
 //! Provides the [`StorageAdapter`] trait and two concrete implementations:
 //! - [`MemoryStorageAdapter`] -- fast in-memory store backed by `DashMap`
-//! - [`FileStorageAdapter`] -- JSON-file-per-collection persistent store
+//! - [`FileStorageAdapter`] -- Bitcask-style append-only log with crash recovery
 //!
-//! Also includes an [`OpLog`] for operation logging and a [`WriteAheadLog`]
-//! for crash recovery.
+//! Also includes an [`OpLog`] for operation logging, a [`BinaryWal`] for
+//! write-ahead logging, and [`compaction`] for dead record reclamation.

 pub mod adapter;
+pub mod binary_wal;
+pub mod compaction;
 pub mod error;
 pub mod file;
 pub mod keydir;
 pub mod memory;
 pub mod oplog;
-pub mod wal;
+pub mod record;
+pub mod validate;

 pub use adapter::StorageAdapter;
+pub use binary_wal::{BinaryWal, WalEntry, WalOpType};
+pub use compaction::{compact_data_file, should_compact, CompactionResult};
 pub use error::{StorageError, StorageResult};
 pub use file::FileStorageAdapter;
 pub use keydir::{BuildStats, KeyDir, KeyDirEntry};
 pub use memory::MemoryStorageAdapter;
 pub use oplog::{OpLog, OpLogEntry, OpLogStats, OpType};
-pub use wal::{WalOp, WalRecord, WriteAheadLog};
+pub use record::{
+    DataRecord, FileHeader, FileType, RecordScanner, FILE_HEADER_SIZE, FILE_MAGIC, FORMAT_VERSION,
+    RECORD_HEADER_SIZE, RECORD_MAGIC,
+};
@@ -0,0 +1,472 @@
//! Binary data record format for the Bitcask-style storage engine.
//!
//! # File Version Header (64 bytes, at offset 0 of every .rdb / .hint file)
//!
//! ```text
//! ┌─────────────┬─────────┬───────────┬────────┬──────────┬────────────────┬──────────┐
//! │ magic       │ version │ file_type │ flags  │ created  │ data_file_size │ reserved │
//! │ 8 bytes     │ u16 LE  │ u8        │ u32 LE │ u64 LE   │ u64 LE         │ 33 bytes │
//! │ "SMARTDB\0" │         │           │        │ epoch_ms │ (hint only)    │ (zeros)  │
//! └─────────────┴─────────┴───────────┴────────┴──────────┴────────────────┴──────────┘
//! ```
//!
//! # Data Record (appended after the header)
//!
//! ```text
//! ┌──────────┬───────────┬─────────┬──────────┬────────┬──────────────┐
//! │ magic    │ timestamp │ key_len │ val_len  │ crc32  │ payload      │
//! │ u16 LE   │ u64 LE    │ u32 LE  │ u32 LE   │ u32 LE │ [key][value] │
//! │ 0xDB01   │ epoch_ms  │         │ 0=delete │        │              │
//! └──────────┴───────────┴─────────┴──────────┴────────┴──────────────┘
//! ```
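//!
//! # Worked example (illustrative)
//!
//! For key `"k"` and value `"v"`, a record occupies `RECORD_HEADER_SIZE` (22)
//! plus 1 + 1 = 24 bytes; this mirrors the roundtrip tests at the bottom of
//! this file:
//!
//! ```ignore
//! let rec = DataRecord { timestamp: 7, key: b"k".to_vec(), value: b"v".to_vec() };
//! let bytes = rec.encode();
//! assert_eq!(bytes.len(), rec.disk_size());                           // 22 + 2 = 24
//! assert_eq!(u16::from_le_bytes([bytes[0], bytes[1]]), RECORD_MAGIC); // 0xDB01
//! assert_eq!(&bytes[22..], b"kv");                                    // payload = [key][value]
//! ```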

use std::io::{self, Read};
use std::time::{SystemTime, UNIX_EPOCH};

use crate::error::{StorageError, StorageResult};

// ---------------------------------------------------------------------------
// Constants
// ---------------------------------------------------------------------------

/// File-level magic: b"SMARTDB\0"
pub const FILE_MAGIC: &[u8; 8] = b"SMARTDB\0";

/// Current storage format version.
pub const FORMAT_VERSION: u16 = 1;

/// File version header size.
pub const FILE_HEADER_SIZE: usize = 64;

/// Per-record magic.
pub const RECORD_MAGIC: u16 = 0xDB01;

/// Per-record header size (before payload).
pub const RECORD_HEADER_SIZE: usize = 2 + 8 + 4 + 4 + 4; // 22 bytes

// ---------------------------------------------------------------------------
// File type tag stored in the version header
// ---------------------------------------------------------------------------

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
pub enum FileType {
    Data = 1,
    Wal = 2,
    Hint = 3,
}

impl FileType {
    pub fn from_u8(v: u8) -> StorageResult<Self> {
        match v {
            1 => Ok(FileType::Data),
            2 => Ok(FileType::Wal),
            3 => Ok(FileType::Hint),
            _ => Err(StorageError::CorruptRecord(format!(
                "unknown file type tag: {v}"
            ))),
        }
    }
}

// ---------------------------------------------------------------------------
// File Version Header
// ---------------------------------------------------------------------------

#[derive(Debug, Clone)]
pub struct FileHeader {
    pub version: u16,
    pub file_type: FileType,
    pub flags: u32,
    pub created_ms: u64,
    /// For hint files: the data.rdb file size at the time the hint was written.
    /// Used to detect stale hints after ungraceful shutdown. 0 = unknown (old format).
    pub data_file_size: u64,
}

impl FileHeader {
    /// Create a new header for the current format version.
    pub fn new(file_type: FileType) -> Self {
        Self {
            version: FORMAT_VERSION,
            file_type,
            flags: 0,
            created_ms: now_ms(),
            data_file_size: 0,
        }
    }

    /// Create a new hint header that records the data file size.
    pub fn new_hint(data_file_size: u64) -> Self {
        Self {
            version: FORMAT_VERSION,
            file_type: FileType::Hint,
            flags: 0,
            created_ms: now_ms(),
            data_file_size,
        }
    }

    /// Encode the header to a 64-byte buffer.
    pub fn encode(&self) -> [u8; FILE_HEADER_SIZE] {
        let mut buf = [0u8; FILE_HEADER_SIZE];
        buf[0..8].copy_from_slice(FILE_MAGIC);
        buf[8..10].copy_from_slice(&self.version.to_le_bytes());
        buf[10] = self.file_type as u8;
        buf[11..15].copy_from_slice(&self.flags.to_le_bytes());
        buf[15..23].copy_from_slice(&self.created_ms.to_le_bytes());
        buf[23..31].copy_from_slice(&self.data_file_size.to_le_bytes());
        // bytes 31..64 are reserved (zeros)
        buf
    }

    /// Decode a 64-byte header. Validates magic and version.
    pub fn decode(buf: &[u8; FILE_HEADER_SIZE]) -> StorageResult<Self> {
        if &buf[0..8] != FILE_MAGIC {
            return Err(StorageError::CorruptRecord(
                "invalid file magic — not a SmartDB file".into(),
            ));
        }
        let version = u16::from_le_bytes([buf[8], buf[9]]);
        if version > FORMAT_VERSION {
            return Err(StorageError::CorruptRecord(format!(
                "file format version {version} is newer than supported version {FORMAT_VERSION} — please upgrade"
            )));
        }
        if version == 0 {
            return Err(StorageError::CorruptRecord(
                "file format version 0 is invalid".into(),
            ));
        }
        let file_type = FileType::from_u8(buf[10])?;
        let flags = u32::from_le_bytes([buf[11], buf[12], buf[13], buf[14]]);
        let created_ms = u64::from_le_bytes([
            buf[15], buf[16], buf[17], buf[18], buf[19], buf[20], buf[21], buf[22],
        ]);
        let data_file_size = u64::from_le_bytes([
            buf[23], buf[24], buf[25], buf[26], buf[27], buf[28], buf[29], buf[30],
        ]);
        Ok(Self {
            version,
            file_type,
            flags,
            created_ms,
            data_file_size,
        })
    }
}
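
// Illustrative sketch (not part of this diff): the staleness rule that
// `data_file_size` enables. A stored size of 0 means "old format, unknown",
// so it never flags a hint as stale on its own.
//
//   fn hint_is_stale(hint_header: &FileHeader, actual_data_file_size: u64) -> bool {
//       hint_header.file_type == FileType::Hint
//           && hint_header.data_file_size != 0
//           && hint_header.data_file_size != actual_data_file_size
//   }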

// ---------------------------------------------------------------------------
// Data Record
// ---------------------------------------------------------------------------

/// A single data record (live document or tombstone).
#[derive(Debug, Clone)]
pub struct DataRecord {
    pub timestamp: u64,
    pub key: Vec<u8>,
    /// BSON value bytes. Empty for tombstones.
    pub value: Vec<u8>,
}

impl DataRecord {
    /// Whether this record is a tombstone (delete marker).
    pub fn is_tombstone(&self) -> bool {
        self.value.is_empty()
    }

    /// Total size on disk (header + payload).
    pub fn disk_size(&self) -> usize {
        RECORD_HEADER_SIZE + self.key.len() + self.value.len()
    }

    /// Encode to bytes. CRC32 covers magic + timestamp + key_len + val_len + payload.
    pub fn encode(&self) -> Vec<u8> {
        let key_len = self.key.len() as u32;
        let val_len = self.value.len() as u32;
        let total = RECORD_HEADER_SIZE + self.key.len() + self.value.len();
        let mut buf = Vec::with_capacity(total);

        // Write fields WITHOUT crc first to compute checksum.
        buf.extend_from_slice(&RECORD_MAGIC.to_le_bytes());   // 2
        buf.extend_from_slice(&self.timestamp.to_le_bytes()); // 8
        buf.extend_from_slice(&key_len.to_le_bytes());        // 4
        buf.extend_from_slice(&val_len.to_le_bytes());        // 4
        // placeholder for crc32 — we'll fill it after computing
        buf.extend_from_slice(&0u32.to_le_bytes());           // 4
        buf.extend_from_slice(&self.key);                     // key_len
        buf.extend_from_slice(&self.value);                   // val_len

        // CRC covers everything except the crc32 field itself:
        // bytes [0..18] (magic+ts+key_len+val_len) + bytes [22..] (payload)
        let mut hasher = crc32fast::Hasher::new();
        hasher.update(&buf[0..18]);
        hasher.update(&buf[22..]);
        let crc = hasher.finalize();
        buf[18..22].copy_from_slice(&crc.to_le_bytes());

        buf
    }

    /// Decode a record from a reader. Returns the record and its total disk size.
    /// On EOF at the very start (no bytes to read), returns Ok(None).
    pub fn decode_from<R: Read>(reader: &mut R) -> StorageResult<Option<(Self, usize)>> {
        // Read header
        let mut hdr = [0u8; RECORD_HEADER_SIZE];
        match reader.read_exact(&mut hdr) {
            Ok(()) => {}
            Err(e) if e.kind() == io::ErrorKind::UnexpectedEof => return Ok(None),
            Err(e) => return Err(e.into()),
        }

        let magic = u16::from_le_bytes([hdr[0], hdr[1]]);
        if magic != RECORD_MAGIC {
            return Err(StorageError::CorruptRecord(format!(
                "invalid record magic: 0x{magic:04X}, expected 0x{RECORD_MAGIC:04X}"
            )));
        }

        let timestamp = u64::from_le_bytes(hdr[2..10].try_into().unwrap());
        let key_len = u32::from_le_bytes(hdr[10..14].try_into().unwrap()) as usize;
        let val_len = u32::from_le_bytes(hdr[14..18].try_into().unwrap()) as usize;
        let stored_crc = u32::from_le_bytes(hdr[18..22].try_into().unwrap());

        // Read payload
        let payload_len = key_len + val_len;
        let mut payload = vec![0u8; payload_len];
        reader.read_exact(&mut payload)?;

        // Verify CRC: covers header bytes [0..18] + payload
        let mut hasher = crc32fast::Hasher::new();
        hasher.update(&hdr[0..18]);
        hasher.update(&payload);
        let computed_crc = hasher.finalize();
        if computed_crc != stored_crc {
            return Err(StorageError::ChecksumMismatch {
                expected: stored_crc,
                actual: computed_crc,
            });
        }

        let key = payload[..key_len].to_vec();
        let value = payload[key_len..].to_vec();
        let disk_size = RECORD_HEADER_SIZE + payload_len;

        Ok(Some((
            DataRecord {
                timestamp,
                key,
                value,
            },
            disk_size,
        )))
    }
}

// ---------------------------------------------------------------------------
// Record Scanner — iterate records from a byte slice or reader
// ---------------------------------------------------------------------------

/// Scans records sequentially from a reader, yielding (offset, record) pairs.
/// Starts reading from the current reader position. The `base_offset` parameter
/// indicates the byte offset in the file where reading begins (typically
/// `FILE_HEADER_SIZE` for a data file).
pub struct RecordScanner<R> {
    reader: R,
    offset: u64,
}

impl<R: Read> RecordScanner<R> {
    pub fn new(reader: R, base_offset: u64) -> Self {
        Self {
            reader,
            offset: base_offset,
        }
    }
}

impl<R: Read> Iterator for RecordScanner<R> {
    /// (file_offset, record) or an error. Iteration stops on EOF or error.
    type Item = StorageResult<(u64, DataRecord)>;

    fn next(&mut self) -> Option<Self::Item> {
        match DataRecord::decode_from(&mut self.reader) {
            Ok(Some((record, disk_size))) => {
                let offset = self.offset;
                self.offset += disk_size as u64;
                Some(Ok((offset, record)))
            }
            Ok(None) => None, // clean EOF
            Err(e) => Some(Err(e)),
        }
    }
}
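
// Illustrative sketch (not part of this diff): scanning every record in a data
// file. Validate the 64-byte header first, then iterate from FILE_HEADER_SIZE.
//
//   fn scan_file(path: &std::path::Path) -> StorageResult<()> {
//       let mut reader = std::io::BufReader::new(std::fs::File::open(path)?);
//       let mut hdr = [0u8; FILE_HEADER_SIZE];
//       reader.read_exact(&mut hdr)?;
//       FileHeader::decode(&hdr)?;
//       for item in RecordScanner::new(reader, FILE_HEADER_SIZE as u64) {
//           let (offset, record) = item?;
//           println!("{offset}: {} bytes, tombstone={}", record.disk_size(), record.is_tombstone());
//       }
//       Ok(())
//   }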

// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------

/// Current time in milliseconds since UNIX epoch.
pub fn now_ms() -> u64 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap()
        .as_millis() as u64
}

// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn file_header_roundtrip() {
        let hdr = FileHeader::new(FileType::Data);
        let buf = hdr.encode();
        assert_eq!(buf.len(), FILE_HEADER_SIZE);

        let decoded = FileHeader::decode(&buf).unwrap();
        assert_eq!(decoded.version, FORMAT_VERSION);
        assert_eq!(decoded.file_type, FileType::Data);
        assert_eq!(decoded.flags, 0);
        assert_eq!(decoded.created_ms, hdr.created_ms);
    }

    #[test]
    fn file_header_rejects_bad_magic() {
        let mut buf = [0u8; FILE_HEADER_SIZE];
        buf[0..8].copy_from_slice(b"BADMAGIC");
        assert!(FileHeader::decode(&buf).is_err());
    }

    #[test]
    fn file_header_rejects_future_version() {
        let mut hdr = FileHeader::new(FileType::Data);
        hdr.version = FORMAT_VERSION + 1;
        let buf = hdr.encode();
        // Manually patch the version in the buffer
        let mut buf2 = buf;
        buf2[8..10].copy_from_slice(&(FORMAT_VERSION + 1).to_le_bytes());
        assert!(FileHeader::decode(&buf2).is_err());
    }

    #[test]
    fn record_roundtrip_live() {
        let rec = DataRecord {
            timestamp: 1700000000000,
            key: b"abc123".to_vec(),
            value: b"\x10\x00\x00\x00\x02hi\x00\x03\x00\x00\x00ok\x00\x00".to_vec(),
        };
        let encoded = rec.encode();
        assert_eq!(encoded.len(), rec.disk_size());

        let mut cursor = std::io::Cursor::new(&encoded);
        let (decoded, size) = DataRecord::decode_from(&mut cursor).unwrap().unwrap();
        assert_eq!(size, encoded.len());
        assert_eq!(decoded.timestamp, rec.timestamp);
        assert_eq!(decoded.key, rec.key);
        assert_eq!(decoded.value, rec.value);
        assert!(!decoded.is_tombstone());
    }

    #[test]
    fn record_roundtrip_tombstone() {
        let rec = DataRecord {
            timestamp: 1700000000000,
            key: b"def456".to_vec(),
            value: vec![],
        };
        assert!(rec.is_tombstone());
        let encoded = rec.encode();

        let mut cursor = std::io::Cursor::new(&encoded);
        let (decoded, _) = DataRecord::decode_from(&mut cursor).unwrap().unwrap();
        assert!(decoded.is_tombstone());
        assert_eq!(decoded.key, b"def456");
    }

    #[test]
    fn record_detects_corruption() {
        let rec = DataRecord {
            timestamp: 42,
            key: b"key".to_vec(),
            value: b"value".to_vec(),
        };
        let mut encoded = rec.encode();
        // Flip a bit in the payload
        let last = encoded.len() - 1;
        encoded[last] ^= 0xFF;

        let mut cursor = std::io::Cursor::new(&encoded);
        let result = DataRecord::decode_from(&mut cursor);
        assert!(matches!(result, Err(StorageError::ChecksumMismatch { .. })));
    }

    #[test]
    fn record_detects_bad_magic() {
        let rec = DataRecord {
            timestamp: 42,
            key: b"key".to_vec(),
            value: b"value".to_vec(),
        };
        let mut encoded = rec.encode();
        encoded[0] = 0xFF;
        encoded[1] = 0xFF;

        let mut cursor = std::io::Cursor::new(&encoded);
        let result = DataRecord::decode_from(&mut cursor);
        assert!(matches!(result, Err(StorageError::CorruptRecord(_))));
    }

    #[test]
    fn eof_returns_none() {
        let empty: &[u8] = &[];
        let mut cursor = std::io::Cursor::new(empty);
        let result = DataRecord::decode_from(&mut cursor).unwrap();
        assert!(result.is_none());
    }

    #[test]
    fn scanner_iterates_multiple_records() {
        let records = vec![
            DataRecord {
                timestamp: 1,
                key: b"a".to_vec(),
                value: b"v1".to_vec(),
            },
            DataRecord {
                timestamp: 2,
                key: b"b".to_vec(),
                value: b"v2".to_vec(),
            },
            DataRecord {
                timestamp: 3,
                key: b"c".to_vec(),
                value: vec![],
            },
        ];

        let mut buf = Vec::new();
        for r in &records {
            buf.extend_from_slice(&r.encode());
        }

        let scanner = RecordScanner::new(std::io::Cursor::new(&buf), 0);
        let results: Vec<_> = scanner.collect::<Result<Vec<_>, _>>().unwrap();
        assert_eq!(results.len(), 3);
        assert_eq!(results[0].1.key, b"a");
        assert_eq!(results[1].1.key, b"b");
        assert!(results[2].1.is_tombstone());

        // Verify offsets are correct
        assert_eq!(results[0].0, 0);
        assert_eq!(results[1].0, records[0].disk_size() as u64);
        assert_eq!(
            results[2].0,
            (records[0].disk_size() + records[1].disk_size()) as u64
        );
    }
}

@@ -0,0 +1,330 @@
//! Data integrity validation for RustDb storage directories.
//!
//! Provides offline validation of data files without starting the server.
//! Checks header magic, record CRC32 checksums, duplicate IDs, and
//! keydir.hint consistency.

use std::collections::HashMap;
use std::io::{BufReader, Read};
use std::path::Path;

use crate::error::{StorageError, StorageResult};
use crate::keydir::KeyDir;
use crate::record::{FileHeader, FileType, RecordScanner, FILE_HEADER_SIZE};

/// Result of validating an entire data directory.
pub struct ValidationReport {
    pub collections: Vec<CollectionReport>,
}

/// Result of validating a single collection.
pub struct CollectionReport {
    pub db: String,
    pub collection: String,
    pub header_valid: bool,
    pub total_records: u64,
    pub live_documents: u64,
    pub tombstones: u64,
    pub duplicate_ids: Vec<String>,
    pub checksum_errors: u64,
    pub decode_errors: u64,
    pub data_file_size: u64,
    pub hint_file_exists: bool,
    pub orphaned_hint_entries: u64,
    pub errors: Vec<String>,
}

impl ValidationReport {
    /// Whether any errors were found across all collections.
    pub fn has_errors(&self) -> bool {
        self.collections.iter().any(|c| {
            !c.header_valid
                || !c.duplicate_ids.is_empty()
                || c.checksum_errors > 0
                || c.decode_errors > 0
                || c.orphaned_hint_entries > 0
                || !c.errors.is_empty()
        })
    }

    /// Print a human-readable summary to stdout.
    pub fn print_summary(&self) {
        println!("=== SmartDB Data Integrity Report ===");
        println!();

        let mut total_errors = 0u64;

        for report in &self.collections {
            println!("Database: {}", report.db);
            println!("  Collection: {}", report.collection);
            println!(
                "  Header: {}",
                if report.header_valid { "OK" } else { "INVALID" }
            );
            println!(
                "  Records: {} ({} live, {} tombstones)",
                report.total_records, report.live_documents, report.tombstones
            );
            println!("  Data size: {} bytes", report.data_file_size);

            if report.duplicate_ids.is_empty() {
                println!("  Duplicates: 0");
            } else {
                let ids_preview: Vec<&str> = report.duplicate_ids.iter().take(5).map(|s| s.as_str()).collect();
                let suffix = if report.duplicate_ids.len() > 5 {
                    format!(", ... and {} more", report.duplicate_ids.len() - 5)
                } else {
                    String::new()
                };
                println!(
                    "  Duplicates: {} (ids: {}{})",
                    report.duplicate_ids.len(),
                    ids_preview.join(", "),
                    suffix
                );
            }

            if report.checksum_errors > 0 {
                println!("  CRC errors: {}", report.checksum_errors);
            } else {
                println!("  CRC errors: 0");
            }

            if report.decode_errors > 0 {
                println!("  Decode errors: {}", report.decode_errors);
            }

            if report.hint_file_exists {
                if report.orphaned_hint_entries > 0 {
                    println!(
                        "  Hint file: STALE ({} orphaned entries)",
                        report.orphaned_hint_entries
                    );
                } else {
                    println!("  Hint file: OK");
                }
            } else {
                println!("  Hint file: absent");
            }

            for err in &report.errors {
                println!("  ERROR: {}", err);
            }

            println!();

            if !report.header_valid { total_errors += 1; }
            total_errors += report.duplicate_ids.len() as u64;
            total_errors += report.checksum_errors;
            total_errors += report.decode_errors;
            total_errors += report.orphaned_hint_entries;
            total_errors += report.errors.len() as u64;
        }

        println!(
            "Summary: {} collection(s) checked, {} error(s) found.",
            self.collections.len(),
            total_errors
        );
    }
}

/// Validate all collections in a data directory.
///
/// The directory structure is expected to be:
/// ```text
/// {base_path}/{db}/{collection}/data.rdb
/// ```
pub fn validate_data_directory(base_path: &str) -> StorageResult<ValidationReport> {
    let base = Path::new(base_path);
    if !base.exists() {
        return Err(StorageError::IoError(std::io::Error::new(
            std::io::ErrorKind::NotFound,
            format!("data directory not found: {base_path}"),
        )));
    }

    let mut collections = Vec::new();

    // Iterate database directories
    let entries = std::fs::read_dir(base)?;
    for entry in entries {
        let entry = entry?;
        if !entry.file_type()?.is_dir() {
            continue;
        }
        let db_name = match entry.file_name().to_str() {
            Some(s) => s.to_string(),
            None => continue,
        };

        // Iterate collection directories
        let db_entries = std::fs::read_dir(entry.path())?;
        for coll_entry in db_entries {
            let coll_entry = coll_entry?;
            if !coll_entry.file_type()?.is_dir() {
                continue;
            }
            let coll_name = match coll_entry.file_name().to_str() {
                Some(s) => s.to_string(),
                None => continue,
            };

            let data_path = coll_entry.path().join("data.rdb");
            if !data_path.exists() {
                continue;
            }

            let report = validate_collection(&db_name, &coll_name, &coll_entry.path());
            collections.push(report);
        }
    }

    // Sort for deterministic output
    collections.sort_by(|a, b| (&a.db, &a.collection).cmp(&(&b.db, &b.collection)));

    Ok(ValidationReport { collections })
}
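
// Illustrative sketch (not part of this diff): embedding the offline check in
// a tool. The `--validate-data` CLI flag added in main.rs below wires up this
// exact sequence; the path here is hypothetical.
//
//   let report = validate_data_directory("/var/lib/smartdb")?;
//   report.print_summary();
//   if report.has_errors() {
//       std::process::exit(1);
//   }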

/// Validate a single collection directory.
fn validate_collection(db: &str, coll: &str, coll_dir: &Path) -> CollectionReport {
    let data_path = coll_dir.join("data.rdb");
    let hint_path = coll_dir.join("keydir.hint");

    let mut report = CollectionReport {
        db: db.to_string(),
        collection: coll.to_string(),
        header_valid: false,
        total_records: 0,
        live_documents: 0,
        tombstones: 0,
        duplicate_ids: Vec::new(),
        checksum_errors: 0,
        decode_errors: 0,
        data_file_size: 0,
        hint_file_exists: hint_path.exists(),
        orphaned_hint_entries: 0,
        errors: Vec::new(),
    };

    // Get file size
    match std::fs::metadata(&data_path) {
        Ok(m) => report.data_file_size = m.len(),
        Err(e) => {
            report.errors.push(format!("cannot stat data.rdb: {e}"));
            return report;
        }
    }

    // Open and validate header
    let file = match std::fs::File::open(&data_path) {
        Ok(f) => f,
        Err(e) => {
            report.errors.push(format!("cannot open data.rdb: {e}"));
            return report;
        }
    };
    let mut reader = BufReader::new(file);

    let mut hdr_buf = [0u8; FILE_HEADER_SIZE];
    if let Err(e) = reader.read_exact(&mut hdr_buf) {
        report.errors.push(format!("cannot read header: {e}"));
        return report;
    }

    match FileHeader::decode(&hdr_buf) {
        Ok(hdr) => {
            if hdr.file_type != FileType::Data {
                report.errors.push(format!(
                    "wrong file type: expected Data, got {:?}",
                    hdr.file_type
                ));
            } else {
                report.header_valid = true;
            }
        }
        Err(e) => {
            report.errors.push(format!("invalid header: {e}"));
            return report;
        }
    }

    // Scan all records
    let mut id_counts: HashMap<String, u64> = HashMap::new();
    let mut live_ids: std::collections::HashSet<String> = std::collections::HashSet::new();
    let scanner = RecordScanner::new(reader, FILE_HEADER_SIZE as u64);

    for result in scanner {
        match result {
            Ok((_offset, record)) => {
                report.total_records += 1;
                let key = String::from_utf8_lossy(&record.key).to_string();

                if record.is_tombstone() {
                    report.tombstones += 1;
                    live_ids.remove(&key);
                } else {
                    *id_counts.entry(key.clone()).or_insert(0) += 1;
                    live_ids.insert(key);
                }
            }
            Err(e) => {
                let err_str = e.to_string();
                if err_str.contains("checksum") || err_str.contains("Checksum") {
                    report.checksum_errors += 1;
                } else {
                    report.decode_errors += 1;
                }
                // Cannot continue scanning after a decode error — the stream position is lost
                report.errors.push(format!("record decode error: {e}"));
                break;
            }
        }
    }

    report.live_documents = live_ids.len() as u64;

    // Find duplicates (keys that appeared more than once as live inserts)
    for (id, count) in &id_counts {
        if *count > 1 {
            report.duplicate_ids.push(id.clone());
        }
    }
    report.duplicate_ids.sort();

    // Validate hint file if present
    if hint_path.exists() {
        match KeyDir::load_from_hint_file(&hint_path) {
            Ok(Some((hint_kd, stored_size))) => {
                if stored_size > 0 && stored_size != report.data_file_size {
                    report.errors.push(format!(
                        "hint file is stale: recorded data size {} but actual is {}",
                        stored_size, report.data_file_size
                    ));
                }
                // Check for orphaned entries: keys in hint but not live in data
                hint_kd.for_each(|key, _entry| {
                    if !live_ids.contains(key) {
                        report.orphaned_hint_entries += 1;
                    }
                });

                // Also check if hint references offsets beyond file size
                hint_kd.for_each(|_key, entry| {
                    if entry.offset + entry.record_len as u64 > report.data_file_size {
                        report.orphaned_hint_entries += 1;
                    }
                });
            }
            Ok(None) => {
                // File existed but was empty or unreadable
                report.errors.push("hint file exists but is empty".into());
            }
            Err(e) => {
                report.errors.push(format!("hint file decode error: {e}"));
            }
        }
    }

    report
}

@@ -1,186 +0,0 @@
//! Write-Ahead Log (WAL) for crash recovery.
//!
//! Before any mutation is applied to storage, it is first written to the WAL.
//! On recovery, uncommitted WAL entries can be replayed or discarded.

use std::path::PathBuf;
use std::sync::atomic::{AtomicU64, Ordering};

use bson::Document;
use serde::{Deserialize, Serialize};
use tokio::io::AsyncWriteExt;
use tracing::{debug, warn};

use crate::error::StorageResult;

/// WAL operation kind.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub enum WalOp {
    Insert,
    Update,
    Delete,
}

/// A single WAL record.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct WalRecord {
    /// Sequence number.
    pub seq: u64,
    /// Operation kind.
    pub op: WalOp,
    /// Database name.
    pub db: String,
    /// Collection name.
    pub collection: String,
    /// Document id (hex string).
    pub document_id: String,
    /// Document data (for insert/update).
    pub document: Option<Document>,
    /// Whether this record has been committed (applied to storage).
    pub committed: bool,
    /// CRC32 checksum of the serialized payload for integrity verification.
    pub checksum: u32,
}

/// Write-ahead log that persists records to a file.
pub struct WriteAheadLog {
    path: PathBuf,
    next_seq: AtomicU64,
}

impl WriteAheadLog {
    /// Create a new WAL at the given file path.
    pub fn new(path: PathBuf) -> Self {
        Self {
            path,
            next_seq: AtomicU64::new(1),
        }
    }

    /// Initialize the WAL (create file if needed, load sequence counter).
    pub async fn initialize(&self) -> StorageResult<()> {
        if let Some(parent) = self.path.parent() {
            tokio::fs::create_dir_all(parent).await?;
        }
        if self.path.exists() {
            // Load existing records to find the max sequence number.
            let records = self.read_all().await?;
            if let Some(max_seq) = records.iter().map(|r| r.seq).max() {
                self.next_seq.store(max_seq + 1, Ordering::SeqCst);
            }
        }
        debug!("WAL initialized at {:?}", self.path);
        Ok(())
    }

    /// Append a record to the WAL. Returns the sequence number.
    pub async fn append(
        &self,
        op: WalOp,
        db: &str,
        collection: &str,
        document_id: &str,
        document: Option<Document>,
    ) -> StorageResult<u64> {
        let seq = self.next_seq.fetch_add(1, Ordering::SeqCst);

        // Compute checksum over the payload.
        let payload = serde_json::json!({
            "op": op,
            "db": db,
            "collection": collection,
            "document_id": document_id,
        });
        let payload_bytes = serde_json::to_vec(&payload)?;
        let checksum = crc32fast::hash(&payload_bytes);

        let record = WalRecord {
            seq,
            op,
            db: db.to_string(),
            collection: collection.to_string(),
            document_id: document_id.to_string(),
            document,
            committed: false,
            checksum,
        };

        let line = serde_json::to_string(&record)?;
        let mut file = tokio::fs::OpenOptions::new()
            .create(true)
            .append(true)
            .open(&self.path)
            .await?;
        file.write_all(line.as_bytes()).await?;
        file.write_all(b"\n").await?;
        file.flush().await?;

        Ok(seq)
    }

    /// Mark a WAL record as committed by rewriting the file.
    pub async fn mark_committed(&self, seq: u64) -> StorageResult<()> {
        let mut records = self.read_all().await?;
        for record in &mut records {
            if record.seq == seq {
                record.committed = true;
            }
        }
        self.write_all(&records).await
    }

    /// Read all WAL records.
    pub async fn read_all(&self) -> StorageResult<Vec<WalRecord>> {
        if !self.path.exists() {
            return Ok(vec![]);
        }
        let data = tokio::fs::read_to_string(&self.path).await?;
        let mut records = Vec::new();
        for line in data.lines() {
            if line.trim().is_empty() {
                continue;
            }
            match serde_json::from_str::<WalRecord>(line) {
                Ok(record) => records.push(record),
                Err(e) => {
                    warn!("skipping corrupt WAL record: {e}");
                }
            }
        }
        Ok(records)
    }

    /// Get all uncommitted records (for replay during recovery).
    pub async fn uncommitted(&self) -> StorageResult<Vec<WalRecord>> {
        let records = self.read_all().await?;
        Ok(records.into_iter().filter(|r| !r.committed).collect())
    }

    /// Truncate the WAL, removing all committed records.
    pub async fn truncate_committed(&self) -> StorageResult<()> {
        let records = self.read_all().await?;
        let uncommitted: Vec<_> = records.into_iter().filter(|r| !r.committed).collect();
        self.write_all(&uncommitted).await
    }

    /// Clear the entire WAL.
    pub async fn clear(&self) -> StorageResult<()> {
        if self.path.exists() {
            tokio::fs::write(&self.path, "").await?;
        }
        self.next_seq.store(1, Ordering::SeqCst);
        Ok(())
    }

    /// Write all records to the WAL file (overwrites).
    async fn write_all(&self, records: &[WalRecord]) -> StorageResult<()> {
        let mut content = String::new();
        for record in records {
            let line = serde_json::to_string(record)?;
            content.push_str(&line);
            content.push('\n');
        }
        tokio::fs::write(&self.path, content).await?;
        Ok(())
    }
}

@@ -21,9 +21,12 @@ rustdb-query = { workspace = true }
 rustdb-storage = { workspace = true }
 rustdb-index = { workspace = true }
 rustdb-txn = { workspace = true }
+rustdb-auth = { workspace = true }
 rustdb-commands = { workspace = true }
 tokio = { workspace = true }
 tokio-util = { workspace = true }
+tokio-rustls = { workspace = true }
+rustls-pemfile = { workspace = true }
 tracing = { workspace = true }
 tracing-subscriber = { workspace = true }
 clap = { workspace = true }
+213 -12
@@ -1,8 +1,12 @@
pub mod management;

+use std::fs::File;
+use std::io::BufReader;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;

-use anyhow::Result;
+use anyhow::{Context, Result};
use dashmap::DashMap;
use tokio::net::TcpListener;
#[cfg(unix)]
@@ -10,13 +14,17 @@ use tokio::net::UnixListener;
use tokio_util::codec::Framed;
use tokio_util::sync::CancellationToken;

-use rustdb_config::{RustDbOptions, StorageType};
+use rustdb_config::{RustDbOptions, StorageType, TlsOptions};
use rustdb_wire::{WireCodec, OP_QUERY};
use rustdb_wire::{encode_op_msg_response, encode_op_reply_response};
use rustdb_storage::{StorageAdapter, MemoryStorageAdapter, FileStorageAdapter, OpLog};
// IndexEngine is used indirectly via CommandContext
use rustdb_index::{IndexEngine, IndexOptions};
use rustdb_txn::{TransactionEngine, SessionEngine};
-use rustdb_commands::{CommandRouter, CommandContext};
+use rustdb_auth::AuthEngine;
+use rustdb_commands::{CommandRouter, CommandContext, ConnectionState};
+use tokio_rustls::rustls::{RootCertStore, ServerConfig};
+use tokio_rustls::rustls::server::WebPkiClientVerifier;
+use tokio_rustls::TlsAcceptor;

/// The main RustDb server.
pub struct RustDb {
@@ -33,7 +41,16 @@ impl RustDb {
        // Create storage adapter
        let storage: Arc<dyn StorageAdapter> = match options.storage {
            StorageType::Memory => {
-               let adapter = MemoryStorageAdapter::new();
+               let adapter = if let Some(ref pp) = options.persist_path {
+                   tracing::info!("MemoryStorageAdapter with periodic persistence to {}", pp);
+                   MemoryStorageAdapter::with_persist_path(PathBuf::from(pp))
+               } else {
+                   tracing::warn!(
+                       "SmartDB is using in-memory storage — data will NOT survive a restart. \
+                        Set storage to 'file' for durable persistence."
+                   );
+                   MemoryStorageAdapter::new()
+               };
                Arc::new(adapter)
            }
            StorageType::File => {
@@ -49,14 +66,107 @@
        // Initialize storage
        storage.initialize().await?;

        // Restore any previously persisted state (no-op for file storage and
        // memory storage without a persist_path).
        storage.restore().await?;

        // Spawn periodic persistence task for memory storage with persist_path.
        if options.storage == StorageType::Memory && options.persist_path.is_some() {
            let persist_storage = storage.clone();
            let interval_ms = options.persist_interval_ms;
            tokio::spawn(async move {
                let mut interval = tokio::time::interval(Duration::from_millis(interval_ms));
                interval.tick().await; // skip the immediate first tick
                loop {
                    interval.tick().await;
                    if let Err(e) = persist_storage.persist().await {
                        tracing::error!("Periodic persist failed: {}", e);
                    }
                }
            });
        }

        let indexes: Arc<DashMap<String, IndexEngine>> = Arc::new(DashMap::new());

        // Restore persisted indexes from storage.
        if let Ok(databases) = storage.list_databases().await {
            for db_name in &databases {
                if let Ok(collections) = storage.list_collections(db_name).await {
                    for coll_name in &collections {
                        if let Ok(specs) = storage.get_indexes(db_name, coll_name).await {
                            let has_custom = specs.iter().any(|s| {
                                s.get_str("name").unwrap_or("_id_") != "_id_"
                            });
                            if !has_custom {
                                continue;
                            }

                            let ns_key = format!("{}.{}", db_name, coll_name);
                            let mut engine = IndexEngine::new();

                            for spec in &specs {
                                let name = spec.get_str("name").unwrap_or("").to_string();
                                if name == "_id_" {
                                    continue; // already created by IndexEngine::new()
                                }
                                let key = match spec.get("key") {
                                    Some(bson::Bson::Document(k)) => k.clone(),
                                    _ => continue,
                                };
                                let unique = matches!(spec.get("unique"), Some(bson::Bson::Boolean(true)));
                                let sparse = matches!(spec.get("sparse"), Some(bson::Bson::Boolean(true)));
                                let expire_after_seconds = match spec.get("expireAfterSeconds") {
                                    Some(bson::Bson::Int32(n)) => Some(*n as u64),
                                    Some(bson::Bson::Int64(n)) => Some(*n as u64),
                                    _ => None,
                                };

                                let options = IndexOptions {
                                    name: Some(name.clone()),
                                    unique,
                                    sparse,
                                    expire_after_seconds,
                                };
                                if let Err(e) = engine.create_index(key, options) {
                                    tracing::warn!(
                                        namespace = %ns_key,
                                        index = %name,
                                        error = %e,
                                        "failed to restore index"
                                    );
                                }
                            }

                            // Rebuild index data from existing documents.
                            if let Ok(docs) = storage.find_all(db_name, coll_name).await {
                                if !docs.is_empty() {
                                    engine.rebuild_from_documents(&docs);
                                }
                            }

                            tracing::info!(
                                namespace = %ns_key,
                                indexes = engine.list_indexes().len(),
                                "restored indexes"
                            );
                            indexes.insert(ns_key, engine);
                        }
                    }
                }
            }
        }

        let auth = Arc::new(AuthEngine::from_options(&options.auth)?);

        let ctx = Arc::new(CommandContext {
            storage,
-           indexes: Arc::new(DashMap::new()),
+           indexes,
            transactions: Arc::new(TransactionEngine::new()),
            sessions: Arc::new(SessionEngine::new(30 * 60 * 1000, 60 * 1000)),
            cursors: Arc::new(DashMap::new()),
            start_time: std::time::Instant::now(),
            oplog: Arc::new(OpLog::new()),
            auth,
        });

        let router = Arc::new(CommandRouter::new(ctx.clone()));
@@ -114,7 +224,12 @@
        } else {
            let addr = format!("{}:{}", self.options.host, self.options.port);
            let listener = TcpListener::bind(&addr).await?;
-           tracing::info!("RustDb listening on {}", addr);
+           let tls_acceptor = if self.options.tls.enabled {
+               Some(build_tls_acceptor(&self.options.tls)?)
+           } else {
+               None
+           };
+           tracing::info!(tls = self.options.tls.enabled, "RustDb listening on {}", addr);

            let handle = tokio::spawn(async move {
                loop {
@@ -125,9 +240,21 @@
                    Ok((stream, _addr)) => {
                        let _ = stream.set_nodelay(true);
                        let router = router.clone();
-                       tokio::spawn(async move {
-                           handle_connection(stream, router).await;
-                       });
+                       match tls_acceptor.clone() {
+                           Some(acceptor) => {
+                               tokio::spawn(async move {
+                                   match acceptor.accept(stream).await {
+                                       Ok(tls_stream) => handle_connection(tls_stream, router).await,
+                                       Err(e) => tracing::debug!("TLS handshake failed: {}", e),
+                                   }
+                               });
+                           }
+                           None => {
+                               tokio::spawn(async move {
+                                   handle_connection(stream, router).await;
+                               });
+                           }
+                       }
                    }
                    Err(e) => {
                        tracing::error!("Accept error: {}", e);
@@ -174,14 +301,88 @@
    }
}

fn build_tls_acceptor(options: &TlsOptions) -> Result<TlsAcceptor> {
    let cert_path = options
        .cert_path
        .as_deref()
        .context("tls.certPath is required when tls.enabled is true")?;
    let key_path = options
        .key_path
        .as_deref()
        .context("tls.keyPath is required when tls.enabled is true")?;

    let certs = load_certs(cert_path)?;
    let key = load_private_key(key_path)?;

    let config = if options.require_client_cert {
        let ca_path = options
            .ca_path
            .as_deref()
            .context("tls.caPath is required when tls.requireClientCert is true")?;
        let roots = load_root_store(ca_path)?;
        let verifier = WebPkiClientVerifier::builder(Arc::new(roots))
            .build()
            .context("failed to build TLS client certificate verifier")?;
        ServerConfig::builder()
            .with_client_cert_verifier(verifier)
            .with_single_cert(certs, key)
            .context("failed to build TLS server configuration")?
    } else {
        ServerConfig::builder()
            .with_no_client_auth()
            .with_single_cert(certs, key)
            .context("failed to build TLS server configuration")?
    };

    Ok(TlsAcceptor::from(Arc::new(config)))
}

fn load_certs(path: &str) -> Result<Vec<tokio_rustls::rustls::pki_types::CertificateDer<'static>>> {
    let file = File::open(path).with_context(|| format!("failed to open TLS certificate file '{}'", path))?;
    let mut reader = BufReader::new(file);
    let certs = rustls_pemfile::certs(&mut reader)
        .collect::<std::result::Result<Vec<_>, _>>()
        .with_context(|| format!("failed to parse TLS certificate file '{}'", path))?;

    if certs.is_empty() {
        anyhow::bail!("TLS certificate file '{}' did not contain any certificates", path);
    }

    Ok(certs)
}

fn load_private_key(path: &str) -> Result<tokio_rustls::rustls::pki_types::PrivateKeyDer<'static>> {
    let file = File::open(path).with_context(|| format!("failed to open TLS private key file '{}'", path))?;
    let mut reader = BufReader::new(file);
    rustls_pemfile::private_key(&mut reader)
        .with_context(|| format!("failed to parse TLS private key file '{}'", path))?
        .with_context(|| format!("TLS private key file '{}' did not contain a private key", path))
}

fn load_root_store(path: &str) -> Result<RootCertStore> {
    let mut roots = RootCertStore::empty();
    for cert in load_certs(path)? {
        roots
            .add(cert)
            .with_context(|| format!("failed to add TLS client CA certificate from '{}'", path))?;
    }

    if roots.is_empty() {
        anyhow::bail!("TLS client CA file '{}' did not contain usable certificates", path);
    }

    Ok(roots)
}
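
// Illustrative sketch (not part of this diff): a config that exercises the
// mutual-TLS path above. Paths are hypothetical, and `..Default::default()`
// assumes TlsOptions has further fields and derives Default.
//
//   let tls = TlsOptions {
//       enabled: true,
//       cert_path: Some("certs/server.pem".into()),
//       key_path: Some("certs/server.key".into()),
//       ca_path: Some("certs/clients-ca.pem".into()),
//       require_client_cert: true,
//       ..Default::default()
//   };
//   let acceptor = build_tls_acceptor(&tls)?;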

/// Handle a single client connection using the wire protocol codec.
async fn handle_connection<S>(stream: S, router: Arc<CommandRouter>)
where
-   S: tokio::io::AsyncRead + tokio::io::AsyncWrite + Unpin,
+   S: tokio::io::AsyncRead + tokio::io::AsyncWrite + Unpin + Send + 'static,
{
    use futures_util::{SinkExt, StreamExt};

    let mut framed = Framed::new(stream, WireCodec);
+   let mut connection = ConnectionState::new();

    while let Some(result) = framed.next().await {
        match result {
@@ -189,7 +390,7 @@ where
            let request_id = parsed_cmd.request_id;
            let op_code = parsed_cmd.op_code;

-           let response_doc = router.route(&parsed_cmd).await;
+           let response_doc = router.route(&parsed_cmd, &mut connection).await;

            let response_id = next_request_id();

@@ -25,6 +25,10 @@ struct Cli {
    #[arg(long)]
    validate: bool,

+   /// Validate data integrity of a storage directory (offline check)
+   #[arg(long, value_name = "PATH")]
+   validate_data: Option<String>,
+
    /// Run in management mode (JSON-over-stdin IPC for TypeScript wrapper)
    #[arg(long)]
    management: bool,
@@ -55,7 +59,7 @@ async fn main() -> Result<()> {
    let options = RustDbOptions::from_file(&cli.config)
        .map_err(|e| anyhow::anyhow!("Failed to load config '{}': {}", cli.config, e))?;

-   // Validate-only mode
+   // Validate-only mode (config)
    if cli.validate {
        match options.validate() {
            Ok(()) => {
@@ -69,6 +73,18 @@ async fn main() -> Result<()> {
        }
    }

+   // Validate data integrity mode
+   if let Some(ref data_path) = cli.validate_data {
+       tracing::info!("Validating data integrity at {}", data_path);
+       let report = rustdb_storage::validate::validate_data_directory(data_path)
+           .map_err(|e| anyhow::anyhow!("Validation failed: {}", e))?;
+       report.print_summary();
+       if report.has_errors() {
+           std::process::exit(1);
+       }
+       return Ok(());
+   }
+
    // Create and start server
    let mut db = RustDb::new(options).await?;
    db.start().await?;

@@ -167,6 +167,9 @@ async fn handle_start(
        Ok(o) => o,
        Err(e) => return ManagementResponse::err(id.to_string(), format!("Invalid config: {}", e)),
    };
+   if let Err(e) = options.validate() {
+       return ManagementResponse::err(id.to_string(), format!("Invalid config: {}", e));
+   }

    let connection_uri = options.connection_uri();

@@ -0,0 +1,173 @@
import { expect, tap } from '@git.zone/tstest/tapbundle';
import * as smartdb from '../ts/index.js';
import { MongoClient } from 'mongodb';
import * as fs from 'fs';
import * as os from 'os';
import * as path from 'path';

let server: smartdb.SmartdbServer;
let authedClient: MongoClient;
let openClient: MongoClient;
let readerClient: MongoClient;
let tmpDir: string;
let usersPath: string;

function makeTmpDir(): string {
  return fs.mkdtempSync(path.join(os.tmpdir(), 'smartdb-auth-test-'));
}

function cleanTmpDir(dir: string): void {
  if (fs.existsSync(dir)) {
    fs.rmSync(dir, { recursive: true, force: true });
  }
}

tap.test('auth: should start server with SCRAM-SHA-256 auth enabled', async () => {
  tmpDir = makeTmpDir();
  usersPath = path.join(tmpDir, 'users.json');
  server = new smartdb.SmartdbServer({
    port: 27118,
    auth: {
      enabled: true,
      usersPath,
      scramIterations: 4096,
      users: [
        {
          username: 'root',
          password: 'secret',
          database: 'admin',
          roles: ['root'],
        },
      ],
    },
  });
  await server.start();
  expect(server.running).toBeTrue();
});

tap.test('auth: should reject protected commands before authentication', async () => {
  openClient = new MongoClient('mongodb://127.0.0.1:27118', {
    directConnection: true,
    serverSelectionTimeoutMS: 5000,
  });
  await openClient.connect();

  let threw = false;
  try {
    await openClient.db('admin').command({ ping: 1 });
  } catch (err: any) {
    threw = true;
    expect(err.code).toEqual(13);
  }
  expect(threw).toBeTrue();
});

tap.test('auth: should reject invalid credentials', async () => {
  const badClient = new MongoClient('mongodb://root:wrong@127.0.0.1:27118/admin?authSource=admin', {
    directConnection: true,
    serverSelectionTimeoutMS: 5000,
  });

  let threw = false;
  try {
    await badClient.connect();
    await badClient.db('admin').command({ ping: 1 });
  } catch {
    threw = true;
  } finally {
    await badClient.close().catch(() => undefined);
  }
  expect(threw).toBeTrue();
});

tap.test('auth: should authenticate valid credentials', async () => {
  authedClient = new MongoClient('mongodb://root:secret@127.0.0.1:27118/admin?authSource=admin', {
    directConnection: true,
    serverSelectionTimeoutMS: 5000,
  });
  await authedClient.connect();
  const result = await authedClient.db('admin').command({ ping: 1 });
  expect(result.ok).toEqual(1);
});

tap.test('auth: should allow CRUD after authentication', async () => {
  const coll = authedClient.db('securedb').collection('notes');
  const inserted = await coll.insertOne({ title: 'enterprise auth' });
  expect(inserted.acknowledged).toBeTrue();

  const doc = await coll.findOne({ _id: inserted.insertedId });
  expect(doc).toBeTruthy();
  expect(doc!.title).toEqual('enterprise auth');
});

tap.test('auth: root should create a read-only user', async () => {
  const result = await authedClient.db('admin').command({
    createUser: 'reader',
    pwd: 'readpass',
    roles: [{ role: 'read', db: 'securedb' }],
  });
  expect(result.ok).toEqual(1);

  const usersInfo = await authedClient.db('admin').command({ usersInfo: 'reader' });
  expect(usersInfo.ok).toEqual(1);
  expect(usersInfo.users.length).toEqual(1);
  expect(usersInfo.users[0].user).toEqual('reader');
});

tap.test('auth: read-only user should read but not write', async () => {
  readerClient = new MongoClient('mongodb://reader:readpass@127.0.0.1:27118/admin?authSource=admin', {
    directConnection: true,
    serverSelectionTimeoutMS: 5000,
  });
  await readerClient.connect();

  const doc = await readerClient.db('securedb').collection('notes').findOne({ title: 'enterprise auth' });
  expect(doc).toBeTruthy();

  let threw = false;
  try {
    await readerClient.db('securedb').collection('notes').insertOne({ title: 'denied write' });
  } catch (err: any) {
    threw = true;
    expect(err.code).toEqual(13);
  }
  expect(threw).toBeTrue();
});

tap.test('auth: persisted users should survive server restart', async () => {
  await readerClient.close();
  await authedClient.close();
  await server.stop();

  // Simulates a crash after writing the temporary auth metadata file but before rename.
  fs.writeFileSync(path.join(tmpDir, 'users.tmp'), '{ invalid json');

  server = new smartdb.SmartdbServer({
    port: 27118,
    auth: {
      enabled: true,
      usersPath,
      users: [],
      scramIterations: 4096,
    },
  });
  await server.start();

  readerClient = new MongoClient('mongodb://reader:readpass@127.0.0.1:27118/admin?authSource=admin', {
    directConnection: true,
    serverSelectionTimeoutMS: 5000,
  });
  await readerClient.connect();
  const result = await readerClient.db('admin').command({ ping: 1 });
  expect(result.ok).toEqual(1);
});

tap.test('auth: cleanup', async () => {
  await openClient.close();
  await readerClient.close();
  await server.stop();
  expect(server.running).toBeFalse();
  cleanTmpDir(tmpDir);
});

export default tap.start();

@@ -0,0 +1,256 @@
import { expect, tap } from '@git.zone/tstest/tapbundle';
import * as smartdb from '../ts/index.js';
import { MongoClient, Db } from 'mongodb';
import * as fs from 'fs';
import * as path from 'path';
import * as os from 'os';

// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------

let tmpDir: string;
let server: smartdb.SmartdbServer;
let client: MongoClient;
let db: Db;

function makeTmpDir(): string {
  return fs.mkdtempSync(path.join(os.tmpdir(), 'smartdb-compact-test-'));
}

function cleanTmpDir(dir: string): void {
  if (fs.existsSync(dir)) {
    fs.rmSync(dir, { recursive: true, force: true });
  }
}

function getDataFileSize(storagePath: string, dbName: string, collName: string): number {
  const dataPath = path.join(storagePath, dbName, collName, 'data.rdb');
  if (!fs.existsSync(dataPath)) return 0;
  return fs.statSync(dataPath).size;
}

// ============================================================================
// Compaction: Setup
// ============================================================================

tap.test('compaction: start server with file storage', async () => {
  tmpDir = makeTmpDir();
  server = new smartdb.SmartdbServer({
    socketPath: path.join(os.tmpdir(), `smartdb-compact-${Date.now()}-${Math.random().toString(36).slice(2)}.sock`),
    storage: 'file',
    storagePath: tmpDir,
  });
  await server.start();

  client = new MongoClient(server.getConnectionUri(), {
    directConnection: true,
    serverSelectionTimeoutMS: 5000,
  });
  await client.connect();
  db = client.db('compactdb');
});

// ============================================================================
// Compaction: Updates grow the data file
// ============================================================================

tap.test('compaction: repeated updates grow the data file', async () => {
  const coll = db.collection('growing');

  // Insert a document
  await coll.insertOne({ key: 'target', counter: 0, payload: 'x'.repeat(200) });

  const sizeAfterInsert = getDataFileSize(tmpDir, 'compactdb', 'growing');
  expect(sizeAfterInsert).toBeGreaterThan(0);

  // Update the same document 50 times — each update appends a new record
  for (let i = 1; i <= 50; i++) {
    await coll.updateOne(
      { key: 'target' },
      { $set: { counter: i, payload: 'y'.repeat(200) } }
    );
  }

  const sizeAfterUpdates = getDataFileSize(tmpDir, 'compactdb', 'growing');
  // Compaction may have run during the updates, so we can't assert that the
  // file is much larger. What matters is that the data is correct.

  // The collection still has just 1 document
  const count = await coll.countDocuments();
  expect(count).toEqual(1);

  const doc = await coll.findOne({ key: 'target' });
  expect(doc!.counter).toEqual(50);
});

// ============================================================================
// Compaction: Deletes create tombstones
// ============================================================================

tap.test('compaction: insert-then-delete creates dead space', async () => {
  const coll = db.collection('tombstones');

  // Insert 100 documents
  const docs = [];
  for (let i = 0; i < 100; i++) {
    docs.push({ idx: i, data: 'delete-me-' + 'z'.repeat(100) });
  }
  await coll.insertMany(docs);

  const sizeAfterInsert = getDataFileSize(tmpDir, 'compactdb', 'tombstones');

  // Delete all 100
  await coll.deleteMany({});
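  // Each delete appends a tombstone record to data.rdb; the superseded
  // records become dead space until compaction reclaims them.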

  const sizeAfterDelete = getDataFileSize(tmpDir, 'compactdb', 'tombstones');
  // The file may have been compacted during the deletes (dead > 50% threshold),
  // but the operation itself should succeed regardless of file size. After
  // deleting all docs, the file might be very small (just the header once
  // compaction has run).

  // But count is 0
  const count = await coll.countDocuments();
  expect(count).toEqual(0);
});

// ============================================================================
// Compaction: Data integrity after compaction trigger
// ============================================================================

tap.test('compaction: data file shrinks after heavy updates trigger compaction', async () => {
  const coll = db.collection('shrinktest');

  // Insert 10 documents with large payloads
  const docs = [];
  for (let i = 0; i < 10; i++) {
    docs.push({ idx: i, data: 'a'.repeat(500) });
  }
  await coll.insertMany(docs);

  const sizeAfterInsert = getDataFileSize(tmpDir, 'compactdb', 'shrinktest');

  // Update each document 20 times (creates 200 dead records vs 10 live).
  // This should trigger compaction (dead > 50% threshold).
  for (let round = 0; round < 20; round++) {
    for (let i = 0; i < 10; i++) {
      await coll.updateOne(
        { idx: i },
        { $set: { data: `round-${round}-` + 'b'.repeat(500) } }
      );
    }
  }

  // After compaction, the file should be smaller than the pre-compaction peak.
  // (We can't measure the peak exactly, but the final size should be reasonable.)
  const sizeAfterCompaction = getDataFileSize(tmpDir, 'compactdb', 'shrinktest');

  // The file should not be 20x the insert size since compaction should have run.
  // With 10 live records of ~530 bytes each, the file should be roughly that
  // plus header overhead. Without compaction it would be 210 * ~530 bytes.
  const maxExpectedSize = sizeAfterInsert * 5; // generous upper bound
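  // Back-of-envelope: un-compacted, the file would hold ~210 records of ~530
  // bytes (~111 KB), while 5x the post-insert size is only ~26 KB, so the
  // bound cleanly separates the compacted and un-compacted outcomes.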
  expect(sizeAfterCompaction).toBeLessThanOrEqual(maxExpectedSize);

  // All documents should still be readable and correct
  const count = await coll.countDocuments();
  expect(count).toEqual(10);

  for (let i = 0; i < 10; i++) {
    const doc = await coll.findOne({ idx: i });
    expect(doc).toBeTruthy();
    expect(doc!.data.startsWith('round-19-')).toBeTrue();
  }
});

// ============================================================================
// Compaction: Persistence after compaction + restart
// ============================================================================

tap.test('compaction: data survives compaction + restart', async () => {
  await client.close();
  await server.stop();

  server = new smartdb.SmartdbServer({
    socketPath: path.join(os.tmpdir(), `smartdb-compact-${Date.now()}-${Math.random().toString(36).slice(2)}.sock`),
    storage: 'file',
    storagePath: tmpDir,
  });
  await server.start();

  client = new MongoClient(server.getConnectionUri(), {
    directConnection: true,
    serverSelectionTimeoutMS: 5000,
  });
  await client.connect();
  db = client.db('compactdb');

  // Verify shrinktest data
  const coll = db.collection('shrinktest');
  const count = await coll.countDocuments();
  expect(count).toEqual(10);

  for (let i = 0; i < 10; i++) {
    const doc = await coll.findOne({ idx: i });
    expect(doc).toBeTruthy();
    expect(doc!.data.startsWith('round-19-')).toBeTrue();
  }

  // Verify growing collection
  const growing = db.collection('growing');
  const growDoc = await growing.findOne({ key: 'target' });
  expect(growDoc).toBeTruthy();
  expect(growDoc!.counter).toEqual(50);

  // Verify tombstones collection is empty
  const tombCount = await db.collection('tombstones').countDocuments();
  expect(tombCount).toEqual(0);
});

// ============================================================================
// Compaction: Mixed operations stress test
// ============================================================================

tap.test('compaction: mixed insert-update-delete stress test', async () => {
  const coll = db.collection('stress');

  // Phase 1: Insert 200 documents
  const batch = [];
  for (let i = 0; i < 200; i++) {
    batch.push({ idx: i, value: `initial-${i}`, alive: true });
  }
  await coll.insertMany(batch);

  // Phase 2: Update every even-indexed document
  for (let i = 0; i < 200; i += 2) {
    await coll.updateOne({ idx: i }, { $set: { value: `updated-${i}` } });
  }

  // Phase 3: Delete every document where idx % 3 === 0
  await coll.deleteMany({ idx: { $in: Array.from({ length: 67 }, (_, k) => k * 3) } });

  // Verify: documents where idx % 3 !== 0 should remain
  const remaining = await coll.find({}).toArray();
  for (const doc of remaining) {
    expect(doc.idx % 3).not.toEqual(0);
    if (doc.idx % 2 === 0) {
      expect(doc.value).toEqual(`updated-${doc.idx}`);
    } else {
      expect(doc.value).toEqual(`initial-${doc.idx}`);
    }
  }

  // Count should be 200 - 67 = 133
  const count = await coll.countDocuments();
  expect(count).toEqual(133);
});

// ============================================================================
// Cleanup
// ============================================================================

tap.test('compaction: cleanup', async () => {
  await client.close();
  await server.stop();
  cleanTmpDir(tmpDir);
});

export default tap.start();

@@ -0,0 +1,91 @@
import { expect, tap } from '@git.zone/tstest/tapbundle';
import * as smartdb from '../ts/index.js';
import { MongoClient, Db } from 'mongodb';
import * as fs from 'fs';
import * as os from 'os';
import * as path from 'path';

let tmpDir: string;
let localDb: smartdb.LocalSmartDb;
let client: MongoClient;
let db: Db;
let dataPath: string;
let corruptedSize: number;

function makeTmpDir(): string {
  return fs.mkdtempSync(path.join(os.tmpdir(), 'smartdb-crash-test-'));
}

function cleanTmpDir(dir: string): void {
  if (fs.existsSync(dir)) {
    fs.rmSync(dir, { recursive: true, force: true });
  }
}

async function startAndConnect(): Promise<void> {
  localDb = new smartdb.LocalSmartDb({ folderPath: tmpDir });
  const info = await localDb.start();
  client = new MongoClient(info.connectionUri, {
    directConnection: true,
    serverSelectionTimeoutMS: 5000,
  });
  await client.connect();
  db = client.db('crashtest');
}

tap.test('crash-recovery: create baseline data', async () => {
  tmpDir = makeTmpDir();
  await startAndConnect();

  await db.collection('docs').insertMany([
    { key: 'a', value: 1 },
    { key: 'b', value: 2 },
    { key: 'c', value: 3 },
  ]);

  await client.close();
  await localDb.stop();

  dataPath = path.join(tmpDir, 'crashtest', 'docs', 'data.rdb');
  expect(fs.existsSync(dataPath)).toBeTrue();
});

tap.test('crash-recovery: append a torn final record', async () => {
  const data = fs.readFileSync(dataPath);
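  // Take 30 bytes from early in the file (offset 64 is assumed to sit inside
  // the first full record, past the file header) and re-append them so the
  // file ends in a partial record that cannot pass the engine's CRC checks,
  // as if the process died mid-write.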
  const partialRecord = data.subarray(64, 94);
  expect(partialRecord.length).toEqual(30);

  fs.appendFileSync(dataPath, partialRecord);
  corruptedSize = fs.statSync(dataPath).size;
  expect(corruptedSize).toEqual(data.length + partialRecord.length);
});

tap.test('crash-recovery: restart truncates invalid tail and preserves valid records', async () => {
  await startAndConnect();

  const repairedSize = fs.statSync(dataPath).size;
  expect(repairedSize < corruptedSize).toBeTrue();

  const docs = await db.collection('docs').find({}).sort({ key: 1 }).toArray();
  expect(docs.map(doc => doc.key)).toEqual(['a', 'b', 'c']);
});

tap.test('crash-recovery: future writes remain durable after tail repair', async () => {
  await db.collection('docs').insertOne({ key: 'd', value: 4 });
  expect(await db.collection('docs').countDocuments()).toEqual(4);

  await client.close();
  await localDb.stop();

  await startAndConnect();
  const docs = await db.collection('docs').find({}).sort({ key: 1 }).toArray();
  expect(docs.map(doc => doc.key)).toEqual(['a', 'b', 'c', 'd']);
});

tap.test('crash-recovery: cleanup', async () => {
  await client.close();
  await localDb.stop();
  cleanTmpDir(tmpDir);
});

export default tap.start();

@@ -0,0 +1,191 @@
import { expect, tap } from '@git.zone/tstest/tapbundle';
import * as smartdb from '../ts/index.js';
import { MongoClient, Db } from 'mongodb';
import * as fs from 'fs';
import * as path from 'path';
import * as os from 'os';

// ---------------------------------------------------------------------------
// Test: Deletes persist across restart (tombstone + hint staleness detection)
// Covers: append_tombstone to data.rdb, hint file data_file_size tracking,
// stale hint detection on restart
// ---------------------------------------------------------------------------

let tmpDir: string;
let localDb: smartdb.LocalSmartDb;
let client: MongoClient;
let db: Db;

function makeTmpDir(): string {
  return fs.mkdtempSync(path.join(os.tmpdir(), 'smartdb-delete-test-'));
}

function cleanTmpDir(dir: string): void {
  if (fs.existsSync(dir)) {
    fs.rmSync(dir, { recursive: true, force: true });
  }
}

// ============================================================================
// Setup
// ============================================================================

tap.test('setup: start local db and insert documents', async () => {
  tmpDir = makeTmpDir();
  localDb = new smartdb.LocalSmartDb({ folderPath: tmpDir });
  const info = await localDb.start();
  client = new MongoClient(info.connectionUri, {
    directConnection: true,
    serverSelectionTimeoutMS: 5000,
  });
  await client.connect();
  db = client.db('deletetest');

  const coll = db.collection('items');
  await coll.insertMany([
    { name: 'keep-1', value: 100 },
    { name: 'keep-2', value: 200 },
    { name: 'delete-me', value: 999 },
    { name: 'keep-3', value: 300 },
  ]);
  const count = await coll.countDocuments();
  expect(count).toEqual(4);
});

// ============================================================================
// Delete and verify
// ============================================================================

tap.test('delete-persistence: delete a document', async () => {
  const coll = db.collection('items');
  const result = await coll.deleteOne({ name: 'delete-me' });
  expect(result.deletedCount).toEqual(1);

  const remaining = await coll.countDocuments();
  expect(remaining).toEqual(3);

  const deleted = await coll.findOne({ name: 'delete-me' });
  expect(deleted).toBeNull();
});

// ============================================================================
// Graceful restart: delete survives
// ============================================================================

tap.test('delete-persistence: graceful stop and restart', async () => {
  await client.close();
  await localDb.stop(); // graceful — writes hint file
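  // The hint file lets the engine rebuild its in-memory KeyDir on the next
  // start without scanning all of data.rdb.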

  localDb = new smartdb.LocalSmartDb({ folderPath: tmpDir });
  const info = await localDb.start();
  client = new MongoClient(info.connectionUri, {
    directConnection: true,
    serverSelectionTimeoutMS: 5000,
  });
  await client.connect();
  db = client.db('deletetest');
});

tap.test('delete-persistence: deleted doc stays deleted after graceful restart', async () => {
  const coll = db.collection('items');
  const count = await coll.countDocuments();
  expect(count).toEqual(3);

  const deleted = await coll.findOne({ name: 'delete-me' });
  expect(deleted).toBeNull();

  // The remaining docs are intact
  const keep1 = await coll.findOne({ name: 'keep-1' });
  expect(keep1).toBeTruthy();
  expect(keep1!.value).toEqual(100);
});

// ============================================================================
// Simulate ungraceful restart: delete after hint write, then restart
// The hint file data_file_size check should detect the stale hint
// ============================================================================

tap.test('delete-persistence: insert and delete more docs, then restart', async () => {
  const coll = db.collection('items');

  // Insert a new doc
  await coll.insertOne({ name: 'temporary', value: 777 });
  expect(await coll.countDocuments()).toEqual(4);

  // Delete it
  await coll.deleteOne({ name: 'temporary' });
  expect(await coll.countDocuments()).toEqual(3);

  const gone = await coll.findOne({ name: 'temporary' });
  expect(gone).toBeNull();
});

tap.test('delete-persistence: stop and restart again', async () => {
  await client.close();
  await localDb.stop();

  localDb = new smartdb.LocalSmartDb({ folderPath: tmpDir });
  const info = await localDb.start();
  client = new MongoClient(info.connectionUri, {
    directConnection: true,
    serverSelectionTimeoutMS: 5000,
  });
  await client.connect();
  db = client.db('deletetest');
});

tap.test('delete-persistence: all deletes survived second restart', async () => {
  const coll = db.collection('items');
  const count = await coll.countDocuments();
  expect(count).toEqual(3);

  // Both deletes are permanent
  expect(await coll.findOne({ name: 'delete-me' })).toBeNull();
  expect(await coll.findOne({ name: 'temporary' })).toBeNull();

  // Survivors intact
  const names = (await coll.find({}).toArray()).map(d => d.name).sort();
  expect(names).toEqual(['keep-1', 'keep-2', 'keep-3']);
});

// ============================================================================
// Delete all docs and verify empty after restart
// ============================================================================

tap.test('delete-persistence: delete all remaining docs', async () => {
  const coll = db.collection('items');
  await coll.deleteMany({});
  expect(await coll.countDocuments()).toEqual(0);
});

tap.test('delete-persistence: restart with empty collection', async () => {
  await client.close();
  await localDb.stop();

  localDb = new smartdb.LocalSmartDb({ folderPath: tmpDir });
  const info = await localDb.start();
  client = new MongoClient(info.connectionUri, {
    directConnection: true,
    serverSelectionTimeoutMS: 5000,
  });
  await client.connect();
  db = client.db('deletetest');
});

tap.test('delete-persistence: collection is empty after restart', async () => {
  const coll = db.collection('items');
  const count = await coll.countDocuments();
  expect(count).toEqual(0);
});

// ============================================================================
// Cleanup
// ============================================================================

tap.test('delete-persistence: cleanup', async () => {
  await client.close();
  await localDb.stop();
  cleanTmpDir(tmpDir);
});

export default tap.start();

@@ -0,0 +1,394 @@
import { expect, tap } from '@git.zone/tstest/tapbundle';
import * as smartdb from '../ts/index.js';
import { MongoClient, Db } from 'mongodb';
import * as fs from 'fs';
import * as path from 'path';
import * as os from 'os';

// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------

let tmpDir: string;
let server: smartdb.SmartdbServer;
let client: MongoClient;
let db: Db;

function makeTmpDir(): string {
  return fs.mkdtempSync(path.join(os.tmpdir(), 'smartdb-test-'));
}

function cleanTmpDir(dir: string): void {
  if (fs.existsSync(dir)) {
    fs.rmSync(dir, { recursive: true, force: true });
  }
}

// ============================================================================
// File Storage: Startup
// ============================================================================

tap.test('file-storage: should start server with file storage', async () => {
  tmpDir = makeTmpDir();
  server = new smartdb.SmartdbServer({
    port: 27118,
    storage: 'file',
    storagePath: tmpDir,
  });
  await server.start();
  expect(server.running).toBeTrue();
});

tap.test('file-storage: should connect MongoClient', async () => {
  client = new MongoClient('mongodb://127.0.0.1:27118', {
    directConnection: true,
    serverSelectionTimeoutMS: 5000,
  });
  await client.connect();
  db = client.db('filetest');
  expect(db).toBeTruthy();
});

// ============================================================================
// File Storage: Data files are created on disk
// ============================================================================

tap.test('file-storage: inserting creates data files on disk', async () => {
  const coll = db.collection('diskcheck');
  await coll.insertOne({ name: 'disk-test', value: 42 });

  // The storage directory should now contain a database directory
  const dbDir = path.join(tmpDir, 'filetest');
  expect(fs.existsSync(dbDir)).toBeTrue();

  // Collection directory with data.rdb should exist
  const collDir = path.join(dbDir, 'diskcheck');
  expect(fs.existsSync(collDir)).toBeTrue();

  const dataFile = path.join(collDir, 'data.rdb');
  expect(fs.existsSync(dataFile)).toBeTrue();

  // data.rdb should have the SMARTDB magic header
  const header = Buffer.alloc(8);
  const fd = fs.openSync(dataFile, 'r');
  fs.readSync(fd, header, 0, 8, 0);
  fs.closeSync(fd);
  expect(header.toString('ascii')).toEqual('SMARTDB\0');
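  // (A fixed magic header cheaply identifies smartdb data files before any
  // record parsing; per-record integrity is assumed to be covered by the
  // engine's CRC32 checks.)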
});

// ============================================================================
// File Storage: Full CRUD cycle
// ============================================================================

tap.test('file-storage: insertOne returns valid id', async () => {
  const coll = db.collection('crud');
  const result = await coll.insertOne({ name: 'Alice', age: 30 });
  expect(result.acknowledged).toBeTrue();
  expect(result.insertedId).toBeTruthy();
});

tap.test('file-storage: insertMany returns all ids', async () => {
  const coll = db.collection('crud');
  const result = await coll.insertMany([
    { name: 'Bob', age: 25 },
    { name: 'Charlie', age: 35 },
    { name: 'Diana', age: 28 },
    { name: 'Eve', age: 32 },
  ]);
  expect(result.insertedCount).toEqual(4);
});

tap.test('file-storage: findOne retrieves correct document', async () => {
  const coll = db.collection('crud');
  const doc = await coll.findOne({ name: 'Alice' });
  expect(doc).toBeTruthy();
  expect(doc!.name).toEqual('Alice');
  expect(doc!.age).toEqual(30);
});

tap.test('file-storage: find with filter returns correct subset', async () => {
  const coll = db.collection('crud');
  const docs = await coll.find({ age: { $gte: 30 } }).toArray();
  expect(docs.length).toEqual(3); // Alice(30), Charlie(35), Eve(32)
  expect(docs.every(d => d.age >= 30)).toBeTrue();
});

tap.test('file-storage: updateOne modifies document', async () => {
  const coll = db.collection('crud');
  const result = await coll.updateOne(
    { name: 'Alice' },
    { $set: { age: 31, updated: true } }
  );
  expect(result.modifiedCount).toEqual(1);

  const doc = await coll.findOne({ name: 'Alice' });
  expect(doc!.age).toEqual(31);
  expect(doc!.updated).toBeTrue();
});

tap.test('file-storage: deleteOne removes document', async () => {
  const coll = db.collection('crud');
  const result = await coll.deleteOne({ name: 'Eve' });
  expect(result.deletedCount).toEqual(1);

  const doc = await coll.findOne({ name: 'Eve' });
  expect(doc).toBeNull();
});

tap.test('file-storage: count reflects current state', async () => {
  const coll = db.collection('crud');
  const count = await coll.countDocuments();
  expect(count).toEqual(4); // 5 inserted - 1 deleted = 4
});

// ============================================================================
// File Storage: Persistence across server restart
// ============================================================================

tap.test('file-storage: stop server for restart test', async () => {
  await client.close();
  await server.stop();
  expect(server.running).toBeFalse();
});

tap.test('file-storage: restart server with same data path', async () => {
  server = new smartdb.SmartdbServer({
    port: 27118,
    storage: 'file',
    storagePath: tmpDir,
  });
  await server.start();
  expect(server.running).toBeTrue();

  client = new MongoClient('mongodb://127.0.0.1:27118', {
    directConnection: true,
    serverSelectionTimeoutMS: 5000,
  });
  await client.connect();
  db = client.db('filetest');
});

tap.test('file-storage: data persists after restart', async () => {
  const coll = db.collection('crud');

  // Alice should still be there with updated age
  const alice = await coll.findOne({ name: 'Alice' });
  expect(alice).toBeTruthy();
  expect(alice!.age).toEqual(31);
  expect(alice!.updated).toBeTrue();

  // Bob, Charlie, Diana should be there
  const bob = await coll.findOne({ name: 'Bob' });
  expect(bob).toBeTruthy();
  expect(bob!.age).toEqual(25);

  const charlie = await coll.findOne({ name: 'Charlie' });
  expect(charlie).toBeTruthy();

  const diana = await coll.findOne({ name: 'Diana' });
  expect(diana).toBeTruthy();

  // Eve should still be deleted
  const eve = await coll.findOne({ name: 'Eve' });
  expect(eve).toBeNull();
});

tap.test('file-storage: count is correct after restart', async () => {
  const coll = db.collection('crud');
  const count = await coll.countDocuments();
  expect(count).toEqual(4);
});

tap.test('file-storage: can write new data after restart', async () => {
  const coll = db.collection('crud');
  const result = await coll.insertOne({ name: 'Frank', age: 45 });
  expect(result.acknowledged).toBeTrue();

  const doc = await coll.findOne({ name: 'Frank' });
  expect(doc).toBeTruthy();
  expect(doc!.age).toEqual(45);

  const count = await coll.countDocuments();
  expect(count).toEqual(5);
});

// ============================================================================
// File Storage: Multiple collections in same database
// ============================================================================

tap.test('file-storage: multiple collections are independent', async () => {
  const products = db.collection('products');
  const orders = db.collection('orders');

  await products.insertMany([
    { sku: 'A001', name: 'Widget', price: 9.99 },
    { sku: 'A002', name: 'Gadget', price: 19.99 },
  ]);

  await orders.insertMany([
    { orderId: 1, sku: 'A001', qty: 3 },
    { orderId: 2, sku: 'A002', qty: 1 },
    { orderId: 3, sku: 'A001', qty: 2 },
  ]);

  const productCount = await products.countDocuments();
  const orderCount = await orders.countDocuments();
  expect(productCount).toEqual(2);
  expect(orderCount).toEqual(3);

  // Deleting from one collection doesn't affect the other
  await products.deleteOne({ sku: 'A001' });
  expect(await products.countDocuments()).toEqual(1);
  expect(await orders.countDocuments()).toEqual(3);
});

// ============================================================================
// File Storage: Multiple databases
// ============================================================================

tap.test('file-storage: multiple databases are independent', async () => {
  const db2 = client.db('filetest2');
  const coll2 = db2.collection('items');

  await coll2.insertOne({ name: 'cross-db-test', source: 'db2' });

  // db2 has 1 doc
  const count2 = await coll2.countDocuments();
  expect(count2).toEqual(1);

  // original db is unaffected
  const crudCount = await db.collection('crud').countDocuments();
  expect(crudCount).toEqual(5);

  await db2.dropDatabase();
});

// ============================================================================
// File Storage: Large batch insert and retrieval
// ============================================================================

tap.test('file-storage: bulk insert 1000 documents', async () => {
  const coll = db.collection('bulk');
  const docs = [];
  for (let i = 0; i < 1000; i++) {
    docs.push({ index: i, data: `value-${i}`, timestamp: Date.now() });
  }
  const result = await coll.insertMany(docs);
  expect(result.insertedCount).toEqual(1000);
});

tap.test('file-storage: find all 1000 documents', async () => {
  const coll = db.collection('bulk');
  const docs = await coll.find({}).toArray();
  expect(docs.length).toEqual(1000);
});

tap.test('file-storage: range query on 1000 documents', async () => {
  const coll = db.collection('bulk');
  const docs = await coll.find({ index: { $gte: 500, $lt: 600 } }).toArray();
  expect(docs.length).toEqual(100);
  expect(docs.every(d => d.index >= 500 && d.index < 600)).toBeTrue();
});

tap.test('file-storage: sorted retrieval with limit', async () => {
  const coll = db.collection('bulk');
  const docs = await coll.find({}).sort({ index: -1 }).limit(10).toArray();
  expect(docs.length).toEqual(10);
  expect(docs[0].index).toEqual(999);
  expect(docs[9].index).toEqual(990);
});

// ============================================================================
// File Storage: Update many and verify persistence
// ============================================================================

tap.test('file-storage: updateMany on bulk collection', async () => {
  const coll = db.collection('bulk');
  const result = await coll.updateMany(
    { index: { $lt: 100 } },
    { $set: { batch: 'first-hundred' } }
  );
  expect(result.modifiedCount).toEqual(100);

  const updated = await coll.find({ batch: 'first-hundred' }).toArray();
  expect(updated.length).toEqual(100);
});

// ============================================================================
// File Storage: Delete many and verify
// ============================================================================

tap.test('file-storage: deleteMany removes correct documents', async () => {
  const coll = db.collection('bulk');
  const result = await coll.deleteMany({ index: { $gte: 900 } });
  expect(result.deletedCount).toEqual(100);

  const remaining = await coll.countDocuments();
  expect(remaining).toEqual(900);
});

// ============================================================================
// File Storage: Persistence of bulk data across restart
// ============================================================================

tap.test('file-storage: stop server for bulk restart test', async () => {
  await client.close();
  await server.stop();
  expect(server.running).toBeFalse();
});

tap.test('file-storage: restart and verify bulk data', async () => {
  server = new smartdb.SmartdbServer({
    port: 27118,
    storage: 'file',
    storagePath: tmpDir,
  });
  await server.start();

  client = new MongoClient('mongodb://127.0.0.1:27118', {
    directConnection: true,
    serverSelectionTimeoutMS: 5000,
  });
  await client.connect();
  db = client.db('filetest');

  const coll = db.collection('bulk');
  const count = await coll.countDocuments();
  expect(count).toEqual(900);

  // Verify the updateMany persisted
  const firstHundred = await coll.find({ batch: 'first-hundred' }).toArray();
  expect(firstHundred.length).toEqual(100);

  // Verify deleted docs are gone
  const over900 = await coll.find({ index: { $gte: 900 } }).toArray();
  expect(over900.length).toEqual(0);
});

// ============================================================================
// File Storage: Index persistence
// ============================================================================

tap.test('file-storage: default indexes.json exists on disk', async () => {
  // The indexes.json is created when the collection is first created,
  // containing the default _id_ index spec.
  const indexFile = path.join(tmpDir, 'filetest', 'crud', 'indexes.json');
  expect(fs.existsSync(indexFile)).toBeTrue();

  const indexData = JSON.parse(fs.readFileSync(indexFile, 'utf-8'));
  const names = indexData.map((i: any) => i.name);
  expect(names).toContain('_id_');
});

// ============================================================================
// Cleanup
// ============================================================================

tap.test('file-storage: cleanup', async () => {
  await client.close();
  await server.stop();
  expect(server.running).toBeFalse();
  cleanTmpDir(tmpDir);
});

export default tap.start();

@@ -0,0 +1,126 @@
import { expect, tap } from '@git.zone/tstest/tapbundle';
import * as smartdb from '../ts/index.js';
import { MongoClient, Db } from 'mongodb';
import * as fs from 'fs';
import * as path from 'path';
import * as os from 'os';

// ---------------------------------------------------------------------------
// Test: Missing data.rdb header recovery + startup logging
// Covers: ensure_data_header, BuildStats, info-level startup logging
// ---------------------------------------------------------------------------

let tmpDir: string;
let localDb: smartdb.LocalSmartDb;
let client: MongoClient;
let db: Db;

function makeTmpDir(): string {
  return fs.mkdtempSync(path.join(os.tmpdir(), 'smartdb-header-test-'));
}

function cleanTmpDir(dir: string): void {
  if (fs.existsSync(dir)) {
    fs.rmSync(dir, { recursive: true, force: true });
  }
}

// ============================================================================
// Setup: create data, then corrupt it
// ============================================================================

tap.test('setup: start, insert data, stop', async () => {
  tmpDir = makeTmpDir();
  localDb = new smartdb.LocalSmartDb({ folderPath: tmpDir });
  const info = await localDb.start();
  client = new MongoClient(info.connectionUri, {
    directConnection: true,
    serverSelectionTimeoutMS: 5000,
  });
  await client.connect();
  db = client.db('headertest');

  const coll = db.collection('docs');
  await coll.insertMany([
    { key: 'a', val: 1 },
    { key: 'b', val: 2 },
    { key: 'c', val: 3 },
  ]);

  await client.close();
  await localDb.stop();
});

// ============================================================================
// Delete hint file and restart: should rebuild from data.rdb scan
// ============================================================================

tap.test('header-recovery: delete hint file and restart', async () => {
  // Find and delete the hint file
  const dbDir = path.join(tmpDir, 'headertest', 'docs');
  const hintPath = path.join(dbDir, 'keydir.hint');
  if (fs.existsSync(hintPath)) {
    fs.unlinkSync(hintPath);
  }
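  // Deleting keydir.hint simulates a shutdown where the hint was never
  // written (e.g. a crash), forcing the engine to rebuild the KeyDir from a
  // full data.rdb scan on the next start.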

  localDb = new smartdb.LocalSmartDb({ folderPath: tmpDir });
  const info = await localDb.start();
  client = new MongoClient(info.connectionUri, {
    directConnection: true,
    serverSelectionTimeoutMS: 5000,
  });
  await client.connect();
  db = client.db('headertest');
});

tap.test('header-recovery: data intact after hint deletion', async () => {
  const coll = db.collection('docs');
  const count = await coll.countDocuments();
  expect(count).toEqual(3);

  const a = await coll.findOne({ key: 'a' });
  expect(a!.val).toEqual(1);
});

// ============================================================================
// Write new data after restart, stop, restart again
// ============================================================================

tap.test('header-recovery: write after hint-less restart', async () => {
  const coll = db.collection('docs');
  await coll.insertOne({ key: 'd', val: 4 });
  expect(await coll.countDocuments()).toEqual(4);
});

tap.test('header-recovery: restart and verify all data', async () => {
  await client.close();
  await localDb.stop();

  localDb = new smartdb.LocalSmartDb({ folderPath: tmpDir });
  const info = await localDb.start();
  client = new MongoClient(info.connectionUri, {
    directConnection: true,
    serverSelectionTimeoutMS: 5000,
  });
  await client.connect();
  db = client.db('headertest');

  const coll = db.collection('docs');
  const count = await coll.countDocuments();
  expect(count).toEqual(4);

  const keys = (await coll.find({}).toArray()).map(d => d.key).sort();
  expect(keys).toEqual(['a', 'b', 'c', 'd']);
});

// ============================================================================
// Cleanup
// ============================================================================

tap.test('header-recovery: cleanup', async () => {
  await client.close();
  await localDb.stop();
  cleanTmpDir(tmpDir);
});

export default tap.start();

@@ -0,0 +1,235 @@
import { expect, tap } from '@git.zone/tstest/tapbundle';
import * as smartdb from '../ts/index.js';
import { MongoClient, Db } from 'mongodb';
import * as fs from 'fs';
import * as path from 'path';
import * as os from 'os';

// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------

let tmpDir: string;
let localDb: smartdb.LocalSmartDb;
let client: MongoClient;
let db: Db;

function makeTmpDir(): string {
  return fs.mkdtempSync(path.join(os.tmpdir(), 'smartdb-local-test-'));
}

function cleanTmpDir(dir: string): void {
  if (fs.existsSync(dir)) {
    fs.rmSync(dir, { recursive: true, force: true });
  }
}

// ============================================================================
// LocalSmartDb: Lifecycle
// ============================================================================

tap.test('localsmartdb: should start with just a folder path', async () => {
  tmpDir = makeTmpDir();
  localDb = new smartdb.LocalSmartDb({ folderPath: tmpDir });
  const info = await localDb.start();

  expect(localDb.running).toBeTrue();
  expect(info.socketPath).toBeTruthy();
  expect(info.connectionUri).toBeTruthy();
  expect(info.connectionUri.startsWith('mongodb://')).toBeTrue();
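  // Note: for a Unix socket the MongoDB driver expects the socket path to be
  // URL-encoded inside the URI (e.g. mongodb://%2Ftmp%2F...sock); the exact
  // shape of connectionUri is up to LocalSmartDb.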
});

tap.test('localsmartdb: should connect via returned connectionUri', async () => {
  const info = localDb.getConnectionInfo();
  client = new MongoClient(info.connectionUri, {
    directConnection: true,
    serverSelectionTimeoutMS: 5000,
  });
  await client.connect();
  db = client.db('localtest');
  expect(db).toBeTruthy();
});

tap.test('localsmartdb: should reject double start', async () => {
  let threw = false;
  try {
    await localDb.start();
  } catch {
    threw = true;
  }
  expect(threw).toBeTrue();
});

// ============================================================================
// LocalSmartDb: CRUD via Unix socket
// ============================================================================

tap.test('localsmartdb: insert and find documents', async () => {
  const coll = db.collection('notes');
  await coll.insertMany([
    { title: 'Note 1', body: 'First note', priority: 1 },
    { title: 'Note 2', body: 'Second note', priority: 2 },
    { title: 'Note 3', body: 'Third note', priority: 3 },
  ]);

  const all = await coll.find({}).toArray();
  expect(all.length).toEqual(3);

  const high = await coll.findOne({ priority: 3 });
  expect(high).toBeTruthy();
  expect(high!.title).toEqual('Note 3');
});

tap.test('localsmartdb: update and verify', async () => {
  const coll = db.collection('notes');
  await coll.updateOne(
    { title: 'Note 2' },
    { $set: { body: 'Updated second note', edited: true } }
  );

  const doc = await coll.findOne({ title: 'Note 2' });
  expect(doc!.body).toEqual('Updated second note');
  expect(doc!.edited).toBeTrue();
});

tap.test('localsmartdb: delete and verify', async () => {
  const coll = db.collection('notes');
  await coll.deleteOne({ title: 'Note 1' });

  const count = await coll.countDocuments();
  expect(count).toEqual(2);

  const deleted = await coll.findOne({ title: 'Note 1' });
  expect(deleted).toBeNull();
});

// ============================================================================
// LocalSmartDb: Persistence across restart
// ============================================================================

tap.test('localsmartdb: stop for restart', async () => {
  await client.close();
  await localDb.stop();
  expect(localDb.running).toBeFalse();
});

tap.test('localsmartdb: restart with same folder', async () => {
  localDb = new smartdb.LocalSmartDb({ folderPath: tmpDir });
  const info = await localDb.start();
  expect(localDb.running).toBeTrue();

  client = new MongoClient(info.connectionUri, {
    directConnection: true,
    serverSelectionTimeoutMS: 5000,
  });
  await client.connect();
  db = client.db('localtest');
});

tap.test('localsmartdb: data persists after restart', async () => {
  const coll = db.collection('notes');

  const count = await coll.countDocuments();
  expect(count).toEqual(2); // 3 inserted - 1 deleted

  const note2 = await coll.findOne({ title: 'Note 2' });
  expect(note2!.body).toEqual('Updated second note');
  expect(note2!.edited).toBeTrue();

  const note3 = await coll.findOne({ title: 'Note 3' });
  expect(note3!.priority).toEqual(3);
});

// ============================================================================
// LocalSmartDb: Custom socket path
// ============================================================================

tap.test('localsmartdb: works with custom socket path', async () => {
  await client.close();
  await localDb.stop();

  const customSocket = path.join(os.tmpdir(), `smartdb-custom-${Date.now()}.sock`);
  const tmpDir2 = makeTmpDir();
  const localDb2 = new smartdb.LocalSmartDb({
    folderPath: tmpDir2,
    socketPath: customSocket,
  });

  const info = await localDb2.start();
  expect(info.socketPath).toEqual(customSocket);

  const client2 = new MongoClient(info.connectionUri, {
    directConnection: true,
    serverSelectionTimeoutMS: 5000,
  });
  await client2.connect();
  const testDb = client2.db('customsock');
  await testDb.collection('test').insertOne({ x: 1 });
  const doc = await testDb.collection('test').findOne({ x: 1 });
  expect(doc).toBeTruthy();

  await client2.close();
  await localDb2.stop();
  cleanTmpDir(tmpDir2);

  // Reconnect original for remaining tests
  localDb = new smartdb.LocalSmartDb({ folderPath: tmpDir });
  const origInfo = await localDb.start();
  client = new MongoClient(origInfo.connectionUri, {
    directConnection: true,
    serverSelectionTimeoutMS: 5000,
  });
  await client.connect();
  db = client.db('localtest');
});

// ============================================================================
// LocalSmartDb: getConnectionUri and getServer helpers
// ============================================================================

tap.test('localsmartdb: getConnectionUri returns valid uri', async () => {
  const uri = localDb.getConnectionUri();
  expect(uri.startsWith('mongodb://')).toBeTrue();
});

tap.test('localsmartdb: getServer returns the SmartdbServer', async () => {
  const srv = localDb.getServer();
  expect(srv).toBeTruthy();
  expect(srv.running).toBeTrue();
});

// ============================================================================
// LocalSmartDb: Data isolation between databases
// ============================================================================

tap.test('localsmartdb: databases are isolated', async () => {
  const dbA = client.db('isoA');
  const dbB = client.db('isoB');

  await dbA.collection('shared').insertOne({ source: 'A', val: 1 });
  await dbB.collection('shared').insertOne({ source: 'B', val: 2 });

  const docsA = await dbA.collection('shared').find({}).toArray();
  const docsB = await dbB.collection('shared').find({}).toArray();

  expect(docsA.length).toEqual(1);
  expect(docsA[0].source).toEqual('A');
  expect(docsB.length).toEqual(1);
  expect(docsB[0].source).toEqual('B');

  await dbA.dropDatabase();
  await dbB.dropDatabase();
});

// ============================================================================
// Cleanup
// ============================================================================

tap.test('localsmartdb: cleanup', async () => {
  await client.close();
  await localDb.stop();
  expect(localDb.running).toBeFalse();
  cleanTmpDir(tmpDir);
});

export default tap.start();

@@ -0,0 +1,269 @@
import { expect, tap } from '@git.zone/tstest/tapbundle';
import * as smartdb from '../ts/index.js';
import { MongoClient, Db } from 'mongodb';
import * as fs from 'fs';
import * as path from 'path';
import * as os from 'os';

// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------

let tmpDir: string;

function makeTmpDir(): string {
  return fs.mkdtempSync(path.join(os.tmpdir(), 'smartdb-migration-test-'));
}

function cleanTmpDir(dir: string): void {
  if (fs.existsSync(dir)) {
    fs.rmSync(dir, { recursive: true, force: true });
  }
}

/**
 * Create a v0 (legacy JSON) storage layout:
 *   {base}/{db}/{coll}.json
 *   {base}/{db}/{coll}.indexes.json
 */
function createV0Layout(basePath: string, dbName: string, collName: string, docs: any[]): void {
  const dbDir = path.join(basePath, dbName);
  fs.mkdirSync(dbDir, { recursive: true });

  // Convert docs to the extended JSON format that the old Rust engine wrote:
  // ObjectId is stored as { "$oid": "hex" }
  const jsonDocs = docs.map(doc => {
    const clone = { ...doc };
    if (!clone._id) {
      // Generate a fake ObjectId-like hex string
      const hex = [...Array(24)].map(() => Math.floor(Math.random() * 16).toString(16)).join('');
      clone._id = { '$oid': hex };
    }
    return clone;
  });

  const collPath = path.join(dbDir, `${collName}.json`);
  fs.writeFileSync(collPath, JSON.stringify(jsonDocs, null, 2));

  const indexPath = path.join(dbDir, `${collName}.indexes.json`);
  fs.writeFileSync(indexPath, JSON.stringify([
    { name: '_id_', key: { _id: 1 } },
  ], null, 2));
}

// ============================================================================
// Migration: v0 → v1 basic
// ============================================================================

tap.test('migration: detects v0 format and migrates on startup', async () => {
  tmpDir = makeTmpDir();

  // Create v0 layout with test data
  createV0Layout(tmpDir, 'mydb', 'users', [
    { name: 'Alice', age: 30, email: 'alice@test.com' },
    { name: 'Bob', age: 25, email: 'bob@test.com' },
    { name: 'Charlie', age: 35, email: 'charlie@test.com' },
  ]);

  createV0Layout(tmpDir, 'mydb', 'products', [
    { sku: 'W001', name: 'Widget', price: 9.99 },
    { sku: 'G001', name: 'Gadget', price: 19.99 },
  ]);

  // Verify v0 files exist
  expect(fs.existsSync(path.join(tmpDir, 'mydb', 'users.json'))).toBeTrue();
  expect(fs.existsSync(path.join(tmpDir, 'mydb', 'products.json'))).toBeTrue();

  // Start server — migration should run automatically
  const server = new smartdb.SmartdbServer({
    socketPath: path.join(os.tmpdir(), `smartdb-mig-${Date.now()}-${Math.random().toString(36).slice(2)}.sock`),
    storage: 'file',
    storagePath: tmpDir,
  });
  await server.start();

  // v1 directories should now exist
  expect(fs.existsSync(path.join(tmpDir, 'mydb', 'users', 'data.rdb'))).toBeTrue();
  expect(fs.existsSync(path.join(tmpDir, 'mydb', 'products', 'data.rdb'))).toBeTrue();

  // v0 files should still exist (not deleted)
  expect(fs.existsSync(path.join(tmpDir, 'mydb', 'users.json'))).toBeTrue();
  expect(fs.existsSync(path.join(tmpDir, 'mydb', 'products.json'))).toBeTrue();

  // Connect and verify data is accessible
  const client = new MongoClient(server.getConnectionUri(), {
    directConnection: true,
    serverSelectionTimeoutMS: 5000,
  });
  await client.connect();
  const db = client.db('mydb');

  // Users collection
  const users = await db.collection('users').find({}).toArray();
  expect(users.length).toEqual(3);
  const alice = users.find(u => u.name === 'Alice');
  expect(alice).toBeTruthy();
  expect(alice!.age).toEqual(30);
  expect(alice!.email).toEqual('alice@test.com');

  // Products collection
  const products = await db.collection('products').find({}).toArray();
  expect(products.length).toEqual(2);
  const widget = products.find(p => p.sku === 'W001');
  expect(widget).toBeTruthy();
  expect(widget!.price).toEqual(9.99);

  await client.close();
  await server.stop();
});

// ============================================================================
// Migration: migrated data survives another restart
// ============================================================================

tap.test('migration: migrated data persists across restart', async () => {
  const server = new smartdb.SmartdbServer({
    socketPath: path.join(os.tmpdir(), `smartdb-mig-${Date.now()}-${Math.random().toString(36).slice(2)}.sock`),
    storage: 'file',
    storagePath: tmpDir,
  });
  await server.start();

  const client = new MongoClient(server.getConnectionUri(), {
    directConnection: true,
    serverSelectionTimeoutMS: 5000,
  });
  await client.connect();
  const db = client.db('mydb');

  const users = await db.collection('users').find({}).toArray();
  expect(users.length).toEqual(3);

  const products = await db.collection('products').find({}).toArray();
  expect(products.length).toEqual(2);

  await client.close();
  await server.stop();
});

// ============================================================================
// Migration: can write new data after migration
// ============================================================================

tap.test('migration: new writes work after migration', async () => {
  const server = new smartdb.SmartdbServer({
    socketPath: path.join(os.tmpdir(), `smartdb-mig-${Date.now()}-${Math.random().toString(36).slice(2)}.sock`),
    storage: 'file',
    storagePath: tmpDir,
  });
  await server.start();

  const client = new MongoClient(server.getConnectionUri(), {
    directConnection: true,
    serverSelectionTimeoutMS: 5000,
  });
  await client.connect();
  const db = client.db('mydb');

  // Insert new documents
  await db.collection('users').insertOne({ name: 'Diana', age: 28 });
  const count = await db.collection('users').countDocuments();
  expect(count).toEqual(4);

  // Update existing migrated document
  await db.collection('users').updateOne(
    { name: 'Alice' },
    { $set: { age: 31 } }
  );
  const alice = await db.collection('users').findOne({ name: 'Alice' });
  expect(alice!.age).toEqual(31);

  // Delete a migrated document
  await db.collection('products').deleteOne({ sku: 'G001' });
  const prodCount = await db.collection('products').countDocuments();
  expect(prodCount).toEqual(1);

  await client.close();
  await server.stop();
  cleanTmpDir(tmpDir);
});

// ============================================================================
// Migration: skips already-migrated data
// ============================================================================

tap.test('migration: no-op for v1 format', async () => {
  tmpDir = makeTmpDir();

  // Start fresh to create v1 layout
  const server = new smartdb.SmartdbServer({
    socketPath: path.join(os.tmpdir(), `smartdb-mig-${Date.now()}-${Math.random().toString(36).slice(2)}.sock`),
    storage: 'file',
    storagePath: tmpDir,
  });
  await server.start();

  const client = new MongoClient(server.getConnectionUri(), {
    directConnection: true,
    serverSelectionTimeoutMS: 5000,
  });
  await client.connect();
  const db = client.db('v1test');
  await db.collection('items').insertOne({ x: 1 });
  await client.close();
  await server.stop();

  // Restart — migration should detect v1 and skip
  const server2 = new smartdb.SmartdbServer({
    socketPath: path.join(os.tmpdir(), `smartdb-mig-${Date.now()}-${Math.random().toString(36).slice(2)}.sock`),
    storage: 'file',
    storagePath: tmpDir,
  });
  await server2.start();

  const client2 = new MongoClient(server2.getConnectionUri(), {
    directConnection: true,
    serverSelectionTimeoutMS: 5000,
  });
  await client2.connect();
  const db2 = client2.db('v1test');
  const doc = await db2.collection('items').findOne({ x: 1 });
  expect(doc).toBeTruthy();

  await client2.close();
  await server2.stop();
  cleanTmpDir(tmpDir);
});

// ============================================================================
// Migration: empty storage is handled gracefully
// ============================================================================
|
||||
|
||||
tap.test('migration: empty storage directory works', async () => {
|
||||
tmpDir = makeTmpDir();
|
||||
|
||||
const server = new smartdb.SmartdbServer({
|
||||
socketPath: path.join(os.tmpdir(), `smartdb-mig-${Date.now()}-${Math.random().toString(36).slice(2)}.sock`),
|
||||
storage: 'file',
|
||||
storagePath: tmpDir,
|
||||
});
|
||||
await server.start();
|
||||
|
||||
const client = new MongoClient(server.getConnectionUri(), {
|
||||
directConnection: true,
|
||||
serverSelectionTimeoutMS: 5000,
|
||||
});
|
||||
await client.connect();
|
||||
|
||||
// Should work fine with empty storage
|
||||
const db = client.db('emptytest');
|
||||
await db.collection('first').insertOne({ hello: 'world' });
|
||||
const doc = await db.collection('first').findOne({ hello: 'world' });
|
||||
expect(doc).toBeTruthy();
|
||||
|
||||
await client.close();
|
||||
await server.stop();
|
||||
cleanTmpDir(tmpDir);
|
||||
});
|
||||
|
||||
export default tap.start();
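
// For reference: a minimal sketch of the createV0Layout helper used above,
// inferred only from its call sites here and from the v0 layout the migrator
// reads ({db}/{coll}.json holding a JSON array of documents). Illustrative,
// not this file's actual helper:
//
//   function createV0Layout(baseDir: string, dbName: string, collName: string, docs: object[]): void {
//     const dbDir = path.join(baseDir, dbName);
//     fs.mkdirSync(dbDir, { recursive: true });
//     fs.writeFileSync(path.join(dbDir, `${collName}.json`), JSON.stringify(docs, null, 2));
//   }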

+83
-1
@@ -1,6 +1,6 @@
import { expect, tap } from '@git.zone/tstest/tapbundle';
import * as smartdb from '../ts/index.js';
import { MongoClient, Db, Collection } from 'mongodb';
import { MongoClient, Db, Collection, ObjectId } from 'mongodb';

let server: smartdb.SmartdbServer;
let client: MongoClient;
@@ -252,6 +252,71 @@ tap.test('smartdb: update - upsert creates new document', async () => {
  expect(inserted!.email).toEqual('new@example.com');
});

tap.test('smartdb: update - aggregation pipeline updateOne', async () => {
  const collection = db.collection('users');
  await collection.insertOne({ name: 'PipelineUser', source: 'alpha', legacy: true, visits: 2 });

  const result = await collection.updateOne(
    { name: 'PipelineUser' },
    [
      { $set: { sourceCopy: '$source', pipelineStatus: 'updated' } },
      { $unset: ['legacy'] },
    ]
  );

  expect(result.matchedCount).toEqual(1);
  expect(result.modifiedCount).toEqual(1);

  const updated = await collection.findOne({ name: 'PipelineUser' });
  expect(updated).toBeTruthy();
  expect(updated!.sourceCopy).toEqual('alpha');
  expect(updated!.pipelineStatus).toEqual('updated');
  expect(updated!.legacy).toBeUndefined();
});

tap.test('smartdb: update - aggregation pipeline upsert', async () => {
  const collection = db.collection('users');
  const result = await collection.updateOne(
    { name: 'PipelineUpsert' },
    [
      { $set: { email: 'pipeline@example.com', status: 'new', mirroredName: '$name' } },
    ],
    { upsert: true }
  );

  expect(result.upsertedCount).toEqual(1);

  const inserted = await collection.findOne({ name: 'PipelineUpsert' });
  expect(inserted).toBeTruthy();
  expect(inserted!.email).toEqual('pipeline@example.com');
  expect(inserted!.status).toEqual('new');
  expect(inserted!.mirroredName).toEqual('PipelineUpsert');
});

tap.test('smartdb: update - cannot modify immutable _id through pipeline', async () => {
  const collection = db.collection('users');
  const inserted = await collection.insertOne({ name: 'ImmutableIdUser' });

  let threw = false;
  try {
    await collection.updateOne(
      { _id: inserted.insertedId },
      [
        { $set: { _id: new ObjectId() } },
      ]
    );
  } catch (err: any) {
    threw = true;
    expect(err.code).toEqual(66);
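    // (66 is MongoDB's ImmutableField error code.)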
  }

  expect(threw).toBeTrue();

  const persisted = await collection.findOne({ _id: inserted.insertedId });
  expect(persisted).toBeTruthy();
  expect(persisted!.name).toEqual('ImmutableIdUser');
});

// ============================================================================
// Cursor Tests
// ============================================================================
@@ -306,6 +371,23 @@ tap.test('smartdb: findOneAndUpdate - returns updated document', async () => {
  expect(result!.status).toEqual('active');
});

tap.test('smartdb: findOneAndUpdate - supports aggregation pipeline updates', async () => {
  const collection = db.collection('users');
  await collection.insertOne({ name: 'PipelineFindAndModify', sourceName: 'Finder' });

  const result = await collection.findOneAndUpdate(
    { name: 'PipelineFindAndModify' },
    [
      { $set: { displayName: '$sourceName', mode: 'pipeline' } },
    ],
    { returnDocument: 'after' }
  );

  expect(result).toBeTruthy();
  expect(result!.displayName).toEqual('Finder');
  expect(result!.mode).toEqual('pipeline');
});

tap.test('smartdb: findOneAndDelete - returns deleted document', async () => {
  const collection = db.collection('users');

@@ -0,0 +1,82 @@
import { expect, tap } from '@git.zone/tstest/tapbundle';
import * as smartdb from '../ts/index.js';
import * as fs from 'fs';
import * as net from 'net';
import * as path from 'path';
import * as os from 'os';

// ---------------------------------------------------------------------------
// Test: Stale socket cleanup on startup
// Covers: LocalSmartDb.cleanStaleSockets(), isSocketAlive()
// ---------------------------------------------------------------------------

function makeTmpDir(): string {
  return fs.mkdtempSync(path.join(os.tmpdir(), 'smartdb-socket-test-'));
}

function cleanTmpDir(dir: string): void {
  if (fs.existsSync(dir)) {
    fs.rmSync(dir, { recursive: true, force: true });
  }
}

// ============================================================================
// Stale socket cleanup: active sockets are preserved
// ============================================================================

tap.test('stale-sockets: does not remove active sockets', async () => {
  const tmpDir = makeTmpDir();
  const activeSocketPath = path.join(os.tmpdir(), `smartdb-active-${Date.now()}.sock`);

  // Create an active socket (server still listening)
  const activeServer = net.createServer();
  await new Promise<void>((resolve) => activeServer.listen(activeSocketPath, resolve));

  expect(fs.existsSync(activeSocketPath)).toBeTrue();

  // Start LocalSmartDb — should NOT remove the active socket
  const localDb = new smartdb.LocalSmartDb({ folderPath: tmpDir });
  await localDb.start();

  expect(fs.existsSync(activeSocketPath)).toBeTrue();

  // Cleanup
  await localDb.stop();
  await new Promise<void>((resolve) => activeServer.close(() => resolve()));
  try { fs.unlinkSync(activeSocketPath); } catch {}
  cleanTmpDir(tmpDir);
});

// ============================================================================
// Stale socket cleanup: startup works with no stale sockets
// ============================================================================

tap.test('stale-sockets: startup works cleanly with no stale sockets', async () => {
  const tmpDir = makeTmpDir();
  const localDb = new smartdb.LocalSmartDb({ folderPath: tmpDir });
  const info = await localDb.start();
  expect(localDb.running).toBeTrue();
  expect(info.socketPath).toBeTruthy();
  await localDb.stop();
  cleanTmpDir(tmpDir);
});

// ============================================================================
// Stale socket cleanup: the socket file for the current instance is cleaned on stop
// ============================================================================

tap.test('stale-sockets: own socket file is removed on stop', async () => {
  const tmpDir = makeTmpDir();
  const localDb = new smartdb.LocalSmartDb({ folderPath: tmpDir });
  const info = await localDb.start();

  expect(fs.existsSync(info.socketPath)).toBeTrue();

  await localDb.stop();

  // Socket file should be gone after graceful stop
  expect(fs.existsSync(info.socketPath)).toBeFalse();
  cleanTmpDir(tmpDir);
});

export default tap.start();

@@ -0,0 +1,171 @@
import { expect, tap } from '@git.zone/tstest/tapbundle';
import * as smartdb from '../ts/index.js';
import { MongoClient } from 'mongodb';
import * as fs from 'fs';
import * as net from 'net';
import * as os from 'os';
import * as path from 'path';

// Static test-only CA and server certificate. The private key is intentionally
// non-secret test fixture material and must not be reused outside tests.
const CA_PEM = `-----BEGIN CERTIFICATE-----
MIIDFTCCAf2gAwIBAgIUXQlk6FLuWELDKLw9KXi0UIYmU50wDQYJKoZIhvcNAQEL
BQAwGjEYMBYGA1UEAwwPU21hcnREQiBUZXN0IENBMB4XDTI2MDQyOTIxMjYxNFoX
DTM2MDQyNjIxMjYxNFowGjEYMBYGA1UEAwwPU21hcnREQiBUZXN0IENBMIIBIjAN
BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEApnRgZvodreKEKkSodwgDe2JKsA3N
GC4c7dmqmOBRQst0OYRoW0kjHnzCVHoGlMTAnjJWXRayPeJCroSA0WhEZIjgHAjW
FuWIr+MUYdCG7czdbDEqZYGsrBDUwv+ydgsDNhLKtbfVfcJckdmFp+TT+Po3sf8o
u5AfOlcjhM22reBLhZJ2FfM2IbqygRbBxNvU3tH5E1kgu2CpYieXQsmqBwkOPM0S
fgkCjlqFeeqV7Jjdq1P6srIItzg6n8/5KGBTxc7VB11WxVAZMIxnOtwpOCpSjbiy
jymBLKvyZxklWGpG9HT6RzUTdp0WpwnO7FlbYqD97jrbwA7PfhbJVUkTeQIDAQAB
o1MwUTAdBgNVHQ4EFgQUaqFWiFvibBYpJjluNW4XlocmqOQwHwYDVR0jBBgwFoAU
aqFWiFvibBYpJjluNW4XlocmqOQwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0B
AQsFAAOCAQEAdbmRCxeHwfq6Mw0BRXWYM81xrzDMDBwLkIyaVkBJXCEX4Ybj8QHv
tplNqgQae1Hr1qYyNzkivDI/hPnvv/wDsAnT8Wz0/udPpcASTXC03xhRtFXwBSGq
2GtLa53cZHJLoGu1S2ntM6Xo3gropXSx/+LIfefsQvqRO/5WxRrEE10OiFr19rA7
md0nD6zXdwrMRghu6ACuxX6Ext6QJbTL4r1UGbHg2a9UbdBjcb8sfFPLyEjiLpBK
DYvRjddKOwbOpFPoLwmed59Pa6bcqT9NnkRHL+aXUm3M3HfVhNKae7JJShUmCzdx
rbKNJQAUp/mMHnBOSxYS7aqgwBKCiKtP4A==
-----END CERTIFICATE-----
`;

const SERVER_CERT_PEM = `-----BEGIN CERTIFICATE-----
MIIDPTCCAiWgAwIBAgIUMfuX4VHvVJ8Vo6o1U2+f7MHU7dowDQYJKoZIhvcNAQEL
BQAwGjEYMBYGA1UEAwwPU21hcnREQiBUZXN0IENBMB4XDTI2MDQyOTIxMjYxNFoX
DTM2MDQyNjIxMjYxNFowFDESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG
9w0BAQEFAAOCAQ8AMIIBCgKCAQEA5eFz1q4juQsEE7cPN5eFrLvRJW/zOMGBmiet
VTQSqVZ/3j3NBWsgxK2xQnNbEXGMlTEE11ih0cCQacc/JnbuvwOt3QX8X6oy4pmb
LMGQJEk2FgdpP6OtGqqYbt/fT7QBY39nt6z/RzxYZI7t5g/nkHnlzmzD+ila6k9b
TzBSfSmtHHKW/c6az/Dh/xe50zDgrzlBA7e5zoleKqRJFRZlDnDoLyx0EOUbbTbQ
vipMynP5bq8l6Fc0N9DAWmXvV4o2x0ZQjfEx5LTvbxNkVWtv8w9w4t4vAZqXwrXd
5OZETMWdy7ezxL0E9Snwc6sSfatlVenD/8P5hWJ/C0vCiw21RwIDAQABo4GAMH4w
GgYDVR0RBBMwEYIJbG9jYWxob3N0hwR/AAABMAsGA1UdDwQEAwIFoDATBgNVHSUE
DDAKBggrBgEFBQcDATAdBgNVHQ4EFgQUK2nSXereMZek6gxLweY1AVt9OaswHwYD
VR0jBBgwFoAUaqFWiFvibBYpJjluNW4XlocmqOQwDQYJKoZIhvcNAQELBQADggEB
AAkC6suxamn+OEmJLMqgaGCvEtFbob5pMijYC32vJNPev+bUHMOB4Oo0FyO59sX3
zfLLwk7jagbWJi37T714aSjyJwUHd4XA7McSabP4+1hOOL0NqfiE4yRnxPhlvf3E
9otoStAAJ86067DwIs5id7jYm+qrxn6bL+P1h+P1tYxnPOoD0v1cHVbtUNV2tH2E
eBhdtTbF+NHrj+oXFGI3jiI7qcwpJ9DFUo/w0sC0POY0T5aWl4ptSXVgEc7nkE91
bbPOPyoMjjZ4WhKAW5UzfOafB0bO7+4E0GHcAkBJmS4V8g5qt56nftr+d58R/odY
0hQjpoIwzl9RCEW0h8xkqMQ=
-----END CERTIFICATE-----
`;

const SERVER_KEY_PEM = `-----BEGIN PRIVATE KEY-----
MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDl4XPWriO5CwQT
tw83l4Wsu9Elb/M4wYGaJ61VNBKpVn/ePc0FayDErbFCc1sRcYyVMQTXWKHRwJBp
xz8mdu6/A63dBfxfqjLimZsswZAkSTYWB2k/o60aqphu399PtAFjf2e3rP9HPFhk
ju3mD+eQeeXObMP6KVrqT1tPMFJ9Ka0ccpb9zprP8OH/F7nTMOCvOUEDt7nOiV4q
pEkVFmUOcOgvLHQQ5RttNtC+KkzKc/luryXoVzQ30MBaZe9XijbHRlCN8THktO9v
E2RVa2/zD3Di3i8BmpfCtd3k5kRMxZ3Lt7PEvQT1KfBzqxJ9q2VV6cP/w/mFYn8L
S8KLDbVHAgMBAAECggEAAInWJR8US1cow8kOepFayUxJUZ6hAbWGUa+dGtF757Sh
qQoZBFW7ZmqHu0Gc6X4MF79dJQn6mwyp6e2DCtqFdaITEqz0ad7yrpAwilrLtSIM
w+FxkCoYejMDF2Nj2QJxbGO8gPQhRu/vvxCMoxjPcImwjZq4nMnjAiB8dMOGte9V
av/RoWUOFXqeiJHqAXiE372I4BupwYhGrSUQyuVj3SugDRbzvPepTQNRxaBJQPgy
4ZtZ8FjJdPFvlyxv6fmLFULHwPNcS6PLWPuwpj7oEQzG4/Q9ojYj4EPdpoOW7qoH
h1Y6ag1vk5A/m9DjvMhIDzmUJmq8mlldxqbCBpH0+QKBgQD3Eh7F0ZXdLQe/aG5t
ul9hTv68NZa5M0JzJinB6WjXl2s0bUgIvAE9ZmfUYHs8AMvTu4YwJqsrpMuzFOT9
Ct5wBSyFbPzVOt9MYE1Gipxx8RfEMSq7Sp0MjarX3h0Va8ry83NWzrN1CvyP8BQq
CuXo/IislCDgPg0uXhLD/7GsWQKBgQDuMEptldCKtpW6CdLdYih6xh0j1mdGU4Kb
7mTzo3OU3nDnGXGhqvJt/xpksPl7GPRHYQ1dqRzvLKHDtTJqhkedZBnE6A94LkVl
uNJnR8v4PkR9nKKg0uK2ug9VcfSiXUpl2yyYiDc123WjHdwH2U6BV3smb/7KwEvv
FWaP7PO6nwKBgAE2w5PxPa1ChWE5YCGF4uYVf0bpdH4gdFkgfOAJB4zXn504VDxG
wDLPB/+RIcnfryCxMS2XYwvp2V5d4eokXYdrXxagvHVHvsUfTAHmuHIO3zEFlNIq
wa7IG2jIHJh4WRzseUqZ5WPT0/3ZDiBOwWZtpzZB3A99/o6Vw73WycaxAoGAHTeR
OaYB4bIJ5bskwYEz4/N/SZEYM/k0cTop6fTnzaAHi2GEncchW7rKGwXWZHIoLMVL
5WxEH1aDNUV5vLVh/X1058FrfFt4qcSlEoQtEfNZZWscS8vygWWLUfjbgDsfUCU1
cDRtSU71PCACiHfweE8pzQo539b8uYQPg6IWN5MCgYA6z/kvGiBB9xFBUAJPsj+w
XW/UGbn7svZaCob+N5RA9Rs/0idv/bO2nAauZyHG/nn6HXII6U5pmRyVqWKhI22q
K3J0LCP42Zb6/eYzQPbP1jWHCMaL2QJQGsl4NMZixlnNJV0aG/5CButqzSC/cMbG
DX0n+YqqWmCgHWU2csnlAA==
-----END PRIVATE KEY-----
`;

let server: smartdb.SmartdbServer;
let client: MongoClient;
let tmpDir: string;
let caPath: string;
let certPath: string;
let keyPath: string;
let port: number;

function makeTmpDir(): string {
  return fs.mkdtempSync(path.join(os.tmpdir(), 'smartdb-tls-test-'));
}

function cleanTmpDir(dir: string): void {
  if (fs.existsSync(dir)) {
    fs.rmSync(dir, { recursive: true, force: true });
  }
}

async function getFreePort(): Promise<number> {
  return await new Promise((resolve, reject) => {
    const probe = net.createServer();
    probe.once('error', reject);
    probe.listen(0, '127.0.0.1', () => {
      const address = probe.address();
      if (!address || typeof address === 'string') {
        probe.close(() => reject(new Error('Failed to allocate TCP port')));
        return;
      }
      probe.close(() => resolve(address.port));
    });
  });
}

tap.test('tls: should start server with TLS enabled', async () => {
  tmpDir = makeTmpDir();
  port = await getFreePort();
  caPath = path.join(tmpDir, 'ca.pem');
  certPath = path.join(tmpDir, 'server.pem');
  keyPath = path.join(tmpDir, 'server.key');

  fs.writeFileSync(caPath, CA_PEM);
  fs.writeFileSync(certPath, SERVER_CERT_PEM);
  fs.writeFileSync(keyPath, SERVER_KEY_PEM, { mode: 0o600 });

  server = new smartdb.SmartdbServer({
    port,
    tls: {
      enabled: true,
      certPath,
      keyPath,
    },
  });
  await server.start();

  expect(server.running).toBeTrue();
  expect(server.getConnectionUri()).toEqual(`mongodb://127.0.0.1:${port}/?tls=true`);
});

tap.test('tls: should connect with official MongoClient and CA validation', async () => {
  client = new MongoClient(server.getConnectionUri(), {
    directConnection: true,
    serverSelectionTimeoutMS: 5000,
    tlsCAFile: caPath,
  });
  await client.connect();

  const ping = await client.db('admin').command({ ping: 1 });
  expect(ping.ok).toEqual(1);
});

tap.test('tls: should support CRUD over encrypted transport', async () => {
  const collection = client.db('tlsdb').collection('notes');
  const inserted = await collection.insertOne({ title: 'encrypted transport' });
  expect(inserted.acknowledged).toBeTrue();

  const doc = await collection.findOne({ _id: inserted.insertedId });
  expect(doc).toBeTruthy();
  expect(doc!.title).toEqual('encrypted transport');
});

tap.test('tls: cleanup', async () => {
  await client.close();
  await server.stop();
  expect(server.running).toBeFalse();
  cleanTmpDir(tmpDir);
});

export default tap.start();

@@ -0,0 +1,115 @@
import { expect, tap } from '@git.zone/tstest/tapbundle';
import * as smartdb from '../ts/index.js';
import { MongoClient } from 'mongodb';
import * as net from 'net';

let server: smartdb.SmartdbServer;
let client: MongoClient;
let port: number;

async function getFreePort(): Promise<number> {
  return await new Promise((resolve, reject) => {
    const probe = net.createServer();
    probe.once('error', reject);
    probe.listen(0, '127.0.0.1', () => {
      const address = probe.address();
      if (!address || typeof address === 'string') {
        probe.close(() => reject(new Error('Failed to allocate TCP port')));
        return;
      }
      probe.close(() => resolve(address.port));
    });
  });
}

tap.test('transactions: should start server and connect', async () => {
  port = await getFreePort();
  server = new smartdb.SmartdbServer({ port });
  await server.start();

  client = new MongoClient(`mongodb://127.0.0.1:${port}`, {
    directConnection: true,
    serverSelectionTimeoutMS: 5000,
  });
  await client.connect();
  expect(server.running).toBeTrue();
});

tap.test('transactions: should still support explicit sessions', async () => {
  const result = await client.db('admin').command({ startSession: 1 });
  expect(result.ok).toEqual(1);
  expect(result.id).toBeTruthy();

  const end = await client.db('admin').command({ endSessions: [result.id] });
  expect(end.ok).toEqual(1);
});

tap.test('transactions: should reject raw transaction-scoped writes before mutation', async () => {
  const db = client.db('txntest');
  const coll = db.collection('docs');
  await coll.insertOne({ key: 'outside', value: 1 });

  let threw = false;
  try {
    await db.command({
      insert: 'docs',
      documents: [{ key: 'inside-raw', value: 2 }],
      startTransaction: true,
      autocommit: false,
    });
  } catch (err: any) {
    threw = true;
    expect(err.code).toEqual(20);
    expect(err.codeName).toEqual('IllegalOperation');
  }
  expect(threw).toBeTrue();

  expect(await coll.countDocuments({ key: 'inside-raw' })).toEqual(0);
  expect(await coll.countDocuments({ key: 'outside' })).toEqual(1);
});

tap.test('transactions: official driver transaction should fail without committing writes', async () => {
  const coll = client.db('txntest').collection('driverdocs');
  await coll.insertOne({ key: 'outside-driver', value: 0 });
  const session = client.startSession();

  let threw = false;
  try {
    session.startTransaction();
    await coll.insertOne({ key: 'inside-driver', value: 1 }, { session });
    await session.commitTransaction();
  } catch (err: any) {
    threw = true;
    expect(err.code).toEqual(20);
    expect(err.codeName).toEqual('IllegalOperation');
    await session.abortTransaction().catch(() => undefined);
  } finally {
    await session.endSession();
  }

  expect(threw).toBeTrue();
  expect(await coll.countDocuments({ key: 'inside-driver' })).toEqual(0);
  expect(await coll.countDocuments({ key: 'outside-driver' })).toEqual(1);
});

tap.test('transactions: commit and abort commands should be explicit unsupported errors', async () => {
  for (const command of [{ commitTransaction: 1 }, { abortTransaction: 1 }]) {
    let threw = false;
    try {
      await client.db('admin').command(command);
    } catch (err: any) {
      threw = true;
      expect(err.code).toEqual(20);
      expect(err.codeName).toEqual('IllegalOperation');
    }
    expect(threw).toBeTrue();
  }
});

tap.test('transactions: cleanup', async () => {
  await client.close();
  await server.stop();
  expect(server.running).toBeFalse();
});

export default tap.start();

@@ -0,0 +1,180 @@
import { expect, tap } from '@git.zone/tstest/tapbundle';
import * as smartdb from '../ts/index.js';
import { MongoClient, Db } from 'mongodb';
import * as fs from 'fs';
import * as path from 'path';
import * as os from 'os';

// ---------------------------------------------------------------------------
// Test: Unique index enforcement via wire protocol
// Covers: unique index pre-check, createIndexes persistence, index restoration
// ---------------------------------------------------------------------------

let tmpDir: string;
let localDb: smartdb.LocalSmartDb;
let client: MongoClient;
let db: Db;

function makeTmpDir(): string {
  return fs.mkdtempSync(path.join(os.tmpdir(), 'smartdb-unique-test-'));
}

function cleanTmpDir(dir: string): void {
  if (fs.existsSync(dir)) {
    fs.rmSync(dir, { recursive: true, force: true });
  }
}

// ============================================================================
// Setup
// ============================================================================

tap.test('setup: start local db', async () => {
  tmpDir = makeTmpDir();
  localDb = new smartdb.LocalSmartDb({ folderPath: tmpDir });
  const info = await localDb.start();
  client = new MongoClient(info.connectionUri, {
    directConnection: true,
    serverSelectionTimeoutMS: 5000,
  });
  await client.connect();
  db = client.db('uniquetest');
});

// ============================================================================
// Unique index enforcement on insert
// ============================================================================

tap.test('unique-index: createIndex with unique: true', async () => {
  const coll = db.collection('users');
  await coll.insertOne({ email: 'alice@example.com', name: 'Alice' });
  const indexName = await coll.createIndex({ email: 1 }, { unique: true });
  expect(indexName).toBeTruthy();
});

tap.test('unique-index: reject duplicate on insertOne', async () => {
  const coll = db.collection('users');
  let threw = false;
  try {
    await coll.insertOne({ email: 'alice@example.com', name: 'Alice2' });
  } catch (err: any) {
    threw = true;
    expect(err.code).toEqual(11000);
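    // (11000 is MongoDB's duplicate key error code, the classic E11000.)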
  }
  expect(threw).toBeTrue();

  // Verify only 1 document exists
  const count = await coll.countDocuments();
  expect(count).toEqual(1);
});

tap.test('unique-index: allow insert with different unique value', async () => {
  const coll = db.collection('users');
  await coll.insertOne({ email: 'bob@example.com', name: 'Bob' });
  const count = await coll.countDocuments();
  expect(count).toEqual(2);
});

// ============================================================================
// Unique index enforcement on update
// ============================================================================

tap.test('unique-index: reject duplicate on updateOne that changes unique field', async () => {
  const coll = db.collection('users');
  let threw = false;
  try {
    await coll.updateOne(
      { email: 'bob@example.com' },
      { $set: { email: 'alice@example.com' } }
    );
  } catch (err: any) {
    threw = true;
    expect(err.code).toEqual(11000);
  }
  expect(threw).toBeTrue();

  // Bob's email should be unchanged
  const bob = await coll.findOne({ name: 'Bob' });
  expect(bob!.email).toEqual('bob@example.com');
});

tap.test('unique-index: allow update that keeps same unique value', async () => {
  const coll = db.collection('users');
  await coll.updateOne(
    { email: 'bob@example.com' },
    { $set: { name: 'Robert' } }
  );
  const bob = await coll.findOne({ email: 'bob@example.com' });
  expect(bob!.name).toEqual('Robert');
});

// ============================================================================
// Unique index enforcement on upsert
// ============================================================================

tap.test('unique-index: reject duplicate on upsert insert', async () => {
  const coll = db.collection('users');
  let threw = false;
  try {
    await coll.updateOne(
      { email: 'new@example.com' },
      { $set: { email: 'alice@example.com', name: 'Imposter' } },
      { upsert: true }
    );
  } catch (err: any) {
    threw = true;
  }
  expect(threw).toBeTrue();
});

// ============================================================================
// Unique index survives restart (persistence + restoration)
// ============================================================================

tap.test('unique-index: stop and restart', async () => {
  await client.close();
  await localDb.stop();

  localDb = new smartdb.LocalSmartDb({ folderPath: tmpDir });
  const info = await localDb.start();
  client = new MongoClient(info.connectionUri, {
    directConnection: true,
    serverSelectionTimeoutMS: 5000,
  });
  await client.connect();
  db = client.db('uniquetest');
});

tap.test('unique-index: enforcement persists after restart', async () => {
  const coll = db.collection('users');

  // Data should still be there
  const count = await coll.countDocuments();
  expect(count).toEqual(2);

  // Unique constraint should still be enforced without calling createIndex again
  let threw = false;
  try {
    await coll.insertOne({ email: 'alice@example.com', name: 'Alice3' });
  } catch (err: any) {
    threw = true;
    expect(err.code).toEqual(11000);
  }
  expect(threw).toBeTrue();

  // Count unchanged
  const countAfter = await coll.countDocuments();
  expect(countAfter).toEqual(2);
});

// ============================================================================
// Cleanup
// ============================================================================

tap.test('unique-index: cleanup', async () => {
  await client.close();
  await localDb.stop();
  cleanTmpDir(tmpDir);
});

export default tap.start();

@@ -3,6 +3,6 @@
 */
export const commitinfo = {
  name: '@push.rocks/smartdb',
  version: '2.1.0',
  version: '2.7.1',
  description: 'A MongoDB-compatible embedded database server with wire protocol support, backed by a high-performance Rust engine.'
}

@@ -7,6 +7,9 @@ export * from './ts_smartdb/index.js';
export { LocalSmartDb } from './ts_local/index.js';
export type { ILocalSmartDbOptions, ILocalSmartDbConnectionInfo } from './ts_local/index.js';

// Export migration
export { StorageMigrator } from './ts_migration/index.js';

// Export commitinfo
export { commitinfo };

@@ -1,7 +1,10 @@
import * as crypto from 'crypto';
import * as fs from 'fs/promises';
import * as net from 'net';
import * as path from 'path';
import * as os from 'os';
import { SmartdbServer } from '../ts_smartdb/index.js';
import { StorageMigrator } from '../ts_migration/index.js';

/**
 * Connection information returned by LocalSmartDb.start()
@@ -65,6 +68,55 @@ export class LocalSmartDb {
    return path.join(os.tmpdir(), `smartdb-${randomId}.sock`);
  }

  /**
   * Check if a Unix socket is alive by attempting to connect.
   */
  private static isSocketAlive(socketPath: string): Promise<boolean> {
    return new Promise((resolve) => {
      const client = net.createConnection({ path: socketPath }, () => {
        client.destroy();
        resolve(true);
      });
      client.on('error', () => {
        resolve(false);
      });
      client.setTimeout(500, () => {
        client.destroy();
        resolve(false);
      });
    });
  }

  /**
   * Remove stale smartdb-*.sock files from /tmp.
   * A socket is considered stale if connecting to it fails.
   */
  private static async cleanStaleSockets(): Promise<void> {
    const tmpDir = os.tmpdir();
    let entries: string[];
    try {
      entries = await fs.readdir(tmpDir);
    } catch {
      return;
    }
    const socketFiles = entries.filter(
      (f) => f.startsWith('smartdb-') && f.endsWith('.sock')
    );
    for (const name of socketFiles) {
      const fullPath = path.join(tmpDir, name);
      try {
        const stat = await fs.stat(fullPath);
        if (!stat.isSocket()) continue;
        const alive = await LocalSmartDb.isSocketAlive(fullPath);
        if (!alive) {
          await fs.unlink(fullPath);
        }
      } catch {
        // File may have been removed already; ignore
      }
    }
  }

  /**
   * Start the local SmartDB server and return connection info
   */
@@ -73,6 +125,13 @@ export class LocalSmartDb {
      throw new Error('LocalSmartDb is already running');
    }

    // Clean up stale sockets from previous crashed instances
    await LocalSmartDb.cleanStaleSockets();

    // Run storage migration before starting the Rust engine
    const migrator = new StorageMigrator(this.options.folderPath);
    await migrator.run();

    // Use provided socket path or generate one
    this.generatedSocketPath = this.options.socketPath ?? this.generateSocketPath();

@@ -0,0 +1,93 @@
import * as fs from 'fs';
import * as path from 'path';
import { migrateV0ToV1 } from './migrators/v0_to_v1.js';

/**
 * Detected storage format version.
 * - v0: Legacy JSON format ({db}/{coll}.json files)
 * - v1: Bitcask binary format ({db}/{coll}/data.rdb directories)
 */
type TStorageVersion = 0 | 1;

/**
 * StorageMigrator — runs before the Rust engine starts.
 *
 * Detects the current storage format version and runs the appropriate
 * migration chain. The Rust engine only knows the current format (v1).
 *
 * Migration is safe: original files are never modified or deleted.
 * On success, a console hint is printed about which old files can be removed.
 */
export class StorageMigrator {
  private storagePath: string;

  constructor(storagePath: string) {
    this.storagePath = storagePath;
  }

  /**
   * Run any needed migrations. Safe to call even if storage is already current.
   */
  async run(): Promise<void> {
    if (!fs.existsSync(this.storagePath)) {
      return; // No data yet — nothing to migrate
    }

    const version = this.detectVersion();

    if (version === 1) {
      return; // Already current
    }

    if (version === 0) {
      console.log(`[smartdb] Detected v0 (JSON) storage format at ${this.storagePath}`);
      console.log(`[smartdb] Running migration v0 → v1 (Bitcask binary format)...`);

      const deletableFiles = await migrateV0ToV1(this.storagePath);

      if (deletableFiles.length > 0) {
        console.log(`[smartdb] Migration v0 → v1 complete.`);
        console.log(`[smartdb] The following old files can be safely deleted:`);
        for (const f of deletableFiles) {
          console.log(`[smartdb]   ${f}`);
        }
      } else {
        console.log(`[smartdb] Migration v0 → v1 complete. No old files to clean up.`);
      }
    }
  }

  /**
   * Detect the storage format version by inspecting the directory structure.
   *
   * v0: {db}/{coll}.json files exist
   * v1: {db}/{coll}/data.rdb directories exist
   */
  private detectVersion(): TStorageVersion {
    const entries = fs.readdirSync(this.storagePath, { withFileTypes: true });

    for (const entry of entries) {
      if (!entry.isDirectory()) continue;

      const dbDir = path.join(this.storagePath, entry.name);
      const dbEntries = fs.readdirSync(dbDir, { withFileTypes: true });

      for (const dbEntry of dbEntries) {
        // v1: subdirectory with data.rdb
        if (dbEntry.isDirectory()) {
          const dataRdb = path.join(dbDir, dbEntry.name, 'data.rdb');
          if (fs.existsSync(dataRdb)) {
            return 1;
          }
        }
        // v0: .json file (not .indexes.json)
        if (dbEntry.isFile() && dbEntry.name.endsWith('.json') && !dbEntry.name.endsWith('.indexes.json')) {
          return 0;
        }
      }
    }

    // Empty or unrecognized — treat as v1 (fresh start)
    return 1;
  }
}

@@ -0,0 +1 @@
export { StorageMigrator } from './classes.storagemigrator.js';

@@ -0,0 +1,253 @@
import * as fs from 'fs';
import * as path from 'path';
import * as crypto from 'crypto';
import { BSON } from 'bson';

// ---------------------------------------------------------------------------
// Binary format constants (must match Rust: record.rs)
// ---------------------------------------------------------------------------

/** File-level magic: "SMARTDB\0" */
const FILE_MAGIC = Buffer.from('SMARTDB\0', 'ascii');
/** Current format version */
const FORMAT_VERSION = 1;
/** File type tags */
const FILE_TYPE_DATA = 1;
const FILE_TYPE_HINT = 3;
/** File header total size */
const FILE_HEADER_SIZE = 64;
/** Per-record magic */
const RECORD_MAGIC = 0xDB01;
/** Per-record header size */
const RECORD_HEADER_SIZE = 22; // 2 + 8 + 4 + 4 + 4
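// Resulting record layout (derived from encodeDataRecord below):
//   bytes  0..2   u16  RECORD_MAGIC (0xDB01)
//   bytes  2..10  u64  timestamp (ms since epoch)
//   bytes 10..14  u32  key length
//   bytes 14..18  u32  value length
//   bytes 18..22  u32  CRC32 of the header (minus this field) + key + value
//   bytes 22..    key bytes, then BSON value bytes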

// ---------------------------------------------------------------------------
// Binary encoding helpers
// ---------------------------------------------------------------------------

function writeFileHeader(fileType: number): Buffer {
  const buf = Buffer.alloc(FILE_HEADER_SIZE, 0);
  FILE_MAGIC.copy(buf, 0);
  buf.writeUInt16LE(FORMAT_VERSION, 8);
  buf.writeUInt8(fileType, 10);
  buf.writeUInt32LE(0, 11); // flags
  const now = BigInt(Date.now());
  buf.writeBigUInt64LE(now, 15);
  // bytes 23..64 are reserved (zeros)
  return buf;
}

function encodeDataRecord(timestamp: bigint, key: Buffer, value: Buffer): Buffer {
  const keyLen = key.length;
  const valLen = value.length;
  const totalSize = RECORD_HEADER_SIZE + keyLen + valLen;
  const buf = Buffer.alloc(totalSize);

  // Write header fields (without CRC)
  buf.writeUInt16LE(RECORD_MAGIC, 0);
  buf.writeBigUInt64LE(timestamp, 2);
  buf.writeUInt32LE(keyLen, 10);
  buf.writeUInt32LE(valLen, 14);
  // CRC placeholder at offset 18..22 (will fill below)
  key.copy(buf, RECORD_HEADER_SIZE);
  value.copy(buf, RECORD_HEADER_SIZE + keyLen);

  // CRC32 covers everything except the CRC field itself:
  // bytes [0..18] + bytes [22..]
  const crc = crc32(Buffer.concat([
    buf.subarray(0, 18),
    buf.subarray(22),
  ]));
  buf.writeUInt32LE(crc, 18);

  return buf;
}

function encodeHintEntry(key: string, offset: bigint, recordLen: number, valueLen: number, timestamp: bigint): Buffer {
  const keyBuf = Buffer.from(key, 'utf-8');
  const buf = Buffer.alloc(4 + keyBuf.length + 8 + 4 + 4 + 8);
  let pos = 0;
  buf.writeUInt32LE(keyBuf.length, pos); pos += 4;
  keyBuf.copy(buf, pos); pos += keyBuf.length;
  buf.writeBigUInt64LE(offset, pos); pos += 8;
  buf.writeUInt32LE(recordLen, pos); pos += 4;
  buf.writeUInt32LE(valueLen, pos); pos += 4;
  buf.writeBigUInt64LE(timestamp, pos);
  return buf;
}
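// Hint entry wire layout, as produced above:
//   u32 keyLen | key (keyLen bytes) | u64 offset | u32 recordLen | u32 valueLen | u64 timestamp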

// ---------------------------------------------------------------------------
// CRC32 (matching crc32fast in Rust)
// ---------------------------------------------------------------------------

const CRC32_TABLE = (() => {
  const table = new Uint32Array(256);
  for (let i = 0; i < 256; i++) {
    let crc = i;
    for (let j = 0; j < 8; j++) {
      crc = (crc & 1) ? (0xEDB88320 ^ (crc >>> 1)) : (crc >>> 1);
    }
    table[i] = crc;
  }
  return table;
})();

function crc32(data: Buffer): number {
  let crc = 0xFFFFFFFF;
  for (let i = 0; i < data.length; i++) {
    crc = CRC32_TABLE[(crc ^ data[i]) & 0xFF] ^ (crc >>> 8);
  }
  return (crc ^ 0xFFFFFFFF) >>> 0;
}
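// Sanity check: this is the standard reflected CRC-32 (polynomial 0xEDB88320,
// init and final xor 0xFFFFFFFF), so the canonical check value holds:
//   crc32(Buffer.from('123456789', 'ascii')) === 0xCBF43926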

// ---------------------------------------------------------------------------
// Migration: v0 (JSON) → v1 (Bitcask binary)
// ---------------------------------------------------------------------------

interface IKeyDirEntry {
  offset: bigint;
  recordLen: number;
  valueLen: number;
  timestamp: bigint;
}

/**
 * Migrate a storage directory from v0 (JSON-per-collection) to v1 (Bitcask binary).
 *
 * - Original .json files are NOT modified or deleted.
 * - New v1 files are written into {db}/{coll}/ subdirectories.
 * - Returns a list of old files that can be safely deleted.
 * - On failure, cleans up any partial new files and throws.
 */
export async function migrateV0ToV1(storagePath: string): Promise<string[]> {
  const deletableFiles: string[] = [];
  const createdDirs: string[] = [];

  try {
    const dbEntries = fs.readdirSync(storagePath, { withFileTypes: true });

    for (const dbEntry of dbEntries) {
      if (!dbEntry.isDirectory()) continue;

      const dbDir = path.join(storagePath, dbEntry.name);
      const collFiles = fs.readdirSync(dbDir, { withFileTypes: true });

      for (const collFile of collFiles) {
        if (!collFile.isFile()) continue;
        if (!collFile.name.endsWith('.json')) continue;
        if (collFile.name.endsWith('.indexes.json')) continue;

        const collName = collFile.name.replace(/\.json$/, '');
        const jsonPath = path.join(dbDir, collFile.name);
        const indexJsonPath = path.join(dbDir, `${collName}.indexes.json`);

        // Target directory
        const collDir = path.join(dbDir, collName);
        if (fs.existsSync(collDir)) {
          // Already migrated
          continue;
        }

        console.log(`[smartdb] Migrating ${dbEntry.name}.${collName}...`);

        // Read the JSON collection
        const jsonData = fs.readFileSync(jsonPath, 'utf-8');
        const docs: any[] = JSON.parse(jsonData);

        // Create collection directory
        fs.mkdirSync(collDir, { recursive: true });
        createdDirs.push(collDir);

        // Write data.rdb
        const dataPath = path.join(collDir, 'data.rdb');
        const fd = fs.openSync(dataPath, 'w');

        try {
          // File header
          const headerBuf = writeFileHeader(FILE_TYPE_DATA);
          fs.writeSync(fd, headerBuf);

          let currentOffset = BigInt(FILE_HEADER_SIZE);
          const keydir: Map<string, IKeyDirEntry> = new Map();
          const ts = BigInt(Date.now());

          for (const doc of docs) {
            // Extract _id
            let idHex: string;
            if (doc._id && doc._id.$oid) {
              idHex = doc._id.$oid;
            } else if (typeof doc._id === 'string') {
              idHex = doc._id;
            } else if (doc._id) {
              idHex = String(doc._id);
            } else {
              // Generate a new ObjectId
              idHex = crypto.randomBytes(12).toString('hex');
              doc._id = { $oid: idHex };
            }

            // Serialize to BSON
            const bsonBytes = BSON.serialize(doc);
            const keyBuf = Buffer.from(idHex, 'utf-8');
            const valueBuf = Buffer.from(bsonBytes);

            const record = encodeDataRecord(ts, keyBuf, valueBuf);
            fs.writeSync(fd, record);

            keydir.set(idHex, {
              offset: currentOffset,
              recordLen: record.length,
              valueLen: valueBuf.length,
              timestamp: ts,
            });

            currentOffset += BigInt(record.length);
          }

          fs.fsyncSync(fd);
          fs.closeSync(fd);

          // Write keydir.hint
          const hintPath = path.join(collDir, 'keydir.hint');
          const hintFd = fs.openSync(hintPath, 'w');
          fs.writeSync(hintFd, writeFileHeader(FILE_TYPE_HINT));
          for (const [key, entry] of keydir) {
            fs.writeSync(hintFd, encodeHintEntry(key, entry.offset, entry.recordLen, entry.valueLen, entry.timestamp));
          }
          fs.fsyncSync(hintFd);
          fs.closeSync(hintFd);

        } catch (writeErr) {
          // Clean up on write failure
          try { fs.closeSync(fd); } catch {}
          throw writeErr;
        }

        // Copy indexes.json if it exists
        if (fs.existsSync(indexJsonPath)) {
          const destIndexPath = path.join(collDir, 'indexes.json');
          fs.copyFileSync(indexJsonPath, destIndexPath);
          deletableFiles.push(indexJsonPath);
        } else {
          // Write default _id index
          const destIndexPath = path.join(collDir, 'indexes.json');
          fs.writeFileSync(destIndexPath, JSON.stringify([{ name: '_id_', key: { _id: 1 } }], null, 2));
        }

        deletableFiles.push(jsonPath);

        console.log(`[smartdb] Migrated ${dbEntry.name}.${collName}: ${docs.length} documents`);
      }
    }
  } catch (err) {
    // Clean up any partially created directories
    for (const dir of createdDirs) {
      try {
        fs.rmSync(dir, { recursive: true, force: true });
      } catch {}
    }
    throw err;
  }

  return deletableFiles;
}
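// Resulting on-disk layout for one migrated collection (illustrative):
//   before: {db}/users.json              (+ optional {db}/users.indexes.json)
//   after:  {db}/users/data.rdb          file header + one record per document
//           {db}/users/keydir.hint       file header + one hint entry per key
//           {db}/users/indexes.json      copied over, or a default _id index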

@@ -2,7 +2,12 @@

// Export server (the main entry point for using SmartDB)
export { SmartdbServer } from './server/SmartdbServer.js';
export type { ISmartdbServerOptions } from './server/SmartdbServer.js';
export type {
  ISmartdbAuthOptions,
  ISmartdbAuthUser,
  ISmartdbServerOptions,
  ISmartdbTlsOptions,
} from './server/SmartdbServer.js';

// Export bridge for advanced usage
export { RustDbBridge } from './rust-db-bridge.js';

@@ -117,6 +117,24 @@ interface ISmartDbRustConfig {
  storagePath?: string;
  persistPath?: string;
  persistIntervalMs?: number;
  auth?: {
    enabled?: boolean;
    users?: Array<{
      username: string;
      password: string;
      database?: string;
      roles?: string[];
    }>;
    usersPath?: string;
    scramIterations?: number;
  };
  tls?: {
    enabled?: boolean;
    certPath?: string;
    keyPath?: string;
    caPath?: string;
    requireClientCert?: boolean;
  };
}

/**

@@ -1,4 +1,5 @@
import { RustDbBridge } from '../rust-db-bridge.js';
import { StorageMigrator } from '../../ts_migration/index.js';
import type {
  IOpLogEntry,
  IOpLogResult,
@@ -27,6 +28,32 @@ export interface ISmartdbServerOptions {
  persistPath?: string;
  /** Persistence interval in ms (default: 60000) */
  persistIntervalMs?: number;
  /** Authentication configuration. Disabled by default. */
  auth?: ISmartdbAuthOptions;
  /** TLS transport configuration for TCP listeners. Disabled by default. */
  tls?: ISmartdbTlsOptions;
}

export interface ISmartdbAuthOptions {
  enabled?: boolean;
  users?: ISmartdbAuthUser[];
  usersPath?: string;
  scramIterations?: number;
}

export interface ISmartdbAuthUser {
  username: string;
  password: string;
  database?: string;
  roles?: string[];
}

export interface ISmartdbTlsOptions {
  enabled?: boolean;
  certPath?: string;
  keyPath?: string;
  caPath?: string;
  requireClientCert?: boolean;
}

/**
@@ -63,6 +90,8 @@ export class SmartdbServer {
      storagePath: options.storagePath ?? './data',
      persistPath: options.persistPath,
      persistIntervalMs: options.persistIntervalMs ?? 60000,
      auth: options.auth,
      tls: options.tls,
    };
    this.bridge = new RustDbBridge();
  }
@@ -75,6 +104,12 @@ export class SmartdbServer {
      throw new Error('Server is already running');
    }

    // Run storage migration for file-based storage before starting Rust engine
    if (this.options.storage === 'file' && this.options.storagePath) {
      const migrator = new StorageMigrator(this.options.storagePath);
      await migrator.run();
    }

    const spawned = await this.bridge.spawn();
    if (!spawned) {
      throw new Error(
@@ -99,6 +134,8 @@ export class SmartdbServer {
      storagePath: this.options.storagePath,
      persistPath: this.options.persistPath,
      persistIntervalMs: this.options.persistIntervalMs,
      auth: this.options.auth,
      tls: this.options.tls,
    });

    this.resolvedConnectionUri = result.connectionUri;
@@ -135,7 +172,8 @@ export class SmartdbServer {
      const encodedPath = encodeURIComponent(this.options.socketPath);
      return `mongodb://${encodedPath}`;
    }
    return `mongodb://${this.options.host ?? '127.0.0.1'}:${this.options.port ?? 27017}`;
    const baseUri = `mongodb://${this.options.host ?? '127.0.0.1'}:${this.options.port ?? 27017}`;
    return this.options.tls?.enabled ? `${baseUri}/?tls=true` : baseUri;
  }

  /**

File diff suppressed because one or more lines are too long