feat(smartdb): add operation log APIs, point-in-time revert support, and a web-based debug dashboard
This commit is contained in:
@@ -32,6 +32,17 @@
|
||||
"@git.zone/tsdoc": {
|
||||
"legal": "\n## License and Legal Information\n\nThis repository contains open-source code that is licensed under the MIT License. A copy of the MIT License can be found in the [license](license) file within this repository. \n\n**Please note:** The MIT License does not grant permission to use the trade names, trademarks, service marks, or product names of the project, except as required for reasonable and customary use in describing the origin of the work and reproducing the content of the NOTICE file.\n\n### Trademarks\n\nThis project is owned and maintained by Task Venture Capital GmbH. The names and logos associated with Task Venture Capital GmbH and any related products or services are trademarks of Task Venture Capital GmbH and are not included within the scope of the MIT license granted herein. Use of these trademarks must comply with Task Venture Capital GmbH's Trademark Guidelines, and any usage must be approved in writing by Task Venture Capital GmbH.\n\n### Company Information\n\nTask Venture Capital GmbH \nRegistered at District court Bremen HRB 35230 HB, Germany\n\nFor any legal inquiries or if you require further information, please contact us via email at hello@task.vc.\n\nBy using this repository, you acknowledge that you have read this section, agree to comply with its terms, and understand that the licensing of the code does not imply endorsement by Task Venture Capital GmbH of any derivative works.\n"
|
||||
},
|
||||
"@git.zone/tsbundle": {
|
||||
"bundles": [
|
||||
{
|
||||
"from": "./ts_debugui/index.ts",
|
||||
"to": "./ts_debugserver/bundled.ts",
|
||||
"outputMode": "base64ts",
|
||||
"bundler": "esbuild",
|
||||
"includeFiles": ["./html/index.html"]
|
||||
}
|
||||
]
|
||||
},
|
||||
"@git.zone/tsrust": {
|
||||
"targets": ["linux_amd64", "linux_arm64"]
|
||||
},
|
||||
|
||||
@@ -1,5 +1,12 @@
|
||||
# Changelog
|
||||
|
||||
## 2026-04-02 - 2.1.0 - feat(smartdb)
|
||||
add operation log APIs, point-in-time revert support, and a web-based debug dashboard
|
||||
|
||||
- records insert, update, and delete operations with before/after document snapshots in the Rust oplog
|
||||
- adds management and TypeScript APIs for metrics, oplog queries, collection browsing, document browsing, and revert-to-sequence operations
|
||||
- introduces new debugserver and debugui package exports with bundled browser assets served through typedserver
|
||||
|
||||
## 2026-03-26 - 2.0.0 - BREAKING CHANGE(core)
|
||||
replace the TypeScript database engine with a Rust-backed embedded server and bridge
|
||||
|
||||
|
||||
13
html/index.html
Normal file
13
html/index.html
Normal file
@@ -0,0 +1,13 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8" />
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
|
||||
<title>SmartDB Debug</title>
|
||||
<style>body { margin: 0; background: #09090b; }</style>
|
||||
<script type="module" src="/bundle.js"></script>
|
||||
</head>
|
||||
<body>
|
||||
<smartdb-debugui></smartdb-debugui>
|
||||
</body>
|
||||
</html>
|
||||
@@ -4,7 +4,9 @@
|
||||
"private": false,
|
||||
"description": "A MongoDB-compatible embedded database server with wire protocol support, backed by a high-performance Rust engine.",
|
||||
"exports": {
|
||||
".": "./dist_ts/index.js"
|
||||
".": "./dist_ts/index.js",
|
||||
"./debugui": "./dist_ts_debugui/index.js",
|
||||
"./debugserver": "./dist_ts_debugserver/index.js"
|
||||
},
|
||||
"type": "module",
|
||||
"author": "Task Venture Capital GmbH",
|
||||
@@ -12,7 +14,7 @@
|
||||
"scripts": {
|
||||
"test:before": "(tsrust)",
|
||||
"test": "(tstest test/. --verbose --logfile --timeout 60)",
|
||||
"build": "(tsbuild tsfolders) && (tsrust)",
|
||||
"build": "(tsbundle) && (tsbuild tsfolders) && (tsrust)",
|
||||
"buildDocs": "tsdoc"
|
||||
},
|
||||
"devDependencies": {
|
||||
@@ -25,6 +27,8 @@
|
||||
"mongodb": "^7.1.1"
|
||||
},
|
||||
"dependencies": {
|
||||
"@api.global/typedserver": "^8.0.0",
|
||||
"@design.estate/dees-element": "^2.0.0",
|
||||
"@push.rocks/smartrust": "^1.3.2"
|
||||
},
|
||||
"browserslist": [
|
||||
|
||||
1331
pnpm-lock.yaml
generated
1331
pnpm-lock.yaml
generated
File diff suppressed because it is too large
Load Diff
203
readme.md
203
readme.md
@@ -1,6 +1,6 @@
|
||||
# @push.rocks/smartdb
|
||||
|
||||
A MongoDB-compatible embedded database server powered by Rust 🦀⚡ — use the official `mongodb` driver and it just works. No binary downloads, instant startup, zero config.
|
||||
A MongoDB-compatible embedded database server powered by Rust 🦀⚡ — use the official `mongodb` driver and it just works. No binary downloads, instant startup, zero config. Features a built-in **operation log** with **point-in-time revert** and a web-based **debug dashboard**.
|
||||
|
||||
## Install
|
||||
|
||||
@@ -14,6 +14,8 @@ npm install @push.rocks/smartdb
|
||||
|
||||
For reporting bugs, issues, or security vulnerabilities, please visit [community.foss.global/](https://community.foss.global/). This is the central community hub for all issue reporting. Developers who sign and comply with our contribution agreement and go through identification can also get a [code.foss.global/](https://code.foss.global/) account to submit Pull Requests directly.
|
||||
|
||||
---
|
||||
|
||||
## What It Does
|
||||
|
||||
`@push.rocks/smartdb` is a **real database server** that speaks the wire protocol used by MongoDB drivers. The core engine is written in Rust for high performance, with a thin TypeScript orchestration layer. Connect with the standard `mongodb` Node.js driver — no mocks, no stubs, no external binaries required.
|
||||
@@ -26,52 +28,55 @@ For reporting bugs, issues, or security vulnerabilities, please visit [community
|
||||
| **Binary download** | Bundled (~7MB) | ~200MB+ |
|
||||
| **Install** | `pnpm add` | System package / Docker |
|
||||
| **Persistence** | Memory or file-based | Full disk engine |
|
||||
| **Debug UI** | Built-in 🖥️ | External tooling |
|
||||
| **Point-in-time revert** | Built-in ⏪ | Requires oplog tailing |
|
||||
| **Perfect for** | Unit tests, CI/CD, prototyping, local dev, embedded | Production at scale |
|
||||
|
||||
### Two Ways to Use It
|
||||
### Three Ways to Use It
|
||||
|
||||
- 🏗️ **`SmartdbServer`** — Full control. Configure port, host, storage backend, Unix sockets. Great for test fixtures or custom setups.
|
||||
- 🎯 **`LocalSmartDb`** — Zero-config convenience. Give it a folder path, get a persistent database over a Unix socket. Done.
|
||||
- 🏗️ **`SmartdbServer`** — Full control. Configure port, host, storage backend, Unix sockets. Great for test fixtures or custom setups.
|
||||
- 🖥️ **`SmartdbDebugServer`** — Launch a web dashboard to visually browse collections, inspect the operation log, and revert to any point in time.
|
||||
|
||||
### Architecture: TypeScript + Rust 🦀
|
||||
|
||||
SmartDB uses the same **sidecar binary** pattern as [@push.rocks/smartproxy](https://code.foss.global/push.rocks/smartproxy):
|
||||
SmartDB uses a **sidecar binary** pattern — TypeScript handles lifecycle, Rust handles all database operations:
|
||||
|
||||
```
|
||||
┌──────────────────────────────────────────────────────────┐
|
||||
│ Your Application │
|
||||
│ (TypeScript / Node.js) │
|
||||
│ ┌─────────────────┐ ┌───────────────────────────┐ │
|
||||
│ │ SmartdbServer │────▶│ RustDbBridge (IPC) │ │
|
||||
│ │ or LocalSmartDb │ │ @push.rocks/smartrust │ │
|
||||
│ └─────────────────┘ └───────────┬───────────────┘ │
|
||||
└──────────────────────────────────────┼───────────────────┘
|
||||
┌──────────────────────────────────────────────────────────────┐
|
||||
│ Your Application │
|
||||
│ (TypeScript / Node.js) │
|
||||
│ ┌─────────────────┐ ┌───────────────────────────┐ │
|
||||
│ │ SmartdbServer │────▶│ RustDbBridge (IPC) │ │
|
||||
│ │ or LocalSmartDb │ │ @push.rocks/smartrust │ │
|
||||
│ └─────────────────┘ └───────────┬───────────────┘ │
|
||||
└──────────────────────────────────────┼───────────────────────┘
|
||||
│ spawn + JSON IPC
|
||||
▼
|
||||
┌──────────────────────────────────────────────────────────┐
|
||||
│ rustdb binary 🦀 │
|
||||
│ │
|
||||
│ ┌──────────────┐ ┌──────────────┐ ┌───────────────┐ │
|
||||
│ │ Wire Protocol│→ │Command Router│→ │ Handlers │ │
|
||||
│ │ (OP_MSG) │ │ (40 cmds) │ │ Find,Insert.. │ │
|
||||
│ └──────────────┘ └──────────────┘ └───────┬───────┘ │
|
||||
│ │ │
|
||||
│ ┌─────────┐ ┌────────┐ ┌───────────┐ ┌──────┴──────┐ │
|
||||
│ │ Query │ │ Update │ │Aggregation│ │ Index │ │
|
||||
│ │ Matcher │ │ Engine │ │ Engine │ │ Engine │ │
|
||||
│ └─────────┘ └────────┘ └───────────┘ └─────────────┘ │
|
||||
│ │
|
||||
│ ┌──────────────────┐ ┌──────────────────┐ │
|
||||
│ │ MemoryStorage │ │ FileStorage │ │
|
||||
│ └──────────────────┘ └──────────────────┘ │
|
||||
└──────────────────────────────────────────────────────────┘
|
||||
┌──────────────────────────────────────────────────────────────┐
|
||||
│ rustdb binary 🦀 │
|
||||
│ │
|
||||
│ ┌──────────────┐ ┌──────────────┐ ┌───────────────┐ │
|
||||
│ │ Wire Protocol│→ │Command Router│→ │ Handlers │ │
|
||||
│ │ (OP_MSG) │ │ (40+ cmds) │ │ Find,Insert.. │ │
|
||||
│ └──────────────┘ └──────────────┘ └───────┬───────┘ │
|
||||
│ │ │
|
||||
│ ┌─────────┐ ┌────────┐ ┌───────────┐ ┌──────┴──────┐ │
|
||||
│ │ Query │ │ Update │ │Aggregation│ │ Index │ │
|
||||
│ │ Matcher │ │ Engine │ │ Engine │ │ Engine │ │
|
||||
│ └─────────┘ └────────┘ └───────────┘ └─────────────┘ │
|
||||
│ │
|
||||
│ ┌──────────────────┐ ┌──────────────────┐ ┌──────────┐ │
|
||||
│ │ MemoryStorage │ │ FileStorage │ │ OpLog │ │
|
||||
│ └──────────────────┘ └──────────────────┘ └──────────┘ │
|
||||
└──────────────────────────────────────────────────────────────┘
|
||||
▲
|
||||
│ TCP / Unix Socket (wire protocol)
|
||||
│
|
||||
┌─────────────┴────────────────────────────────────────────┐
|
||||
│ MongoClient (mongodb npm driver) │
|
||||
│ Connects directly to Rust binary │
|
||||
└──────────────────────────────────────────────────────────┘
|
||||
┌─────────────┴────────────────────────────────────────────────┐
|
||||
│ MongoClient (mongodb npm driver) │
|
||||
│ Connects directly to Rust binary │
|
||||
└──────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
The TypeScript layer handles **lifecycle only** (start/stop/configure via IPC). All database operations flow directly from the `MongoClient` to the Rust binary over TCP or Unix sockets — **zero per-query IPC overhead**.
|
||||
@@ -128,6 +133,87 @@ await client.close();
|
||||
await server.stop();
|
||||
```
|
||||
|
||||
### Option 3: Debug Server (Visual Dashboard) 🖥️
|
||||
|
||||
Launch a web-based dashboard to inspect your database in real time:
|
||||
|
||||
```typescript
|
||||
import { SmartdbServer } from '@push.rocks/smartdb';
|
||||
import { SmartdbDebugServer } from '@push.rocks/smartdb/debugserver';
|
||||
|
||||
const server = new SmartdbServer({ storage: 'memory' });
|
||||
await server.start();
|
||||
|
||||
const debugServer = new SmartdbDebugServer(server, { port: 4000 });
|
||||
await debugServer.start();
|
||||
// Open http://localhost:4000 in your browser 🚀
|
||||
```
|
||||
|
||||
The debug dashboard gives you:
|
||||
- 📊 **Dashboard** — server status, uptime, database/collection counts, operation breakdown
|
||||
- 📁 **Collection Browser** — browse databases, collections, and documents interactively
|
||||
- 📝 **OpLog Timeline** — every insert, update, and delete with expandable field-level diffs
|
||||
- ⏪ **Point-in-Time Revert** — select any oplog sequence, preview what will be undone, and execute
|
||||
|
||||
---
|
||||
|
||||
## 📝 Operation Log & Point-in-Time Revert
|
||||
|
||||
Every write operation (insert, update, delete) is automatically recorded in an in-memory **operation log (OpLog)** with full before/after document snapshots. This enables:
|
||||
|
||||
- **Change tracking** — see exactly what changed, when, and in which collection
|
||||
- **Field-level diffs** — compare previous and new document states
|
||||
- **Point-in-time revert** — undo operations back to any sequence number
|
||||
- **Dry-run preview** — see what would be reverted before executing
|
||||
|
||||
### Programmatic OpLog API
|
||||
|
||||
```typescript
|
||||
import { SmartdbServer } from '@push.rocks/smartdb';
|
||||
|
||||
const server = new SmartdbServer({ port: 27017 });
|
||||
await server.start();
|
||||
|
||||
// ... perform some CRUD operations via MongoClient ...
|
||||
|
||||
// Get oplog entries
|
||||
const oplog = await server.getOpLog({ limit: 50 });
|
||||
console.log(oplog.entries);
|
||||
// [{ seq: 1, op: 'insert', db: 'myapp', collection: 'users', document: {...}, previousDocument: null }, ...]
|
||||
|
||||
// Get aggregate stats
|
||||
const stats = await server.getOpLogStats();
|
||||
console.log(stats);
|
||||
// { currentSeq: 42, totalEntries: 42, entriesByOp: { insert: 20, update: 15, delete: 7 } }
|
||||
|
||||
// Preview a revert (dry run)
|
||||
const preview = await server.revertToSeq(30, true);
|
||||
console.log(`Would undo ${preview.reverted} operations`);
|
||||
|
||||
// Execute the revert — undoes all operations after seq 30
|
||||
const result = await server.revertToSeq(30, false);
|
||||
console.log(`Reverted ${result.reverted} operations`);
|
||||
|
||||
// Browse collections programmatically
|
||||
const collections = await server.getCollections();
|
||||
const docs = await server.getDocuments('myapp', 'users', 50, 0);
|
||||
```
|
||||
|
||||
### OpLog Entry Structure
|
||||
|
||||
Each entry contains:
|
||||
|
||||
| Field | Type | Description |
|
||||
|---|---|---|
|
||||
| `seq` | `number` | Monotonically increasing sequence number |
|
||||
| `timestampMs` | `number` | Unix timestamp in milliseconds |
|
||||
| `op` | `'insert' \| 'update' \| 'delete'` | Operation type |
|
||||
| `db` | `string` | Database name |
|
||||
| `collection` | `string` | Collection name |
|
||||
| `documentId` | `string` | Document `_id` as hex string |
|
||||
| `document` | `object \| null` | New document state (null for deletes) |
|
||||
| `previousDocument` | `object \| null` | Previous document state (null for inserts) |
|
||||
|
||||
---
|
||||
|
||||
## API Reference
|
||||
@@ -175,6 +261,12 @@ const server = new SmartdbServer({
|
||||
| `port` | `number` | Configured port (TCP mode) |
|
||||
| `host` | `string` | Configured host (TCP mode) |
|
||||
| `socketPath` | `string \| undefined` | Socket path (socket mode) |
|
||||
| `getMetrics()` | `Promise<ISmartDbMetrics>` | Server metrics (db/collection counts, uptime) |
|
||||
| `getOpLog(params?)` | `Promise<IOpLogResult>` | Query oplog entries with optional filters |
|
||||
| `getOpLogStats()` | `Promise<IOpLogStats>` | Aggregate oplog statistics |
|
||||
| `revertToSeq(seq, dryRun?)` | `Promise<IRevertResult>` | Revert to a specific oplog sequence |
|
||||
| `getCollections(db?)` | `Promise<ICollectionInfo[]>` | List all collections with counts |
|
||||
| `getDocuments(db, coll, limit?, skip?)` | `Promise<IDocumentsResult>` | Browse documents with pagination |
|
||||
|
||||
### LocalSmartDb
|
||||
|
||||
@@ -202,13 +294,34 @@ const db = new LocalSmartDb({
|
||||
| `getServer()` | `SmartdbServer` | Access the underlying server |
|
||||
| `running` | `boolean` | Whether the server is running |
|
||||
|
||||
#### Connection Info (`ILocalSmartDbConnectionInfo`)
|
||||
### SmartdbDebugServer
|
||||
|
||||
Web-based debug dashboard served via `@api.global/typedserver`. Import from the `debugserver` subpath:
|
||||
|
||||
```typescript
|
||||
interface ILocalSmartDbConnectionInfo {
|
||||
socketPath: string; // e.g., /tmp/smartdb-abc123.sock
|
||||
connectionUri: string; // e.g., mongodb://%2Ftmp%2Fsmartdb-abc123.sock
|
||||
}
|
||||
import { SmartdbDebugServer } from '@push.rocks/smartdb/debugserver';
|
||||
|
||||
const debugServer = new SmartdbDebugServer(server, { port: 4000 });
|
||||
await debugServer.start();
|
||||
// Dashboard at http://localhost:4000
|
||||
|
||||
await debugServer.stop();
|
||||
```
|
||||
|
||||
The UI is bundled as base64-encoded content (via `@git.zone/tsbundle`) and served from memory — no static file directory needed.
|
||||
|
||||
### SmartdbDebugUi (Web Component)
|
||||
|
||||
For embedding the debug UI directly into your own web application, import the `<smartdb-debugui>` web component:
|
||||
|
||||
```typescript
|
||||
import { SmartdbDebugUi } from '@push.rocks/smartdb/debugui';
|
||||
|
||||
// In your HTML/lit template:
|
||||
// <smartdb-debugui .server=${mySmartdbServer}></smartdb-debugui>
|
||||
//
|
||||
// Or in HTTP mode (when served by SmartdbDebugServer):
|
||||
// <smartdb-debugui apiBaseUrl=""></smartdb-debugui>
|
||||
```
|
||||
|
||||
---
|
||||
@@ -377,10 +490,10 @@ The Rust engine is organized as a Cargo workspace with 8 focused crates:
|
||||
| `rustdb-config` | Server configuration types (serde, camelCase JSON) |
|
||||
| `rustdb-wire` | Wire protocol parser/encoder (OP_MSG, OP_QUERY, OP_REPLY) |
|
||||
| `rustdb-query` | Query matcher, update engine, aggregation, sort, projection |
|
||||
| `rustdb-storage` | Storage backends (memory, file) + WAL + OpLog |
|
||||
| `rustdb-storage` | Storage backends (memory, file), OpLog with point-in-time replay |
|
||||
| `rustdb-index` | B-tree/hash indexes, query planner (IXSCAN/COLLSCAN) |
|
||||
| `rustdb-txn` | Transaction + session management with snapshot isolation |
|
||||
| `rustdb-commands` | 40 command handlers wiring everything together |
|
||||
| `rustdb-commands` | 40+ command handlers wiring everything together |
|
||||
|
||||
Cross-compiled for `linux_amd64` and `linux_arm64` via [@git.zone/tsrust](https://www.npmjs.com/package/@git.zone/tsrust).
|
||||
|
||||
@@ -410,6 +523,12 @@ tap.test('should insert and find', async () => {
|
||||
expect(item?.price).toEqual(9.99);
|
||||
});
|
||||
|
||||
tap.test('should track changes in oplog', async () => {
|
||||
const oplog = await server.getOpLog();
|
||||
expect(oplog.entries.length).toBeGreaterThan(0);
|
||||
expect(oplog.entries[0].op).toEqual('insert');
|
||||
});
|
||||
|
||||
tap.test('teardown', async () => {
|
||||
await client.close();
|
||||
await server.stop();
|
||||
@@ -422,7 +541,7 @@ export default tap.start();
|
||||
|
||||
## License and Legal Information
|
||||
|
||||
This repository contains open-source code licensed under the MIT License. A copy of the license can be found in the [license](./license) file.
|
||||
This repository contains open-source code licensed under the MIT License. A copy of the license can be found in the [LICENSE](./LICENSE) file.
|
||||
|
||||
**Please note:** The MIT License does not grant permission to use the trade names, trademarks, service marks, or product names of the project, except as required for reasonable and customary use in describing the origin of the work and reproducing the content of the NOTICE file.
|
||||
|
||||
@@ -434,7 +553,7 @@ Use of these trademarks must comply with Task Venture Capital GmbH's Trademark G
|
||||
|
||||
### Company Information
|
||||
|
||||
Task Venture Capital GmbH
|
||||
Task Venture Capital GmbH
|
||||
Registered at District Court Bremen HRB 35230 HB, Germany
|
||||
|
||||
For any legal inquiries or further information, please contact us via email at hello@task.vc.
|
||||
|
||||
@@ -3,7 +3,7 @@ use std::sync::Arc;
|
||||
use bson::Document;
|
||||
use dashmap::DashMap;
|
||||
use rustdb_index::IndexEngine;
|
||||
use rustdb_storage::StorageAdapter;
|
||||
use rustdb_storage::{OpLog, StorageAdapter};
|
||||
use rustdb_txn::{SessionEngine, TransactionEngine};
|
||||
|
||||
/// Shared command execution context, passed to all handlers.
|
||||
@@ -20,6 +20,8 @@ pub struct CommandContext {
|
||||
pub cursors: Arc<DashMap<i64, CursorState>>,
|
||||
/// Server start time (for uptime reporting).
|
||||
pub start_time: std::time::Instant,
|
||||
/// Operation log for point-in-time replay.
|
||||
pub oplog: Arc<OpLog>,
|
||||
}
|
||||
|
||||
/// State of an open cursor from a find or aggregate command.
|
||||
|
||||
@@ -2,6 +2,7 @@ use std::collections::HashSet;
|
||||
|
||||
use bson::{doc, Bson, Document};
|
||||
use rustdb_query::QueryMatcher;
|
||||
use rustdb_storage::OpType;
|
||||
use tracing::debug;
|
||||
|
||||
use crate::context::CommandContext;
|
||||
@@ -171,6 +172,16 @@ async fn delete_matching(
|
||||
.await
|
||||
.map_err(|e| CommandError::StorageError(e.to_string()))?;
|
||||
|
||||
// Record in oplog.
|
||||
ctx.oplog.append(
|
||||
OpType::Delete,
|
||||
db,
|
||||
coll,
|
||||
&id_str,
|
||||
None,
|
||||
Some(doc.clone()),
|
||||
);
|
||||
|
||||
// Update index engine.
|
||||
if let Some(mut engine) = ctx.indexes.get_mut(ns_key) {
|
||||
engine.on_delete(doc);
|
||||
|
||||
@@ -2,6 +2,7 @@ use std::collections::HashMap;
|
||||
|
||||
use bson::{doc, oid::ObjectId, Bson, Document};
|
||||
use rustdb_index::IndexEngine;
|
||||
use rustdb_storage::OpType;
|
||||
use tracing::{debug, warn};
|
||||
|
||||
use crate::context::CommandContext;
|
||||
@@ -63,7 +64,17 @@ pub async fn handle(
|
||||
|
||||
// Attempt storage insert.
|
||||
match ctx.storage.insert_one(db, coll, doc.clone()).await {
|
||||
Ok(_id_str) => {
|
||||
Ok(id_str) => {
|
||||
// Record in oplog.
|
||||
ctx.oplog.append(
|
||||
OpType::Insert,
|
||||
db,
|
||||
coll,
|
||||
&id_str,
|
||||
Some(doc.clone()),
|
||||
None,
|
||||
);
|
||||
|
||||
// Update index engine.
|
||||
let mut engine = ctx
|
||||
.indexes
|
||||
|
||||
@@ -3,6 +3,7 @@ use std::collections::HashSet;
|
||||
use bson::{doc, oid::ObjectId, Bson, Document};
|
||||
use rustdb_index::IndexEngine;
|
||||
use rustdb_query::{QueryMatcher, UpdateEngine, sort_documents, apply_projection};
|
||||
use rustdb_storage::OpType;
|
||||
use tracing::debug;
|
||||
|
||||
use crate::context::CommandContext;
|
||||
@@ -151,7 +152,17 @@ async fn handle_update(
|
||||
|
||||
// Insert the new document.
|
||||
match ctx.storage.insert_one(db, coll, updated.clone()).await {
|
||||
Ok(_) => {
|
||||
Ok(id_str) => {
|
||||
// Record upsert in oplog as an insert.
|
||||
ctx.oplog.append(
|
||||
OpType::Insert,
|
||||
db,
|
||||
coll,
|
||||
&id_str,
|
||||
Some(updated.clone()),
|
||||
None,
|
||||
);
|
||||
|
||||
// Update index.
|
||||
let mut engine = ctx
|
||||
.indexes
|
||||
@@ -212,6 +223,16 @@ async fn handle_update(
|
||||
.await
|
||||
{
|
||||
Ok(()) => {
|
||||
// Record in oplog.
|
||||
ctx.oplog.append(
|
||||
OpType::Update,
|
||||
db,
|
||||
coll,
|
||||
&id_str,
|
||||
Some(updated_doc.clone()),
|
||||
Some(matched_doc.clone()),
|
||||
);
|
||||
|
||||
// Update index.
|
||||
if let Some(mut engine) = ctx.indexes.get_mut(&ns_key) {
|
||||
let _ = engine.on_update(matched_doc, &updated_doc);
|
||||
@@ -362,6 +383,16 @@ async fn handle_find_and_modify(
|
||||
let id_str = extract_id_string(doc);
|
||||
ctx.storage.delete_by_id(db, coll, &id_str).await?;
|
||||
|
||||
// Record in oplog.
|
||||
ctx.oplog.append(
|
||||
OpType::Delete,
|
||||
db,
|
||||
coll,
|
||||
&id_str,
|
||||
None,
|
||||
Some(doc.clone()),
|
||||
);
|
||||
|
||||
// Update index.
|
||||
if let Some(mut engine) = ctx.indexes.get_mut(&ns_key) {
|
||||
engine.on_delete(doc);
|
||||
@@ -418,6 +449,16 @@ async fn handle_find_and_modify(
|
||||
.update_by_id(db, coll, &id_str, updated_doc.clone())
|
||||
.await?;
|
||||
|
||||
// Record in oplog.
|
||||
ctx.oplog.append(
|
||||
OpType::Update,
|
||||
db,
|
||||
coll,
|
||||
&id_str,
|
||||
Some(updated_doc.clone()),
|
||||
Some(original_doc.clone()),
|
||||
);
|
||||
|
||||
// Update index.
|
||||
if let Some(mut engine) = ctx.indexes.get_mut(&ns_key) {
|
||||
let _ = engine.on_update(&original_doc, &updated_doc);
|
||||
@@ -464,10 +505,20 @@ async fn handle_find_and_modify(
|
||||
updated_doc.get("_id").unwrap().clone()
|
||||
};
|
||||
|
||||
ctx.storage
|
||||
let inserted_id_str = ctx.storage
|
||||
.insert_one(db, coll, updated_doc.clone())
|
||||
.await?;
|
||||
|
||||
// Record upsert in oplog as an insert.
|
||||
ctx.oplog.append(
|
||||
OpType::Insert,
|
||||
db,
|
||||
coll,
|
||||
&inserted_id_str,
|
||||
Some(updated_doc.clone()),
|
||||
None,
|
||||
);
|
||||
|
||||
// Update index.
|
||||
{
|
||||
let mut engine = ctx
|
||||
|
||||
@@ -18,5 +18,5 @@ pub use adapter::StorageAdapter;
|
||||
pub use error::{StorageError, StorageResult};
|
||||
pub use file::FileStorageAdapter;
|
||||
pub use memory::MemoryStorageAdapter;
|
||||
pub use oplog::{OpLog, OpLogEntry, OpType};
|
||||
pub use oplog::{OpLog, OpLogEntry, OpLogStats, OpType};
|
||||
pub use wal::{WalOp, WalRecord, WriteAheadLog};
|
||||
|
||||
@@ -2,6 +2,8 @@
|
||||
//!
|
||||
//! The OpLog records every write operation so that changes can be replayed,
|
||||
//! replicated, or used for change-stream style notifications.
|
||||
//! Each entry stores both the new and previous document state, enabling
|
||||
//! point-in-time replay and revert.
|
||||
|
||||
use std::sync::atomic::{AtomicU64, Ordering};
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
@@ -33,8 +35,21 @@ pub struct OpLogEntry {
|
||||
pub collection: String,
|
||||
/// Document id (hex string).
|
||||
pub document_id: String,
|
||||
/// The document snapshot (for insert/update; None for delete).
|
||||
/// The new document snapshot (for insert/update; None for delete).
|
||||
pub document: Option<Document>,
|
||||
/// The previous document snapshot (for update/delete; None for insert).
|
||||
pub previous_document: Option<Document>,
|
||||
}
|
||||
|
||||
/// Aggregate statistics about the oplog.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct OpLogStats {
|
||||
pub current_seq: u64,
|
||||
pub total_entries: usize,
|
||||
pub oldest_seq: u64,
|
||||
pub inserts: usize,
|
||||
pub updates: usize,
|
||||
pub deletes: usize,
|
||||
}
|
||||
|
||||
/// In-memory operation log.
|
||||
@@ -61,6 +76,7 @@ impl OpLog {
|
||||
collection: &str,
|
||||
document_id: &str,
|
||||
document: Option<Document>,
|
||||
previous_document: Option<Document>,
|
||||
) -> u64 {
|
||||
let seq = self.next_seq.fetch_add(1, Ordering::SeqCst);
|
||||
let entry = OpLogEntry {
|
||||
@@ -74,11 +90,17 @@ impl OpLog {
|
||||
collection: collection.to_string(),
|
||||
document_id: document_id.to_string(),
|
||||
document,
|
||||
previous_document,
|
||||
};
|
||||
self.entries.insert(seq, entry);
|
||||
seq
|
||||
}
|
||||
|
||||
/// Get a single entry by sequence number.
|
||||
pub fn get_entry(&self, seq: u64) -> Option<OpLogEntry> {
|
||||
self.entries.get(&seq).map(|e| e.value().clone())
|
||||
}
|
||||
|
||||
/// Get all entries with sequence number >= `since`.
|
||||
pub fn entries_since(&self, since: u64) -> Vec<OpLogEntry> {
|
||||
let mut result: Vec<_> = self
|
||||
@@ -91,11 +113,72 @@ impl OpLog {
|
||||
result
|
||||
}
|
||||
|
||||
/// Get entries in range [from_seq, to_seq] inclusive, sorted by seq.
|
||||
pub fn entries_range(&self, from_seq: u64, to_seq: u64) -> Vec<OpLogEntry> {
|
||||
let mut result: Vec<_> = self
|
||||
.entries
|
||||
.iter()
|
||||
.filter(|e| {
|
||||
let k = *e.key();
|
||||
k >= from_seq && k <= to_seq
|
||||
})
|
||||
.map(|e| e.value().clone())
|
||||
.collect();
|
||||
result.sort_by_key(|e| e.seq);
|
||||
result
|
||||
}
|
||||
|
||||
/// Remove all entries with seq > `after_seq` and reset the next_seq counter.
|
||||
pub fn truncate_after(&self, after_seq: u64) {
|
||||
let keys_to_remove: Vec<u64> = self
|
||||
.entries
|
||||
.iter()
|
||||
.filter(|e| *e.key() > after_seq)
|
||||
.map(|e| *e.key())
|
||||
.collect();
|
||||
for key in keys_to_remove {
|
||||
self.entries.remove(&key);
|
||||
}
|
||||
self.next_seq.store(after_seq + 1, Ordering::SeqCst);
|
||||
}
|
||||
|
||||
/// Get the current (latest) sequence number. Returns 0 if empty.
|
||||
pub fn current_seq(&self) -> u64 {
|
||||
self.next_seq.load(Ordering::SeqCst).saturating_sub(1)
|
||||
}
|
||||
|
||||
/// Get aggregate statistics.
|
||||
pub fn stats(&self) -> OpLogStats {
|
||||
let mut inserts = 0usize;
|
||||
let mut updates = 0usize;
|
||||
let mut deletes = 0usize;
|
||||
let mut oldest_seq = u64::MAX;
|
||||
|
||||
for entry in self.entries.iter() {
|
||||
match entry.value().op {
|
||||
OpType::Insert => inserts += 1,
|
||||
OpType::Update => updates += 1,
|
||||
OpType::Delete => deletes += 1,
|
||||
}
|
||||
if entry.value().seq < oldest_seq {
|
||||
oldest_seq = entry.value().seq;
|
||||
}
|
||||
}
|
||||
|
||||
if oldest_seq == u64::MAX {
|
||||
oldest_seq = 0;
|
||||
}
|
||||
|
||||
OpLogStats {
|
||||
current_seq: self.current_seq(),
|
||||
total_entries: self.entries.len(),
|
||||
oldest_seq,
|
||||
inserts,
|
||||
updates,
|
||||
deletes,
|
||||
}
|
||||
}
|
||||
|
||||
/// Clear all entries.
|
||||
pub fn clear(&self) {
|
||||
self.entries.clear();
|
||||
|
||||
@@ -13,7 +13,7 @@ use tokio_util::sync::CancellationToken;
|
||||
use rustdb_config::{RustDbOptions, StorageType};
|
||||
use rustdb_wire::{WireCodec, OP_QUERY};
|
||||
use rustdb_wire::{encode_op_msg_response, encode_op_reply_response};
|
||||
use rustdb_storage::{StorageAdapter, MemoryStorageAdapter, FileStorageAdapter};
|
||||
use rustdb_storage::{StorageAdapter, MemoryStorageAdapter, FileStorageAdapter, OpLog};
|
||||
// IndexEngine is used indirectly via CommandContext
|
||||
use rustdb_txn::{TransactionEngine, SessionEngine};
|
||||
use rustdb_commands::{CommandRouter, CommandContext};
|
||||
@@ -56,6 +56,7 @@ impl RustDb {
|
||||
sessions: Arc::new(SessionEngine::new(30 * 60 * 1000, 60 * 1000)),
|
||||
cursors: Arc::new(DashMap::new()),
|
||||
start_time: std::time::Instant::now(),
|
||||
oplog: Arc::new(OpLog::new()),
|
||||
});
|
||||
|
||||
let router = Arc::new(CommandRouter::new(ctx.clone()));
|
||||
@@ -166,6 +167,11 @@ impl RustDb {
|
||||
pub fn connection_uri(&self) -> String {
|
||||
self.options.connection_uri()
|
||||
}
|
||||
|
||||
/// Get a reference to the shared command context (for management IPC access to oplog, storage, etc.).
|
||||
pub fn ctx(&self) -> &Arc<CommandContext> {
|
||||
&self.ctx
|
||||
}
|
||||
}
|
||||
|
||||
/// Handle a single client connection using the wire protocol codec.
|
||||
|
||||
@@ -139,7 +139,12 @@ async fn handle_request(
|
||||
"start" => handle_start(&id, &request.params, db).await,
|
||||
"stop" => handle_stop(&id, db).await,
|
||||
"getStatus" => handle_get_status(&id, db),
|
||||
"getMetrics" => handle_get_metrics(&id, db),
|
||||
"getMetrics" => handle_get_metrics(&id, db).await,
|
||||
"getOpLog" => handle_get_oplog(&id, &request.params, db),
|
||||
"getOpLogStats" => handle_get_oplog_stats(&id, db),
|
||||
"revertToSeq" => handle_revert_to_seq(&id, &request.params, db).await,
|
||||
"getCollections" => handle_get_collections(&id, &request.params, db).await,
|
||||
"getDocuments" => handle_get_documents(&id, &request.params, db).await,
|
||||
_ => ManagementResponse::err(id, format!("Unknown method: {}", request.method)),
|
||||
}
|
||||
}
|
||||
@@ -223,18 +228,333 @@ fn handle_get_status(
|
||||
}
|
||||
}
|
||||
|
||||
fn handle_get_metrics(
|
||||
async fn handle_get_metrics(
|
||||
id: &str,
|
||||
db: &Option<RustDb>,
|
||||
) -> ManagementResponse {
|
||||
match db.as_ref() {
|
||||
Some(_d) => ManagementResponse::ok(
|
||||
id.to_string(),
|
||||
serde_json::json!({
|
||||
"connections": 0,
|
||||
"databases": 0,
|
||||
}),
|
||||
),
|
||||
Some(d) => {
|
||||
let ctx = d.ctx();
|
||||
let db_list = ctx.storage.list_databases().await.unwrap_or_default();
|
||||
let mut total_collections = 0u64;
|
||||
for db_name in &db_list {
|
||||
if let Ok(colls) = ctx.storage.list_collections(db_name).await {
|
||||
total_collections += colls.len() as u64;
|
||||
}
|
||||
}
|
||||
let oplog_stats = ctx.oplog.stats();
|
||||
let uptime_secs = ctx.start_time.elapsed().as_secs();
|
||||
|
||||
ManagementResponse::ok(
|
||||
id.to_string(),
|
||||
serde_json::json!({
|
||||
"databases": db_list.len(),
|
||||
"collections": total_collections,
|
||||
"oplogEntries": oplog_stats.total_entries,
|
||||
"oplogCurrentSeq": oplog_stats.current_seq,
|
||||
"uptimeSeconds": uptime_secs,
|
||||
}),
|
||||
)
|
||||
}
|
||||
None => ManagementResponse::err(id.to_string(), "Server is not running".to_string()),
|
||||
}
|
||||
}
|
||||
|
||||
fn handle_get_oplog(
|
||||
id: &str,
|
||||
params: &serde_json::Value,
|
||||
db: &Option<RustDb>,
|
||||
) -> ManagementResponse {
|
||||
let d = match db.as_ref() {
|
||||
Some(d) => d,
|
||||
None => return ManagementResponse::err(id.to_string(), "Server is not running".to_string()),
|
||||
};
|
||||
|
||||
let ctx = d.ctx();
|
||||
let since_seq = params.get("sinceSeq").and_then(|v| v.as_u64()).unwrap_or(1);
|
||||
let limit = params.get("limit").and_then(|v| v.as_u64()).unwrap_or(100) as usize;
|
||||
let filter_db = params.get("db").and_then(|v| v.as_str());
|
||||
let filter_coll = params.get("collection").and_then(|v| v.as_str());
|
||||
|
||||
let mut entries = ctx.oplog.entries_since(since_seq);
|
||||
|
||||
// Apply filters.
|
||||
if let Some(fdb) = filter_db {
|
||||
entries.retain(|e| e.db == fdb);
|
||||
}
|
||||
if let Some(fcoll) = filter_coll {
|
||||
entries.retain(|e| e.collection == fcoll);
|
||||
}
|
||||
|
||||
let total = entries.len();
|
||||
entries.truncate(limit);
|
||||
|
||||
// Serialize entries to JSON.
|
||||
let entries_json: Vec<serde_json::Value> = entries
|
||||
.iter()
|
||||
.map(|e| {
|
||||
let doc_json = e.document.as_ref().map(|d| bson_doc_to_json(d));
|
||||
let prev_json = e.previous_document.as_ref().map(|d| bson_doc_to_json(d));
|
||||
serde_json::json!({
|
||||
"seq": e.seq,
|
||||
"timestampMs": e.timestamp_ms,
|
||||
"op": match e.op {
|
||||
rustdb_storage::OpType::Insert => "insert",
|
||||
rustdb_storage::OpType::Update => "update",
|
||||
rustdb_storage::OpType::Delete => "delete",
|
||||
},
|
||||
"db": e.db,
|
||||
"collection": e.collection,
|
||||
"documentId": e.document_id,
|
||||
"document": doc_json,
|
||||
"previousDocument": prev_json,
|
||||
})
|
||||
})
|
||||
.collect();
|
||||
|
||||
ManagementResponse::ok(
|
||||
id.to_string(),
|
||||
serde_json::json!({
|
||||
"entries": entries_json,
|
||||
"currentSeq": ctx.oplog.current_seq(),
|
||||
"totalEntries": total,
|
||||
}),
|
||||
)
|
||||
}
|
||||
|
||||
fn handle_get_oplog_stats(
|
||||
id: &str,
|
||||
db: &Option<RustDb>,
|
||||
) -> ManagementResponse {
|
||||
let d = match db.as_ref() {
|
||||
Some(d) => d,
|
||||
None => return ManagementResponse::err(id.to_string(), "Server is not running".to_string()),
|
||||
};
|
||||
|
||||
let stats = d.ctx().oplog.stats();
|
||||
ManagementResponse::ok(
|
||||
id.to_string(),
|
||||
serde_json::json!({
|
||||
"currentSeq": stats.current_seq,
|
||||
"totalEntries": stats.total_entries,
|
||||
"oldestSeq": stats.oldest_seq,
|
||||
"entriesByOp": {
|
||||
"insert": stats.inserts,
|
||||
"update": stats.updates,
|
||||
"delete": stats.deletes,
|
||||
},
|
||||
}),
|
||||
)
|
||||
}
|
||||
|
||||
/// Revert database state to `seq` by undoing every oplog entry after it.
///
/// Params: `seq` (required, u64) — the sequence number to roll back to;
/// `dryRun` (optional, bool) — when true, only report which entries would
/// be reverted without touching storage.
///
/// Processing order is newest-first so later writes are undone before the
/// earlier writes they may depend on. Entries that cannot be reverted
/// (missing `previous_document`, or a storage error) are collected into an
/// `errors` array instead of aborting the whole revert.
async fn handle_revert_to_seq(
    id: &str,
    params: &serde_json::Value,
    db: &Option<RustDb>,
) -> ManagementResponse {
    // The server must be running to have a command context at all.
    let d = match db.as_ref() {
        Some(d) => d,
        None => return ManagementResponse::err(id.to_string(), "Server is not running".to_string()),
    };

    let target_seq = match params.get("seq").and_then(|v| v.as_u64()) {
        Some(s) => s,
        None => return ManagementResponse::err(id.to_string(), "Missing 'seq' parameter".to_string()),
    };
    let dry_run = params.get("dryRun").and_then(|v| v.as_bool()).unwrap_or(false);

    let ctx = d.ctx();
    let current = ctx.oplog.current_seq();

    // Cannot revert forward in time.
    if target_seq > current {
        return ManagementResponse::err(
            id.to_string(),
            format!("Target seq {} is beyond current seq {}", target_seq, current),
        );
    }

    // Collect entries to revert (from target+1 to current), sorted descending for reverse processing.
    let mut entries_to_revert = ctx.oplog.entries_range(target_seq + 1, current);
    entries_to_revert.reverse();

    if dry_run {
        // Dry run: describe the would-be-reverted entries (metadata only, no
        // document bodies) and leave storage and the oplog untouched.
        let entries_json: Vec<serde_json::Value> = entries_to_revert
            .iter()
            .map(|e| {
                serde_json::json!({
                    "seq": e.seq,
                    "op": match e.op {
                        rustdb_storage::OpType::Insert => "insert",
                        rustdb_storage::OpType::Update => "update",
                        rustdb_storage::OpType::Delete => "delete",
                    },
                    "db": e.db,
                    "collection": e.collection,
                    "documentId": e.document_id,
                })
            })
            .collect();

        return ManagementResponse::ok(
            id.to_string(),
            serde_json::json!({
                "dryRun": true,
                "reverted": entries_to_revert.len(),
                "entries": entries_json,
            }),
        );
    }

    // Execute revert: process each entry in reverse, using storage directly.
    // NOTE(review): these compensating writes go through ctx.storage directly —
    // presumably that bypasses oplog recording; confirm, since otherwise the
    // compensating entries would themselves be logged and then dropped by the
    // truncate_after below.
    let mut reverted = 0u64;
    let mut errors: Vec<String> = Vec::new();

    for entry in &entries_to_revert {
        let result = match entry.op {
            rustdb_storage::OpType::Insert => {
                // Undo insert -> delete the document.
                ctx.storage.delete_by_id(&entry.db, &entry.collection, &entry.document_id).await
            }
            rustdb_storage::OpType::Update => {
                // Undo update -> restore the previous document.
                if let Some(ref prev_doc) = entry.previous_document {
                    ctx.storage
                        .update_by_id(&entry.db, &entry.collection, &entry.document_id, prev_doc.clone())
                        .await
                } else {
                    // `continue` skips the result handling below for this entry.
                    errors.push(format!("seq {}: update entry missing previous_document", entry.seq));
                    continue;
                }
            }
            rustdb_storage::OpType::Delete => {
                // Undo delete -> re-insert the previous document.
                if let Some(ref prev_doc) = entry.previous_document {
                    ctx.storage
                        .insert_one(&entry.db, &entry.collection, prev_doc.clone())
                        .await
                        .map(|_| ())
                } else {
                    errors.push(format!("seq {}: delete entry missing previous_document", entry.seq));
                    continue;
                }
            }
        };

        match result {
            Ok(()) => reverted += 1,
            Err(e) => errors.push(format!("seq {}: {}", entry.seq, e)),
        }
    }

    // Truncate the oplog to the target sequence.
    ctx.oplog.truncate_after(target_seq);

    let mut response = serde_json::json!({
        "dryRun": false,
        "reverted": reverted,
        "targetSeq": target_seq,
    });

    // Only surface the errors array when something actually failed.
    if !errors.is_empty() {
        response["errors"] = serde_json::json!(errors);
    }

    ManagementResponse::ok(id.to_string(), response)
}
|
||||
|
||||
async fn handle_get_collections(
|
||||
id: &str,
|
||||
params: &serde_json::Value,
|
||||
db: &Option<RustDb>,
|
||||
) -> ManagementResponse {
|
||||
let d = match db.as_ref() {
|
||||
Some(d) => d,
|
||||
None => return ManagementResponse::err(id.to_string(), "Server is not running".to_string()),
|
||||
};
|
||||
|
||||
let ctx = d.ctx();
|
||||
let filter_db = params.get("db").and_then(|v| v.as_str());
|
||||
|
||||
let databases = match ctx.storage.list_databases().await {
|
||||
Ok(dbs) => dbs,
|
||||
Err(e) => return ManagementResponse::err(id.to_string(), format!("Failed to list databases: {}", e)),
|
||||
};
|
||||
|
||||
let mut collections: Vec<serde_json::Value> = Vec::new();
|
||||
|
||||
for db_name in &databases {
|
||||
if let Some(fdb) = filter_db {
|
||||
if db_name != fdb {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
if let Ok(colls) = ctx.storage.list_collections(db_name).await {
|
||||
for coll_name in colls {
|
||||
let count = ctx.storage.count(db_name, &coll_name).await.unwrap_or(0);
|
||||
collections.push(serde_json::json!({
|
||||
"db": db_name,
|
||||
"name": coll_name,
|
||||
"count": count,
|
||||
}));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ManagementResponse::ok(
|
||||
id.to_string(),
|
||||
serde_json::json!({ "collections": collections }),
|
||||
)
|
||||
}
|
||||
|
||||
async fn handle_get_documents(
|
||||
id: &str,
|
||||
params: &serde_json::Value,
|
||||
db: &Option<RustDb>,
|
||||
) -> ManagementResponse {
|
||||
let d = match db.as_ref() {
|
||||
Some(d) => d,
|
||||
None => return ManagementResponse::err(id.to_string(), "Server is not running".to_string()),
|
||||
};
|
||||
|
||||
let db_name = match params.get("db").and_then(|v| v.as_str()) {
|
||||
Some(s) => s,
|
||||
None => return ManagementResponse::err(id.to_string(), "Missing 'db' parameter".to_string()),
|
||||
};
|
||||
let coll_name = match params.get("collection").and_then(|v| v.as_str()) {
|
||||
Some(s) => s,
|
||||
None => return ManagementResponse::err(id.to_string(), "Missing 'collection' parameter".to_string()),
|
||||
};
|
||||
let limit = params.get("limit").and_then(|v| v.as_u64()).unwrap_or(50) as usize;
|
||||
let skip = params.get("skip").and_then(|v| v.as_u64()).unwrap_or(0) as usize;
|
||||
|
||||
let ctx = d.ctx();
|
||||
|
||||
let all_docs = match ctx.storage.find_all(db_name, coll_name).await {
|
||||
Ok(docs) => docs,
|
||||
Err(e) => return ManagementResponse::err(id.to_string(), format!("Failed to find documents: {}", e)),
|
||||
};
|
||||
|
||||
let total = all_docs.len();
|
||||
let docs: Vec<serde_json::Value> = all_docs
|
||||
.into_iter()
|
||||
.skip(skip)
|
||||
.take(limit)
|
||||
.map(|d| bson_doc_to_json(&d))
|
||||
.collect();
|
||||
|
||||
ManagementResponse::ok(
|
||||
id.to_string(),
|
||||
serde_json::json!({
|
||||
"documents": docs,
|
||||
"total": total,
|
||||
}),
|
||||
)
|
||||
}
|
||||
|
||||
/// Convert a BSON Document to a serde_json::Value.
|
||||
fn bson_doc_to_json(doc: &bson::Document) -> serde_json::Value {
|
||||
// Use bson's built-in relaxed extended JSON serialization.
|
||||
let bson_val = bson::Bson::Document(doc.clone());
|
||||
bson_val.into_relaxed_extjson()
|
||||
}
|
||||
|
||||
@@ -3,6 +3,6 @@
|
||||
*/
|
||||
export const commitinfo = {
|
||||
name: '@push.rocks/smartdb',
|
||||
version: '2.0.0',
|
||||
version: '2.1.0',
|
||||
description: 'A MongoDB-compatible embedded database server with wire protocol support, backed by a high-performance Rust engine.'
|
||||
}
|
||||
|
||||
11
ts/index.ts
11
ts/index.ts
@@ -9,3 +9,14 @@ export type { ILocalSmartDbOptions, ILocalSmartDbConnectionInfo } from './ts_loc
|
||||
|
||||
// Export commitinfo
|
||||
export { commitinfo };
|
||||
|
||||
// Re-export oplog / debug types for convenience
|
||||
export type {
|
||||
IOpLogEntry,
|
||||
IOpLogResult,
|
||||
IOpLogStats,
|
||||
IRevertResult,
|
||||
ICollectionInfo,
|
||||
IDocumentsResult,
|
||||
ISmartDbMetrics,
|
||||
} from './ts_smartdb/index.js';
|
||||
|
||||
@@ -6,3 +6,14 @@ export type { ISmartdbServerOptions } from './server/SmartdbServer.js';
|
||||
|
||||
// Export bridge for advanced usage
|
||||
export { RustDbBridge } from './rust-db-bridge.js';
|
||||
|
||||
// Export oplog / debug types
|
||||
export type {
|
||||
IOpLogEntry,
|
||||
IOpLogResult,
|
||||
IOpLogStats,
|
||||
IRevertResult,
|
||||
ICollectionInfo,
|
||||
IDocumentsResult,
|
||||
ISmartDbMetrics,
|
||||
} from './rust-db-bridge.js';
|
||||
|
||||
@@ -3,6 +3,82 @@ import * as path from 'path';
|
||||
import * as url from 'url';
|
||||
import { EventEmitter } from 'events';
|
||||
|
||||
/**
 * A single oplog entry returned from the Rust engine.
 */
export interface IOpLogEntry {
  /** Monotonically increasing sequence number assigned by the oplog. */
  seq: number;
  /** Timestamp of the operation, in milliseconds. */
  timestampMs: number;
  /** Kind of write operation recorded. */
  op: 'insert' | 'update' | 'delete';
  /** Database the write targeted. */
  db: string;
  /** Collection the write targeted. */
  collection: string;
  /** Id of the affected document. */
  documentId: string;
  /** Document state after the operation; may be null (e.g. for deletes — confirm against engine). */
  document: Record<string, any> | null;
  /** Document state before the operation; may be null (e.g. for inserts — confirm against engine). */
  previousDocument: Record<string, any> | null;
}

/**
 * Aggregate oplog statistics.
 */
export interface IOpLogStats {
  /** Highest sequence number assigned so far. */
  currentSeq: number;
  /** Total number of entries currently held in the log. */
  totalEntries: number;
  /** Presumably the sequence number of the oldest retained entry — confirm against engine. */
  oldestSeq: number;
  /** Entry counts broken down by operation type. */
  entriesByOp: {
    insert: number;
    update: number;
    delete: number;
  };
}

/**
 * Result of a getOpLog query.
 */
export interface IOpLogResult {
  /** Matching entries, after filtering and limiting. */
  entries: IOpLogEntry[];
  /** Highest oplog sequence number at query time. */
  currentSeq: number;
  /** Number of entries that matched the filters before the limit was applied. */
  totalEntries: number;
}

/**
 * Result of a revertToSeq command.
 */
export interface IRevertResult {
  /** True when nothing was modified (preview only). */
  dryRun: boolean;
  /** Number of entries reverted (or that would be reverted in a dry run). */
  reverted: number;
  /** Sequence number the database was rolled back to; absent on dry runs. */
  targetSeq?: number;
  /** Entries that would be reverted; present on dry runs only. */
  entries?: IOpLogEntry[];
  /** Per-entry failure messages; present only when some reverts failed. */
  errors?: string[];
}

/**
 * A collection info entry.
 */
export interface ICollectionInfo {
  /** Database the collection belongs to. */
  db: string;
  /** Collection name. */
  name: string;
  /** Number of documents in the collection. */
  count: number;
}

/**
 * Result of a getDocuments query.
 */
export interface IDocumentsResult {
  /** The requested page of documents. */
  documents: Record<string, any>[];
  /** Total number of documents in the collection, before pagination. */
  total: number;
}

/**
 * Server metrics.
 */
export interface ISmartDbMetrics {
  /** Number of databases. */
  databases: number;
  /** Number of collections across all databases. */
  collections: number;
  /** Total number of oplog entries. */
  oplogEntries: number;
  /** Current (highest) oplog sequence number. */
  oplogCurrentSeq: number;
  /** Seconds since the engine started. */
  uptimeSeconds: number;
}
|
||||
|
||||
/**
|
||||
* Type-safe command definitions for the RustDb IPC protocol.
|
||||
*/
|
||||
@@ -10,7 +86,24 @@ type TSmartDbCommands = {
|
||||
start: { params: { config: ISmartDbRustConfig }; result: { connectionUri: string } };
|
||||
stop: { params: Record<string, never>; result: void };
|
||||
getStatus: { params: Record<string, never>; result: { running: boolean } };
|
||||
getMetrics: { params: Record<string, never>; result: any };
|
||||
getMetrics: { params: Record<string, never>; result: ISmartDbMetrics };
|
||||
getOpLog: {
|
||||
params: { sinceSeq?: number; limit?: number; db?: string; collection?: string };
|
||||
result: IOpLogResult;
|
||||
};
|
||||
getOpLogStats: { params: Record<string, never>; result: IOpLogStats };
|
||||
revertToSeq: {
|
||||
params: { seq: number; dryRun?: boolean };
|
||||
result: IRevertResult;
|
||||
};
|
||||
getCollections: {
|
||||
params: { db?: string };
|
||||
result: { collections: ICollectionInfo[] };
|
||||
};
|
||||
getDocuments: {
|
||||
params: { db: string; collection: string; limit?: number; skip?: number };
|
||||
result: IDocumentsResult;
|
||||
};
|
||||
};
|
||||
|
||||
/**
|
||||
@@ -132,7 +225,38 @@ export class RustDbBridge extends EventEmitter {
|
||||
return await this.bridge.sendCommand('getStatus', {} as Record<string, never>) as { running: boolean };
|
||||
}
|
||||
|
||||
public async getMetrics(): Promise<any> {
|
||||
return this.bridge.sendCommand('getMetrics', {} as Record<string, never>);
|
||||
public async getMetrics(): Promise<ISmartDbMetrics> {
|
||||
return this.bridge.sendCommand('getMetrics', {} as Record<string, never>) as Promise<ISmartDbMetrics>;
|
||||
}
|
||||
|
||||
  /**
   * Fetch oplog entries from the engine, optionally filtered by starting
   * sequence, result limit, database, and collection.
   */
  public async getOpLog(params: {
    sinceSeq?: number;
    limit?: number;
    db?: string;
    collection?: string;
  } = {}): Promise<IOpLogResult> {
    return this.bridge.sendCommand('getOpLog', params) as Promise<IOpLogResult>;
  }
|
||||
|
||||
  /**
   * Fetch aggregate oplog statistics from the engine.
   */
  public async getOpLogStats(): Promise<IOpLogStats> {
    return this.bridge.sendCommand('getOpLogStats', {} as Record<string, never>) as Promise<IOpLogStats>;
  }
|
||||
|
||||
  /**
   * Revert database state to a specific oplog sequence number.
   * Pass dryRun=true to preview the entries that would be reverted.
   */
  public async revertToSeq(seq: number, dryRun = false): Promise<IRevertResult> {
    return this.bridge.sendCommand('revertToSeq', { seq, dryRun }) as Promise<IRevertResult>;
  }
|
||||
|
||||
  /**
   * List collections with document counts, optionally restricted to one database.
   * Note: a falsy db value (including '') is treated as "all databases".
   */
  public async getCollections(db?: string): Promise<ICollectionInfo[]> {
    const result = await this.bridge.sendCommand('getCollections', db ? { db } : {}) as { collections: ICollectionInfo[] };
    return result.collections;
  }
|
||||
|
||||
  /**
   * Fetch a page of documents from one collection.
   *
   * @param db - database name
   * @param collection - collection name
   * @param limit - page size (default 50)
   * @param skip - number of documents to skip (default 0)
   */
  public async getDocuments(
    db: string,
    collection: string,
    limit = 50,
    skip = 0,
  ): Promise<IDocumentsResult> {
    return this.bridge.sendCommand('getDocuments', { db, collection, limit, skip }) as Promise<IDocumentsResult>;
  }
|
||||
}
|
||||
|
||||
@@ -1,4 +1,13 @@
|
||||
import { RustDbBridge } from '../rust-db-bridge.js';
|
||||
import type {
|
||||
IOpLogEntry,
|
||||
IOpLogResult,
|
||||
IOpLogStats,
|
||||
IRevertResult,
|
||||
ICollectionInfo,
|
||||
IDocumentsResult,
|
||||
ISmartDbMetrics,
|
||||
} from '../rust-db-bridge.js';
|
||||
|
||||
/**
|
||||
* Server configuration options
|
||||
@@ -156,4 +165,59 @@ export class SmartdbServer {
|
||||
  /** Host the server binds to; defaults to loopback (127.0.0.1). */
  get host(): string {
    return this.options.host ?? '127.0.0.1';
  }
|
||||
|
||||
// --- OpLog / Debug API ---
|
||||
|
||||
  /**
   * Get oplog entries, optionally filtered.
   *
   * @param params.sinceSeq - only return entries from this sequence onward
   * @param params.limit - maximum number of entries to return
   * @param params.db - restrict results to one database
   * @param params.collection - restrict results to one collection
   */
  async getOpLog(params: {
    sinceSeq?: number;
    limit?: number;
    db?: string;
    collection?: string;
  } = {}): Promise<IOpLogResult> {
    return this.bridge.getOpLog(params);
  }
|
||||
|
||||
  /**
   * Get aggregate oplog statistics (sequence bounds and per-operation counts).
   */
  async getOpLogStats(): Promise<IOpLogStats> {
    return this.bridge.getOpLogStats();
  }
|
||||
|
||||
  /**
   * Revert database state to a specific oplog sequence number.
   * Use dryRun=true to preview which entries would be reverted.
   *
   * @param seq - target sequence number to roll back to
   * @param dryRun - when true, nothing is modified; the result lists the
   *   entries that would be reverted
   */
  async revertToSeq(seq: number, dryRun = false): Promise<IRevertResult> {
    return this.bridge.revertToSeq(seq, dryRun);
  }
|
||||
|
||||
  /**
   * List all collections across all databases, with document counts.
   *
   * @param db - when given, restrict the listing to this database
   */
  async getCollections(db?: string): Promise<ICollectionInfo[]> {
    return this.bridge.getCollections(db);
  }
|
||||
|
||||
  /**
   * Get documents from a collection with pagination.
   *
   * @param db - database name
   * @param collection - collection name
   * @param limit - page size (default 50)
   * @param skip - number of documents to skip (default 0)
   */
  async getDocuments(
    db: string,
    collection: string,
    limit = 50,
    skip = 0,
  ): Promise<IDocumentsResult> {
    return this.bridge.getDocuments(db, collection, limit, skip);
  }
|
||||
|
||||
  /**
   * Get server metrics including database/collection counts and oplog info.
   */
  async getMetrics(): Promise<ISmartDbMetrics> {
    return this.bridge.getMetrics();
  }
|
||||
}
|
||||
|
||||
11
ts_debugserver/bundled.ts
Normal file
11
ts_debugserver/bundled.ts
Normal file
File diff suppressed because one or more lines are too long
120
ts_debugserver/classes.debugserver.ts
Normal file
120
ts_debugserver/classes.debugserver.ts
Normal file
@@ -0,0 +1,120 @@
|
||||
import * as plugins from './plugins.js';
|
||||
import { files as bundledFiles } from './bundled.js';
|
||||
import type { SmartdbServer } from '../ts/index.js';
|
||||
|
||||
export interface IDebugServerOptions {
|
||||
/** Port to listen on (default: 4000). */
|
||||
port?: number;
|
||||
}
|
||||
|
||||
/**
|
||||
* Serves the SmartDB debug UI as a web application with API proxy endpoints.
|
||||
*
|
||||
* @example
|
||||
* ```typescript
|
||||
* import { SmartdbServer } from '@push.rocks/smartdb';
|
||||
* import { SmartdbDebugServer } from '@push.rocks/smartdb/debugserver';
|
||||
*
|
||||
* const db = new SmartdbServer({ storage: 'memory' });
|
||||
* await db.start();
|
||||
*
|
||||
* const debugServer = new SmartdbDebugServer(db, { port: 4000 });
|
||||
* await debugServer.start();
|
||||
* // Open http://localhost:4000
|
||||
* ```
|
||||
*/
|
||||
export class SmartdbDebugServer {
|
||||
private server: plugins.typedserver.TypedServer;
|
||||
private smartdbServer: SmartdbServer;
|
||||
private port: number;
|
||||
|
||||
constructor(smartdbServer: SmartdbServer, options: IDebugServerOptions = {}) {
|
||||
this.smartdbServer = smartdbServer;
|
||||
this.port = options.port ?? 4000;
|
||||
|
||||
this.server = new plugins.typedserver.TypedServer({
|
||||
cors: true,
|
||||
port: this.port,
|
||||
bundledContent: bundledFiles,
|
||||
spaFallback: true,
|
||||
noCache: true,
|
||||
});
|
||||
|
||||
this.setupApiRoutes();
|
||||
}
|
||||
|
||||
private setupApiRoutes() {
|
||||
const jsonResponse = (data: any, status = 200) =>
|
||||
new Response(JSON.stringify(data), {
|
||||
status,
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
});
|
||||
|
||||
// Metrics
|
||||
this.server.addRoute('/api/smartdb/metrics', 'GET', async () => {
|
||||
return jsonResponse(await this.smartdbServer.getMetrics());
|
||||
});
|
||||
|
||||
// OpLog stats
|
||||
this.server.addRoute('/api/smartdb/oplog/stats', 'GET', async () => {
|
||||
return jsonResponse(await this.smartdbServer.getOpLogStats());
|
||||
});
|
||||
|
||||
// OpLog entries
|
||||
this.server.addRoute('/api/smartdb/oplog', 'GET', async (ctx) => {
|
||||
const result = await this.smartdbServer.getOpLog({
|
||||
sinceSeq: ctx.query.sinceSeq ? parseInt(ctx.query.sinceSeq) : undefined,
|
||||
limit: ctx.query.limit ? parseInt(ctx.query.limit) : undefined,
|
||||
db: ctx.query.db || undefined,
|
||||
collection: ctx.query.collection || undefined,
|
||||
});
|
||||
return jsonResponse(result);
|
||||
});
|
||||
|
||||
// Collections
|
||||
this.server.addRoute('/api/smartdb/collections', 'GET', async (ctx) => {
|
||||
const collections = await this.smartdbServer.getCollections(ctx.query.db || undefined);
|
||||
return jsonResponse({ collections });
|
||||
});
|
||||
|
||||
// Documents
|
||||
this.server.addRoute('/api/smartdb/documents', 'GET', async (ctx) => {
|
||||
const { db, collection } = ctx.query;
|
||||
if (!db || !collection) {
|
||||
return jsonResponse({ error: 'db and collection required' }, 400);
|
||||
}
|
||||
const limit = parseInt(ctx.query.limit ?? '50');
|
||||
const skip = parseInt(ctx.query.skip ?? '0');
|
||||
const result = await this.smartdbServer.getDocuments(db, collection, limit, skip);
|
||||
return jsonResponse(result);
|
||||
});
|
||||
|
||||
// Revert
|
||||
this.server.addRoute('/api/smartdb/revert', 'GET', async (ctx) => {
|
||||
const { seq, dryRun } = ctx.query;
|
||||
if (!seq) {
|
||||
return jsonResponse({ error: 'seq required' }, 400);
|
||||
}
|
||||
const result = await this.smartdbServer.revertToSeq(
|
||||
parseInt(seq),
|
||||
dryRun === 'true',
|
||||
);
|
||||
return jsonResponse(result);
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Start the debug server.
|
||||
*/
|
||||
async start(): Promise<void> {
|
||||
await this.server.start();
|
||||
console.log(`SmartDB Debug UI available at http://localhost:${this.port}`);
|
||||
}
|
||||
|
||||
/**
|
||||
* Stop the debug server.
|
||||
*/
|
||||
async stop(): Promise<void> {
|
||||
await this.server.stop();
|
||||
}
|
||||
}
|
||||
2
ts_debugserver/index.ts
Normal file
2
ts_debugserver/index.ts
Normal file
@@ -0,0 +1,2 @@
|
||||
export { SmartdbDebugServer } from './classes.debugserver.js';
|
||||
export type { IDebugServerOptions } from './classes.debugserver.js';
|
||||
3
ts_debugserver/plugins.ts
Normal file
3
ts_debugserver/plugins.ts
Normal file
@@ -0,0 +1,3 @@
|
||||
import * as typedserver from '@api.global/typedserver';
|
||||
|
||||
export { typedserver };
|
||||
12
ts_debugui/index.ts
Normal file
12
ts_debugui/index.ts
Normal file
@@ -0,0 +1,12 @@
|
||||
export { SmartdbDebugUi } from './smartdb-debugui.js';
|
||||
|
||||
// Re-export relevant types from the main module
|
||||
export type {
|
||||
IOpLogEntry,
|
||||
IOpLogResult,
|
||||
IOpLogStats,
|
||||
IRevertResult,
|
||||
ICollectionInfo,
|
||||
IDocumentsResult,
|
||||
ISmartDbMetrics,
|
||||
} from '../ts/index.js';
|
||||
11
ts_debugui/plugins.ts
Normal file
11
ts_debugui/plugins.ts
Normal file
@@ -0,0 +1,11 @@
|
||||
export {
|
||||
DeesElement,
|
||||
customElement,
|
||||
html,
|
||||
css,
|
||||
property,
|
||||
state,
|
||||
cssManager,
|
||||
type TemplateResult,
|
||||
type CSSResult,
|
||||
} from '@design.estate/dees-element';
|
||||
1132
ts_debugui/smartdb-debugui.ts
Normal file
1132
ts_debugui/smartdb-debugui.ts
Normal file
File diff suppressed because it is too large
Load Diff
@@ -5,6 +5,8 @@
|
||||
"moduleResolution": "NodeNext",
|
||||
"esModuleInterop": true,
|
||||
"verbatimModuleSyntax": true,
|
||||
"experimentalDecorators": true,
|
||||
"useDefineForClassFields": false,
|
||||
"types": ["node"]
|
||||
},
|
||||
"exclude": [
|
||||
|
||||
Reference in New Issue
Block a user