15 Commits

17 changed files with 804 additions and 75 deletions
+3
View File
@@ -13,5 +13,8 @@ rust/target/
package-lock.json package-lock.json
yarn.lock yarn.lock
# generated bundle (rebuilt on every build, embeds version)
ts_debugserver/bundled.ts
# playwright # playwright
.playwright-mcp/ .playwright-mcp/
+32
View File
@@ -1,5 +1,37 @@
# Changelog # Changelog
## 2026-04-05 - 2.5.8 - fix(rustdb-storage)
detect stale hint files using data file size metadata and add restart persistence regression tests
- Store the current data.rdb size in hint file headers and validate it on load to rebuild KeyDir when hints are stale or written in the old format.
- Persist updated hint metadata after compaction and shutdown to avoid missing appended tombstones after restart.
- Add validation reporting for stale hint files based on recorded versus actual data file size.
- Add regression tests covering delete persistence across restarts, missing hint recovery, stale socket cleanup, and unique index enforcement persistence.
## 2026-04-05 - 2.5.7 - fix(repo)
no changes to commit
## 2026-04-05 - 2.5.6 - fix(repo)
no changes to commit
## 2026-04-05 - 2.5.5 - fix(repo)
no changes to commit
## 2026-04-05 - 2.5.4 - fix(package)
bump package version to 2.5.3
- Updates the package metadata version by one patch release.
## 2026-04-05 - 2.5.3 - fix(rustdb-commands)
restore persisted index initialization before writes to enforce unique constraints after restart
- load stored index specifications from storage when creating command context index engines
- rebuild index data from existing documents so custom indexes are active before insert, update, and upsert operations
- add @push.rocks/smartdata as a runtime dependency
## 2026-04-05 - 2.5.2 - fix(rustdb-indexes) ## 2026-04-05 - 2.5.2 - fix(rustdb-indexes)
persist created indexes and restore them on server startup persist created indexes and restore them on server startup
+2 -1
View File
@@ -1,6 +1,6 @@
{ {
"name": "@push.rocks/smartdb", "name": "@push.rocks/smartdb",
"version": "2.5.2", "version": "2.5.8",
"private": false, "private": false,
"description": "A MongoDB-compatible embedded database server with wire protocol support, backed by a high-performance Rust engine.", "description": "A MongoDB-compatible embedded database server with wire protocol support, backed by a high-performance Rust engine.",
"exports": { "exports": {
@@ -29,6 +29,7 @@
"dependencies": { "dependencies": {
"@api.global/typedserver": "^8.0.0", "@api.global/typedserver": "^8.0.0",
"@design.estate/dees-element": "^2.0.0", "@design.estate/dees-element": "^2.0.0",
"@push.rocks/smartdata": "7.1.5",
"@push.rocks/smartrust": "^1.3.2", "@push.rocks/smartrust": "^1.3.2",
"bson": "^7.2.0" "bson": "^7.2.0"
}, },
+7 -4
View File
@@ -14,6 +14,9 @@ importers:
'@design.estate/dees-element': '@design.estate/dees-element':
specifier: ^2.0.0 specifier: ^2.0.0
version: 2.2.3 version: 2.2.3
'@push.rocks/smartdata':
specifier: 7.1.5
version: 7.1.5(socks@2.8.7)
'@push.rocks/smartrust': '@push.rocks/smartrust':
specifier: ^1.3.2 specifier: ^1.3.2
version: 1.3.2 version: 1.3.2
@@ -1026,8 +1029,8 @@ packages:
'@push.rocks/smartcrypto@2.0.4': '@push.rocks/smartcrypto@2.0.4':
resolution: {integrity: sha512-1+/5bsjyataf5uUkUNnnVXGRAt+gHVk1KDzozjTqgqJxHvQk1d9fVDohL6CxUhUucTPtu5VR5xNBiV8YCDuGyw==} resolution: {integrity: sha512-1+/5bsjyataf5uUkUNnnVXGRAt+gHVk1KDzozjTqgqJxHvQk1d9fVDohL6CxUhUucTPtu5VR5xNBiV8YCDuGyw==}
'@push.rocks/smartdata@7.1.3': '@push.rocks/smartdata@7.1.5':
resolution: {integrity: sha512-7vQJ9pdRk450yn2m9tmGPdSRlQVmxFPZjHD4sGYsfqCQPg+GLFusu+H16zpf+jKzAq4F2ZBMPaYymJHXvXiVcw==} resolution: {integrity: sha512-7x7VedEg6RocWndqUPuTbY2Bh85Q/x0LOVHL4o/NVXyh3IGNtiVQ8ple4WR0qYqlHRAojX4eDSBPMiYzIasqAg==}
'@push.rocks/smartdelay@3.0.5': '@push.rocks/smartdelay@3.0.5':
resolution: {integrity: sha512-mUuI7kj2f7ztjpic96FvRIlf2RsKBa5arw81AHNsndbxO6asRcxuWL8dTVxouEIK8YsBUlj0AsrCkHhMbLQdHw==} resolution: {integrity: sha512-mUuI7kj2f7ztjpic96FvRIlf2RsKBa5arw81AHNsndbxO6asRcxuWL8dTVxouEIK8YsBUlj0AsrCkHhMbLQdHw==}
@@ -5665,7 +5668,7 @@ snapshots:
'@types/node-forge': 1.3.14 '@types/node-forge': 1.3.14
node-forge: 1.4.0 node-forge: 1.4.0
'@push.rocks/smartdata@7.1.3(socks@2.8.7)': '@push.rocks/smartdata@7.1.5(socks@2.8.7)':
dependencies: dependencies:
'@push.rocks/lik': 6.4.0 '@push.rocks/lik': 6.4.0
'@push.rocks/smartdelay': 3.0.5 '@push.rocks/smartdelay': 3.0.5
@@ -5899,7 +5902,7 @@ snapshots:
'@push.rocks/smartmongo@5.1.1(socks@2.8.7)': '@push.rocks/smartmongo@5.1.1(socks@2.8.7)':
dependencies: dependencies:
'@push.rocks/mongodump': 1.1.0(socks@2.8.7) '@push.rocks/mongodump': 1.1.0(socks@2.8.7)
'@push.rocks/smartdata': 7.1.3(socks@2.8.7) '@push.rocks/smartdata': 7.1.5(socks@2.8.7)
'@push.rocks/smartfs': 1.5.0 '@push.rocks/smartfs': 1.5.0
'@push.rocks/smartpath': 6.0.0 '@push.rocks/smartpath': 6.0.0
'@push.rocks/smartpromise': 4.2.3 '@push.rocks/smartpromise': 4.2.3
+63 -2
View File
@@ -1,8 +1,8 @@
use std::sync::Arc; use std::sync::Arc;
use bson::Document; use bson::{Bson, Document};
use dashmap::DashMap; use dashmap::DashMap;
use rustdb_index::IndexEngine; use rustdb_index::{IndexEngine, IndexOptions};
use rustdb_storage::{OpLog, StorageAdapter}; use rustdb_storage::{OpLog, StorageAdapter};
use rustdb_txn::{SessionEngine, TransactionEngine}; use rustdb_txn::{SessionEngine, TransactionEngine};
@@ -24,6 +24,67 @@ pub struct CommandContext {
pub oplog: Arc<OpLog>, pub oplog: Arc<OpLog>,
} }
impl CommandContext {
/// Get or lazily initialize an IndexEngine for a namespace.
///
/// If no IndexEngine exists yet for this namespace, loads persisted index
/// specs from `indexes.json` via the storage adapter, creates the engine
/// with those specs, and rebuilds index data from existing documents.
/// This ensures unique indexes are enforced even on the very first write
/// after a restart.
pub async fn get_or_init_index_engine(&self, db: &str, coll: &str) -> dashmap::mapref::one::RefMut<'_, String, IndexEngine> {
let ns_key = format!("{}.{}", db, coll);
// Fast path: engine already exists.
if self.indexes.contains_key(&ns_key) {
return self.indexes.entry(ns_key).or_insert_with(IndexEngine::new);
}
// Slow path: load from persisted specs.
let mut engine = IndexEngine::new();
let mut has_custom = false;
if let Ok(specs) = self.storage.get_indexes(db, coll).await {
for spec in &specs {
let name = spec.get_str("name").unwrap_or("").to_string();
if name == "_id_" || name.is_empty() {
continue;
}
let key = match spec.get("key") {
Some(Bson::Document(k)) => k.clone(),
_ => continue,
};
let unique = matches!(spec.get("unique"), Some(Bson::Boolean(true)));
let sparse = matches!(spec.get("sparse"), Some(Bson::Boolean(true)));
let expire_after_seconds = match spec.get("expireAfterSeconds") {
Some(Bson::Int32(n)) => Some(*n as u64),
Some(Bson::Int64(n)) => Some(*n as u64),
_ => None,
};
let options = IndexOptions {
name: Some(name),
unique,
sparse,
expire_after_seconds,
};
let _ = engine.create_index(key, options);
has_custom = true;
}
}
if has_custom {
// Rebuild index data from existing documents.
if let Ok(docs) = self.storage.find_all(db, coll).await {
if !docs.is_empty() {
engine.rebuild_from_documents(&docs);
}
}
}
self.indexes.entry(ns_key).or_insert(engine)
}
}
/// State of an open cursor from a find or aggregate command. /// State of an open cursor from a find or aggregate command.
pub struct CursorState { pub struct CursorState {
/// Documents remaining to be returned. /// Documents remaining to be returned.
@@ -1,7 +1,6 @@
use std::collections::HashMap; use std::collections::HashMap;
use bson::{doc, oid::ObjectId, Bson, Document}; use bson::{doc, oid::ObjectId, Bson, Document};
use rustdb_index::IndexEngine;
use rustdb_storage::OpType; use rustdb_storage::OpType;
use tracing::debug; use tracing::debug;
@@ -56,6 +55,11 @@ pub async fn handle(
let mut inserted_count: i32 = 0; let mut inserted_count: i32 = 0;
let mut write_errors: Vec<Document> = Vec::new(); let mut write_errors: Vec<Document> = Vec::new();
// Ensure the IndexEngine is loaded (with persisted specs from indexes.json).
// This must happen BEFORE any writes, so unique constraints are enforced
// even on the first write after a restart.
drop(ctx.get_or_init_index_engine(db, coll).await);
for (idx, mut doc) in docs.into_iter().enumerate() { for (idx, mut doc) in docs.into_iter().enumerate() {
// Auto-generate _id if not present. // Auto-generate _id if not present.
if !doc.contains_key("_id") { if !doc.contains_key("_id") {
@@ -63,6 +67,7 @@ pub async fn handle(
} }
// Pre-check unique index constraints BEFORE storage write. // Pre-check unique index constraints BEFORE storage write.
// The engine is guaranteed to exist from the get_or_init call above.
if let Some(engine) = ctx.indexes.get(&ns_key) { if let Some(engine) = ctx.indexes.get(&ns_key) {
if let Err(e) = engine.check_unique_constraints(&doc) { if let Err(e) = engine.check_unique_constraints(&doc) {
let err_msg = e.to_string(); let err_msg = e.to_string();
@@ -92,17 +97,15 @@ pub async fn handle(
None, None,
); );
// Update index engine. // Update index engine (already initialized above).
let mut engine = ctx if let Some(mut engine) = ctx.indexes.get_mut(&ns_key) {
.indexes if let Err(e) = engine.on_insert(&doc) {
.entry(ns_key.clone()) tracing::error!(
.or_insert_with(IndexEngine::new); namespace = %ns_key,
if let Err(e) = engine.on_insert(&doc) { error = %e,
tracing::error!( "index update failed after successful insert"
namespace = %ns_key, );
error = %e, }
"index update failed after successful insert (pre-check passed but insert failed)"
);
} }
inserted_count += 1; inserted_count += 1;
} }
@@ -1,7 +1,6 @@
use std::collections::HashSet; use std::collections::HashSet;
use bson::{doc, oid::ObjectId, Bson, Document}; use bson::{doc, oid::ObjectId, Bson, Document};
use rustdb_index::IndexEngine;
use rustdb_query::{QueryMatcher, UpdateEngine, sort_documents, apply_projection}; use rustdb_query::{QueryMatcher, UpdateEngine, sort_documents, apply_projection};
use rustdb_storage::OpType; use rustdb_storage::OpType;
use tracing::debug; use tracing::debug;
@@ -47,6 +46,10 @@ async fn handle_update(
ensure_collection_exists(db, coll, ctx).await?; ensure_collection_exists(db, coll, ctx).await?;
let ns_key = format!("{}.{}", db, coll); let ns_key = format!("{}.{}", db, coll);
// Ensure the IndexEngine is loaded with persisted specs from indexes.json.
drop(ctx.get_or_init_index_engine(db, coll).await);
let mut total_n: i32 = 0; let mut total_n: i32 = 0;
let mut total_n_modified: i32 = 0; let mut total_n_modified: i32 = 0;
let mut upserted_list: Vec<Document> = Vec::new(); let mut upserted_list: Vec<Document> = Vec::new();
@@ -179,13 +182,11 @@ async fn handle_update(
None, None,
); );
// Update index. // Update index (engine already initialized above).
let mut engine = ctx if let Some(mut engine) = ctx.indexes.get_mut(&ns_key) {
.indexes if let Err(e) = engine.on_insert(&updated) {
.entry(ns_key.clone()) tracing::error!(namespace = %ns_key, error = %e, "index update failed after upsert insert");
.or_insert_with(IndexEngine::new); }
if let Err(e) = engine.on_insert(&updated) {
tracing::error!(namespace = %ns_key, error = %e, "index update failed after upsert insert");
} }
total_n += 1; total_n += 1;
@@ -402,6 +403,9 @@ async fn handle_find_and_modify(
let ns_key = format!("{}.{}", db, coll); let ns_key = format!("{}.{}", db, coll);
// Ensure the IndexEngine is loaded with persisted specs.
drop(ctx.get_or_init_index_engine(db, coll).await);
// Load and filter documents. // Load and filter documents.
let mut matched = load_filtered_docs(db, coll, &query, &ns_key, ctx).await?; let mut matched = load_filtered_docs(db, coll, &query, &ns_key, ctx).await?;
@@ -573,12 +577,10 @@ async fn handle_find_and_modify(
// Update index. // Update index.
{ {
let mut engine = ctx if let Some(mut engine) = ctx.indexes.get_mut(&ns_key) {
.indexes if let Err(e) = engine.on_insert(&updated_doc) {
.entry(ns_key.clone()) tracing::error!(namespace = %ns_key, error = %e, "index update failed after findAndModify upsert");
.or_insert_with(IndexEngine::new); }
if let Err(e) = engine.on_insert(&updated_doc) {
tracing::error!(namespace = %ns_key, error = %e, "index update failed after findAndModify upsert");
} }
} }
+44 -21
View File
@@ -178,7 +178,8 @@ impl CollectionState {
tracing::warn!("compaction failed for {:?}: {e}", self.coll_dir); tracing::warn!("compaction failed for {:?}: {e}", self.coll_dir);
} else { } else {
// Persist hint file after successful compaction to prevent stale hints // Persist hint file after successful compaction to prevent stale hints
if let Err(e) = self.keydir.persist_to_hint_file(&self.hint_path()) { let current_size = self.data_file_size.load(Ordering::Relaxed);
if let Err(e) = self.keydir.persist_to_hint_file(&self.hint_path(), current_size) {
tracing::warn!("failed to persist hint after compaction for {:?}: {e}", self.coll_dir); tracing::warn!("failed to persist hint after compaction for {:?}: {e}", self.coll_dir);
} }
} }
@@ -257,26 +258,47 @@ impl FileStorageAdapter {
// Try loading from hint file first, fall back to data file scan // Try loading from hint file first, fall back to data file scan
let (keydir, dead_bytes, loaded_from_hint) = if hint_path.exists() && data_path.exists() { let (keydir, dead_bytes, loaded_from_hint) = if hint_path.exists() && data_path.exists() {
match KeyDir::load_from_hint_file(&hint_path) { match KeyDir::load_from_hint_file(&hint_path) {
Ok(Some(kd)) => { Ok(Some((kd, stored_size))) => {
// Validate hint against actual data file let actual_size = std::fs::metadata(&data_path)
let hint_valid = kd.validate_against_data_file(&data_path, 16) .map(|m| m.len())
.unwrap_or(false); .unwrap_or(0);
if hint_valid {
debug!("loaded KeyDir from hint file: {:?}", hint_path); // Check if data.rdb changed since the hint was written.
let file_size = std::fs::metadata(&data_path) // If stored_size is 0, this is an old-format hint without size tracking.
.map(|m| m.len()) let size_matches = stored_size > 0 && stored_size == actual_size;
.unwrap_or(FILE_HEADER_SIZE as u64);
let live_bytes: u64 = { if !size_matches {
let mut total = 0u64; // data.rdb size differs from hint snapshot — records were appended
kd.for_each(|_, e| total += e.record_len as u64); // (inserts, tombstones) after the hint was written. Full scan required
total // to pick up tombstones that would otherwise be invisible.
}; if stored_size == 0 {
let dead = file_size.saturating_sub(FILE_HEADER_SIZE as u64).saturating_sub(live_bytes); debug!("hint file {:?} has no size tracking, rebuilding from data file", hint_path);
(kd, dead, true) } else {
} else { tracing::warn!(
tracing::warn!("hint file {:?} is stale, rebuilding from data file", hint_path); "hint file {:?} is stale: data size changed ({} -> {}), rebuilding",
hint_path, stored_size, actual_size
);
}
let (kd, dead, _stats) = KeyDir::build_from_data_file(&data_path)?; let (kd, dead, _stats) = KeyDir::build_from_data_file(&data_path)?;
(kd, dead, false) (kd, dead, false)
} else {
// Size matches — validate entry integrity with spot-checks
let hint_valid = kd.validate_against_data_file(&data_path, 16)
.unwrap_or(false);
if hint_valid {
debug!("loaded KeyDir from hint file: {:?}", hint_path);
let live_bytes: u64 = {
let mut total = 0u64;
kd.for_each(|_, e| total += e.record_len as u64);
total
};
let dead = actual_size.saturating_sub(FILE_HEADER_SIZE as u64).saturating_sub(live_bytes);
(kd, dead, true)
} else {
tracing::warn!("hint file {:?} failed validation, rebuilding from data file", hint_path);
let (kd, dead, _stats) = KeyDir::build_from_data_file(&data_path)?;
(kd, dead, false)
}
} }
} }
_ => { _ => {
@@ -510,10 +532,11 @@ impl StorageAdapter for FileStorageAdapter {
handle.abort(); handle.abort();
} }
// Persist all KeyDir hint files // Persist all KeyDir hint files with current data file sizes
for entry in self.collections.iter() { for entry in self.collections.iter() {
let state = entry.value(); let state = entry.value();
let _ = state.keydir.persist_to_hint_file(&state.hint_path()); let current_size = state.data_file_size.load(Ordering::Relaxed);
let _ = state.keydir.persist_to_hint_file(&state.hint_path(), current_size);
} }
debug!("FileStorageAdapter closed"); debug!("FileStorageAdapter closed");
Ok(()) Ok(())
+14 -7
View File
@@ -198,14 +198,17 @@ impl KeyDir {
/// Persist the KeyDir to a hint file for fast restart. /// Persist the KeyDir to a hint file for fast restart.
/// ///
/// `data_file_size` is the current size of data.rdb — stored in the hint header
/// so that on next load we can detect if data.rdb changed (stale hint).
///
/// Hint file format (after the 64-byte file header): /// Hint file format (after the 64-byte file header):
/// For each entry: [key_len:u32 LE][key bytes][offset:u64 LE][record_len:u32 LE][value_len:u32 LE][timestamp:u64 LE] /// For each entry: [key_len:u32 LE][key bytes][offset:u64 LE][record_len:u32 LE][value_len:u32 LE][timestamp:u64 LE]
pub fn persist_to_hint_file(&self, path: &Path) -> StorageResult<()> { pub fn persist_to_hint_file(&self, path: &Path, data_file_size: u64) -> StorageResult<()> {
let file = std::fs::File::create(path)?; let file = std::fs::File::create(path)?;
let mut writer = BufWriter::new(file); let mut writer = BufWriter::new(file);
// Write file header // Write file header with data_file_size for staleness detection
let hdr = FileHeader::new(FileType::Hint); let hdr = FileHeader::new_hint(data_file_size);
writer.write_all(&hdr.encode())?; writer.write_all(&hdr.encode())?;
// Write entries // Write entries
@@ -225,7 +228,9 @@ impl KeyDir {
} }
/// Load a KeyDir from a hint file. Returns None if the file doesn't exist. /// Load a KeyDir from a hint file. Returns None if the file doesn't exist.
pub fn load_from_hint_file(path: &Path) -> StorageResult<Option<Self>> { /// Returns `(keydir, stored_data_file_size)` where `stored_data_file_size` is the
/// data.rdb size recorded when the hint was written (0 = old format, unknown).
pub fn load_from_hint_file(path: &Path) -> StorageResult<Option<(Self, u64)>> {
if !path.exists() { if !path.exists() {
return Ok(None); return Ok(None);
} }
@@ -254,6 +259,7 @@ impl KeyDir {
))); )));
} }
let stored_data_file_size = hdr.data_file_size;
let keydir = KeyDir::new(); let keydir = KeyDir::new();
loop { loop {
@@ -292,7 +298,7 @@ impl KeyDir {
); );
} }
Ok(Some(keydir)) Ok(Some((keydir, stored_data_file_size)))
} }
// ----------------------------------------------------------------------- // -----------------------------------------------------------------------
@@ -517,9 +523,10 @@ mod tests {
}, },
); );
kd.persist_to_hint_file(&hint_path).unwrap(); kd.persist_to_hint_file(&hint_path, 12345).unwrap();
let loaded = KeyDir::load_from_hint_file(&hint_path).unwrap().unwrap(); let (loaded, stored_size) = KeyDir::load_from_hint_file(&hint_path).unwrap().unwrap();
assert_eq!(stored_size, 12345);
assert_eq!(loaded.len(), 2); assert_eq!(loaded.len(), 2);
let e1 = loaded.get("doc1").unwrap(); let e1 = loaded.get("doc1").unwrap();
assert_eq!(e1.offset, 64); assert_eq!(e1.offset, 64);
+21 -1
View File
@@ -79,6 +79,9 @@ pub struct FileHeader {
pub file_type: FileType, pub file_type: FileType,
pub flags: u32, pub flags: u32,
pub created_ms: u64, pub created_ms: u64,
/// For hint files: the data.rdb file size at the time the hint was written.
/// Used to detect stale hints after ungraceful shutdown. 0 = unknown (old format).
pub data_file_size: u64,
} }
impl FileHeader { impl FileHeader {
@@ -89,6 +92,18 @@ impl FileHeader {
file_type, file_type,
flags: 0, flags: 0,
created_ms: now_ms(), created_ms: now_ms(),
data_file_size: 0,
}
}
/// Create a new hint header that records the data file size.
pub fn new_hint(data_file_size: u64) -> Self {
Self {
version: FORMAT_VERSION,
file_type: FileType::Hint,
flags: 0,
created_ms: now_ms(),
data_file_size,
} }
} }
@@ -100,7 +115,8 @@ impl FileHeader {
buf[10] = self.file_type as u8; buf[10] = self.file_type as u8;
buf[11..15].copy_from_slice(&self.flags.to_le_bytes()); buf[11..15].copy_from_slice(&self.flags.to_le_bytes());
buf[15..23].copy_from_slice(&self.created_ms.to_le_bytes()); buf[15..23].copy_from_slice(&self.created_ms.to_le_bytes());
// bytes 23..64 are reserved (zeros) buf[23..31].copy_from_slice(&self.data_file_size.to_le_bytes());
// bytes 31..64 are reserved (zeros)
buf buf
} }
@@ -127,11 +143,15 @@ impl FileHeader {
let created_ms = u64::from_le_bytes([ let created_ms = u64::from_le_bytes([
buf[15], buf[16], buf[17], buf[18], buf[19], buf[20], buf[21], buf[22], buf[15], buf[16], buf[17], buf[18], buf[19], buf[20], buf[21], buf[22],
]); ]);
let data_file_size = u64::from_le_bytes([
buf[23], buf[24], buf[25], buf[26], buf[27], buf[28], buf[29], buf[30],
]);
Ok(Self { Ok(Self {
version, version,
file_type, file_type,
flags, flags,
created_ms, created_ms,
data_file_size,
}) })
} }
} }
+7 -1
View File
@@ -295,7 +295,13 @@ fn validate_collection(db: &str, coll: &str, coll_dir: &Path) -> CollectionRepor
// Validate hint file if present // Validate hint file if present
if hint_path.exists() { if hint_path.exists() {
match KeyDir::load_from_hint_file(&hint_path) { match KeyDir::load_from_hint_file(&hint_path) {
Ok(Some(hint_kd)) => { Ok(Some((hint_kd, stored_size))) => {
if stored_size > 0 && stored_size != report.data_file_size {
report.errors.push(format!(
"hint file is stale: recorded data size {} but actual is {}",
stored_size, report.data_file_size
));
}
// Check for orphaned entries: keys in hint but not live in data // Check for orphaned entries: keys in hint but not live in data
hint_kd.for_each(|key, _entry| { hint_kd.for_each(|key, _entry| {
if !live_ids.contains(key) { if !live_ids.contains(key) {
+191
View File
@@ -0,0 +1,191 @@
import { expect, tap } from '@git.zone/tstest/tapbundle';
import * as smartdb from '../ts/index.js';
import { MongoClient, Db } from 'mongodb';
import * as fs from 'fs';
import * as path from 'path';
import * as os from 'os';
// ---------------------------------------------------------------------------
// Test: Deletes persist across restart (tombstone + hint staleness detection)
// Covers: append_tombstone to data.rdb, hint file data_file_size tracking,
// stale hint detection on restart
// ---------------------------------------------------------------------------
let tmpDir: string;
let localDb: smartdb.LocalSmartDb;
let client: MongoClient;
let db: Db;
function makeTmpDir(): string {
return fs.mkdtempSync(path.join(os.tmpdir(), 'smartdb-delete-test-'));
}
function cleanTmpDir(dir: string): void {
if (fs.existsSync(dir)) {
fs.rmSync(dir, { recursive: true, force: true });
}
}
// ============================================================================
// Setup
// ============================================================================
tap.test('setup: start local db and insert documents', async () => {
tmpDir = makeTmpDir();
localDb = new smartdb.LocalSmartDb({ folderPath: tmpDir });
const info = await localDb.start();
client = new MongoClient(info.connectionUri, {
directConnection: true,
serverSelectionTimeoutMS: 5000,
});
await client.connect();
db = client.db('deletetest');
const coll = db.collection('items');
await coll.insertMany([
{ name: 'keep-1', value: 100 },
{ name: 'keep-2', value: 200 },
{ name: 'delete-me', value: 999 },
{ name: 'keep-3', value: 300 },
]);
const count = await coll.countDocuments();
expect(count).toEqual(4);
});
// ============================================================================
// Delete and verify
// ============================================================================
tap.test('delete-persistence: delete a document', async () => {
const coll = db.collection('items');
const result = await coll.deleteOne({ name: 'delete-me' });
expect(result.deletedCount).toEqual(1);
const remaining = await coll.countDocuments();
expect(remaining).toEqual(3);
const deleted = await coll.findOne({ name: 'delete-me' });
expect(deleted).toBeNull();
});
// ============================================================================
// Graceful restart: delete survives
// ============================================================================
tap.test('delete-persistence: graceful stop and restart', async () => {
await client.close();
await localDb.stop(); // graceful — writes hint file
localDb = new smartdb.LocalSmartDb({ folderPath: tmpDir });
const info = await localDb.start();
client = new MongoClient(info.connectionUri, {
directConnection: true,
serverSelectionTimeoutMS: 5000,
});
await client.connect();
db = client.db('deletetest');
});
tap.test('delete-persistence: deleted doc stays deleted after graceful restart', async () => {
const coll = db.collection('items');
const count = await coll.countDocuments();
expect(count).toEqual(3);
const deleted = await coll.findOne({ name: 'delete-me' });
expect(deleted).toBeNull();
// The remaining docs are intact
const keep1 = await coll.findOne({ name: 'keep-1' });
expect(keep1).toBeTruthy();
expect(keep1!.value).toEqual(100);
});
// ============================================================================
// Simulate ungraceful restart: delete after hint write, then restart
// The hint file data_file_size check should detect the stale hint
// ============================================================================
tap.test('delete-persistence: insert and delete more docs, then restart', async () => {
const coll = db.collection('items');
// Insert a new doc
await coll.insertOne({ name: 'temporary', value: 777 });
expect(await coll.countDocuments()).toEqual(4);
// Delete it
await coll.deleteOne({ name: 'temporary' });
expect(await coll.countDocuments()).toEqual(3);
const gone = await coll.findOne({ name: 'temporary' });
expect(gone).toBeNull();
});
tap.test('delete-persistence: stop and restart again', async () => {
await client.close();
await localDb.stop();
localDb = new smartdb.LocalSmartDb({ folderPath: tmpDir });
const info = await localDb.start();
client = new MongoClient(info.connectionUri, {
directConnection: true,
serverSelectionTimeoutMS: 5000,
});
await client.connect();
db = client.db('deletetest');
});
tap.test('delete-persistence: all deletes survived second restart', async () => {
const coll = db.collection('items');
const count = await coll.countDocuments();
expect(count).toEqual(3);
// Both deletes are permanent
expect(await coll.findOne({ name: 'delete-me' })).toBeNull();
expect(await coll.findOne({ name: 'temporary' })).toBeNull();
// Survivors intact
const names = (await coll.find({}).toArray()).map(d => d.name).sort();
expect(names).toEqual(['keep-1', 'keep-2', 'keep-3']);
});
// ============================================================================
// Delete all docs and verify empty after restart
// ============================================================================
tap.test('delete-persistence: delete all remaining docs', async () => {
const coll = db.collection('items');
await coll.deleteMany({});
expect(await coll.countDocuments()).toEqual(0);
});
tap.test('delete-persistence: restart with empty collection', async () => {
await client.close();
await localDb.stop();
localDb = new smartdb.LocalSmartDb({ folderPath: tmpDir });
const info = await localDb.start();
client = new MongoClient(info.connectionUri, {
directConnection: true,
serverSelectionTimeoutMS: 5000,
});
await client.connect();
db = client.db('deletetest');
});
tap.test('delete-persistence: collection is empty after restart', async () => {
const coll = db.collection('items');
const count = await coll.countDocuments();
expect(count).toEqual(0);
});
// ============================================================================
// Cleanup
// ============================================================================
tap.test('delete-persistence: cleanup', async () => {
await client.close();
await localDb.stop();
cleanTmpDir(tmpDir);
});
export default tap.start();
+126
View File
@@ -0,0 +1,126 @@
import { expect, tap } from '@git.zone/tstest/tapbundle';
import * as smartdb from '../ts/index.js';
import { MongoClient, Db } from 'mongodb';
import * as fs from 'fs';
import * as path from 'path';
import * as os from 'os';
// ---------------------------------------------------------------------------
// Test: Missing data.rdb header recovery + startup logging
// Covers: ensure_data_header, BuildStats, info-level startup logging
// ---------------------------------------------------------------------------
let tmpDir: string;
let localDb: smartdb.LocalSmartDb;
let client: MongoClient;
let db: Db;
function makeTmpDir(): string {
return fs.mkdtempSync(path.join(os.tmpdir(), 'smartdb-header-test-'));
}
function cleanTmpDir(dir: string): void {
if (fs.existsSync(dir)) {
fs.rmSync(dir, { recursive: true, force: true });
}
}
// ============================================================================
// Setup: create data, then corrupt it
// ============================================================================
tap.test('setup: start, insert data, stop', async () => {
tmpDir = makeTmpDir();
localDb = new smartdb.LocalSmartDb({ folderPath: tmpDir });
const info = await localDb.start();
client = new MongoClient(info.connectionUri, {
directConnection: true,
serverSelectionTimeoutMS: 5000,
});
await client.connect();
db = client.db('headertest');
const coll = db.collection('docs');
await coll.insertMany([
{ key: 'a', val: 1 },
{ key: 'b', val: 2 },
{ key: 'c', val: 3 },
]);
await client.close();
await localDb.stop();
});
// ============================================================================
// Delete hint file and restart: should rebuild from data.rdb scan
// ============================================================================
tap.test('header-recovery: delete hint file and restart', async () => {
// Find and delete hint files
const dbDir = path.join(tmpDir, 'headertest', 'docs');
const hintPath = path.join(dbDir, 'keydir.hint');
if (fs.existsSync(hintPath)) {
fs.unlinkSync(hintPath);
}
localDb = new smartdb.LocalSmartDb({ folderPath: tmpDir });
const info = await localDb.start();
client = new MongoClient(info.connectionUri, {
directConnection: true,
serverSelectionTimeoutMS: 5000,
});
await client.connect();
db = client.db('headertest');
});
tap.test('header-recovery: data intact after hint deletion', async () => {
const coll = db.collection('docs');
const count = await coll.countDocuments();
expect(count).toEqual(3);
const a = await coll.findOne({ key: 'a' });
expect(a!.val).toEqual(1);
});
// ============================================================================
// Write new data after restart, stop, restart again
// ============================================================================
tap.test('header-recovery: write after hint-less restart', async () => {
const coll = db.collection('docs');
await coll.insertOne({ key: 'd', val: 4 });
expect(await coll.countDocuments()).toEqual(4);
});
tap.test('header-recovery: restart and verify all data', async () => {
await client.close();
await localDb.stop();
localDb = new smartdb.LocalSmartDb({ folderPath: tmpDir });
const info = await localDb.start();
client = new MongoClient(info.connectionUri, {
directConnection: true,
serverSelectionTimeoutMS: 5000,
});
await client.connect();
db = client.db('headertest');
const coll = db.collection('docs');
const count = await coll.countDocuments();
expect(count).toEqual(4);
const keys = (await coll.find({}).toArray()).map(d => d.key).sort();
expect(keys).toEqual(['a', 'b', 'c', 'd']);
});
// ============================================================================
// Cleanup
// ============================================================================
tap.test('header-recovery: cleanup', async () => {
await client.close();
await localDb.stop();
cleanTmpDir(tmpDir);
});
export default tap.start();
+82
View File
@@ -0,0 +1,82 @@
import { expect, tap } from '@git.zone/tstest/tapbundle';
import * as smartdb from '../ts/index.js';
import * as fs from 'fs';
import * as net from 'net';
import * as path from 'path';
import * as os from 'os';
// ---------------------------------------------------------------------------
// Test: Stale socket cleanup on startup
// Covers: LocalSmartDb.cleanStaleSockets(), isSocketAlive()
// ---------------------------------------------------------------------------
/** Creates a fresh, uniquely-named scratch directory under the OS temp root. */
function makeTmpDir(): string {
  const template = path.join(os.tmpdir(), 'smartdb-socket-test-');
  return fs.mkdtempSync(template);
}
/** Recursively deletes a scratch directory; a missing path is a no-op. */
function cleanTmpDir(dir: string): void {
  if (!fs.existsSync(dir)) {
    return;
  }
  fs.rmSync(dir, { recursive: true, force: true });
}
// ============================================================================
// Stale socket cleanup: active sockets are preserved
// ============================================================================
tap.test('stale-sockets: does not remove active sockets', async () => {
  const tmpDir = makeTmpDir();
  const activeSocketPath = path.join(os.tmpdir(), `smartdb-active-${Date.now()}.sock`);
  // Create an active socket (server still listening)
  const activeServer = net.createServer();
  await new Promise<void>((resolve) => activeServer.listen(activeSocketPath, resolve));
  const localDb = new smartdb.LocalSmartDb({ folderPath: tmpDir });
  try {
    expect(fs.existsSync(activeSocketPath)).toBeTrue();
    // Start LocalSmartDb — should NOT remove the active socket
    await localDb.start();
    expect(fs.existsSync(activeSocketPath)).toBeTrue();
  } finally {
    // Guarantee teardown even when start() or an assertion throws; otherwise
    // the leaked listener and socket file would poison subsequent test runs.
    // stop() is best-effort here so a teardown error cannot mask the real one.
    try { await localDb.stop(); } catch {}
    await new Promise<void>((resolve) => activeServer.close(() => resolve()));
    try { fs.unlinkSync(activeSocketPath); } catch {}
    cleanTmpDir(tmpDir);
  }
});
// ============================================================================
// Stale socket cleanup: startup works with no stale sockets
// ============================================================================
tap.test('stale-sockets: startup works cleanly with no stale sockets', async () => {
  // Baseline: with nothing to clean, start() must still succeed and report
  // a usable socket path.
  const workDir = makeTmpDir();
  const instance = new smartdb.LocalSmartDb({ folderPath: workDir });
  const startInfo = await instance.start();
  expect(instance.running).toBeTrue();
  expect(startInfo.socketPath).toBeTruthy();
  await instance.stop();
  cleanTmpDir(workDir);
});
// ============================================================================
// Stale socket cleanup: the socket file for the current instance is cleaned on stop
// ============================================================================
tap.test('stale-sockets: own socket file is removed on stop', async () => {
  const workDir = makeTmpDir();
  const instance = new smartdb.LocalSmartDb({ folderPath: workDir });
  const startInfo = await instance.start();
  expect(fs.existsSync(startInfo.socketPath)).toBeTrue();
  await instance.stop();
  // Socket file should be gone after graceful stop
  expect(fs.existsSync(startInfo.socketPath)).toBeFalse();
  cleanTmpDir(workDir);
});
export default tap.start();
+180
View File
@@ -0,0 +1,180 @@
import { expect, tap } from '@git.zone/tstest/tapbundle';
import * as smartdb from '../ts/index.js';
import { MongoClient, Db } from 'mongodb';
import * as fs from 'fs';
import * as path from 'path';
import * as os from 'os';
// ---------------------------------------------------------------------------
// Test: Unique index enforcement via wire protocol
// Covers: unique index pre-check, createIndexes persistence, index restoration
// ---------------------------------------------------------------------------
// Shared fixtures: the tap tests below run sequentially and hand these across
// setup → assertions → restart → cleanup.
let tmpDir: string; // scratch folder holding the database files
let localDb: smartdb.LocalSmartDb; // embedded server under test
let client: MongoClient; // wire-protocol client connected to localDb
let db: Db; // handle to the 'uniquetest' database
/** Creates a unique scratch directory for this test file's database files. */
function makeTmpDir(): string {
  const template = path.join(os.tmpdir(), 'smartdb-unique-test-');
  return fs.mkdtempSync(template);
}
/** Removes a scratch directory recursively; tolerates a missing path. */
function cleanTmpDir(dir: string): void {
  if (!fs.existsSync(dir)) return;
  fs.rmSync(dir, { recursive: true, force: true });
}
// ============================================================================
// Setup
// ============================================================================
tap.test('setup: start local db', async () => {
  // Boot an embedded server on a fresh folder and connect over the wire
  // protocol, exactly as an external MongoDB client would.
  tmpDir = makeTmpDir();
  localDb = new smartdb.LocalSmartDb({ folderPath: tmpDir });
  const startInfo = await localDb.start();
  client = new MongoClient(startInfo.connectionUri, {
    directConnection: true,
    serverSelectionTimeoutMS: 5000,
  });
  await client.connect();
  db = client.db('uniquetest');
});
// ============================================================================
// Unique index enforcement on insert
// ============================================================================
tap.test('unique-index: createIndex with unique: true', async () => {
  // Seed one document, then declare the unique constraint on its field.
  const users = db.collection('users');
  await users.insertOne({ email: 'alice@example.com', name: 'Alice' });
  const createdName = await users.createIndex({ email: 1 }, { unique: true });
  expect(createdName).toBeTruthy();
});
tap.test('unique-index: reject duplicate on insertOne', async () => {
  const users = db.collection('users');
  let rejected = false;
  try {
    await users.insertOne({ email: 'alice@example.com', name: 'Alice2' });
  } catch (err: any) {
    rejected = true;
    // 11000 is MongoDB's canonical duplicate-key error code.
    expect(err.code).toEqual(11000);
  }
  expect(rejected).toBeTrue();
  // Verify only 1 document exists
  expect(await users.countDocuments()).toEqual(1);
});
tap.test('unique-index: allow insert with different unique value', async () => {
  // A distinct email must pass the unique pre-check.
  const users = db.collection('users');
  await users.insertOne({ email: 'bob@example.com', name: 'Bob' });
  expect(await users.countDocuments()).toEqual(2);
});
// ============================================================================
// Unique index enforcement on update
// ============================================================================
tap.test('unique-index: reject duplicate on updateOne that changes unique field', async () => {
  // Moving Bob's email onto Alice's value would create a duplicate key.
  const users = db.collection('users');
  let rejected = false;
  try {
    await users.updateOne(
      { email: 'bob@example.com' },
      { $set: { email: 'alice@example.com' } }
    );
  } catch (err: any) {
    rejected = true;
    expect(err.code).toEqual(11000);
  }
  expect(rejected).toBeTrue();
  // Bob's email should be unchanged
  const bob = await users.findOne({ name: 'Bob' });
  expect(bob!.email).toEqual('bob@example.com');
});
tap.test('unique-index: allow update that keeps same unique value', async () => {
  // Updating a non-indexed field while the unique value stays put must succeed.
  const users = db.collection('users');
  await users.updateOne(
    { email: 'bob@example.com' },
    { $set: { name: 'Robert' } }
  );
  const updated = await users.findOne({ email: 'bob@example.com' });
  expect(updated!.name).toEqual('Robert');
});
// ============================================================================
// Unique index enforcement on upsert
// ============================================================================
tap.test('unique-index: reject duplicate on upsert insert', async () => {
  const users = db.collection('users');
  let rejected = false;
  try {
    // Nothing matches the filter, so this upsert would insert a document
    // whose email collides with Alice's existing one.
    await users.updateOne(
      { email: 'new@example.com' },
      { $set: { email: 'alice@example.com', name: 'Imposter' } },
      { upsert: true }
    );
  } catch (err: any) {
    // NOTE(review): unlike the sibling tests, err.code is not asserted here —
    // confirm whether the upsert path also reports 11000.
    rejected = true;
  }
  expect(rejected).toBeTrue();
});
// ============================================================================
// Unique index survives restart (persistence + restoration)
// ============================================================================
tap.test('unique-index: stop and restart', async () => {
  // Full shutdown and cold start on the same folder; indexes must come back
  // from persisted specs, not from this process's memory.
  await client.close();
  await localDb.stop();
  localDb = new smartdb.LocalSmartDb({ folderPath: tmpDir });
  const startInfo = await localDb.start();
  client = new MongoClient(startInfo.connectionUri, {
    directConnection: true,
    serverSelectionTimeoutMS: 5000,
  });
  await client.connect();
  db = client.db('uniquetest');
});
tap.test('unique-index: enforcement persists after restart', async () => {
  const users = db.collection('users');
  // Data should still be there
  expect(await users.countDocuments()).toEqual(2);
  // Unique constraint should still be enforced without calling createIndex again
  let rejected = false;
  try {
    await users.insertOne({ email: 'alice@example.com', name: 'Alice3' });
  } catch (err: any) {
    rejected = true;
    expect(err.code).toEqual(11000);
  }
  expect(rejected).toBeTrue();
  // Count unchanged
  expect(await users.countDocuments()).toEqual(2);
});
// ============================================================================
// Cleanup
// ============================================================================
tap.test('unique-index: cleanup', async () => {
  // Tear down in dependency order: wire client first, then the embedded
  // server, then the scratch folder.
  await client.close();
  await localDb.stop();
  cleanTmpDir(tmpDir);
});
export default tap.start();
+1 -1
View File
@@ -3,6 +3,6 @@
*/ */
export const commitinfo = { export const commitinfo = {
name: '@push.rocks/smartdb', name: '@push.rocks/smartdb',
version: '2.5.2', version: '2.5.8',
description: 'A MongoDB-compatible embedded database server with wire protocol support, backed by a high-performance Rust engine.' description: 'A MongoDB-compatible embedded database server with wire protocol support, backed by a high-performance Rust engine.'
} }
File diff suppressed because one or more lines are too long