fix(rustdb-commands): restore persisted index initialization before writes to enforce unique constraints after restart

This commit is contained in:
2026-04-05 03:51:58 +00:00
parent 1a10c32b12
commit e8161e6417
7 changed files with 110 additions and 33 deletions

View File

@@ -1,5 +1,12 @@
# Changelog # Changelog
## 2026-04-05 - 2.5.3 - fix(rustdb-commands)
restore persisted index initialization before writes to enforce unique constraints after restart
- load stored index specifications from storage when creating command context index engines
- rebuild index data from existing documents so custom indexes are active before insert, update, and upsert operations
- add @push.rocks/smartdata as a runtime dependency
## 2026-04-05 - 2.5.2 - fix(rustdb-indexes) ## 2026-04-05 - 2.5.2 - fix(rustdb-indexes)
persist created indexes and restore them on server startup persist created indexes and restore them on server startup

View File

@@ -29,6 +29,7 @@
"dependencies": { "dependencies": {
"@api.global/typedserver": "^8.0.0", "@api.global/typedserver": "^8.0.0",
"@design.estate/dees-element": "^2.0.0", "@design.estate/dees-element": "^2.0.0",
"@push.rocks/smartdata": "7.1.5",
"@push.rocks/smartrust": "^1.3.2", "@push.rocks/smartrust": "^1.3.2",
"bson": "^7.2.0" "bson": "^7.2.0"
}, },

11
pnpm-lock.yaml generated
View File

@@ -14,6 +14,9 @@ importers:
'@design.estate/dees-element': '@design.estate/dees-element':
specifier: ^2.0.0 specifier: ^2.0.0
version: 2.2.3 version: 2.2.3
'@push.rocks/smartdata':
specifier: 7.1.5
version: 7.1.5(socks@2.8.7)
'@push.rocks/smartrust': '@push.rocks/smartrust':
specifier: ^1.3.2 specifier: ^1.3.2
version: 1.3.2 version: 1.3.2
@@ -1026,8 +1029,8 @@ packages:
'@push.rocks/smartcrypto@2.0.4': '@push.rocks/smartcrypto@2.0.4':
resolution: {integrity: sha512-1+/5bsjyataf5uUkUNnnVXGRAt+gHVk1KDzozjTqgqJxHvQk1d9fVDohL6CxUhUucTPtu5VR5xNBiV8YCDuGyw==} resolution: {integrity: sha512-1+/5bsjyataf5uUkUNnnVXGRAt+gHVk1KDzozjTqgqJxHvQk1d9fVDohL6CxUhUucTPtu5VR5xNBiV8YCDuGyw==}
'@push.rocks/smartdata@7.1.3': '@push.rocks/smartdata@7.1.5':
resolution: {integrity: sha512-7vQJ9pdRk450yn2m9tmGPdSRlQVmxFPZjHD4sGYsfqCQPg+GLFusu+H16zpf+jKzAq4F2ZBMPaYymJHXvXiVcw==} resolution: {integrity: sha512-7x7VedEg6RocWndqUPuTbY2Bh85Q/x0LOVHL4o/NVXyh3IGNtiVQ8ple4WR0qYqlHRAojX4eDSBPMiYzIasqAg==}
'@push.rocks/smartdelay@3.0.5': '@push.rocks/smartdelay@3.0.5':
resolution: {integrity: sha512-mUuI7kj2f7ztjpic96FvRIlf2RsKBa5arw81AHNsndbxO6asRcxuWL8dTVxouEIK8YsBUlj0AsrCkHhMbLQdHw==} resolution: {integrity: sha512-mUuI7kj2f7ztjpic96FvRIlf2RsKBa5arw81AHNsndbxO6asRcxuWL8dTVxouEIK8YsBUlj0AsrCkHhMbLQdHw==}
@@ -5665,7 +5668,7 @@ snapshots:
'@types/node-forge': 1.3.14 '@types/node-forge': 1.3.14
node-forge: 1.4.0 node-forge: 1.4.0
'@push.rocks/smartdata@7.1.3(socks@2.8.7)': '@push.rocks/smartdata@7.1.5(socks@2.8.7)':
dependencies: dependencies:
'@push.rocks/lik': 6.4.0 '@push.rocks/lik': 6.4.0
'@push.rocks/smartdelay': 3.0.5 '@push.rocks/smartdelay': 3.0.5
@@ -5899,7 +5902,7 @@ snapshots:
'@push.rocks/smartmongo@5.1.1(socks@2.8.7)': '@push.rocks/smartmongo@5.1.1(socks@2.8.7)':
dependencies: dependencies:
'@push.rocks/mongodump': 1.1.0(socks@2.8.7) '@push.rocks/mongodump': 1.1.0(socks@2.8.7)
'@push.rocks/smartdata': 7.1.3(socks@2.8.7) '@push.rocks/smartdata': 7.1.5(socks@2.8.7)
'@push.rocks/smartfs': 1.5.0 '@push.rocks/smartfs': 1.5.0
'@push.rocks/smartpath': 6.0.0 '@push.rocks/smartpath': 6.0.0
'@push.rocks/smartpromise': 4.2.3 '@push.rocks/smartpromise': 4.2.3

View File

@@ -1,8 +1,8 @@
use std::sync::Arc; use std::sync::Arc;
use bson::Document; use bson::{Bson, Document};
use dashmap::DashMap; use dashmap::DashMap;
use rustdb_index::IndexEngine; use rustdb_index::{IndexEngine, IndexOptions};
use rustdb_storage::{OpLog, StorageAdapter}; use rustdb_storage::{OpLog, StorageAdapter};
use rustdb_txn::{SessionEngine, TransactionEngine}; use rustdb_txn::{SessionEngine, TransactionEngine};
@@ -24,6 +24,67 @@ pub struct CommandContext {
pub oplog: Arc<OpLog>, pub oplog: Arc<OpLog>,
} }
impl CommandContext {
    /// Get or lazily initialize an IndexEngine for a namespace.
    ///
    /// If no IndexEngine exists yet for this namespace, loads persisted index
    /// specs from `indexes.json` via the storage adapter, creates the engine
    /// with those specs, and rebuilds index data from existing documents.
    /// This ensures unique indexes are enforced even on the very first write
    /// after a restart.
    ///
    /// NOTE(review): storage errors on the slow path are deliberately swallowed
    /// (best-effort restore) — a failed `get_indexes`/`find_all` yields an empty
    /// engine rather than surfacing an error. Confirm this matches the intended
    /// durability guarantee for restored unique constraints.
    pub async fn get_or_init_index_engine(&self, db: &str, coll: &str) -> dashmap::mapref::one::RefMut<'_, String, IndexEngine> {
        // Namespace key ("db.coll") — same format the write handlers use for
        // lookups in `self.indexes`.
        let ns_key = format!("{}.{}", db, coll);
        // Fast path: engine already exists.
        // `or_insert_with` covers the (unlikely) window where the entry is
        // removed between `contains_key` and `entry`; in that race the caller
        // gets a fresh empty engine WITHOUT reloading persisted specs —
        // acceptable only if index drops never race with writes; confirm.
        if self.indexes.contains_key(&ns_key) {
            return self.indexes.entry(ns_key).or_insert_with(IndexEngine::new);
        }
        // Slow path: load from persisted specs.
        // The engine is built entirely OUTSIDE the DashMap entry so that no
        // shard guard is held across the `.await` points below (holding a
        // DashMap ref across await risks deadlocking other tasks on the shard).
        let mut engine = IndexEngine::new();
        let mut has_custom = false;
        if let Ok(specs) = self.storage.get_indexes(db, coll).await {
            for spec in &specs {
                let name = spec.get_str("name").unwrap_or("").to_string();
                // Skip the implicit _id index and malformed unnamed specs.
                if name == "_id_" || name.is_empty() {
                    continue;
                }
                // A spec whose "key" is not a document is unusable; skip it.
                let key = match spec.get("key") {
                    Some(Bson::Document(k)) => k.clone(),
                    _ => continue,
                };
                let unique = matches!(spec.get("unique"), Some(Bson::Boolean(true)));
                let sparse = matches!(spec.get("sparse"), Some(Bson::Boolean(true)));
                // TTL may have been persisted as either int32 or int64.
                let expire_after_seconds = match spec.get("expireAfterSeconds") {
                    Some(Bson::Int32(n)) => Some(*n as u64),
                    Some(Bson::Int64(n)) => Some(*n as u64),
                    _ => None,
                };
                let options = IndexOptions {
                    name: Some(name),
                    unique,
                    sparse,
                    expire_after_seconds,
                };
                // Best-effort: a failure creating one index must not prevent
                // restoring the others. Note `has_custom` is set even if
                // `create_index` errored — the rebuild below then runs against
                // whatever indexes did register.
                let _ = engine.create_index(key, options);
                has_custom = true;
            }
        }
        if has_custom {
            // Rebuild index data from existing documents.
            // NOTE(review): loads the entire collection into memory; presumably
            // acceptable for this embedded engine — confirm for large collections.
            if let Ok(docs) = self.storage.find_all(db, coll).await {
                if !docs.is_empty() {
                    engine.rebuild_from_documents(&docs);
                }
            }
        }
        // Publish the engine. If another task raced us through the slow path,
        // `or_insert` keeps the first-inserted engine and silently drops ours —
        // both were built from the same persisted specs, so the result is
        // equivalent.
        self.indexes.entry(ns_key).or_insert(engine)
    }
}
/// State of an open cursor from a find or aggregate command. /// State of an open cursor from a find or aggregate command.
pub struct CursorState { pub struct CursorState {
/// Documents remaining to be returned. /// Documents remaining to be returned.

View File

@@ -1,7 +1,6 @@
use std::collections::HashMap; use std::collections::HashMap;
use bson::{doc, oid::ObjectId, Bson, Document}; use bson::{doc, oid::ObjectId, Bson, Document};
use rustdb_index::IndexEngine;
use rustdb_storage::OpType; use rustdb_storage::OpType;
use tracing::debug; use tracing::debug;
@@ -56,6 +55,11 @@ pub async fn handle(
let mut inserted_count: i32 = 0; let mut inserted_count: i32 = 0;
let mut write_errors: Vec<Document> = Vec::new(); let mut write_errors: Vec<Document> = Vec::new();
// Ensure the IndexEngine is loaded (with persisted specs from indexes.json).
// This must happen BEFORE any writes, so unique constraints are enforced
// even on the first write after a restart.
drop(ctx.get_or_init_index_engine(db, coll).await);
for (idx, mut doc) in docs.into_iter().enumerate() { for (idx, mut doc) in docs.into_iter().enumerate() {
// Auto-generate _id if not present. // Auto-generate _id if not present.
if !doc.contains_key("_id") { if !doc.contains_key("_id") {
@@ -63,6 +67,7 @@ pub async fn handle(
} }
// Pre-check unique index constraints BEFORE storage write. // Pre-check unique index constraints BEFORE storage write.
// The engine is guaranteed to exist from the get_or_init call above.
if let Some(engine) = ctx.indexes.get(&ns_key) { if let Some(engine) = ctx.indexes.get(&ns_key) {
if let Err(e) = engine.check_unique_constraints(&doc) { if let Err(e) = engine.check_unique_constraints(&doc) {
let err_msg = e.to_string(); let err_msg = e.to_string();
@@ -92,18 +97,16 @@ pub async fn handle(
None, None,
); );
// Update index engine. // Update index engine (already initialized above).
let mut engine = ctx if let Some(mut engine) = ctx.indexes.get_mut(&ns_key) {
.indexes
.entry(ns_key.clone())
.or_insert_with(IndexEngine::new);
if let Err(e) = engine.on_insert(&doc) { if let Err(e) = engine.on_insert(&doc) {
tracing::error!( tracing::error!(
namespace = %ns_key, namespace = %ns_key,
error = %e, error = %e,
"index update failed after successful insert (pre-check passed but insert failed)" "index update failed after successful insert"
); );
} }
}
inserted_count += 1; inserted_count += 1;
} }
Err(e) => { Err(e) => {

View File

@@ -1,7 +1,6 @@
use std::collections::HashSet; use std::collections::HashSet;
use bson::{doc, oid::ObjectId, Bson, Document}; use bson::{doc, oid::ObjectId, Bson, Document};
use rustdb_index::IndexEngine;
use rustdb_query::{QueryMatcher, UpdateEngine, sort_documents, apply_projection}; use rustdb_query::{QueryMatcher, UpdateEngine, sort_documents, apply_projection};
use rustdb_storage::OpType; use rustdb_storage::OpType;
use tracing::debug; use tracing::debug;
@@ -47,6 +46,10 @@ async fn handle_update(
ensure_collection_exists(db, coll, ctx).await?; ensure_collection_exists(db, coll, ctx).await?;
let ns_key = format!("{}.{}", db, coll); let ns_key = format!("{}.{}", db, coll);
// Ensure the IndexEngine is loaded with persisted specs from indexes.json.
drop(ctx.get_or_init_index_engine(db, coll).await);
let mut total_n: i32 = 0; let mut total_n: i32 = 0;
let mut total_n_modified: i32 = 0; let mut total_n_modified: i32 = 0;
let mut upserted_list: Vec<Document> = Vec::new(); let mut upserted_list: Vec<Document> = Vec::new();
@@ -179,14 +182,12 @@ async fn handle_update(
None, None,
); );
// Update index. // Update index (engine already initialized above).
let mut engine = ctx if let Some(mut engine) = ctx.indexes.get_mut(&ns_key) {
.indexes
.entry(ns_key.clone())
.or_insert_with(IndexEngine::new);
if let Err(e) = engine.on_insert(&updated) { if let Err(e) = engine.on_insert(&updated) {
tracing::error!(namespace = %ns_key, error = %e, "index update failed after upsert insert"); tracing::error!(namespace = %ns_key, error = %e, "index update failed after upsert insert");
} }
}
total_n += 1; total_n += 1;
upserted_list.push(doc! { upserted_list.push(doc! {
@@ -402,6 +403,9 @@ async fn handle_find_and_modify(
let ns_key = format!("{}.{}", db, coll); let ns_key = format!("{}.{}", db, coll);
// Ensure the IndexEngine is loaded with persisted specs.
drop(ctx.get_or_init_index_engine(db, coll).await);
// Load and filter documents. // Load and filter documents.
let mut matched = load_filtered_docs(db, coll, &query, &ns_key, ctx).await?; let mut matched = load_filtered_docs(db, coll, &query, &ns_key, ctx).await?;
@@ -573,14 +577,12 @@ async fn handle_find_and_modify(
// Update index. // Update index.
{ {
let mut engine = ctx if let Some(mut engine) = ctx.indexes.get_mut(&ns_key) {
.indexes
.entry(ns_key.clone())
.or_insert_with(IndexEngine::new);
if let Err(e) = engine.on_insert(&updated_doc) { if let Err(e) = engine.on_insert(&updated_doc) {
tracing::error!(namespace = %ns_key, error = %e, "index update failed after findAndModify upsert"); tracing::error!(namespace = %ns_key, error = %e, "index update failed after findAndModify upsert");
} }
} }
}
let value = if return_new { let value = if return_new {
apply_fields_projection(&updated_doc, &fields) apply_fields_projection(&updated_doc, &fields)

View File

@@ -3,6 +3,6 @@
*/ */
export const commitinfo = { export const commitinfo = {
name: '@push.rocks/smartdb', name: '@push.rocks/smartdb',
version: '2.5.2', version: '2.5.3',
description: 'A MongoDB-compatible embedded database server with wire protocol support, backed by a high-performance Rust engine.' description: 'A MongoDB-compatible embedded database server with wire protocol support, backed by a high-performance Rust engine.'
} }