BREAKING CHANGE(core): replace the TypeScript database engine with a Rust-backed embedded server and bridge

This commit is contained in:
2026-03-26 19:48:27 +00:00
parent 8ec2046908
commit e23a951dbe
106 changed files with 11567 additions and 10678 deletions

View File

@@ -0,0 +1,24 @@
# Manifest for the command layer: MongoDB-compatible command routing and
# handlers, sitting between the wire protocol and the storage/index/txn engines.
[package]
name = "rustdb-commands"
version.workspace = true
edition.workspace = true
license.workspace = true
authors.workspace = true
description = "MongoDB-compatible command routing and handlers for RustDb"
# All dependency versions are inherited from the workspace manifest.
[dependencies]
bson = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
dashmap = { workspace = true }
tokio = { workspace = true }
thiserror = { workspace = true }
tracing = { workspace = true }
uuid = { workspace = true }
async-trait = { workspace = true }
# Sibling RustDb crates.
rustdb-config = { workspace = true }
rustdb-wire = { workspace = true }
rustdb-query = { workspace = true }
rustdb-storage = { workspace = true }
rustdb-index = { workspace = true }
rustdb-txn = { workspace = true }

View File

@@ -0,0 +1,35 @@
use std::sync::Arc;
use bson::Document;
use dashmap::DashMap;
use rustdb_index::IndexEngine;
use rustdb_storage::StorageAdapter;
use rustdb_txn::{SessionEngine, TransactionEngine};
/// Shared command execution context, passed to all handlers.
///
/// All engine fields are wrapped in `Arc` (with `DashMap` for the mutable
/// maps), so the same context can be referenced concurrently from many
/// connection tasks.
pub struct CommandContext {
    /// The storage backend.
    pub storage: Arc<dyn StorageAdapter>,
    /// Index engines per namespace: "db.collection" -> IndexEngine.
    pub indexes: Arc<DashMap<String, IndexEngine>>,
    /// Transaction engine for multi-document transactions.
    pub transactions: Arc<TransactionEngine>,
    /// Session engine for logical sessions.
    pub sessions: Arc<SessionEngine>,
    /// Active cursors for getMore / killCursors, keyed by cursor id.
    pub cursors: Arc<DashMap<i64, CursorState>>,
    /// Server start time (for uptime reporting in serverStatus).
    pub start_time: std::time::Instant,
}
/// State of an open cursor from a find or aggregate command.
///
/// The first batch has already been returned to the client; `documents`
/// holds only what is left for subsequent `getMore` calls.
pub struct CursorState {
    /// Documents remaining to be returned.
    pub documents: Vec<Document>,
    /// Current read position within `documents`.
    pub position: usize,
    /// Database the cursor belongs to.
    pub database: String,
    /// Collection the cursor belongs to (may be empty for command cursors
    /// such as listCollections).
    pub collection: String,
}

View File

@@ -0,0 +1,76 @@
use thiserror::Error;
/// Errors that can occur during command processing.
///
/// Each variant corresponds to a MongoDB error code / codeName pair (see
/// `to_error_doc`), so handlers can return these directly and have them
/// rendered as wire-protocol error responses.
#[derive(Debug, Error)]
pub enum CommandError {
    #[error("command not implemented: {0}")]
    NotImplemented(String),
    #[error("invalid argument: {0}")]
    InvalidArgument(String),
    #[error("storage error: {0}")]
    StorageError(String),
    #[error("index error: {0}")]
    IndexError(String),
    #[error("transaction error: {0}")]
    TransactionError(String),
    #[error("namespace not found: {0}")]
    NamespaceNotFound(String),
    #[error("namespace already exists: {0}")]
    NamespaceExists(String),
    #[error("duplicate key: {0}")]
    DuplicateKey(String),
    #[error("internal error: {0}")]
    InternalError(String),
}
impl CommandError {
/// Convert a CommandError to a BSON error response document.
pub fn to_error_doc(&self) -> bson::Document {
let (code, code_name) = match self {
CommandError::NotImplemented(_) => (59, "CommandNotFound"),
CommandError::InvalidArgument(_) => (14, "TypeMismatch"),
CommandError::StorageError(_) => (1, "InternalError"),
CommandError::IndexError(_) => (27, "IndexNotFound"),
CommandError::TransactionError(_) => (112, "WriteConflict"),
CommandError::NamespaceNotFound(_) => (26, "NamespaceNotFound"),
CommandError::NamespaceExists(_) => (48, "NamespaceExists"),
CommandError::DuplicateKey(_) => (11000, "DuplicateKey"),
CommandError::InternalError(_) => (1, "InternalError"),
};
bson::doc! {
"ok": 0,
"errmsg": self.to_string(),
"code": code,
"codeName": code_name,
}
}
}
/// Lossy conversions from the engine-specific error types: the original
/// error is flattened into its display string.
impl From<rustdb_storage::StorageError> for CommandError {
    fn from(err: rustdb_storage::StorageError) -> Self {
        Self::StorageError(err.to_string())
    }
}
impl From<rustdb_txn::TransactionError> for CommandError {
    fn from(err: rustdb_txn::TransactionError) -> Self {
        Self::TransactionError(err.to_string())
    }
}
impl From<rustdb_index::IndexError> for CommandError {
    fn from(err: rustdb_index::IndexError) -> Self {
        Self::IndexError(err.to_string())
    }
}
/// Convenience alias used throughout the command handlers.
pub type CommandResult<T> = Result<T, CommandError>;

View File

@@ -0,0 +1,653 @@
use bson::{doc, Bson, Document};
use rustdb_index::IndexEngine;
use tracing::debug;
use crate::context::{CommandContext, CursorState};
use crate::error::{CommandError, CommandResult};
/// Handle various admin / diagnostic / session / auth commands.
///
/// `command_name` is the command's (case-sensitive) name; `db` is the
/// database it was addressed to. Most diagnostic commands return fixed stub
/// documents so standard MongoDB drivers and shells can connect; anything
/// unrecognized falls through to a permissive `ok: 1` stub at the bottom.
pub async fn handle(
    cmd: &Document,
    db: &str,
    ctx: &CommandContext,
    command_name: &str,
) -> CommandResult<Document> {
    match command_name {
        "ping" => Ok(doc! { "ok": 1.0 }),
        // Advertise a MongoDB 7.0-compatible server version to drivers.
        "buildInfo" | "buildinfo" => Ok(doc! {
            "version": "7.0.0",
            "gitVersion": "unknown",
            "modules": [],
            "sysInfo": "rustdb",
            "versionArray": [7_i32, 0_i32, 0_i32, 0_i32],
            "ok": 1.0,
        }),
        "serverStatus" => Ok(doc! {
            "host": "localhost",
            "version": "7.0.0",
            "process": "rustdb",
            "uptime": ctx.start_time.elapsed().as_secs() as i64,
            "ok": 1.0,
        }),
        "hostInfo" => Ok(doc! {
            "system": {
                "hostname": "localhost",
            },
            "ok": 1.0,
        }),
        "whatsmyuri" => Ok(doc! {
            "you": "127.0.0.1:0",
            "ok": 1.0,
        }),
        // Log retrieval is accepted but no log buffer is kept.
        "getLog" => {
            let _log_type = cmd.get_str("getLog").unwrap_or("global");
            Ok(doc! {
                "totalLinesWritten": 0_i32,
                "log": [],
                "ok": 1.0,
            })
        }
        "replSetGetStatus" => {
            // Not a replica set.
            Ok(doc! {
                "ok": 0.0,
                "errmsg": "not running with --replSet",
                "code": 76_i32,
                "codeName": "NoReplicationEnabled",
            })
        }
        "getCmdLineOpts" => Ok(doc! {
            "argv": ["rustdb"],
            "parsed": {},
            "ok": 1.0,
        }),
        "getParameter" => Ok(doc! {
            "ok": 1.0,
        }),
        "getFreeMonitoringStatus" | "setFreeMonitoring" => Ok(doc! {
            "state": "disabled",
            "ok": 1.0,
        }),
        "getShardMap" | "shardingState" => Ok(doc! {
            "enabled": false,
            "ok": 1.0,
        }),
        "atlasVersion" => Ok(doc! {
            "ok": 0.0,
            "errmsg": "not supported",
            "code": 59_i32,
            "codeName": "CommandNotFound",
        }),
        "connectionStatus" => Ok(doc! {
            "authInfo": {
                "authenticatedUsers": [],
                "authenticatedUserRoles": [],
            },
            "ok": 1.0,
        }),
        // Catalog / DDL commands are delegated to dedicated handlers below.
        "listDatabases" => handle_list_databases(cmd, ctx).await,
        "listCollections" => handle_list_collections(cmd, db, ctx).await,
        "create" => handle_create(cmd, db, ctx).await,
        "drop" => handle_drop(cmd, db, ctx).await,
        "dropDatabase" => handle_drop_database(db, ctx).await,
        "renameCollection" => handle_rename_collection(cmd, ctx).await,
        "collStats" | "validate" => handle_coll_stats(cmd, db, ctx, command_name).await,
        "dbStats" => handle_db_stats(db, ctx).await,
        "explain" => Ok(doc! {
            "queryPlanner": {},
            "ok": 1.0,
        }),
        // Logical sessions: register a server-side session under a fresh UUID.
        "startSession" => {
            let session_id = uuid::Uuid::new_v4().to_string();
            ctx.sessions.get_or_create_session(&session_id);
            Ok(doc! {
                "id": { "id": &session_id },
                "timeoutMinutes": 30_i32,
                "ok": 1.0,
            })
        }
        "endSessions" | "killSessions" => {
            // Attempt to end listed sessions; malformed entries are ignored.
            if let Ok(sessions) = cmd
                .get_array("endSessions")
                .or_else(|_| cmd.get_array("killSessions"))
            {
                for s in sessions {
                    if let Some(sid) = rustdb_txn::SessionEngine::extract_session_id(s) {
                        ctx.sessions.end_session(&sid);
                    }
                }
            }
            Ok(doc! { "ok": 1.0 })
        }
        "commitTransaction" => {
            // Stub: acknowledge.
            Ok(doc! { "ok": 1.0 })
        }
        "abortTransaction" => {
            // Stub: acknowledge.
            Ok(doc! { "ok": 1.0 })
        }
        // Auth stubs - accept silently with an empty SASL payload.
        "saslStart" => Ok(doc! {
            "conversationId": 1_i32,
            "done": true,
            "payload": bson::Binary { subtype: bson::spec::BinarySubtype::Generic, bytes: vec![] },
            "ok": 1.0,
        }),
        "saslContinue" => Ok(doc! {
            "conversationId": 1_i32,
            "done": true,
            "payload": bson::Binary { subtype: bson::spec::BinarySubtype::Generic, bytes: vec![] },
            "ok": 1.0,
        }),
        "authenticate" | "logout" => Ok(doc! { "ok": 1.0 }),
        "currentOp" => Ok(doc! {
            "inprog": [],
            "ok": 1.0,
        }),
        "killOp" | "top" | "profile" | "compact" | "reIndex"
        | "fsync" | "connPoolSync" => Ok(doc! { "ok": 1.0 }),
        other => {
            // Catch-all for any admin command we missed.
            Ok(doc! {
                "ok": 1.0,
                "note": format!("stub response for command: {}", other),
            })
        }
    }
}
/// Handle `listDatabases` command.
///
/// Lists all databases; unless `nameOnly` is set, each entry carries a
/// rough size estimate (~200 bytes per stored document) and `totalSize`
/// is included in the response. An optional `filter` document is applied
/// to each per-database info document.
async fn handle_list_databases(
    cmd: &Document,
    ctx: &CommandContext,
) -> CommandResult<Document> {
    let db_names = ctx.storage.list_databases().await?;
    let name_only = matches!(cmd.get("nameOnly"), Some(Bson::Boolean(true)));
    let filter = if let Some(Bson::Document(d)) = cmd.get("filter") {
        Some(d.clone())
    } else {
        None
    };
    let mut databases: Vec<Bson> = Vec::new();
    let mut total_size: i64 = 0;
    for name in &db_names {
        let mut info = doc! { "name": name.as_str() };
        if !name_only {
            // Estimate size by counting documents across collections,
            // at roughly 200 bytes per document.
            let mut size: i64 = 0;
            if let Ok(colls) = ctx.storage.list_collections(name).await {
                for coll in &colls {
                    if let Ok(n) = ctx.storage.count(name, coll).await {
                        size += n as i64 * 200;
                    }
                }
            }
            info.insert("sizeOnDisk", size);
            info.insert("empty", size == 0);
            total_size += size;
        }
        // Skip databases that do not match the optional filter.
        if let Some(ref f) = filter {
            if !rustdb_query::QueryMatcher::matches(&info, f) {
                continue;
            }
        }
        databases.push(Bson::Document(info));
    }
    let mut response = doc! {
        "databases": databases,
        "ok": 1.0,
    };
    if !name_only {
        response.insert("totalSize", total_size);
    }
    Ok(response)
}
/// Handle `listCollections` command.
///
/// Returns collection metadata for `db` as a cursor response. Honors
/// `filter`, `nameOnly`, and `cursor.batchSize`; when more entries exist
/// than fit in the first batch, a server-side cursor is registered so the
/// remainder can be fetched with `getMore`.
async fn handle_list_collections(
    cmd: &Document,
    db: &str,
    ctx: &CommandContext,
) -> CommandResult<Document> {
    let collections = ctx.storage.list_collections(db).await?;
    let filter = match cmd.get("filter") {
        Some(Bson::Document(d)) => Some(d.clone()),
        _ => None,
    };
    let name_only = matches!(cmd.get("nameOnly"), Some(Bson::Boolean(true)));
    // batchSize may arrive as i32 or i64; absent means "no paging".
    let batch_size = cmd
        .get_document("cursor")
        .ok()
        .and_then(|c| {
            c.get_i32("batchSize")
                .ok()
                .map(|v| v as usize)
                .or_else(|| c.get_i64("batchSize").ok().map(|v| v as usize))
        })
        .unwrap_or(usize::MAX);
    let ns = format!("{}.$cmd.listCollections", db);
    let mut coll_docs: Vec<Document> = Vec::new();
    for coll_name in &collections {
        let info_doc = if name_only {
            doc! {
                "name": coll_name.as_str(),
                "type": "collection",
            }
        } else {
            doc! {
                "name": coll_name.as_str(),
                "type": "collection",
                "options": {},
                "info": {
                    "readOnly": false,
                },
                "idIndex": {
                    "v": 2_i32,
                    "key": { "_id": 1_i32 },
                    "name": "_id_",
                },
            }
        };
        // Apply filter if specified.
        if let Some(ref f) = filter {
            if !rustdb_query::QueryMatcher::matches(&info_doc, f) {
                continue;
            }
        }
        coll_docs.push(info_doc);
    }
    if coll_docs.len() <= batch_size {
        // Everything fits in the first batch; cursor id 0 = exhausted.
        let first_batch: Vec<Bson> = coll_docs.into_iter().map(Bson::Document).collect();
        Ok(doc! {
            "cursor": {
                "id": 0_i64,
                "ns": &ns,
                "firstBatch": first_batch,
            },
            "ok": 1.0,
        })
    } else {
        // Split without copying: `split_off` moves the tail out of
        // `coll_docs` (the previous version cloned both halves).
        let remaining = coll_docs.split_off(batch_size);
        let first_batch: Vec<Bson> = coll_docs.into_iter().map(Bson::Document).collect();
        let cursor_id = generate_cursor_id();
        ctx.cursors.insert(
            cursor_id,
            CursorState {
                documents: remaining,
                position: 0,
                database: db.to_string(),
                // Command cursors have no backing collection.
                collection: String::new(),
            },
        );
        Ok(doc! {
            "cursor": {
                "id": cursor_id,
                "ns": &ns,
                "firstBatch": first_batch,
            },
            "ok": 1.0,
        })
    }
}
/// Handle `create` command: create the collection (and its database) and
/// register an empty index engine for the new namespace.
async fn handle_create(
    cmd: &Document,
    db: &str,
    ctx: &CommandContext,
) -> CommandResult<Document> {
    let coll = cmd
        .get_str("create")
        .map_err(|_| CommandError::InvalidArgument("missing 'create' field".into()))?;
    debug!(db = db, collection = coll, "create command");
    // Database creation is treated as idempotent: an "already exists"
    // failure is ignored, anything else is surfaced.
    if let Err(e) = ctx.storage.create_database(db).await {
        let text = e.to_string();
        let already = text.contains("AlreadyExists") || text.contains("already exists");
        if !already {
            return Err(CommandError::StorageError(text));
        }
    }
    // Collection creation is NOT idempotent: an existing collection maps to
    // the NamespaceExists error.
    if let Err(e) = ctx.storage.create_collection(db, coll).await {
        let text = e.to_string();
        let already = text.contains("AlreadyExists") || text.contains("already exists");
        return Err(if already {
            CommandError::NamespaceExists(format!("{}.{}", db, coll))
        } else {
            CommandError::StorageError(text)
        });
    }
    // Register a fresh index engine for the new namespace.
    ctx.indexes
        .entry(format!("{}.{}", db, coll))
        .or_insert_with(IndexEngine::new);
    Ok(doc! { "ok": 1.0 })
}
/// Handle `drop` command: remove a collection and its in-memory indexes.
async fn handle_drop(
    cmd: &Document,
    db: &str,
    ctx: &CommandContext,
) -> CommandResult<Document> {
    let coll = cmd
        .get_str("drop")
        .map_err(|_| CommandError::InvalidArgument("missing 'drop' field".into()))?;
    let ns_key = format!("{}.{}", db, coll);
    debug!(db = db, collection = coll, "drop command");
    // Only a confirmed "does not exist" is an error here; an existence-check
    // failure falls through and will surface from drop_collection below.
    if let Ok(false) = ctx.storage.collection_exists(db, coll).await {
        return Err(CommandError::NamespaceNotFound(format!(
            "ns not found: {}",
            ns_key
        )));
    }
    // Drop from storage, then evict the namespace's index engine.
    ctx.storage.drop_collection(db, coll).await?;
    ctx.indexes.remove(&ns_key);
    // nIndexesWas: every collection has at least the implicit _id_ index.
    Ok(doc! {
        "ns": &ns_key,
        "nIndexesWas": 1_i32,
        "ok": 1.0,
    })
}
/// Handle `dropDatabase` command: evict every index engine belonging to
/// the database, then drop it from storage.
async fn handle_drop_database(
    db: &str,
    ctx: &CommandContext,
) -> CommandResult<Document> {
    debug!(db = db, "dropDatabase command");
    // Evict all "db.<collection>" index entries in a single pass.
    let prefix = format!("{}.", db);
    ctx.indexes.retain(|key, _| !key.starts_with(&prefix));
    // Drop from storage.
    ctx.storage.drop_database(db).await?;
    Ok(doc! {
        "dropped": db,
        "ok": 1.0,
    })
}
/// Handle `renameCollection` command.
///
/// Supports renames within a single database only; cross-database renames
/// are rejected. With `dropTarget: true` an existing target collection is
/// dropped first, otherwise an existing target is an error. The in-memory
/// index engine is re-keyed to the new namespace after the storage rename.
async fn handle_rename_collection(
    cmd: &Document,
    ctx: &CommandContext,
) -> CommandResult<Document> {
    let source_ns = cmd
        .get_str("renameCollection")
        .map_err(|_| CommandError::InvalidArgument("missing 'renameCollection' field".into()))?;
    let target_ns = cmd
        .get_str("to")
        .map_err(|_| CommandError::InvalidArgument("missing 'to' field".into()))?;
    let drop_target = match cmd.get("dropTarget") {
        Some(Bson::Boolean(b)) => *b,
        _ => false,
    };
    // Parse "db.collection" format.
    let (source_db, source_coll) = parse_namespace(source_ns)?;
    let (target_db, target_coll) = parse_namespace(target_ns)?;
    debug!(
        source = source_ns,
        target = target_ns,
        drop_target = drop_target,
        "renameCollection command"
    );
    // If cross-database rename, that's more complex. For now, support same-db rename.
    if source_db != target_db {
        return Err(CommandError::InvalidArgument(
            "cross-database renameCollection not yet supported".into(),
        ));
    }
    // If dropTarget, drop the target collection first (best effort).
    if drop_target {
        let _ = ctx.storage.drop_collection(target_db, target_coll).await;
        let target_ns_key = format!("{}.{}", target_db, target_coll);
        ctx.indexes.remove(&target_ns_key);
    } else {
        // Check if target already exists.
        if let Ok(true) = ctx.storage.collection_exists(target_db, target_coll).await {
            return Err(CommandError::NamespaceExists(target_ns.to_string()));
        }
    }
    // Rename in storage.
    ctx.storage
        .rename_collection(source_db, source_coll, target_coll)
        .await?;
    // Update index engine: move from old namespace to new.
    let source_ns_key = format!("{}.{}", source_db, source_coll);
    let target_ns_key = format!("{}.{}", target_db, target_coll);
    if let Some((_, engine)) = ctx.indexes.remove(&source_ns_key) {
        ctx.indexes.insert(target_ns_key, engine);
    }
    Ok(doc! { "ok": 1.0 })
}
/// Handle `collStats` (and `validate`) with rough size estimates.
///
/// `command_name` names the field that carries the collection name, since
/// the same handler serves both commands.
async fn handle_coll_stats(
    cmd: &Document,
    db: &str,
    ctx: &CommandContext,
    command_name: &str,
) -> CommandResult<Document> {
    let coll = cmd.get_str(command_name).unwrap_or("unknown");
    let ns = format!("{}.{}", db, coll);
    let count = ctx.storage.count(db, coll).await.unwrap_or(0);
    // Index count comes from the engine when one is registered; otherwise
    // assume only the implicit _id_ index.
    let n_indexes = ctx
        .indexes
        .get(&ns)
        .map(|engine| engine.list_indexes().len() as i32)
        .unwrap_or(1_i32);
    // Flat 200-byte-per-document size estimate.
    let data_size = count as i64 * 200;
    let avg_obj_size = if count > 0 { 200_i64 } else { 0_i64 };
    Ok(doc! {
        "ns": &ns,
        "count": count as i64,
        "size": data_size,
        "avgObjSize": avg_obj_size,
        "storageSize": data_size,
        "nindexes": n_indexes,
        "totalIndexSize": 0_i64,
        "ok": 1.0,
    })
}
/// Handle `dbStats` command: aggregate rough per-collection statistics
/// for the whole database.
async fn handle_db_stats(
    db: &str,
    ctx: &CommandContext,
) -> CommandResult<Document> {
    let collections = ctx.storage.list_collections(db).await.unwrap_or_default();
    let mut objects: i64 = 0;
    let mut indexes: i32 = 0;
    for coll in &collections {
        if let Ok(n) = ctx.storage.count(db, coll).await {
            objects += n as i64;
        }
        // Every collection carries at least the implicit _id_ index.
        let ns_key = format!("{}.{}", db, coll);
        indexes += ctx
            .indexes
            .get(&ns_key)
            .map(|engine| engine.list_indexes().len() as i32)
            .unwrap_or(1);
    }
    // Flat 200-byte-per-document size estimate.
    let data_size = objects * 200;
    Ok(doc! {
        "db": db,
        "collections": collections.len() as i32,
        "objects": objects,
        "avgObjSize": if objects > 0 { 200_i64 } else { 0_i64 },
        "dataSize": data_size,
        "storageSize": data_size,
        "indexes": indexes,
        "indexSize": 0_i64,
        "ok": 1.0,
    })
}
/// Parse a namespace string "db.collection" into (db, collection).
///
/// Splits at the FIRST dot, so collection names may themselves contain
/// dots. Returns `InvalidArgument` when the dot is missing or either side
/// is empty.
fn parse_namespace(ns: &str) -> CommandResult<(&str, &str)> {
    // `split_once` replaces the manual find + slice arithmetic.
    let (db, coll) = ns.split_once('.').ok_or_else(|| {
        CommandError::InvalidArgument(format!(
            "invalid namespace '{}': expected 'db.collection' format",
            ns
        ))
    })?;
    if db.is_empty() || coll.is_empty() {
        return Err(CommandError::InvalidArgument(format!(
            "invalid namespace '{}': db and collection must not be empty",
            ns
        )));
    }
    Ok((db, coll))
}
/// Generate a pseudo-random, strictly positive cursor ID.
///
/// Hashes the current wall-clock nanoseconds through a randomly-seeded
/// `RandomState` hasher. The result is masked to 63 bits so it is always
/// non-negative; the previous `abs()` approach panics (in debug builds) or
/// wraps when the hash happens to be `i64::MIN`. Zero is remapped to 1
/// because cursor id 0 means "exhausted" on the wire.
fn generate_cursor_id() -> i64 {
    use std::collections::hash_map::RandomState;
    use std::hash::{BuildHasher, Hasher};
    let state = RandomState::new();
    let mut hasher = state.build_hasher();
    hasher.write_u64(
        std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap_or_default()
            .as_nanos() as u64,
    );
    // Mask to 63 bits: always >= 0 without an overflow-prone abs().
    let id = (hasher.finish() & (i64::MAX as u64)) as i64;
    if id == 0 {
        1
    } else {
        id
    }
}

View File

@@ -0,0 +1,310 @@
use bson::{doc, Bson, Document};
use rustdb_query::AggregationEngine;
use rustdb_query::error::QueryError;
use tracing::debug;
use crate::context::{CommandContext, CursorState};
use crate::error::{CommandError, CommandResult};
/// A CollectionResolver that reads from the storage adapter.
///
/// Bridges the synchronous `CollectionResolver` trait (used by the
/// aggregation engine for `$lookup`-style stages) to the async storage API.
struct StorageResolver<'a> {
    storage: &'a dyn rustdb_storage::StorageAdapter,
    /// We use a tokio runtime handle to call async methods synchronously,
    /// since the CollectionResolver trait is synchronous.
    handle: tokio::runtime::Handle,
}
impl<'a> rustdb_query::aggregation::CollectionResolver for StorageResolver<'a> {
    // NOTE(review): `Handle::block_on` panics when called from a thread that
    // is currently driving the runtime (e.g. directly inside an async task).
    // This presumably runs on a blocking/worker thread — confirm, or wrap
    // the call in `tokio::task::block_in_place`.
    fn resolve(&self, db: &str, coll: &str) -> Result<Vec<Document>, QueryError> {
        self.handle
            .block_on(async { self.storage.find_all(db, coll).await })
            .map_err(|e| QueryError::AggregationError(format!("Failed to resolve {}.{}: {}", db, coll, e)))
    }
}
/// Handle the `aggregate` command.
///
/// Loads the source documents, runs the pipeline through the
/// `AggregationEngine`, applies a trailing `$out`/`$merge` stage if one was
/// present, and returns the results as a cursor response — registering a
/// server-side cursor when they exceed the first batch size.
pub async fn handle(
    cmd: &Document,
    db: &str,
    ctx: &CommandContext,
) -> CommandResult<Document> {
    // The aggregate field can be a string (collection name) or integer 1 (db-level).
    let (coll, is_db_level) = match cmd.get("aggregate") {
        Some(Bson::String(s)) => (s.as_str().to_string(), false),
        Some(Bson::Int32(1)) => (String::new(), true),
        Some(Bson::Int64(1)) => (String::new(), true),
        _ => {
            return Err(CommandError::InvalidArgument(
                "missing or invalid 'aggregate' field".into(),
            ));
        }
    };
    let pipeline_bson = cmd
        .get_array("pipeline")
        .map_err(|_| CommandError::InvalidArgument("missing 'pipeline' array".into()))?;
    // Convert pipeline to Vec<Document>; every stage must be a document.
    let mut pipeline: Vec<Document> = Vec::with_capacity(pipeline_bson.len());
    for stage in pipeline_bson {
        match stage {
            Bson::Document(d) => pipeline.push(d.clone()),
            _ => {
                return Err(CommandError::InvalidArgument(
                    "pipeline stage must be a document".into(),
                ));
            }
        }
    }
    // Check for $out and $merge as the last stage (handled after pipeline execution).
    let out_stage = if let Some(last) = pipeline.last() {
        if last.contains_key("$out") || last.contains_key("$merge") {
            Some(pipeline.pop().unwrap())
        } else {
            None
        }
    } else {
        None
    };
    // batchSize may arrive as i32 or i64; 101 is the conventional default.
    let batch_size = cmd
        .get_document("cursor")
        .ok()
        .and_then(|c| {
            c.get_i32("batchSize")
                .ok()
                .map(|v| v as usize)
                .or_else(|| c.get_i64("batchSize").ok().map(|v| v as usize))
        })
        .unwrap_or(101);
    debug!(
        db = db,
        collection = %coll,
        stages = pipeline.len(),
        "aggregate command"
    );
    // Load source documents.
    let source_docs = if is_db_level {
        // Database-level aggregate: start with empty set (useful for $currentOp, etc.)
        Vec::new()
    } else {
        ctx.storage.find_all(db, &coll).await?
    };
    // Create a resolver for $lookup and similar stages.
    let handle = tokio::runtime::Handle::current();
    let resolver = StorageResolver {
        storage: ctx.storage.as_ref(),
        handle,
    };
    // Run the aggregation pipeline.
    let mut result_docs = AggregationEngine::aggregate(
        source_docs,
        &pipeline,
        Some(&resolver),
        db,
    )
    .map_err(|e| CommandError::InternalError(e.to_string()))?;
    // Handle a trailing $out / $merge: write results to the target collection.
    if let Some(out) = out_stage {
        if let Some(out_spec) = out.get("$out") {
            handle_out_stage(db, out_spec, &result_docs, ctx).await?;
        } else if let Some(merge_spec) = out.get("$merge") {
            handle_merge_stage(db, merge_spec, &result_docs, ctx).await?;
        }
    }
    // Build cursor response.
    let ns = if is_db_level {
        format!("{}.$cmd.aggregate", db)
    } else {
        format!("{}.{}", db, coll)
    };
    if result_docs.len() <= batch_size {
        // All results fit in the first batch; cursor id 0 = exhausted.
        let first_batch: Vec<Bson> = result_docs
            .into_iter()
            .map(Bson::Document)
            .collect();
        Ok(doc! {
            "cursor": {
                "firstBatch": first_batch,
                "id": 0_i64,
                "ns": &ns,
            },
            "ok": 1.0,
        })
    } else {
        // Split without copying: `split_off` moves the tail out of
        // `result_docs` (the previous version cloned both halves).
        let remaining = result_docs.split_off(batch_size);
        let first_batch: Vec<Bson> = result_docs
            .into_iter()
            .map(Bson::Document)
            .collect();
        let cursor_id = generate_cursor_id();
        ctx.cursors.insert(
            cursor_id,
            CursorState {
                documents: remaining,
                position: 0,
                database: db.to_string(),
                // `coll` has no further use, so it can be moved here.
                collection: coll,
            },
        );
        Ok(doc! {
            "cursor": {
                "firstBatch": first_batch,
                "id": cursor_id,
                "ns": &ns,
            },
            "ok": 1.0,
        })
    }
}
/// Handle $out stage: drop and replace target collection with pipeline results.
///
/// The spec is either a bare collection name (same database) or a
/// `{ db, coll }` document. Writes are best-effort: drop/create/insert
/// failures are silently ignored.
async fn handle_out_stage(
    db: &str,
    out_spec: &Bson,
    docs: &[Document],
    ctx: &CommandContext,
) -> CommandResult<()> {
    let (target_db, target_coll) = match out_spec {
        Bson::String(name) => (db.to_string(), name.clone()),
        Bson::Document(spec) => {
            let tcoll = spec
                .get_str("coll")
                .map_err(|_| CommandError::InvalidArgument("$out requires 'coll'".into()))?
                .to_string();
            // Target db defaults to the current database.
            (spec.get_str("db").unwrap_or(db).to_string(), tcoll)
        }
        _ => {
            return Err(CommandError::InvalidArgument(
                "$out requires a string or document".into(),
            ));
        }
    };
    // Replace-all semantics: drop, recreate, then bulk insert (best effort).
    let _ = ctx.storage.drop_collection(&target_db, &target_coll).await;
    let _ = ctx.storage.create_database(&target_db).await;
    let _ = ctx.storage.create_collection(&target_db, &target_coll).await;
    for document in docs {
        let _ = ctx
            .storage
            .insert_one(&target_db, &target_coll, document.clone())
            .await;
    }
    Ok(())
}
/// Handle $merge stage: merge pipeline results into target collection.
///
/// The spec is either a bare collection name or a document whose `into`
/// field is itself a name or `{ db, coll }`. Documents are upserted by
/// `_id` (update first, insert on failure); documents without an `_id` are
/// inserted as-is. All writes are best-effort.
async fn handle_merge_stage(
    db: &str,
    merge_spec: &Bson,
    docs: &[Document],
    ctx: &CommandContext,
) -> CommandResult<()> {
    let (target_db, target_coll) = match merge_spec {
        Bson::String(coll_name) => (db.to_string(), coll_name.clone()),
        Bson::Document(d) => {
            let into_val = d.get("into");
            match into_val {
                Some(Bson::String(s)) => (db.to_string(), s.clone()),
                Some(Bson::Document(into_doc)) => {
                    // Target db defaults to the current database.
                    let tdb = into_doc.get_str("db").unwrap_or(db).to_string();
                    let tcoll = into_doc
                        .get_str("coll")
                        .map_err(|_| {
                            CommandError::InvalidArgument("$merge.into requires 'coll'".into())
                        })?
                        .to_string();
                    (tdb, tcoll)
                }
                _ => {
                    return Err(CommandError::InvalidArgument(
                        "$merge requires 'into' field".into(),
                    ));
                }
            }
        }
        _ => {
            return Err(CommandError::InvalidArgument(
                "$merge requires a string or document".into(),
            ));
        }
    };
    // Ensure target collection exists (best effort).
    let _ = ctx.storage.create_database(&target_db).await;
    let _ = ctx
        .storage
        .create_collection(&target_db, &target_coll)
        .await;
    // Simple merge: upsert by _id.
    for doc in docs {
        // Normalize the _id into the string form the storage layer keys on.
        let id_str = match doc.get("_id") {
            Some(Bson::ObjectId(oid)) => oid.to_hex(),
            Some(Bson::String(s)) => s.clone(),
            Some(other) => format!("{}", other),
            None => {
                // No _id, just insert.
                let _ = ctx
                    .storage
                    .insert_one(&target_db, &target_coll, doc.clone())
                    .await;
                continue;
            }
        };
        // Try update first, insert if it fails.
        match ctx
            .storage
            .update_by_id(&target_db, &target_coll, &id_str, doc.clone())
            .await
        {
            Ok(()) => {}
            Err(_) => {
                let _ = ctx
                    .storage
                    .insert_one(&target_db, &target_coll, doc.clone())
                    .await;
            }
        }
    }
    Ok(())
}
/// Generate a pseudo-random, strictly positive cursor ID.
///
/// Hashes the current wall-clock nanoseconds through a randomly-seeded
/// `RandomState` hasher. The result is masked to 63 bits so it is always
/// non-negative; the previous `abs()` approach panics (in debug builds) or
/// wraps when the hash happens to be `i64::MIN`. Zero is remapped to 1
/// because cursor id 0 means "exhausted" on the wire.
fn generate_cursor_id() -> i64 {
    use std::collections::hash_map::RandomState;
    use std::hash::{BuildHasher, Hasher};
    let state = RandomState::new();
    let mut hasher = state.build_hasher();
    hasher.write_u64(
        std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap_or_default()
            .as_nanos() as u64,
    );
    // Mask to 63 bits: always >= 0 without an overflow-prone abs().
    let id = (hasher.finish() & (i64::MAX as u64)) as i64;
    if id == 0 { 1 } else { id }
}

View File

@@ -0,0 +1,196 @@
use std::collections::HashSet;
use bson::{doc, Bson, Document};
use rustdb_query::QueryMatcher;
use tracing::debug;
use crate::context::CommandContext;
use crate::error::{CommandError, CommandResult};
/// Handle the `delete` command.
///
/// Processes each entry of the `deletes` array independently, collecting
/// per-entry failures into `writeErrors`. With `ordered: true` (the
/// default) processing stops at the first failure; otherwise the remaining
/// entries still run. `n` in the response is the total deleted count.
pub async fn handle(
    cmd: &Document,
    db: &str,
    ctx: &CommandContext,
) -> CommandResult<Document> {
    let coll = cmd
        .get_str("delete")
        .map_err(|_| CommandError::InvalidArgument("missing 'delete' field".into()))?;
    let deletes = cmd
        .get_array("deletes")
        .map_err(|_| CommandError::InvalidArgument("missing 'deletes' array".into()))?;
    // Ordered flag (default true).
    let ordered = match cmd.get("ordered") {
        Some(Bson::Boolean(b)) => *b,
        _ => true,
    };
    debug!(
        db = db,
        collection = coll,
        count = deletes.len(),
        "delete command"
    );
    let ns_key = format!("{}.{}", db, coll);
    let mut total_deleted: i32 = 0;
    let mut write_errors: Vec<Document> = Vec::new();
    for (idx, del_spec) in deletes.iter().enumerate() {
        let del_doc = match del_spec {
            Bson::Document(d) => d,
            _ => {
                // Non-document entry: record a TypeMismatch write error.
                write_errors.push(doc! {
                    "index": idx as i32,
                    "code": 14_i32,
                    "codeName": "TypeMismatch",
                    "errmsg": "delete spec must be a document",
                });
                if ordered {
                    break;
                }
                continue;
            }
        };
        // Extract filter (q) and limit.
        let filter = match del_doc.get_document("q") {
            Ok(f) => f.clone(),
            Err(_) => Document::new(), // empty filter matches everything
        };
        // limit: 0 = all matches, 1 = first match only (numeric coercion).
        let limit = match del_doc.get("limit") {
            Some(Bson::Int32(n)) => *n,
            Some(Bson::Int64(n)) => *n as i32,
            Some(Bson::Double(n)) => *n as i32,
            _ => 0, // default: delete all matches
        };
        match delete_matching(db, coll, &ns_key, &filter, limit, ctx).await {
            Ok(count) => {
                total_deleted += count;
            }
            Err(e) => {
                write_errors.push(doc! {
                    "index": idx as i32,
                    "code": 1_i32,
                    "codeName": "InternalError",
                    "errmsg": e.to_string(),
                });
                if ordered {
                    break;
                }
            }
        }
    }
    // Build response.
    let mut response = doc! {
        "n": total_deleted,
        "ok": 1.0,
    };
    if !write_errors.is_empty() {
        response.insert(
            "writeErrors",
            write_errors
                .into_iter()
                .map(Bson::Document)
                .collect::<Vec<_>>(),
        );
    }
    Ok(response)
}
/// Find and delete documents matching a filter, returning the number deleted.
///
/// Uses the namespace's index engine (when present) to narrow the candidate
/// set before running the full `QueryMatcher` filter. `limit == 1` deletes
/// only the first match.
/// NOTE(review): any other limit value (including > 1) deletes ALL matches
/// here — MongoDB only defines limits 0 and 1, so this is presumably
/// unreachable; confirm upstream validation.
async fn delete_matching(
    db: &str,
    coll: &str,
    ns_key: &str,
    filter: &Document,
    limit: i32,
    ctx: &CommandContext,
) -> Result<i32, CommandError> {
    // Check if the collection exists; if not (or the check fails), nothing to delete.
    match ctx.storage.collection_exists(db, coll).await {
        Ok(false) => return Ok(0),
        Err(_) => return Ok(0),
        Ok(true) => {}
    }
    // Try to use index to narrow candidates. None means "no usable index";
    // an empty set means the index proves no document matches.
    let candidate_ids: Option<HashSet<String>> = {
        if let Some(engine) = ctx.indexes.get(ns_key) {
            engine.find_candidate_ids(filter)
        } else {
            None
        }
    };
    // Load candidate documents (index hits only, or a full scan).
    let docs = if let Some(ids) = candidate_ids {
        if ids.is_empty() {
            return Ok(0);
        }
        ctx.storage
            .find_by_ids(db, coll, ids)
            .await
            .map_err(|e| CommandError::StorageError(e.to_string()))?
    } else {
        ctx.storage
            .find_all(db, coll)
            .await
            .map_err(|e| CommandError::StorageError(e.to_string()))?
    };
    // Apply filter to get matched documents.
    let matched = QueryMatcher::filter(&docs, filter);
    // Apply limit: 0 means delete all, 1 means delete only the first match.
    let to_delete: &[Document] = if limit == 1 && !matched.is_empty() {
        &matched[..1]
    } else {
        &matched
    };
    if to_delete.is_empty() {
        return Ok(0);
    }
    let mut deleted_count: i32 = 0;
    for doc in to_delete {
        // Extract the _id as a hex string for storage deletion.
        let id_str = extract_id_string(doc)?;
        ctx.storage
            .delete_by_id(db, coll, &id_str)
            .await
            .map_err(|e| CommandError::StorageError(e.to_string()))?;
        // Keep the index engine in sync with the deletion.
        if let Some(mut engine) = ctx.indexes.get_mut(ns_key) {
            engine.on_delete(doc);
        }
        deleted_count += 1;
    }
    Ok(deleted_count)
}
/// Extract the `_id` field from a document as a hex string suitable for the
/// storage adapter.
///
/// ObjectIds are hex-encoded, strings pass through, and any other BSON
/// value falls back to its Display rendering. A missing `_id` is an
/// `InvalidArgument` error.
fn extract_id_string(doc: &Document) -> Result<String, CommandError> {
    let id = doc.get("_id").ok_or_else(|| {
        CommandError::InvalidArgument("document missing _id field".into())
    })?;
    Ok(match id {
        Bson::ObjectId(oid) => oid.to_hex(),
        Bson::String(s) => s.clone(),
        other => format!("{}", other),
    })
}

View File

@@ -0,0 +1,370 @@
use std::sync::atomic::{AtomicI64, Ordering};
use bson::{doc, Bson, Document};
use tracing::debug;
use rustdb_query::{QueryMatcher, sort_documents, apply_projection, distinct_values};
use crate::context::{CommandContext, CursorState};
use crate::error::{CommandError, CommandResult};
/// Atomic counter for generating unique cursor IDs.
/// Starts at 1 because cursor id 0 signals "exhausted" in cursor responses.
static CURSOR_ID_COUNTER: AtomicI64 = AtomicI64::new(1);
/// Generate a new unique, positive cursor ID.
/// (Monotonically increasing; would only wrap negative after i64::MAX calls.)
fn next_cursor_id() -> i64 {
    CURSOR_ID_COUNTER.fetch_add(1, Ordering::Relaxed)
}
// ---------------------------------------------------------------------------
// Helpers to defensively extract values from BSON command documents
// ---------------------------------------------------------------------------
/// Read `key` as a string slice; any non-string value yields `None`.
fn get_str<'a>(doc: &'a Document, key: &str) -> Option<&'a str> {
    if let Some(Bson::String(s)) = doc.get(key) {
        Some(s.as_str())
    } else {
        None
    }
}
/// Read `key` as an i32, coercing Int64 and Double values (truncating).
fn get_i32(doc: &Document, key: &str) -> Option<i32> {
    doc.get(key).and_then(|value| match value {
        Bson::Int32(v) => Some(*v),
        Bson::Int64(v) => Some(*v as i32),
        Bson::Double(v) => Some(*v as i32),
        _ => None,
    })
}
/// Read `key` as an i64, coercing Int32 and Double values (truncating).
fn get_i64(doc: &Document, key: &str) -> Option<i64> {
    doc.get(key).and_then(|value| match value {
        Bson::Int64(v) => Some(*v),
        Bson::Int32(v) => Some(*v as i64),
        Bson::Double(v) => Some(*v as i64),
        _ => None,
    })
}
/// Read `key` as a bool; non-boolean values yield `None` (no coercion).
fn get_bool(doc: &Document, key: &str) -> Option<bool> {
    doc.get(key).and_then(|value| match value {
        Bson::Boolean(v) => Some(*v),
        _ => None,
    })
}
/// Borrow `key` as an embedded document, if it is one.
fn get_document<'a>(doc: &'a Document, key: &str) -> Option<&'a Document> {
    doc.get(key).and_then(|value| match value {
        Bson::Document(d) => Some(d),
        _ => None,
    })
}
// ---------------------------------------------------------------------------
// find
// ---------------------------------------------------------------------------
/// Handle the `find` command.
///
/// Pipeline: (optional) index-accelerated candidate lookup -> filter ->
/// sort -> skip -> limit -> projection. Results beyond `batchSize` are
/// parked in a server-side cursor for `getMore`.
pub async fn handle(
    cmd: &Document,
    db: &str,
    ctx: &CommandContext,
) -> CommandResult<Document> {
    let coll = get_str(cmd, "find").unwrap_or("unknown");
    let ns = format!("{}.{}", db, coll);
    // Extract optional parameters.
    let filter = get_document(cmd, "filter").cloned().unwrap_or_default();
    let sort_spec = get_document(cmd, "sort").cloned();
    let projection = get_document(cmd, "projection").cloned();
    let skip = get_i64(cmd, "skip").unwrap_or(0).max(0) as usize;
    let limit = get_i64(cmd, "limit").unwrap_or(0).max(0) as usize;
    // 101 is the conventional default first-batch size.
    let batch_size = get_i32(cmd, "batchSize").unwrap_or(101).max(0) as usize;
    let single_batch = get_bool(cmd, "singleBatch").unwrap_or(false);
    // If the collection does not exist, return an empty cursor.
    let exists = ctx.storage.collection_exists(db, coll).await?;
    if !exists {
        return Ok(doc! {
            "cursor": {
                "firstBatch": [],
                "id": 0_i64,
                "ns": &ns,
            },
            "ok": 1.0,
        });
    }
    // Try index-accelerated lookup; fall back to a full scan when the
    // filter cannot be served from an index.
    let index_key = format!("{}.{}", db, coll);
    let docs = if let Some(idx_ref) = ctx.indexes.get(&index_key) {
        if let Some(candidate_ids) = idx_ref.find_candidate_ids(&filter) {
            debug!(
                ns = %ns,
                candidates = candidate_ids.len(),
                "using index acceleration"
            );
            ctx.storage.find_by_ids(db, coll, candidate_ids).await?
        } else {
            ctx.storage.find_all(db, coll).await?
        }
    } else {
        ctx.storage.find_all(db, coll).await?
    };
    // Apply filter.
    let mut docs = QueryMatcher::filter(&docs, &filter);
    // Apply sort.
    if let Some(ref sort) = sort_spec {
        sort_documents(&mut docs, sort);
    }
    // Apply skip.
    if skip > 0 {
        if skip >= docs.len() {
            docs = Vec::new();
        } else {
            docs = docs.split_off(skip);
        }
    }
    // Apply limit.
    if limit > 0 && docs.len() > limit {
        docs.truncate(limit);
    }
    // Apply projection.
    if let Some(ref proj) = projection {
        docs = docs.iter().map(|d| apply_projection(d, proj)).collect();
    }
    // Determine first batch.
    // NOTE(review): with `singleBatch: true` ALL remaining documents are
    // returned in the first batch rather than truncated to batchSize —
    // confirm this matches driver expectations.
    if docs.len() <= batch_size || single_batch {
        // Everything fits in a single batch; cursor id 0 = exhausted.
        let batch: Vec<Bson> = docs.into_iter().map(Bson::Document).collect();
        Ok(doc! {
            "cursor": {
                "firstBatch": batch,
                "id": 0_i64,
                "ns": &ns,
            },
            "ok": 1.0,
        })
    } else {
        // Split into first batch and remainder, store cursor.
        let remaining = docs.split_off(batch_size);
        let first_batch: Vec<Bson> = docs.into_iter().map(Bson::Document).collect();
        let cursor_id = next_cursor_id();
        ctx.cursors.insert(cursor_id, CursorState {
            documents: remaining,
            position: 0,
            database: db.to_string(),
            collection: coll.to_string(),
        });
        Ok(doc! {
            "cursor": {
                "firstBatch": first_batch,
                "id": cursor_id,
                "ns": &ns,
            },
            "ok": 1.0,
        })
    }
}
// ---------------------------------------------------------------------------
// getMore
// ---------------------------------------------------------------------------
/// Handle the `getMore` command.
///
/// Returns the next batch of buffered documents for a previously registered
/// cursor. When the cursor is exhausted it is removed from the registry and
/// the reply carries cursor id 0, telling the client to stop iterating.
pub async fn handle_get_more(
    cmd: &Document,
    db: &str,
    ctx: &CommandContext,
) -> CommandResult<Document> {
    // Defensively extract cursor id.
    let cursor_id = get_i64(cmd, "getMore").ok_or_else(|| {
        CommandError::InvalidArgument("getMore requires a cursor id".into())
    })?;
    let coll = get_str(cmd, "collection").unwrap_or("unknown");
    let ns = format!("{}.{}", db, coll);
    // batchSize may arrive as Int64 or Int32 depending on the driver.
    let batch_size = get_i64(cmd, "batchSize")
        .or_else(|| get_i32(cmd, "batchSize").map(|v| v as i64))
        .unwrap_or(101)
        .max(0) as usize;
    // Look up the cursor.
    let mut cursor_entry = ctx.cursors.get_mut(&cursor_id).ok_or_else(|| {
        CommandError::InvalidArgument(format!("cursor id {} not found", cursor_id))
    })?;
    let cursor = cursor_entry.value_mut();
    // Slice out the next window of buffered documents.
    let start = cursor.position;
    let end = (start + batch_size).min(cursor.documents.len());
    let batch: Vec<Bson> = cursor.documents[start..end]
        .iter()
        .cloned()
        .map(Bson::Document)
        .collect();
    cursor.position = end;
    let exhausted = cursor.position >= cursor.documents.len();
    // Must drop the mutable reference before removing.
    // (Removing a DashMap key while still holding a guard into the same map
    // can deadlock on the shard lock, so ordering here matters.)
    drop(cursor_entry);
    if exhausted {
        ctx.cursors.remove(&cursor_id);
        // Cursor id 0 signals the client that the cursor is closed.
        Ok(doc! {
            "cursor": {
                "nextBatch": batch,
                "id": 0_i64,
                "ns": &ns,
            },
            "ok": 1.0,
        })
    } else {
        Ok(doc! {
            "cursor": {
                "nextBatch": batch,
                "id": cursor_id,
                "ns": &ns,
            },
            "ok": 1.0,
        })
    }
}
// ---------------------------------------------------------------------------
// killCursors
// ---------------------------------------------------------------------------
/// Handle the `killCursors` command.
///
/// Removes every listed cursor from the registry, reporting which ids were
/// actually found. A missing or malformed `cursors` array is a no-op.
pub async fn handle_kill_cursors(
    cmd: &Document,
    _db: &str,
    ctx: &CommandContext,
) -> CommandResult<Document> {
    let ids = match cmd.get("cursors") {
        Some(Bson::Array(arr)) => arr,
        _ => {
            // Nothing to kill: report empty result sets.
            return Ok(doc! {
                "cursorsKilled": [],
                "cursorsNotFound": [],
                "cursorsAlive": [],
                "cursorsUnknown": [],
                "ok": 1.0,
            });
        }
    };
    let mut killed: Vec<Bson> = Vec::new();
    let mut not_found: Vec<Bson> = Vec::new();
    for entry in ids {
        // Cursor ids may be encoded as Int64 or Int32; skip anything else.
        let id = match entry {
            Bson::Int64(n) => *n,
            Bson::Int32(n) => i64::from(*n),
            _ => continue,
        };
        let bucket = if ctx.cursors.remove(&id).is_some() {
            &mut killed
        } else {
            &mut not_found
        };
        bucket.push(Bson::Int64(id));
    }
    Ok(doc! {
        "cursorsKilled": killed,
        "cursorsNotFound": not_found,
        "cursorsAlive": [],
        "cursorsUnknown": [],
        "ok": 1.0,
    })
}
// ---------------------------------------------------------------------------
// count
// ---------------------------------------------------------------------------
/// Handle the `count` command.
///
/// Counts matching documents, honoring optional `query`, `skip`, and
/// `limit` parameters. Unfiltered counts use the storage-level counter.
pub async fn handle_count(
    cmd: &Document,
    db: &str,
    ctx: &CommandContext,
) -> CommandResult<Document> {
    let coll = get_str(cmd, "count").unwrap_or("unknown");
    // A missing collection counts as zero documents.
    if !ctx.storage.collection_exists(db, coll).await? {
        return Ok(doc! { "n": 0_i64, "ok": 1.0 });
    }
    let query = get_document(cmd, "query").cloned().unwrap_or_default();
    let skip = get_i64(cmd, "skip").unwrap_or(0).max(0) as usize;
    let limit = get_i64(cmd, "limit").unwrap_or(0).max(0) as usize;
    let count: u64 = if query.is_empty() {
        // No filter: the storage-level count suffices; adjust for skip/limit.
        let total = ctx.storage.count(db, coll).await? as usize;
        let mut n = total.saturating_sub(skip);
        if limit > 0 {
            n = n.min(limit);
        }
        n as u64
    } else {
        // Filtered: load everything, match, then apply skip/limit to the tally.
        let docs = ctx.storage.find_all(db, coll).await?;
        let mut n = QueryMatcher::filter(&docs, &query).len();
        n = n.saturating_sub(skip);
        if limit > 0 {
            n = n.min(limit);
        }
        n as u64
    };
    Ok(doc! {
        "n": count as i64,
        "ok": 1.0,
    })
}
// ---------------------------------------------------------------------------
// distinct
// ---------------------------------------------------------------------------
/// Handle the `distinct` command.
///
/// Returns the distinct values of `key` across all documents in the
/// collection, optionally restricted by `query`.
pub async fn handle_distinct(
    cmd: &Document,
    db: &str,
    ctx: &CommandContext,
) -> CommandResult<Document> {
    let coll = get_str(cmd, "distinct").unwrap_or("unknown");
    let key = get_str(cmd, "key").ok_or_else(|| {
        CommandError::InvalidArgument("distinct requires a 'key' field".into())
    })?;
    // A missing collection yields an empty value set.
    if !ctx.storage.collection_exists(db, coll).await? {
        return Ok(doc! { "values": [], "ok": 1.0 });
    }
    let query = get_document(cmd, "query").cloned();
    let docs = ctx.storage.find_all(db, coll).await?;
    Ok(doc! {
        "values": distinct_values(&docs, key, query.as_ref()),
        "ok": 1.0,
    })
}

View File

@@ -0,0 +1,28 @@
use bson::{doc, Document};
use crate::context::CommandContext;
use crate::error::CommandResult;
/// Handle `hello`, `ismaster`, and `isMaster` commands.
///
/// Returns server capabilities matching wire protocol expectations.
pub async fn handle(
_cmd: &Document,
_db: &str,
_ctx: &CommandContext,
) -> CommandResult<Document> {
Ok(doc! {
"ismaster": true,
"isWritablePrimary": true,
"maxBsonObjectSize": 16_777_216_i32,
"maxMessageSizeBytes": 48_000_000_i32,
"maxWriteBatchSize": 100_000_i32,
"localTime": bson::DateTime::now(),
"logicalSessionTimeoutMinutes": 30_i32,
"connectionId": 1_i32,
"minWireVersion": 0_i32,
"maxWireVersion": 21_i32,
"readOnly": false,
"ok": 1.0,
})
}

View File

@@ -0,0 +1,342 @@
use bson::{doc, Bson, Document};
use rustdb_index::{IndexEngine, IndexOptions};
use tracing::debug;
use crate::context::CommandContext;
use crate::error::{CommandError, CommandResult};
/// Handle `createIndexes`, `dropIndexes`, and `listIndexes` commands.
///
/// Dispatches on `command_name`; unrecognized names are acknowledged
/// without any action.
pub async fn handle(
    cmd: &Document,
    db: &str,
    ctx: &CommandContext,
    command_name: &str,
) -> CommandResult<Document> {
    if command_name == "createIndexes" {
        return handle_create_indexes(cmd, db, ctx).await;
    }
    if command_name == "dropIndexes" {
        return handle_drop_indexes(cmd, db, ctx).await;
    }
    if command_name == "listIndexes" {
        return handle_list_indexes(cmd, db, ctx).await;
    }
    Ok(doc! { "ok": 1.0 })
}
/// Handle the `createIndexes` command.
///
/// Creates each index described in the `indexes` array, auto-creating the
/// target collection when necessary. When indexes are added to a collection
/// that already existed, the index engine is rebuilt from the stored
/// documents so existing data is reflected in the new indexes.
async fn handle_create_indexes(
    cmd: &Document,
    db: &str,
    ctx: &CommandContext,
) -> CommandResult<Document> {
    let coll = cmd
        .get_str("createIndexes")
        .map_err(|_| CommandError::InvalidArgument("missing 'createIndexes' field".into()))?;
    let indexes = cmd
        .get_array("indexes")
        .map_err(|_| CommandError::InvalidArgument("missing 'indexes' array".into()))?;
    let ns_key = format!("{}.{}", db, coll);
    debug!(
        db = db,
        collection = coll,
        count = indexes.len(),
        "createIndexes command"
    );
    // Auto-create collection if needed.
    let created_automatically = ensure_collection_exists(db, coll, ctx).await?;
    // Get the number of indexes before creating new ones.
    let num_before = {
        let engine = ctx
            .indexes
            .entry(ns_key.clone())
            .or_insert_with(IndexEngine::new);
        engine.list_indexes().len() as i32
    };
    let mut created_count = 0_i32;
    for index_bson in indexes {
        let index_spec = match index_bson {
            Bson::Document(d) => d,
            _ => {
                return Err(CommandError::InvalidArgument(
                    "index spec must be a document".into(),
                ));
            }
        };
        let key = match index_spec.get("key") {
            Some(Bson::Document(k)) => k.clone(),
            _ => {
                return Err(CommandError::InvalidArgument(
                    "index spec must have a 'key' document".into(),
                ));
            }
        };
        let name = index_spec.get_str("name").ok().map(|s| s.to_string());
        let unique = match index_spec.get("unique") {
            Some(Bson::Boolean(b)) => *b,
            _ => false,
        };
        let sparse = match index_spec.get("sparse") {
            Some(Bson::Boolean(b)) => *b,
            _ => false,
        };
        // Read expireAfterSeconds as signed first so negative TTLs can be
        // rejected. The previous `as u64` cast silently wrapped e.g. -1 into
        // 18446744073709551615 seconds instead of reporting an error.
        let ttl_signed = match index_spec.get("expireAfterSeconds") {
            Some(Bson::Int32(n)) => Some(i64::from(*n)),
            Some(Bson::Int64(n)) => Some(*n),
            _ => None,
        };
        let expire_after_seconds = match ttl_signed {
            Some(secs) => Some(u64::try_from(secs).map_err(|_| {
                CommandError::InvalidArgument(
                    "expireAfterSeconds must be non-negative".into(),
                )
            })?),
            None => None,
        };
        let options = IndexOptions {
            name,
            unique,
            sparse,
            expire_after_seconds,
        };
        // Create the index; the DashMap entry guard is held only for this call.
        let mut engine = ctx
            .indexes
            .entry(ns_key.clone())
            .or_insert_with(IndexEngine::new);
        match engine.create_index(key, options) {
            Ok(index_name) => {
                debug!(index_name = %index_name, "Created index");
                created_count += 1;
            }
            Err(e) => {
                return Err(CommandError::IndexError(e.to_string()));
            }
        }
    }
    // If we created indexes on an existing collection, rebuild from documents
    // so that pre-existing data is reflected in the new indexes.
    if created_count > 0 && !created_automatically {
        if let Ok(all_docs) = ctx.storage.find_all(db, coll).await {
            if !all_docs.is_empty() {
                let mut engine = ctx
                    .indexes
                    .entry(ns_key.clone())
                    .or_insert_with(IndexEngine::new);
                engine.rebuild_from_documents(&all_docs);
            }
        }
    }
    let num_after = {
        let engine = ctx
            .indexes
            .entry(ns_key.clone())
            .or_insert_with(IndexEngine::new);
        engine.list_indexes().len() as i32
    };
    Ok(doc! {
        "createdCollectionAutomatically": created_automatically,
        "numIndexesBefore": num_before,
        "numIndexesAfter": num_after,
        "ok": 1.0,
    })
}
/// Handle the `dropIndexes` command.
///
/// Supports dropping by name, by key specification, or all non-_id_ indexes
/// via the "*" wildcard. Reports the pre-drop index count as `nIndexesWas`.
async fn handle_drop_indexes(
    cmd: &Document,
    db: &str,
    ctx: &CommandContext,
) -> CommandResult<Document> {
    let coll = cmd
        .get_str("dropIndexes")
        .map_err(|_| CommandError::InvalidArgument("missing 'dropIndexes' field".into()))?;
    let ns_key = format!("{}.{}", db, coll);
    // Record the index count before dropping; an unregistered namespace
    // still has at minimum the implicit _id_ index.
    let n_indexes_was = ctx
        .indexes
        .get(&ns_key)
        .map(|engine| engine.list_indexes().len() as i32)
        .unwrap_or(1_i32);
    let index_spec = cmd.get("index");
    debug!(
        db = db,
        collection = coll,
        index_spec = ?index_spec,
        "dropIndexes command"
    );
    match index_spec {
        Some(Bson::String(name)) if name == "*" => {
            // Wildcard: drop everything except the _id_ index.
            if let Some(mut engine) = ctx.indexes.get_mut(&ns_key) {
                engine.drop_all_indexes();
            }
        }
        Some(Bson::String(name)) => {
            // Drop a single index by its name.
            match ctx.indexes.get_mut(&ns_key) {
                Some(mut engine) => {
                    engine
                        .drop_index(name)
                        .map_err(|e| CommandError::IndexError(e.to_string()))?;
                }
                None => {
                    return Err(CommandError::IndexError(format!(
                        "index not found: {}",
                        name
                    )));
                }
            }
        }
        Some(Bson::Document(key_spec)) => {
            // Drop by key specification: resolve the matching name, then drop.
            match ctx.indexes.get_mut(&ns_key) {
                Some(mut engine) => {
                    let resolved = engine
                        .list_indexes()
                        .iter()
                        .find(|info| info.key == *key_spec)
                        .map(|info| info.name.clone());
                    match resolved {
                        Some(name) => {
                            engine
                                .drop_index(&name)
                                .map_err(|e| CommandError::IndexError(e.to_string()))?;
                        }
                        None => {
                            return Err(CommandError::IndexError(
                                "index not found with specified key".into(),
                            ));
                        }
                    }
                }
                None => {
                    return Err(CommandError::IndexError(
                        "no indexes found for collection".into(),
                    ));
                }
            }
        }
        _ => {
            return Err(CommandError::InvalidArgument(
                "dropIndexes requires 'index' field (string, document, or \"*\")".into(),
            ));
        }
    }
    Ok(doc! {
        "nIndexesWas": n_indexes_was,
        "ok": 1.0,
    })
}
/// Handle the `listIndexes` command.
///
/// Lists index specifications for the collection as a single-batch cursor.
/// A collection that is known not to exist is a NamespaceNotFound error; a
/// failed existence check proceeds on a best-effort basis.
async fn handle_list_indexes(
    cmd: &Document,
    db: &str,
    ctx: &CommandContext,
) -> CommandResult<Document> {
    let coll = cmd
        .get_str("listIndexes")
        .map_err(|_| CommandError::InvalidArgument("missing 'listIndexes' field".into()))?;
    let ns = format!("{}.{}", db, coll);
    // Only a definitive "does not exist" answer is treated as an error.
    if let Ok(false) = ctx.storage.collection_exists(db, coll).await {
        return Err(CommandError::NamespaceNotFound(format!(
            "ns not found: {}",
            ns
        )));
    }
    // When no engine is registered yet, a fresh engine supplies the
    // default _id_ index listing.
    let infos = match ctx.indexes.get(&ns) {
        Some(engine) => engine.list_indexes(),
        None => IndexEngine::new().list_indexes(),
    };
    let first_batch: Vec<Bson> = infos
        .into_iter()
        .map(|info| {
            let mut spec = doc! {
                "v": info.v,
                "key": info.key,
                "name": info.name,
            };
            // Optional attributes are emitted only when set.
            if info.unique {
                spec.insert("unique", true);
            }
            if info.sparse {
                spec.insert("sparse", true);
            }
            if let Some(ttl) = info.expire_after_seconds {
                spec.insert("expireAfterSeconds", ttl as i64);
            }
            Bson::Document(spec)
        })
        .collect();
    Ok(doc! {
        "cursor": {
            "id": 0_i64,
            "ns": &ns,
            "firstBatch": first_batch,
        },
        "ok": 1.0,
    })
}
/// Ensure the target database and collection exist. Returns true if the collection
/// was newly created (i.e., `createdCollectionAutomatically`).
async fn ensure_collection_exists(
    db: &str,
    coll: &str,
    ctx: &CommandContext,
) -> CommandResult<bool> {
    // A pre-existing database is fine; any other failure is fatal.
    if let Err(e) = ctx.storage.create_database(db).await {
        let msg = e.to_string();
        let already = msg.contains("AlreadyExists") || msg.contains("already exists");
        if !already {
            return Err(CommandError::StorageError(msg));
        }
    }
    // If we can positively confirm the collection exists, nothing was created.
    if let Ok(true) = ctx.storage.collection_exists(db, coll).await {
        return Ok(false);
    }
    // Otherwise (missing, or the existence check failed) attempt creation,
    // tolerating a concurrent creation racing us.
    if let Err(e) = ctx.storage.create_collection(db, coll).await {
        let msg = e.to_string();
        let already = msg.contains("AlreadyExists") || msg.contains("already exists");
        if !already {
            return Err(CommandError::StorageError(msg));
        }
    }
    Ok(true)
}

View File

@@ -0,0 +1,185 @@
use std::collections::HashMap;
use bson::{doc, oid::ObjectId, Bson, Document};
use rustdb_index::IndexEngine;
use tracing::{debug, warn};
use crate::context::CommandContext;
use crate::error::{CommandError, CommandResult};
/// Handle the `insert` command.
pub async fn handle(
cmd: &Document,
db: &str,
ctx: &CommandContext,
document_sequences: Option<&HashMap<String, Vec<Document>>>,
) -> CommandResult<Document> {
let coll = cmd
.get_str("insert")
.map_err(|_| CommandError::InvalidArgument("missing 'insert' field".into()))?;
// Determine whether writes are ordered (default: true).
let ordered = match cmd.get("ordered") {
Some(Bson::Boolean(b)) => *b,
_ => true,
};
// Collect documents from either the command body or OP_MSG document sequences.
let docs: Vec<Document> = if let Some(seqs) = document_sequences {
if let Some(seq_docs) = seqs.get("documents") {
seq_docs.clone()
} else {
extract_docs_from_array(cmd)?
}
} else {
extract_docs_from_array(cmd)?
};
if docs.is_empty() {
return Err(CommandError::InvalidArgument(
"no documents to insert".into(),
));
}
debug!(
db = db,
collection = coll,
count = docs.len(),
"insert command"
);
// Auto-create database and collection if they don't exist.
ensure_collection_exists(db, coll, ctx).await?;
let ns_key = format!("{}.{}", db, coll);
let mut inserted_count: i32 = 0;
let mut write_errors: Vec<Document> = Vec::new();
for (idx, mut doc) in docs.into_iter().enumerate() {
// Auto-generate _id if not present.
if !doc.contains_key("_id") {
doc.insert("_id", ObjectId::new());
}
// Attempt storage insert.
match ctx.storage.insert_one(db, coll, doc.clone()).await {
Ok(_id_str) => {
// Update index engine.
let mut engine = ctx
.indexes
.entry(ns_key.clone())
.or_insert_with(IndexEngine::new);
if let Err(e) = engine.on_insert(&doc) {
warn!(
namespace = %ns_key,
error = %e,
"index update failed after successful insert"
);
}
inserted_count += 1;
}
Err(e) => {
let err_msg = e.to_string();
let (code, code_name) = if err_msg.contains("AlreadyExists")
|| err_msg.contains("duplicate")
{
(11000_i32, "DuplicateKey")
} else {
(1_i32, "InternalError")
};
write_errors.push(doc! {
"index": idx as i32,
"code": code,
"codeName": code_name,
"errmsg": &err_msg,
});
if ordered {
// Stop on first error when ordered.
break;
}
}
}
}
// Build response document.
let mut response = doc! {
"n": inserted_count,
"ok": 1.0,
};
if !write_errors.is_empty() {
response.insert(
"writeErrors",
write_errors
.into_iter()
.map(Bson::Document)
.collect::<Vec<_>>(),
);
}
Ok(response)
}
/// Extract documents from the `documents` array field in the command BSON.
///
/// A missing or non-array field yields an empty vector (the caller decides
/// whether that is an error); a non-document array element is rejected.
fn extract_docs_from_array(cmd: &Document) -> CommandResult<Vec<Document>> {
    let arr = match cmd.get_array("documents") {
        Ok(arr) => arr,
        Err(_) => return Ok(Vec::new()),
    };
    arr.iter()
        .map(|item| match item {
            Bson::Document(d) => Ok(d.clone()),
            _ => Err(CommandError::InvalidArgument(
                "documents array contains non-document element".into(),
            )),
        })
        .collect()
}
/// Ensure the target database and collection exist, creating them if needed.
async fn ensure_collection_exists(
    db: &str,
    coll: &str,
    ctx: &CommandContext,
) -> CommandResult<()> {
    // True when the error message only indicates the object already exists.
    fn is_already_exists(msg: &str) -> bool {
        msg.contains("AlreadyExists") || msg.contains("already exists")
    }
    // Create database (no-op if it already exists in most backends).
    if let Err(e) = ctx.storage.create_database(db).await {
        let msg = e.to_string();
        if !is_already_exists(&msg) {
            return Err(CommandError::StorageError(msg));
        }
    }
    match ctx.storage.collection_exists(db, coll).await {
        Ok(true) => {}
        Ok(false) => {
            if let Err(e) = ctx.storage.create_collection(db, coll).await {
                let msg = e.to_string();
                if !is_already_exists(&msg) {
                    return Err(CommandError::StorageError(msg));
                }
            }
        }
        Err(e) => {
            // The existence check failed (database may not exist yet); try
            // creating the collection anyway and report both failures.
            if let Err(e2) = ctx.storage.create_collection(db, coll).await {
                let msg = e2.to_string();
                if !is_already_exists(&msg) {
                    return Err(CommandError::StorageError(format!(
                        "collection_exists failed: {e}; create_collection failed: {msg}"
                    )));
                }
            }
        }
    }
    Ok(())
}

View File

@@ -0,0 +1,8 @@
//! Command handler modules: each submodule implements one family of
//! MongoDB-compatible commands dispatched by the crate's command router.
pub mod admin_handler;
pub mod aggregate_handler;
pub mod delete_handler;
pub mod find_handler;
pub mod hello_handler;
pub mod index_handler;
pub mod insert_handler;
pub mod update_handler;

View File

@@ -0,0 +1,617 @@
use std::collections::HashSet;
use bson::{doc, oid::ObjectId, Bson, Document};
use rustdb_index::IndexEngine;
use rustdb_query::{QueryMatcher, UpdateEngine, sort_documents, apply_projection};
use tracing::debug;
use crate::context::CommandContext;
use crate::error::{CommandError, CommandResult};
/// Handle `update` and `findAndModify` commands.
///
/// Both spellings of findAndModify route to the same implementation;
/// any other command name is treated as a plain update.
pub async fn handle(
    cmd: &Document,
    db: &str,
    ctx: &CommandContext,
    command_name: &str,
) -> CommandResult<Document> {
    if matches!(command_name, "findAndModify" | "findandmodify") {
        handle_find_and_modify(cmd, db, ctx).await
    } else {
        handle_update(cmd, db, ctx).await
    }
}
/// Handle the `update` command.
///
/// Processes each entry of the `updates` array in order, supporting
/// `multi`, `upsert`, and `arrayFilters`. When `ordered` is true (the
/// default), processing stops at the first write error — including errors
/// raised while applying a multi-document update. (Previously an ordered
/// error inside the per-document loop only aborted that one update spec,
/// not the whole batch.)
async fn handle_update(
    cmd: &Document,
    db: &str,
    ctx: &CommandContext,
) -> CommandResult<Document> {
    let coll = cmd
        .get_str("update")
        .map_err(|_| CommandError::InvalidArgument("missing 'update' field".into()))?;
    let updates = cmd
        .get_array("updates")
        .map_err(|_| CommandError::InvalidArgument("missing 'updates' array".into()))?;
    let ordered = match cmd.get("ordered") {
        Some(Bson::Boolean(b)) => *b,
        _ => true,
    };
    debug!(db = db, collection = coll, count = updates.len(), "update command");
    // Auto-create database and collection if needed.
    ensure_collection_exists(db, coll, ctx).await?;
    let ns_key = format!("{}.{}", db, coll);
    let mut total_n: i32 = 0;
    let mut total_n_modified: i32 = 0;
    let mut upserted_list: Vec<Document> = Vec::new();
    let mut write_errors: Vec<Document> = Vec::new();
    for (idx, update_bson) in updates.iter().enumerate() {
        let update_spec = match update_bson {
            Bson::Document(d) => d,
            _ => {
                write_errors.push(doc! {
                    "index": idx as i32,
                    "code": 14_i32,
                    "codeName": "TypeMismatch",
                    "errmsg": "update spec must be a document",
                });
                if ordered {
                    break;
                }
                continue;
            }
        };
        // "q" is the match filter; missing means match-all.
        let filter = match update_spec.get("q") {
            Some(Bson::Document(d)) => d.clone(),
            _ => Document::new(),
        };
        // "u" is either an update-operator document or a full replacement.
        let update = match update_spec.get("u") {
            Some(Bson::Document(d)) => d.clone(),
            Some(Bson::Array(_pipeline)) => {
                // Aggregation pipeline updates are not yet supported; treat as error.
                write_errors.push(doc! {
                    "index": idx as i32,
                    "code": 14_i32,
                    "codeName": "TypeMismatch",
                    "errmsg": "aggregation pipeline updates not yet supported",
                });
                if ordered {
                    break;
                }
                continue;
            }
            _ => {
                write_errors.push(doc! {
                    "index": idx as i32,
                    "code": 14_i32,
                    "codeName": "TypeMismatch",
                    "errmsg": "missing or invalid 'u' field in update spec",
                });
                if ordered {
                    break;
                }
                continue;
            }
        };
        let multi = match update_spec.get("multi") {
            Some(Bson::Boolean(b)) => *b,
            _ => false,
        };
        let upsert = match update_spec.get("upsert") {
            Some(Bson::Boolean(b)) => *b,
            _ => false,
        };
        let array_filters: Option<Vec<Document>> =
            update_spec.get_array("arrayFilters").ok().map(|arr| {
                arr.iter()
                    .filter_map(|v| {
                        if let Bson::Document(d) = v {
                            Some(d.clone())
                        } else {
                            None
                        }
                    })
                    .collect()
            });
        // Load all documents and filter.
        let all_docs = load_filtered_docs(db, coll, &filter, &ns_key, ctx).await?;
        if all_docs.is_empty() && upsert {
            // Upsert: create a new document seeded from the filter's
            // equality conditions, then apply the update to it.
            let new_doc = build_upsert_doc(&filter);
            match UpdateEngine::apply_update(&new_doc, &update, array_filters.as_deref()) {
                Ok(mut updated) => {
                    // Apply $setOnInsert if present.
                    if let Some(Bson::Document(soi)) = update.get("$setOnInsert") {
                        UpdateEngine::apply_set_on_insert(&mut updated, soi);
                    }
                    // Ensure _id exists.
                    let new_id = if !updated.contains_key("_id") {
                        let oid = ObjectId::new();
                        updated.insert("_id", oid);
                        Bson::ObjectId(oid)
                    } else {
                        updated.get("_id").unwrap().clone()
                    };
                    // Insert the new document.
                    match ctx.storage.insert_one(db, coll, updated.clone()).await {
                        Ok(_) => {
                            // Keep the index engine in sync.
                            let mut engine = ctx
                                .indexes
                                .entry(ns_key.clone())
                                .or_insert_with(IndexEngine::new);
                            let _ = engine.on_insert(&updated);
                            total_n += 1;
                            upserted_list.push(doc! {
                                "index": idx as i32,
                                "_id": new_id,
                            });
                        }
                        Err(e) => {
                            write_errors.push(doc! {
                                "index": idx as i32,
                                "code": 1_i32,
                                "codeName": "InternalError",
                                "errmsg": e.to_string(),
                            });
                            if ordered {
                                break;
                            }
                        }
                    }
                }
                Err(e) => {
                    write_errors.push(doc! {
                        "index": idx as i32,
                        "code": 14_i32,
                        "codeName": "TypeMismatch",
                        "errmsg": e.to_string(),
                    });
                    if ordered {
                        break;
                    }
                }
            }
        } else {
            // Update matched documents (only the first unless multi:true).
            let docs_to_update = if multi {
                all_docs
            } else {
                all_docs.into_iter().take(1).collect()
            };
            // BUG FIX: `break` inside the per-document loop below only exits
            // that inner loop. An ordered error must also abort the outer
            // loop over update specs, so track it in a flag.
            let mut abort_batch = false;
            for matched_doc in &docs_to_update {
                match UpdateEngine::apply_update(
                    matched_doc,
                    &update,
                    array_filters.as_deref(),
                ) {
                    Ok(updated_doc) => {
                        let id_str = extract_id_string(matched_doc);
                        match ctx
                            .storage
                            .update_by_id(db, coll, &id_str, updated_doc.clone())
                            .await
                        {
                            Ok(()) => {
                                // Keep the index engine in sync.
                                if let Some(mut engine) = ctx.indexes.get_mut(&ns_key) {
                                    let _ = engine.on_update(matched_doc, &updated_doc);
                                }
                                total_n += 1;
                                // nModified only counts documents that actually changed.
                                if matched_doc != &updated_doc {
                                    total_n_modified += 1;
                                }
                            }
                            Err(e) => {
                                write_errors.push(doc! {
                                    "index": idx as i32,
                                    "code": 1_i32,
                                    "codeName": "InternalError",
                                    "errmsg": e.to_string(),
                                });
                                if ordered {
                                    abort_batch = true;
                                    break;
                                }
                            }
                        }
                    }
                    Err(e) => {
                        write_errors.push(doc! {
                            "index": idx as i32,
                            "code": 14_i32,
                            "codeName": "TypeMismatch",
                            "errmsg": e.to_string(),
                        });
                        if ordered {
                            abort_batch = true;
                            break;
                        }
                    }
                }
            }
            if abort_batch {
                break;
            }
        }
    }
    // Build response.
    let mut response = doc! {
        "n": total_n,
        "nModified": total_n_modified,
        "ok": 1.0,
    };
    if !upserted_list.is_empty() {
        response.insert(
            "upserted",
            upserted_list
                .into_iter()
                .map(Bson::Document)
                .collect::<Vec<_>>(),
        );
    }
    if !write_errors.is_empty() {
        response.insert(
            "writeErrors",
            write_errors
                .into_iter()
                .map(Bson::Document)
                .collect::<Vec<_>>(),
        );
    }
    Ok(response)
}
/// Handle the `findAndModify` command.
///
/// Finds at most one document matching `query` (after an optional `sort`)
/// and either removes it, updates it, or — with `upsert` — inserts a new
/// one. The reply's `value` carries the pre- or post-image depending on
/// the `new` flag, optionally projected through `fields`.
///
/// NOTE(review): the find/modify sequence here is not guarded against
/// concurrent writers between the load and the storage mutation — confirm
/// whether single-command atomicity is provided elsewhere.
async fn handle_find_and_modify(
    cmd: &Document,
    db: &str,
    ctx: &CommandContext,
) -> CommandResult<Document> {
    // Accept both historical spellings of the command name.
    let coll = cmd
        .get_str("findAndModify")
        .or_else(|_| cmd.get_str("findandmodify"))
        .map_err(|_| CommandError::InvalidArgument("missing 'findAndModify' field".into()))?;
    // Optional parameters; missing query means match-all.
    let query = match cmd.get("query") {
        Some(Bson::Document(d)) => d.clone(),
        _ => Document::new(),
    };
    let sort = match cmd.get("sort") {
        Some(Bson::Document(d)) => Some(d.clone()),
        _ => None,
    };
    let update_doc = match cmd.get("update") {
        Some(Bson::Document(d)) => Some(d.clone()),
        _ => None,
    };
    let remove = match cmd.get("remove") {
        Some(Bson::Boolean(b)) => *b,
        _ => false,
    };
    // `new: true` returns the post-update document instead of the original.
    let return_new = match cmd.get("new") {
        Some(Bson::Boolean(b)) => *b,
        _ => false,
    };
    let upsert = match cmd.get("upsert") {
        Some(Bson::Boolean(b)) => *b,
        _ => false,
    };
    let fields = match cmd.get("fields") {
        Some(Bson::Document(d)) => Some(d.clone()),
        _ => None,
    };
    // arrayFilters: keep only document-typed entries.
    let array_filters: Option<Vec<Document>> =
        cmd.get_array("arrayFilters").ok().map(|arr| {
            arr.iter()
                .filter_map(|v| {
                    if let Bson::Document(d) = v {
                        Some(d.clone())
                    } else {
                        None
                    }
                })
                .collect()
        });
    // Auto-create database and collection.
    ensure_collection_exists(db, coll, ctx).await?;
    let ns_key = format!("{}.{}", db, coll);
    // Load and filter documents.
    let mut matched = load_filtered_docs(db, coll, &query, &ns_key, ctx).await?;
    // Sort if specified (determines which document is "first").
    if let Some(ref sort_spec) = sort {
        sort_documents(&mut matched, sort_spec);
    }
    // Take the first match.
    let target = matched.into_iter().next();
    if remove {
        // Remove operation: delete the matched document (if any) and return
        // its pre-image under "value".
        if let Some(ref doc) = target {
            let id_str = extract_id_string(doc);
            ctx.storage.delete_by_id(db, coll, &id_str).await?;
            // Keep the index engine in sync with the deletion.
            if let Some(mut engine) = ctx.indexes.get_mut(&ns_key) {
                engine.on_delete(doc);
            }
            let value = apply_fields_projection(doc, &fields);
            return Ok(doc! {
                "value": value,
                "lastErrorObject": {
                    "n": 1_i32,
                    "updatedExisting": false,
                },
                "ok": 1.0,
            });
        } else {
            // Nothing matched: report n: 0 with a null value.
            return Ok(doc! {
                "value": Bson::Null,
                "lastErrorObject": {
                    "n": 0_i32,
                    "updatedExisting": false,
                },
                "ok": 1.0,
            });
        }
    }
    // Update operation. Without an update document there is nothing to do.
    let update = match update_doc {
        Some(u) => u,
        None => {
            return Ok(doc! {
                "value": Bson::Null,
                "lastErrorObject": {
                    "n": 0_i32,
                    "updatedExisting": false,
                },
                "ok": 1.0,
            });
        }
    };
    if let Some(original_doc) = target {
        // Update the matched document in place (keyed by _id).
        let updated_doc = UpdateEngine::apply_update(
            &original_doc,
            &update,
            array_filters.as_deref(),
        )
        .map_err(|e| CommandError::InternalError(e.to_string()))?;
        let id_str = extract_id_string(&original_doc);
        ctx.storage
            .update_by_id(db, coll, &id_str, updated_doc.clone())
            .await?;
        // Keep the index engine in sync with the change.
        if let Some(mut engine) = ctx.indexes.get_mut(&ns_key) {
            let _ = engine.on_update(&original_doc, &updated_doc);
        }
        // Choose pre- or post-image per the `new` flag.
        let return_doc = if return_new {
            &updated_doc
        } else {
            &original_doc
        };
        let value = apply_fields_projection(return_doc, &fields);
        Ok(doc! {
            "value": value,
            "lastErrorObject": {
                "n": 1_i32,
                "updatedExisting": true,
            },
            "ok": 1.0,
        })
    } else if upsert {
        // Upsert: create a new document seeded from the query's equality
        // conditions, then apply the update to it.
        let new_doc = build_upsert_doc(&query);
        let mut updated_doc = UpdateEngine::apply_update(
            &new_doc,
            &update,
            array_filters.as_deref(),
        )
        .map_err(|e| CommandError::InternalError(e.to_string()))?;
        // Apply $setOnInsert if present.
        if let Some(Bson::Document(soi)) = update.get("$setOnInsert") {
            UpdateEngine::apply_set_on_insert(&mut updated_doc, soi);
        }
        // Ensure _id, generating one when the update did not supply it.
        let upserted_id = if !updated_doc.contains_key("_id") {
            let oid = ObjectId::new();
            updated_doc.insert("_id", oid);
            Bson::ObjectId(oid)
        } else {
            updated_doc.get("_id").unwrap().clone()
        };
        ctx.storage
            .insert_one(db, coll, updated_doc.clone())
            .await?;
        // Keep the index engine in sync with the insert.
        {
            let mut engine = ctx
                .indexes
                .entry(ns_key.clone())
                .or_insert_with(IndexEngine::new);
            let _ = engine.on_insert(&updated_doc);
        }
        // Without `new: true` an upsert has no pre-image to return.
        let value = if return_new {
            apply_fields_projection(&updated_doc, &fields)
        } else {
            Bson::Null
        };
        Ok(doc! {
            "value": value,
            "lastErrorObject": {
                "n": 1_i32,
                "updatedExisting": false,
                "upserted": upserted_id,
            },
            "ok": 1.0,
        })
    } else {
        // No match and no upsert: nothing was modified.
        Ok(doc! {
            "value": Bson::Null,
            "lastErrorObject": {
                "n": 0_i32,
                "updatedExisting": false,
            },
            "ok": 1.0,
        })
    }
}
// ---- Helpers ----
/// Load documents from storage, optionally using index for candidate narrowing, then filter.
async fn load_filtered_docs(
    db: &str,
    coll: &str,
    filter: &Document,
    ns_key: &str,
    ctx: &CommandContext,
) -> CommandResult<Vec<Document>> {
    // Ask the index engine (if one is registered) for a candidate id set.
    let candidates: Option<HashSet<String>> = match ctx.indexes.get(ns_key) {
        Some(engine) => engine.find_candidate_ids(filter),
        None => None,
    };
    let loaded = match candidates {
        // The index proved nothing can match: skip storage entirely.
        Some(ids) if ids.is_empty() => return Ok(Vec::new()),
        Some(ids) => ctx.storage.find_by_ids(db, coll, ids).await?,
        None => ctx.storage.find_all(db, coll).await?,
    };
    // An empty filter matches everything; otherwise run the matcher over
    // the (possibly index-narrowed) candidates.
    if filter.is_empty() {
        Ok(loaded)
    } else {
        Ok(QueryMatcher::filter(&loaded, filter))
    }
}
/// Build a base document for an upsert from the filter's equality conditions.
fn build_upsert_doc(filter: &Document) -> Document {
    let mut base = Document::new();
    for (field, value) in filter {
        // Top-level operators like $and / $or carry no direct field value.
        if field.starts_with('$') {
            continue;
        }
        if let Bson::Document(inner) = value {
            if inner.keys().any(|k| k.starts_with('$')) {
                // Operator expression (e.g. {$gt: 5}): only a literal $eq
                // contributes a seed value.
                if let Some(eq_val) = inner.get("$eq") {
                    base.insert(field.clone(), eq_val.clone());
                }
                continue;
            }
        }
        // Plain equality condition: copy it into the new document.
        base.insert(field.clone(), value.clone());
    }
    base
}
/// Extract _id as a string for storage operations.
fn extract_id_string(doc: &Document) -> String {
    match doc.get("_id") {
        Some(Bson::ObjectId(oid)) => oid.to_hex(),
        Some(Bson::String(s)) => s.to_owned(),
        // Other BSON types fall back to their Display rendering.
        Some(other) => other.to_string(),
        None => String::new(),
    }
}
/// Apply fields projection if specified, returning Bson.
fn apply_fields_projection(doc: &Document, fields: &Option<Document>) -> Bson {
    if let Some(proj) = fields {
        if !proj.is_empty() {
            return Bson::Document(apply_projection(doc, proj));
        }
    }
    // No projection (or an empty one): return the document unchanged.
    Bson::Document(doc.clone())
}
/// Ensure the target database and collection exist, creating them if needed.
async fn ensure_collection_exists(
    db: &str,
    coll: &str,
    ctx: &CommandContext,
) -> CommandResult<()> {
    // True when the error message only indicates the object already exists.
    fn is_already_exists(msg: &str) -> bool {
        msg.contains("AlreadyExists") || msg.contains("already exists")
    }
    if let Err(e) = ctx.storage.create_database(db).await {
        let msg = e.to_string();
        if !is_already_exists(&msg) {
            return Err(CommandError::StorageError(msg));
        }
    }
    match ctx.storage.collection_exists(db, coll).await {
        Ok(true) => {}
        Ok(false) => {
            if let Err(e) = ctx.storage.create_collection(db, coll).await {
                let msg = e.to_string();
                if !is_already_exists(&msg) {
                    return Err(CommandError::StorageError(msg));
                }
            }
        }
        Err(e) => {
            // The existence check failed; try creating the collection anyway
            // and report both failures if creation also fails.
            if let Err(e2) = ctx.storage.create_collection(db, coll).await {
                let msg = e2.to_string();
                if !is_already_exists(&msg) {
                    return Err(CommandError::StorageError(format!(
                        "collection_exists failed: {e}; create_collection failed: {msg}"
                    )));
                }
            }
        }
    }
    Ok(())
}

View File

@@ -0,0 +1,8 @@
//! MongoDB-compatible command routing and handlers for RustDb.
mod context;
pub mod error;
pub mod handlers;
mod router;
// Re-export the crate's public surface.
pub use context::{CommandContext, CursorState};
pub use error::{CommandError, CommandResult};
pub use router::CommandRouter;

View File

@@ -0,0 +1,109 @@
use std::sync::Arc;
use bson::Document;
use tracing::{debug, warn};
use rustdb_wire::ParsedCommand;
use crate::context::CommandContext;
use crate::error::CommandError;
use crate::handlers;
/// Routes parsed wire protocol commands to the appropriate handler.
pub struct CommandRouter {
    // Shared execution context handed to every handler invocation.
    ctx: Arc<CommandContext>,
}
impl CommandRouter {
    /// Create a new command router with the given context.
    pub fn new(ctx: Arc<CommandContext>) -> Self {
        Self { ctx }
    }

    /// If the command carries an `lsid`, register/refresh that logical session.
    fn touch_session(&self, command: &Document) {
        if let Some(lsid) = command.get("lsid") {
            if let Some(session_id) = rustdb_txn::SessionEngine::extract_session_id(lsid) {
                self.ctx.sessions.get_or_create_session(&session_id);
            }
        }
    }

    /// Route a parsed command to the appropriate handler, returning a BSON response document.
    ///
    /// Handler errors are converted into MongoDB-style error documents rather
    /// than propagated, so this always yields a response for the wire layer.
    pub async fn route(&self, cmd: &ParsedCommand) -> Document {
        let database = &cmd.database;
        let name = cmd.command_name.as_str();
        debug!(command = %name, database = %database, "routing command");

        // Keep the session alive before dispatching the command itself.
        self.touch_session(&cmd.command);

        let ctx = &self.ctx;
        let outcome = match name {
            // -- handshake / monitoring --
            "hello" | "ismaster" | "isMaster" => {
                handlers::hello_handler::handle(&cmd.command, database, ctx).await
            }
            // -- query commands --
            "find" => handlers::find_handler::handle(&cmd.command, database, ctx).await,
            "getMore" => {
                handlers::find_handler::handle_get_more(&cmd.command, database, ctx).await
            }
            "killCursors" => {
                handlers::find_handler::handle_kill_cursors(&cmd.command, database, ctx).await
            }
            "count" => handlers::find_handler::handle_count(&cmd.command, database, ctx).await,
            "distinct" => {
                handlers::find_handler::handle_distinct(&cmd.command, database, ctx).await
            }
            // -- write commands --
            "insert" => {
                handlers::insert_handler::handle(
                    &cmd.command,
                    database,
                    ctx,
                    cmd.document_sequences.as_ref(),
                )
                .await
            }
            "update" | "findAndModify" => {
                handlers::update_handler::handle(&cmd.command, database, ctx, name).await
            }
            "delete" => handlers::delete_handler::handle(&cmd.command, database, ctx).await,
            // -- aggregation --
            "aggregate" => {
                handlers::aggregate_handler::handle(&cmd.command, database, ctx).await
            }
            // -- index management --
            "createIndexes" | "dropIndexes" | "listIndexes" => {
                handlers::index_handler::handle(&cmd.command, database, ctx, name).await
            }
            // -- admin commands --
            "ping" | "buildInfo" | "buildinfo" | "serverStatus" | "hostInfo"
            | "whatsmyuri" | "getLog" | "replSetGetStatus" | "getCmdLineOpts"
            | "getParameter" | "getFreeMonitoringStatus" | "setFreeMonitoring"
            | "getShardMap" | "shardingState" | "atlasVersion"
            | "connectionStatus" | "listDatabases" | "listCollections"
            | "create" | "drop" | "dropDatabase" | "renameCollection"
            | "dbStats" | "collStats" | "validate" | "explain"
            | "startSession" | "endSessions" | "killSessions"
            | "commitTransaction" | "abortTransaction"
            | "saslStart" | "saslContinue" | "authenticate" | "logout"
            | "currentOp" | "killOp" | "top" | "profile"
            | "compact" | "reIndex" | "fsync" | "connPoolSync" => {
                handlers::admin_handler::handle(&cmd.command, database, ctx, name).await
            }
            // -- unknown command --
            other => {
                warn!(command = %other, "unknown command");
                Err(CommandError::NotImplemented(other.to_string()))
            }
        };

        outcome.unwrap_or_else(|e| e.to_error_doc())
    }
}