fix(bucket-tenants): make tenant lifecycle and bucket import validation safer

2026-05-02 12:09:13 +00:00
parent 7020810b5e
commit b075de1ecd
23 changed files with 435 additions and 183 deletions
+106 -45
@@ -195,6 +195,17 @@ pub struct BucketTenantMetadata {
     pub region: Option<String>,
 }
 
+#[derive(Debug, Clone)]
+pub struct CredentialRemoval {
+    pub previous_credentials: Vec<Credential>,
+}
+
+#[derive(Debug, Clone)]
+pub struct CredentialReplacement {
+    pub credential: Credential,
+    pub previous_credentials: Vec<Credential>,
+}
+
 impl RuntimeCredentialStore {
     pub async fn new(
         config: &AuthConfig,
@@ -244,66 +255,54 @@ impl RuntimeCredentialStore {
         credentials: Vec<Credential>,
     ) -> Result<(), StorageError> {
         validate_credentials(&credentials)?;
+        let mut credentials_guard = self.credentials.write().await;
         self.persist_credentials(&credentials).await?;
-        *self.credentials.write().await = credentials;
+        *credentials_guard = credentials;
         Ok(())
     }
 
     pub async fn replace_bucket_tenant_credential(
         &self,
         bucket_name: &str,
-        mut credential: Credential,
+        credential: Credential,
     ) -> Result<Credential, StorageError> {
-        validate_bucket_scope(bucket_name)?;
-        credential.bucket_name = Some(bucket_name.to_string());
-        let mut credentials = self.credentials.read().await.clone();
-        if credentials.iter().any(|existing| {
-            existing.access_key_id == credential.access_key_id
-                && existing.bucket_name.as_deref() != Some(bucket_name)
-        }) {
-            return Err(StorageError::invalid_request(
-                "Credential accessKeyId is already assigned to another principal.",
-            ));
-        }
-        credentials.retain(|existing| existing.bucket_name.as_deref() != Some(bucket_name));
-        credentials.push(credential.clone());
-        validate_credentials(&credentials)?;
-        self.persist_credentials(&credentials).await?;
-        *self.credentials.write().await = credentials;
-        Ok(credential)
+        let replacement = self
+            .replace_bucket_tenant_credential_with_snapshot(bucket_name, credential)
+            .await?;
+        Ok(replacement.credential)
     }
+
+    pub async fn replace_bucket_tenant_credential_with_snapshot(
+        &self,
+        bucket_name: &str,
+        credential: Credential,
+    ) -> Result<CredentialReplacement, StorageError> {
+        let mut credentials_guard = self.credentials.write().await;
+        let previous_credentials = credentials_guard.clone();
+        let (credential, updated_credentials) =
+            prepare_bucket_tenant_replacement(bucket_name, credential, &credentials_guard)?;
+        self.persist_credentials(&updated_credentials).await?;
+        *credentials_guard = updated_credentials;
+        Ok(CredentialReplacement {
+            credential,
+            previous_credentials,
+        })
+    }
 
     pub async fn remove_bucket_tenant_credentials(
         &self,
         bucket_name: &str,
         access_key_id: Option<&str>,
-    ) -> Result<usize, StorageError> {
-        validate_bucket_scope(bucket_name)?;
-        let mut credentials = self.credentials.read().await.clone();
-        let before = credentials.len();
-        credentials.retain(|credential| {
-            if credential.bucket_name.as_deref() != Some(bucket_name) {
-                return true;
-            }
-            if let Some(access_key_id) = access_key_id {
-                credential.access_key_id != access_key_id
-            } else {
-                false
-            }
-        });
-        let removed = before.saturating_sub(credentials.len());
-        if credentials.is_empty() {
-            return Err(StorageError::invalid_request(
-                "Cannot remove the last active credential.",
-            ));
-        }
-        self.persist_credentials(&credentials).await?;
-        *self.credentials.write().await = credentials;
-        Ok(removed)
+    ) -> Result<CredentialRemoval, StorageError> {
+        let mut credentials_guard = self.credentials.write().await;
+        let previous_credentials = credentials_guard.clone();
+        let (_removed, updated_credentials) =
+            prepare_bucket_tenant_removal(bucket_name, access_key_id, &credentials_guard)?;
+        self.persist_credentials(&updated_credentials).await?;
+        *credentials_guard = updated_credentials;
+        Ok(CredentialRemoval {
+            previous_credentials,
+        })
     }
 
     pub async fn list_bucket_tenants(&self) -> Vec<BucketTenantMetadata> {
@@ -375,6 +374,68 @@ fn validate_bucket_scope(bucket_name: &str) -> Result<(), StorageError> {
     Ok(())
 }
 
+fn prepare_bucket_tenant_replacement(
+    bucket_name: &str,
+    mut credential: Credential,
+    credentials: &[Credential],
+) -> Result<(Credential, Vec<Credential>), StorageError> {
+    validate_bucket_scope(bucket_name)?;
+    credential.bucket_name = Some(bucket_name.to_string());
+
+    if credentials.iter().any(|existing| {
+        existing.access_key_id == credential.access_key_id
+            && existing.bucket_name.as_deref() != Some(bucket_name)
+    }) {
+        return Err(StorageError::invalid_request(
+            "Credential accessKeyId is already assigned to another principal.",
+        ));
+    }
+
+    let mut updated_credentials = credentials.to_vec();
+    updated_credentials.retain(|existing| existing.bucket_name.as_deref() != Some(bucket_name));
+    updated_credentials.push(credential.clone());
+    validate_credentials(&updated_credentials)?;
+    Ok((credential, updated_credentials))
+}
+
+fn prepare_bucket_tenant_removal(
+    bucket_name: &str,
+    access_key_id: Option<&str>,
+    credentials: &[Credential],
+) -> Result<(usize, Vec<Credential>), StorageError> {
+    validate_bucket_scope(bucket_name)?;
+
+    let mut updated_credentials = credentials.to_vec();
+    let before = updated_credentials.len();
+    updated_credentials.retain(|credential| {
+        if credential.bucket_name.as_deref() != Some(bucket_name) {
+            return true;
+        }
+        if let Some(access_key_id) = access_key_id {
+            credential.access_key_id != access_key_id
+        } else {
+            false
+        }
+    });
+    let removed = before.saturating_sub(updated_credentials.len());
+
+    if removed == 0 {
+        return Err(StorageError::invalid_request(
+            "No matching bucket tenant credential exists.",
+        ));
+    }
+
+    if updated_credentials.is_empty() {
+        return Err(StorageError::invalid_request(
+            "Cannot remove the last active credential.",
+        ));
+    }
+
+    Ok((removed, updated_credentials))
+}
+
 fn validate_credentials(credentials: &[Credential]) -> Result<(), StorageError> {
     if credentials.is_empty() {
         return Err(StorageError::invalid_request(
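
Note: the rewrite moves every invariant check into the pure prepare_* helpers above, so they are unit-testable without the async store or the persistence layer. A self-contained model of the replacement rule they enforce (Cred is a stand-in here, not the crate's Credential type):

// Model of the rule: at most one credential per bucket, and an
// accessKeyId may not migrate between principals. Stand-in types only.
#[derive(Clone, Debug, PartialEq)]
struct Cred {
    access_key_id: String,
    bucket: Option<String>,
}

fn replace_for_bucket(bucket: &str, new: Cred, existing: &[Cred]) -> Result<Vec<Cred>, String> {
    if existing
        .iter()
        .any(|c| c.access_key_id == new.access_key_id && c.bucket.as_deref() != Some(bucket))
    {
        return Err("accessKeyId already assigned to another principal".into());
    }
    // Drop the bucket's current credential, then append the new one.
    let mut updated: Vec<Cred> = existing
        .iter()
        .filter(|c| c.bucket.as_deref() != Some(bucket))
        .cloned()
        .collect();
    updated.push(Cred {
        bucket: Some(bucket.into()),
        ..new
    });
    Ok(updated)
}

fn main() {
    let existing = vec![Cred { access_key_id: "AK1".into(), bucket: Some("a".into()) }];
    let new = Cred { access_key_id: "AK2".into(), bucket: None };
    let updated = replace_for_bucket("a", new, &existing).unwrap();
    assert_eq!(updated.len(), 1); // replaced, not appended
    let stolen = Cred { access_key_id: "AK2".into(), bucket: None };
    assert!(replace_for_bucket("b", stolen, &updated).is_err()); // AK2 belongs to bucket "a"
}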
+3 -6
@@ -80,11 +80,7 @@ impl ErasureCoder {
         let total = self.config.total_shards();
         if shards.len() != total {
-            anyhow::bail!(
-                "Expected {} shards, got {}",
-                total,
-                shards.len()
-            );
+            anyhow::bail!("Expected {} shards, got {}", total, shards.len());
         }
 
         let available = shards.iter().filter(|s| s.is_some()).count();
@@ -159,7 +155,8 @@ mod tests {
     #[test]
     fn test_decode_with_missing_shards() {
         let coder = ErasureCoder::new(&test_config()).unwrap();
-        let original = b"Testing reconstruction with missing shards - this should work with 4 of 6.";
+        let original =
+            b"Testing reconstruction with missing shards - this should work with 4 of 6.";
         let shards = coder.encode_chunk(original).unwrap();
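
For context on the 4-of-6 behavior this test pins down: with 4 data and 2 parity shards, any four survivors can rebuild the rest. A standalone sketch using the reed-solomon-erasure crate — an assumption, since this commit does not show which backend ErasureCoder wraps:

use reed_solomon_erasure::galois_8::ReedSolomon;

fn main() -> Result<(), reed_solomon_erasure::Error> {
    // 4 data + 2 parity shards, as in the test above.
    let r = ReedSolomon::new(4, 2)?;
    let mut shards: Vec<Vec<u8>> = vec![
        vec![1, 2, 3, 4],
        vec![5, 6, 7, 8],
        vec![9, 10, 11, 12],
        vec![13, 14, 15, 16],
        vec![0; 4], // parity, filled in by encode
        vec![0; 4],
    ];
    r.encode(&mut shards)?;

    // Lose two shards, then reconstruct from the remaining four.
    let mut with_losses: Vec<Option<Vec<u8>>> = shards.iter().cloned().map(Some).collect();
    with_losses[1] = None;
    with_losses[4] = None;
    r.reconstruct(&mut with_losses)?;
    assert_eq!(with_losses[1].as_deref(), Some(&[5, 6, 7, 8][..]));
    Ok(())
}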
+7 -11
@@ -166,12 +166,7 @@ impl HealingService {
         Ok(stats)
     }
 
-    async fn heal_bucket(
-        &self,
-        bucket: &str,
-        offline_nodes: &[String],
-        stats: &mut HealStats,
-    ) {
+    async fn heal_bucket(&self, bucket: &str, offline_nodes: &[String], stats: &mut HealStats) {
         let bucket_dir = self.manifest_dir.join(bucket);
         let manifests = match self.collect_manifests(&bucket_dir).await {
             Ok(m) => m,
@@ -264,10 +259,10 @@ impl HealingService {
         }
 
         // Reconstruct all shards
-        let reconstructed = match self.erasure_coder.decode_chunk(
-            &mut shards,
-            chunk.data_size,
-        ) {
+        let reconstructed = match self
+            .erasure_coder
+            .decode_chunk(&mut shards, chunk.data_size)
+        {
             Ok(_) => true,
             Err(e) => {
                 tracing::error!(
@@ -361,7 +356,8 @@ impl HealingService {
     /// Collect all manifests under a bucket directory.
     async fn collect_manifests(&self, dir: &std::path::Path) -> Result<Vec<ObjectManifest>> {
         let mut manifests = Vec::new();
-        self.collect_manifests_recursive(dir, &mut manifests).await?;
+        self.collect_manifests_recursive(dir, &mut manifests)
+            .await?;
         Ok(manifests)
     }
+11 -12
@@ -7,8 +7,7 @@ use tokio::sync::Mutex;
 use super::drive_manager::{DriveManager, DriveStatus};
 use super::protocol::{
-    ClusterRequest, ClusterResponse, DriveStateInfo, HeartbeatMessage, JoinRequestMessage,
-    NodeInfo,
+    ClusterRequest, ClusterResponse, DriveStateInfo, HeartbeatMessage, JoinRequestMessage, NodeInfo,
 };
 use super::quic_transport::QuicTransport;
 use super::state::ClusterState;
@@ -49,7 +48,11 @@ impl MembershipManager {
     /// Join the cluster by contacting seed nodes.
     /// Sends a JoinRequest to each seed node until one accepts.
-    pub async fn join_cluster(&self, seed_nodes: &[String], allow_bootstrap_on_failure: bool) -> Result<()> {
+    pub async fn join_cluster(
+        &self,
+        seed_nodes: &[String],
+        allow_bootstrap_on_failure: bool,
+    ) -> Result<()> {
         if seed_nodes.is_empty() {
             tracing::info!("No seed nodes configured, starting as initial cluster node");
             self.state.add_node(self.local_node_info.clone()).await;
@@ -84,14 +87,13 @@ impl MembershipManager {
             return Ok(());
         }
 
-        anyhow::bail!("Could not reach any configured seed nodes; refusing unsafe cluster bootstrap")
+        anyhow::bail!(
+            "Could not reach any configured seed nodes; refusing unsafe cluster bootstrap"
+        )
     }
 
     async fn try_join(&self, addr: SocketAddr) -> Result<()> {
-        let conn = self
-            .transport
-            .get_connection("seed", addr)
-            .await?;
+        let conn = self.transport.get_connection("seed", addr).await?;
 
         let request = ClusterRequest::JoinRequest(JoinRequestMessage {
             node_info: self.local_node_info.clone(),
@@ -120,10 +122,7 @@ impl MembershipManager {
                 }
                 Ok(())
             } else {
-                anyhow::bail!(
-                    "Join rejected: {}",
-                    join_resp.error.unwrap_or_default()
-                )
+                anyhow::bail!("Join rejected: {}", join_resp.error.unwrap_or_default())
             }
         }
         ClusterResponse::Error(e) => {
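
The join flow above reduces to: try every seed in order, and fall back to a solo bootstrap only when the caller explicitly allows it. A synchronous sketch of that decision — stand-in signature; the real method is async and returns anyhow::Result:

fn join_or_bootstrap(
    seeds: &[String],
    allow_bootstrap_on_failure: bool,
    mut try_join: impl FnMut(&str) -> bool,
) -> Result<(), String> {
    if seeds.is_empty() {
        return Ok(()); // no seeds configured: legitimately the first node
    }
    if seeds.iter().any(|s| try_join(s)) {
        return Ok(()); // joined an existing cluster
    }
    if allow_bootstrap_on_failure {
        return Ok(()); // operator explicitly accepted a solo bootstrap
    }
    Err("Could not reach any configured seed nodes; refusing unsafe cluster bootstrap".into())
}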
+9 -6
@@ -113,11 +113,17 @@ mod tests {
         // Set 0 should interleave across nodes
         let set0_nodes: Vec<&str> = sets[0].drives.iter().map(|d| d.node_id.as_str()).collect();
-        assert_eq!(set0_nodes, vec!["node1", "node2", "node3", "node1", "node2", "node3"]);
+        assert_eq!(
+            set0_nodes,
+            vec!["node1", "node2", "node3", "node1", "node2", "node3"]
+        );
 
         // Set 1 should also interleave
         let set1_nodes: Vec<&str> = sets[1].drives.iter().map(|d| d.node_id.as_str()).collect();
-        assert_eq!(set1_nodes, vec!["node1", "node2", "node3", "node1", "node2", "node3"]);
+        assert_eq!(
+            set1_nodes,
+            vec!["node1", "node2", "node3", "node1", "node2", "node3"]
+        );
 
         // Drive indices should be different between sets
         let set0_drives: Vec<u32> = sets[0].drives.iter().map(|d| d.drive_index).collect();
@@ -129,10 +135,7 @@ mod tests {
     #[test]
     fn test_form_erasure_sets_remainder() {
         // 2 nodes, 3 drives each, 4 shards => 1 set (2 drives left over)
-        let nodes = vec![
-            ("a".to_string(), 3),
-            ("b".to_string(), 3),
-        ];
+        let nodes = vec![("a".to_string(), 3), ("b".to_string(), 3)];
         let sets = form_erasure_sets(&nodes, 4);
         assert_eq!(sets.len(), 1);
         assert_eq!(sets[0].drives.len(), 4);
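
Both tests pin down the dealing order: drives are interleaved round-robin across nodes (so a single node failure costs at most a few shards per set), and leftover drives that cannot fill a whole set are dropped. A self-contained sketch of that shape — form_erasure_sets' real signature and types may differ:

// Deal (node, drive) pairs round-robin across nodes, then cut whole sets.
fn deal_round_robin(nodes: &[(String, u32)], set_size: u32) -> Vec<Vec<(String, u32)>> {
    let max_drives = nodes.iter().map(|(_, d)| *d).max().unwrap_or(0);
    let mut dealt = Vec::new();
    for drive in 0..max_drives {
        for (node, drives) in nodes {
            if drive < *drives {
                dealt.push((node.clone(), drive));
            }
        }
    }
    // chunks_exact drops the remainder, matching the test above
    // (2 nodes x 3 drives, set size 4 => exactly one set).
    dealt
        .chunks_exact(set_size as usize)
        .map(|c| c.to_vec())
        .collect()
}

fn main() {
    let nodes = vec![("node1".into(), 2), ("node2".into(), 2), ("node3".into(), 2)];
    let sets = deal_round_robin(&nodes, 6);
    let order: Vec<&str> = sets[0].iter().map(|(n, _)| n.as_str()).collect();
    assert_eq!(order, vec!["node1", "node2", "node3", "node1", "node2", "node3"]);
}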
-4
@@ -13,7 +13,6 @@ pub enum ClusterRequest {
     // ============================
     // Shard operations
     // ============================
-
     /// Write a shard to a specific drive on the target node.
     /// Shard data follows after this header on the same stream.
     ShardWrite(ShardWriteRequest),
@@ -30,7 +29,6 @@ pub enum ClusterRequest {
     // ============================
     // Manifest operations
     // ============================
-
     /// Store an object manifest on the target node.
     ManifestWrite(ManifestWriteRequest),
@@ -46,7 +44,6 @@ pub enum ClusterRequest {
     // ============================
     // Cluster management
     // ============================
-
     /// Periodic heartbeat.
     Heartbeat(HeartbeatMessage),
@@ -59,7 +56,6 @@ pub enum ClusterRequest {
     // ============================
     // Healing
     // ============================
-
     /// Request a shard to be reconstructed and placed on a target drive.
     HealRequest(HealRequestMessage),
 }
+34 -22
@@ -1,14 +1,14 @@
-use super::protocol::{
-    self, ClusterRequest, ClusterResponse, ShardReadResponse, ShardWriteAck, ShardWriteRequest,
-};
-use super::shard_store::{ShardId, ShardStore};
-use super::state::{ClusterState, NodeStatus};
 use anyhow::Result;
 use dashmap::DashMap;
 use quinn::{ClientConfig, Endpoint, ServerConfig as QuinnServerConfig};
 use rustls::pki_types::{CertificateDer, PrivateKeyDer, PrivatePkcs8KeyDer};
 use std::net::SocketAddr;
 use std::sync::Arc;
+use super::protocol::{
+    self, ClusterRequest, ClusterResponse, ShardReadResponse, ShardWriteAck, ShardWriteRequest,
+};
+use super::shard_store::{ShardId, ShardStore};
+use super::state::{ClusterState, NodeStatus};
 
 /// QUIC transport layer for inter-node communication.
 ///
@@ -54,13 +54,9 @@ impl QuicTransport {
         }
 
         // Establish new connection
-        let conn = self
-            .endpoint
-            .connect(addr, "smartstorage")?
-            .await?;
+        let conn = self.endpoint.connect(addr, "smartstorage")?.await?;
 
-        self.connections
-            .insert(node_id.to_string(), conn.clone());
+        self.connections.insert(node_id.to_string(), conn.clone());
 
         Ok(conn)
     }
@@ -246,7 +242,11 @@ impl QuicTransport {
         };
 
         let result = match Self::shard_store_for_drive(&shard_stores, drive_index) {
-            Ok(store) => store.write_shard(&shard_id, &shard_data, write_req.checksum).await,
+            Ok(store) => {
+                store
+                    .write_shard(&shard_id, &shard_data, write_req.checksum)
+                    .await
+            }
             Err(error) => Err(error),
         };
@@ -272,7 +272,8 @@ impl QuicTransport {
                 let store = match Self::shard_store_for_drive(&shard_stores, drive_index) {
                     Ok(store) => store,
                     Err(error) => {
-                        Self::send_error_response(&mut send, "InvalidDrive", error.to_string()).await?;
+                        Self::send_error_response(&mut send, "InvalidDrive", error.to_string())
+                            .await?;
                         return Ok(());
                     }
                 };
@@ -286,8 +287,10 @@ impl QuicTransport {
                     checksum,
                 };
 
                 // Send header
-                let header_bytes = bincode::serialize(&ClusterResponse::ShardReadResponse(header))?;
-                send.write_all(&(header_bytes.len() as u32).to_le_bytes()).await?;
+                let header_bytes =
+                    bincode::serialize(&ClusterResponse::ShardReadResponse(header))?;
+                send.write_all(&(header_bytes.len() as u32).to_le_bytes())
+                    .await?;
                 send.write_all(&header_bytes).await?;
 
                 // Send shard data
                 send.write_all(&data).await?;
@@ -300,8 +303,10 @@ impl QuicTransport {
                     shard_data_length: 0,
                     checksum: 0,
                 };
-                let header_bytes = bincode::serialize(&ClusterResponse::ShardReadResponse(header))?;
-                send.write_all(&(header_bytes.len() as u32).to_le_bytes()).await?;
+                let header_bytes =
+                    bincode::serialize(&ClusterResponse::ShardReadResponse(header))?;
+                send.write_all(&(header_bytes.len() as u32).to_le_bytes())
+                    .await?;
                 send.write_all(&header_bytes).await?;
                 send.finish()?;
             }
@@ -340,7 +345,8 @@ impl QuicTransport {
                 let store = match Self::shard_store_for_drive(&shard_stores, drive_index) {
                     Ok(store) => store,
                     Err(error) => {
-                        Self::send_error_response(&mut send, "InvalidDrive", error.to_string()).await?;
+                        Self::send_error_response(&mut send, "InvalidDrive", error.to_string())
+                            .await?;
                         return Ok(());
                     }
                 };
@@ -403,7 +409,13 @@ impl QuicTransport {
             send.write_all(&response).await?;
             send.finish()?;
 
-            self.broadcast_topology(&state, Some(response_topology), None, Some(&joining_node_id)).await;
+            self.broadcast_topology(
+                &state,
+                Some(response_topology),
+                None,
+                Some(&joining_node_id),
+            )
+            .await;
         }
 
         ClusterRequest::Heartbeat(heartbeat) => {
@@ -434,7 +446,8 @@ impl QuicTransport {
             send.finish()?;
 
             if local_topology_version > peer_topology_version {
-                self.broadcast_topology(&state, None, Some(&peer_node_id), None).await;
+                self.broadcast_topology(&state, None, Some(&peer_node_id), None)
+                    .await;
             }
         }
@@ -585,8 +598,7 @@ impl QuicTransport {
     /// Close the QUIC endpoint gracefully.
     pub fn close(&self) {
-        self.endpoint
-            .close(quinn::VarInt::from_u32(0), b"shutdown");
+        self.endpoint.close(quinn::VarInt::from_u32(0), b"shutdown");
     }
 
     /// Get the local node ID.
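
The framing used throughout these handlers is a little-endian u32 length prefix, a bincode-serialized header, then raw payload bytes. A sketch of the receiving side over any tokio AsyncRead — the real code reads from a quinn::RecvStream:

use tokio::io::{AsyncRead, AsyncReadExt};

// Read one length-prefixed header frame; the caller bincode-deserializes
// the returned bytes into a ClusterResponse, then reads the raw payload.
async fn read_header_frame<R: AsyncRead + Unpin>(recv: &mut R) -> std::io::Result<Vec<u8>> {
    let mut len_bytes = [0u8; 4];
    recv.read_exact(&mut len_bytes).await?;
    let len = u32::from_le_bytes(len_bytes) as usize;
    let mut header = vec![0u8; len];
    recv.read_exact(&mut header).await?;
    Ok(header)
}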
+2 -11
@@ -40,12 +40,7 @@ impl ShardStore {
     }
 
     /// Write a shard to disk atomically (write to temp file, then rename).
-    pub async fn write_shard(
-        &self,
-        shard_id: &ShardId,
-        data: &[u8],
-        checksum: u32,
-    ) -> Result<()> {
+    pub async fn write_shard(&self, shard_id: &ShardId, data: &[u8], checksum: u32) -> Result<()> {
         let shard_path = self.shard_data_path(shard_id);
         let meta_path = self.shard_meta_path(shard_id);
@@ -117,11 +112,7 @@ impl ShardStore {
     }
 
     /// List all shard IDs for a given bucket and key (across all chunks).
-    pub async fn list_shards_for_object(
-        &self,
-        bucket: &str,
-        key: &str,
-    ) -> Result<Vec<ShardId>> {
+    pub async fn list_shards_for_object(&self, bucket: &str, key: &str) -> Result<Vec<ShardId>> {
         let key_dir = self.key_dir(bucket, key);
         if !key_dir.exists() {
             return Ok(Vec::new());
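
For reference, the temp-file-then-rename pattern that write_shard's doc comment promises, as a minimal standalone sketch. Paths are illustrative; the real method also takes a checksum and writes a separate metadata file:

use std::path::Path;
use tokio::fs;
use tokio::io::AsyncWriteExt;

async fn write_atomic(path: &Path, data: &[u8]) -> std::io::Result<()> {
    let tmp = path.with_extension("tmp");
    let mut file = fs::File::create(&tmp).await?;
    file.write_all(data).await?;
    file.sync_all().await?; // flush to disk before the rename publishes it
    fs::rename(&tmp, path).await?; // atomic on POSIX filesystems
    Ok(())
}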
+13 -11
 use std::path::PathBuf;
 use std::sync::Arc;
 use tokio::sync::RwLock;
 
-use super::placement::{DriveLocation, ErasureSet};
 use super::persistence;
-use super::protocol::{ClusterTopology, ErasureSetInfo, DriveLocationInfo, NodeInfo};
+use super::placement::{DriveLocation, ErasureSet};
+use super::protocol::{ClusterTopology, DriveLocationInfo, ErasureSetInfo, NodeInfo};
 
 /// Node status for tracking liveness.
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub enum NodeStatus {
     Online,
-    Suspect,    // missed 2+ heartbeats
-    Offline,    // missed 5+ heartbeats
+    Suspect, // missed 2+ heartbeats
+    Offline, // missed 5+ heartbeats
 }
/// Tracked state for a peer node.
@@ -162,11 +162,8 @@ impl ClusterState {
         if inner.erasure_sets.is_empty() {
             return None;
         }
-        let set_idx = super::placement::erasure_set_for_object(
-            bucket,
-            key,
-            inner.erasure_sets.len() as u32,
-        );
+        let set_idx =
+            super::placement::erasure_set_for_object(bucket, key, inner.erasure_sets.len() as u32);
         inner.erasure_sets.get(set_idx as usize).cloned()
     }
@@ -284,7 +281,10 @@ impl ClusterState {
         let now = chrono::Utc::now();
 
         for node_info in &topology.nodes {
-            let existing_status = inner.nodes.get(&node_info.node_id).map(|node| node.status.clone());
+            let existing_status = inner
+                .nodes
+                .get(&node_info.node_id)
+                .map(|node| node.status.clone());
             let existing_missed_heartbeats = inner
                 .nodes
                 .get(&node_info.node_id)
@@ -304,7 +304,9 @@ impl ClusterState {
             );
         }
 
-        inner.nodes.retain(|node_id, _| topology.nodes.iter().any(|node| &node.node_id == node_id));
+        inner
+            .nodes
+            .retain(|node_id, _| topology.nodes.iter().any(|node| &node.node_id == node_id));
 
         // Update erasure sets
         inner.erasure_sets = topology
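
erasure_set_for_object only has to be deterministic and agreed on by every node. A sketch of the shape (the actual hash in placement.rs is not shown in this commit; note that std's DefaultHasher is not stable across Rust releases, so a real implementation should pin an algorithm such as CRC32 or xxHash):

use std::hash::{Hash, Hasher};

// Stable-in-one-binary placement: hash (bucket, key), modulo the set count.
// Callers must guarantee set_count > 0, as the is_empty() check above does.
fn erasure_set_for_object(bucket: &str, key: &str, set_count: u32) -> u32 {
    let mut hasher = std::collections::hash_map::DefaultHasher::new();
    bucket.hash(&mut hasher);
    key.hash(&mut hasher);
    (hasher.finish() % set_count as u64) as u32
}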
+25 -5
@@ -18,15 +18,27 @@ impl StorageError {
}
pub fn no_such_key() -> Self {
Self::new("NoSuchKey", "The specified key does not exist.", StatusCode::NOT_FOUND)
Self::new(
"NoSuchKey",
"The specified key does not exist.",
StatusCode::NOT_FOUND,
)
}
pub fn no_such_bucket() -> Self {
Self::new("NoSuchBucket", "The specified bucket does not exist", StatusCode::NOT_FOUND)
Self::new(
"NoSuchBucket",
"The specified bucket does not exist",
StatusCode::NOT_FOUND,
)
}
pub fn bucket_not_empty() -> Self {
Self::new("BucketNotEmpty", "The bucket you tried to delete is not empty", StatusCode::CONFLICT)
Self::new(
"BucketNotEmpty",
"The bucket you tried to delete is not empty",
StatusCode::CONFLICT,
)
}
pub fn access_denied() -> Self {
@@ -34,11 +46,19 @@ impl StorageError {
     }
 
     pub fn no_such_upload() -> Self {
-        Self::new("NoSuchUpload", "The specified upload does not exist", StatusCode::NOT_FOUND)
+        Self::new(
+            "NoSuchUpload",
+            "The specified upload does not exist",
+            StatusCode::NOT_FOUND,
+        )
     }
 
     pub fn invalid_part_number() -> Self {
-        Self::new("InvalidPartNumber", "Part number must be between 1 and 10000", StatusCode::BAD_REQUEST)
+        Self::new(
+            "InvalidPartNumber",
+            "Part number must be between 1 and 10000",
+            StatusCode::BAD_REQUEST,
+        )
     }
 
     pub fn internal_error(msg: &str) -> Self {
+5 -2
@@ -2,9 +2,9 @@ mod action;
 mod auth;
 mod cluster;
 mod config;
+mod error;
 mod management;
 mod policy;
-mod error;
 mod server;
 mod storage;
 mod xml_response;
@@ -12,7 +12,10 @@ mod xml_response;
 use clap::Parser;
 
 #[derive(Parser)]
-#[command(name = "ruststorage", about = "High-performance S3-compatible storage server")]
+#[command(
+    name = "ruststorage",
+    about = "High-performance S3-compatible storage server"
+)]
 struct Cli {
     /// Run in management mode (IPC via stdin/stdout)
     #[arg(long)]
+15 -5
@@ -266,10 +266,16 @@ pub async fn management_loop() -> Result<()> {
             }
             "listBucketTenants" => {
                 if let Some(ref s) = server {
-                    match serde_json::to_value(s.list_bucket_tenants().await) {
-                        Ok(value) => send_response(id, value),
+                    match s.list_bucket_tenants().await {
+                        Ok(tenants) => match serde_json::to_value(tenants) {
+                            Ok(value) => send_response(id, value),
+                            Err(error) => send_error(
+                                id,
+                                format!("Failed to serialize bucket tenants: {}", error),
+                            ),
+                        },
                         Err(error) => {
-                            send_error(id, format!("Failed to serialize bucket tenants: {}", error))
+                            send_error(id, format!("Failed to list bucket tenants: {}", error))
                         }
                     }
                 } else {
@@ -287,20 +293,24 @@ pub async fn management_loop() -> Result<()> {
                 Ok(params) => {
                     if let Some(ref s) = server {
                         match s.get_bucket_tenant_credential(&params.bucket_name).await {
-                            Some(credential) => match serde_json::to_value(credential) {
+                            Ok(Some(credential)) => match serde_json::to_value(credential) {
                                 Ok(value) => send_response(id, value),
                                 Err(error) => send_error(
                                     id,
                                     format!("Failed to serialize bucket tenant: {}", error),
                                 ),
                             },
-                            None => send_error(
+                            Ok(None) => send_error(
                                 id,
                                 format!(
                                     "No bucket tenant credential exists for bucket {}",
                                     params.bucket_name
                                 ),
                             ),
+                            Err(error) => send_error(
+                                id,
+                                format!("Failed to get bucket tenant credential: {}", error),
+                            ),
                         }
                     } else {
                         send_error(id, "Server not started".to_string());
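
With the Result plumbed through, a disabled tenant-auth runtime now surfaces as an IPC error instead of a serialization failure. A hypothetical exchange, assuming newline-delimited JSON with id/method/params on the way in and id plus result-or-error on the way out — the exact wire format and field names are not shown in this diff:

-> {"id": 7, "method": "getBucketTenantCredential", "params": {"bucketName": "media"}}
<- {"id": 7, "error": "No bucket tenant credential exists for bucket media"}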
+7 -5
@@ -81,14 +81,14 @@ where
     let raw = PrincipalRaw::deserialize(deserializer)?;
     match raw {
         PrincipalRaw::Star(s) if s == "*" => Ok(Principal::Wildcard),
-        PrincipalRaw::Star(_) => Err(serde::de::Error::custom(
-            "Principal string must be \"*\"",
-        )),
+        PrincipalRaw::Star(_) => Err(serde::de::Error::custom("Principal string must be \"*\"")),
         PrincipalRaw::Map(map) => {
             if let Some(aws) = map.get("AWS") {
                 Ok(Principal::Aws(aws.clone().into_vec()))
             } else {
-                Err(serde::de::Error::custom("Principal map must contain \"AWS\" key"))
+                Err(serde::de::Error::custom(
+                    "Principal map must contain \"AWS\" key",
+                ))
             }
         }
     }
@@ -286,7 +286,9 @@ const MAX_POLICY_SIZE: usize = 20 * 1024; // 20 KB
 pub fn validate_policy(json: &str) -> Result<BucketPolicy, StorageError> {
     if json.len() > MAX_POLICY_SIZE {
-        return Err(StorageError::malformed_policy("Policy exceeds maximum size of 20KB"));
+        return Err(StorageError::malformed_policy(
+            "Policy exceeds maximum size of 20KB",
+        ));
     }
 
     let policy: BucketPolicy =
+47 -11
@@ -195,11 +195,27 @@ impl StorageServer {
         credential: Credential,
     ) -> Result<Credential> {
         self.ensure_tenant_auth_enabled()?;
-        self.store.create_bucket(bucket_name).await?;
-        Ok(self
+        let replacement = self
             .auth_runtime
-            .replace_bucket_tenant_credential(bucket_name, credential)
-            .await?)
+            .replace_bucket_tenant_credential_with_snapshot(bucket_name, credential)
+            .await?;
+
+        if let Err(error) = self.store.create_bucket(bucket_name).await {
+            if let Err(rollback_error) = self
+                .auth_runtime
+                .replace_credentials(replacement.previous_credentials)
+                .await
+            {
+                return Err(anyhow::anyhow!(
+                    "Failed to create tenant bucket: {}; credential rollback failed: {}",
+                    error,
+                    rollback_error.message
+                ));
+            }
+            return Err(error);
+        }
+
+        Ok(replacement.credential)
     }
pub async fn rotate_bucket_tenant_credentials(
@@ -223,23 +239,43 @@ impl StorageServer {
         access_key_id: Option<&str>,
     ) -> Result<()> {
         self.ensure_tenant_auth_enabled()?;
-        self.auth_runtime
+        let removal = self
+            .auth_runtime
             .remove_bucket_tenant_credentials(bucket_name, access_key_id)
             .await?;
 
         if access_key_id.is_none() && self.store.bucket_exists(bucket_name).await {
-            self.store.delete_bucket_recursive(bucket_name).await?;
+            if let Err(error) = self.store.delete_bucket_recursive(bucket_name).await {
+                if let Err(rollback_error) = self
+                    .auth_runtime
+                    .replace_credentials(removal.previous_credentials)
+                    .await
+                {
+                    return Err(anyhow::anyhow!(
+                        "Failed to delete tenant bucket: {}; credential rollback failed: {}",
+                        error,
+                        rollback_error.message
+                    ));
+                }
+                return Err(error);
+            }
         }
 
         Ok(())
     }
 
-    pub async fn list_bucket_tenants(&self) -> Vec<crate::auth::BucketTenantMetadata> {
-        self.auth_runtime.list_bucket_tenants().await
+    pub async fn list_bucket_tenants(&self) -> Result<Vec<crate::auth::BucketTenantMetadata>> {
+        self.ensure_tenant_auth_enabled()?;
+        Ok(self.auth_runtime.list_bucket_tenants().await)
     }
 
-    pub async fn get_bucket_tenant_credential(&self, bucket_name: &str) -> Option<Credential> {
-        self.auth_runtime
+    pub async fn get_bucket_tenant_credential(
+        &self,
+        bucket_name: &str,
+    ) -> Result<Option<Credential>> {
+        self.ensure_tenant_auth_enabled()?;
+        Ok(self
+            .auth_runtime
             .get_bucket_tenant_credential(bucket_name)
-            .await
+            .await)
     }
fn ensure_tenant_auth_enabled(&self) -> Result<()> {
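
Both lifecycle methods now follow the same compensation pattern: snapshot the credentials, apply the change, attempt the dependent bucket side effect, and restore the snapshot if it fails. A self-contained model of the pattern — stand-in types, not the crate's:

// Snapshot-then-compensate: the credential change is undone when the
// bucket operation cannot be completed, so neither outlives the other.
fn create_tenant(
    creds: &mut Vec<String>,
    new_cred: String,
    make_bucket: impl Fn() -> Result<(), String>,
) -> Result<(), String> {
    let snapshot = creds.clone(); // taken before any mutation
    creds.push(new_cred);
    if let Err(e) = make_bucket() {
        *creds = snapshot; // roll the credential change back
        return Err(format!("Failed to create tenant bucket: {e}"));
    }
    Ok(())
}

fn main() {
    let mut creds = vec!["root".to_string()];
    let result = create_tenant(&mut creds, "tenant-a".into(), || Err("disk full".into()));
    assert!(result.is_err());
    assert_eq!(creds, vec!["root".to_string()]); // snapshot restored
}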
+26 -5
@@ -1576,15 +1576,36 @@ impl StorageBackend {
             return Err(StorageError::invalid_request("Unsupported bucket export format.").into());
         }
 
+        let mut import_objects = Vec::with_capacity(source.objects.len());
+        for object in source.objects {
+            let data = hex::decode(&object.data_hex)
+                .map_err(|error| StorageError::invalid_request(&error.to_string()))?;
+            if data.len() as u64 != object.size {
+                return Err(StorageError::invalid_request(&format!(
+                    "Bucket export object '{}' size does not match payload.",
+                    object.key
+                ))
+                .into());
+            }
+            let md5_hex = format!("{:x}", Md5::digest(&data));
+            if !object.md5.eq_ignore_ascii_case(&md5_hex) {
+                return Err(StorageError::invalid_request(&format!(
+                    "Bucket export object '{}' md5 does not match payload.",
+                    object.key
+                ))
+                .into());
+            }
+            import_objects.push((object.key, data, object.metadata));
+        }
+
         if !self.bucket_exists(bucket).await {
             self.create_bucket(bucket).await?;
         }
 
-        for object in source.objects {
-            let data = hex::decode(&object.data_hex)
-                .map_err(|error| StorageError::invalid_request(&error.to_string()))?;
-            self.put_object_bytes(bucket, &object.key, &data, object.metadata)
-                .await?;
+        for (key, data, metadata) in import_objects {
+            self.put_object_bytes(bucket, &key, &data, metadata).await?;
         }
 
         Ok(())
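
The import path now validates every payload's size and MD5 against the manifest before any bucket is created or any object is written. For a well-formed entry, the exporter side has to produce fields like these — a sketch using the hex and md-5 crates, which the decoding above implies; the export struct's exact field names beyond data_hex/size/md5/key are not shown here:

use md5::{Digest, Md5};

fn main() {
    let data = b"hello world";
    let data_hex = hex::encode(data); // -> object.data_hex
    let size = data.len() as u64; // -> object.size
    let md5 = format!("{:x}", Md5::digest(data)); // -> object.md5

    // The importer recomputes both checks from data_hex alone:
    let decoded = hex::decode(&data_hex).unwrap();
    assert_eq!(decoded.len() as u64, size);
    assert_eq!(format!("{:x}", Md5::digest(&decoded)), md5);
}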
+8 -2
@@ -47,7 +47,10 @@ pub fn list_objects_v1_xml(bucket: &str, result: &ListObjectsResult) -> String {
     );
 
     if !result.delimiter.is_empty() {
-        xml.push_str(&format!("<Delimiter>{}</Delimiter>", xml_escape(&result.delimiter)));
+        xml.push_str(&format!(
+            "<Delimiter>{}</Delimiter>",
+            xml_escape(&result.delimiter)
+        ));
     }
 
     for entry in &result.contents {
@@ -95,7 +98,10 @@ pub fn list_objects_v2_xml(bucket: &str, result: &ListObjectsResult) -> String {
     );
 
     if !result.delimiter.is_empty() {
-        xml.push_str(&format!("<Delimiter>{}</Delimiter>", xml_escape(&result.delimiter)));
+        xml.push_str(&format!(
+            "<Delimiter>{}</Delimiter>",
+            xml_escape(&result.delimiter)
+        ));
     }
 
     if let Some(ref token) = result.next_continuation_token {