feat(bucket-tenants): add persisted bucket-scoped tenant credentials with bucket export and import APIs
This commit is contained in:
@@ -99,6 +99,25 @@ pub struct StorageStats {
|
||||
pub storage_locations: Vec<StorageLocationSummary>,
|
||||
}
|
||||
|
||||
/// Self-contained, portable snapshot of a bucket's full contents.
///
/// Produced by `StorageBackend::export_bucket` and consumed by
/// `StorageBackend::import_bucket`; object payloads travel inline as hex
/// inside `objects`, so the snapshot needs no side files.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct BucketExport {
    // Format tag; import_bucket rejects anything other than
    // "smartstorage.bucket.v1".
    pub format: String,
    // Name of the bucket the snapshot was taken from.
    pub bucket_name: String,
    // Export timestamp as Unix epoch milliseconds (UTC).
    pub exported_at: i64,
    // One entry per object, payload included.
    pub objects: Vec<BucketExportObject>,
}
|
||||
|
||||
/// A single object captured inside a `BucketExport` snapshot.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct BucketExportObject {
    // Object key within the bucket.
    pub key: String,
    // Payload size in bytes, as reported by the storage backend.
    pub size: u64,
    // Hex MD5 digest of the payload, as stored alongside the object.
    pub md5: String,
    // User metadata key/value pairs attached to the object.
    pub metadata: HashMap<String, String>,
    // The raw payload, hex-encoded so the snapshot is valid JSON text.
    pub data_hex: String,
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct ClusterPeerHealth {
|
||||
@@ -593,6 +612,40 @@ impl FileStore {
|
||||
Ok(PutResult { md5: md5_hex })
|
||||
}
|
||||
|
||||
pub async fn put_object_bytes(
|
||||
&self,
|
||||
bucket: &str,
|
||||
key: &str,
|
||||
data: &[u8],
|
||||
metadata: HashMap<String, String>,
|
||||
) -> Result<PutResult> {
|
||||
if !self.bucket_exists(bucket).await {
|
||||
return Err(StorageError::no_such_bucket().into());
|
||||
}
|
||||
|
||||
let previous_size = self.object_size_if_exists(bucket, key).await;
|
||||
let object_path = self.object_path(bucket, key);
|
||||
if let Some(parent) = object_path.parent() {
|
||||
fs::create_dir_all(parent).await?;
|
||||
}
|
||||
|
||||
fs::write(&object_path, data).await?;
|
||||
let md5_hex = format!("{:x}", Md5::digest(data));
|
||||
fs::write(format!("{}.md5", object_path.display()), &md5_hex).await?;
|
||||
|
||||
let metadata_json = serde_json::to_string_pretty(&metadata)?;
|
||||
fs::write(
|
||||
format!("{}.metadata.json", object_path.display()),
|
||||
metadata_json,
|
||||
)
|
||||
.await?;
|
||||
|
||||
self.track_object_upsert(bucket, previous_size, data.len() as u64)
|
||||
.await;
|
||||
|
||||
Ok(PutResult { md5: md5_hex })
|
||||
}
|
||||
|
||||
pub async fn get_object(
|
||||
&self,
|
||||
bucket: &str,
|
||||
@@ -1311,6 +1364,25 @@ impl StorageBackend {
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn delete_bucket_recursive(&self, bucket: &str) -> Result<()> {
|
||||
if !self.bucket_exists(bucket).await {
|
||||
return Err(StorageError::no_such_bucket().into());
|
||||
}
|
||||
|
||||
loop {
|
||||
let objects = self.list_objects(bucket, "", "", 1000, None).await?;
|
||||
if objects.contents.is_empty() {
|
||||
break;
|
||||
}
|
||||
|
||||
for object in objects.contents {
|
||||
self.delete_object(bucket, &object.key).await?;
|
||||
}
|
||||
}
|
||||
|
||||
self.delete_bucket(bucket).await
|
||||
}
|
||||
|
||||
pub async fn put_object(
|
||||
&self,
|
||||
bucket: &str,
|
||||
@@ -1324,6 +1396,21 @@ impl StorageBackend {
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn put_object_bytes(
|
||||
&self,
|
||||
bucket: &str,
|
||||
key: &str,
|
||||
data: &[u8],
|
||||
metadata: HashMap<String, String>,
|
||||
) -> Result<PutResult> {
|
||||
match self {
|
||||
StorageBackend::Standalone(fs) => {
|
||||
fs.put_object_bytes(bucket, key, data, metadata).await
|
||||
}
|
||||
StorageBackend::Clustered(ds) => ds.put_object_bytes(bucket, key, data, metadata).await,
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn get_object(
|
||||
&self,
|
||||
bucket: &str,
|
||||
@@ -1453,6 +1540,55 @@ impl StorageBackend {
|
||||
StorageBackend::Clustered(ds) => ds.list_multipart_uploads(bucket).await,
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn export_bucket(&self, bucket: &str) -> Result<BucketExport> {
|
||||
if !self.bucket_exists(bucket).await {
|
||||
return Err(StorageError::no_such_bucket().into());
|
||||
}
|
||||
|
||||
let objects = self.list_objects(bucket, "", "", usize::MAX, None).await?;
|
||||
let mut exported_objects = Vec::with_capacity(objects.contents.len());
|
||||
|
||||
for object in objects.contents {
|
||||
let result = self.get_object(bucket, &object.key, None).await?;
|
||||
let mut file = result.body;
|
||||
let mut data = Vec::with_capacity(result.size as usize);
|
||||
file.read_to_end(&mut data).await?;
|
||||
exported_objects.push(BucketExportObject {
|
||||
key: object.key,
|
||||
size: result.size,
|
||||
md5: result.md5,
|
||||
metadata: result.metadata,
|
||||
data_hex: hex::encode(data),
|
||||
});
|
||||
}
|
||||
|
||||
Ok(BucketExport {
|
||||
format: "smartstorage.bucket.v1".to_string(),
|
||||
bucket_name: bucket.to_string(),
|
||||
exported_at: Utc::now().timestamp_millis(),
|
||||
objects: exported_objects,
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn import_bucket(&self, bucket: &str, source: BucketExport) -> Result<()> {
|
||||
if source.format != "smartstorage.bucket.v1" {
|
||||
return Err(StorageError::invalid_request("Unsupported bucket export format.").into());
|
||||
}
|
||||
|
||||
if !self.bucket_exists(bucket).await {
|
||||
self.create_bucket(bucket).await?;
|
||||
}
|
||||
|
||||
for object in source.objects {
|
||||
let data = hex::decode(&object.data_hex)
|
||||
.map_err(|error| StorageError::invalid_request(&error.to_string()))?;
|
||||
self.put_object_bytes(bucket, &object.key, &data, object.metadata)
|
||||
.await?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
// ============================
|
||||
|
||||
Reference in New Issue
Block a user