feat(cluster): add clustered storage backend with QUIC transport, erasure coding, and shard management
This commit is contained in:
85
rust/src/cluster/metadata.rs
Normal file
85
rust/src/cluster/metadata.rs
Normal file
@@ -0,0 +1,85 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
|
||||
/// Full manifest describing how an object is stored across erasure-coded shards.
///
/// This is the per-version metadata record for one stored object: identity
/// (`bucket`/`key`/`version_id`), content properties (size, MD5, content type,
/// user metadata), timestamps, the erasure-coding geometry chosen at write
/// time (`data_shards`/`parity_shards`/`chunk_size`), and the per-chunk shard
/// placement map (`chunks`) used to locate shard data on read.
///
/// Serialized with camelCase field names (e.g. `versionId`, `contentMd5`),
/// so the Rust field names are not the wire names.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ObjectManifest {
    /// Bucket name
    pub bucket: String,
    /// Object key
    pub key: String,
    /// Unique version ID for this write
    pub version_id: String,
    /// Total object size in bytes
    pub size: u64,
    /// MD5 hex digest of the complete object
    pub content_md5: String,
    /// Content type
    pub content_type: String,
    /// User metadata (x-amz-meta-*, content-type, etc.)
    pub metadata: HashMap<String, String>,
    /// When the object was created
    /// (string-encoded; exact timestamp format not visible in this file)
    pub created_at: String,
    /// Last modified timestamp (string-encoded, same format as `created_at`)
    pub last_modified: String,
    /// Number of data shards used
    pub data_shards: usize,
    /// Number of parity shards used
    pub parity_shards: usize,
    /// Chunk size in bytes (last chunk may be smaller)
    pub chunk_size: usize,
    /// Per-chunk shard placement info, one entry per chunk of the object
    pub chunks: Vec<ChunkManifest>,
}
|
||||
|
||||
/// Describes the shards for a single chunk of an object.
///
/// Each chunk is independently erasure-coded into
/// `data_shards + parity_shards` shards (counts recorded on the parent
/// manifest), and `shard_placements` records where each of those shards
/// lives.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ChunkManifest {
    /// Index of this chunk (0-based)
    pub chunk_index: u32,
    /// Actual data size of this chunk in bytes (before erasure coding);
    /// may be smaller than the parent manifest's `chunk_size` for the last chunk
    pub data_size: usize,
    /// Where each shard was placed (node/drive per shard index)
    pub shard_placements: Vec<ShardPlacement>,
}
|
||||
|
||||
/// Describes where a specific shard is stored.
///
/// One entry per shard of a chunk: which node and drive hold the shard,
/// plus a checksum and size for integrity verification on read.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ShardPlacement {
    /// Shard index within the erasure set (0..data_shards+parity_shards)
    pub shard_index: u32,
    /// Node that holds this shard
    pub node_id: String,
    /// Drive ID on that node
    pub drive_id: String,
    /// CRC32C checksum of the shard data
    pub checksum: u32,
    /// Size of the shard data in bytes
    pub shard_size: usize,
}
|
||||
|
||||
/// Manifest for a multipart upload in progress.
///
/// Tracks an uncompleted upload: its identity, the object destination,
/// the metadata supplied at initiation, and every part uploaded so far.
/// On completion the parts' chunk manifests presumably get assembled into
/// a final `ObjectManifest` — TODO confirm against the completion path.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct MultipartUploadManifest {
    /// Unique identifier for this multipart upload
    pub upload_id: String,
    /// Destination bucket name
    pub bucket: String,
    /// Destination object key
    pub key: String,
    /// When the upload was initiated
    /// (string-encoded; exact timestamp format not visible in this file)
    pub initiated: String,
    /// User metadata supplied when the upload was initiated
    pub metadata: HashMap<String, String>,
    /// Per-part manifests, keyed by part number.
    pub parts: HashMap<u32, PartManifest>,
}
|
||||
|
||||
/// Manifest for a single part of a multipart upload.
///
/// Each part is erasure-coded into chunks the same way a whole object is;
/// see [`ChunkManifest`] for the per-chunk placement records.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct PartManifest {
    /// Part number; also the key of this entry in
    /// `MultipartUploadManifest::parts`
    pub part_number: u32,
    /// Size of this part's data in bytes
    pub size: u64,
    /// MD5 digest of this part's data
    /// (presumably a hex string like `ObjectManifest::content_md5` — verify)
    pub md5: String,
    /// Per-chunk shard placement info for this part
    pub chunks: Vec<ChunkManifest>,
}
|
||||
Reference in New Issue
Block a user