feat(auth): add AWS SigV4 authentication and bucket policy support

This commit is contained in:
2026-02-17 16:50:04 +00:00
parent adf45dce2d
commit e36758f183
7 changed files with 72 additions and 85 deletions

View File

@@ -1,6 +1,4 @@
use hyper::{Response, StatusCode};
use http_body_util::Full;
use bytes::Bytes;
use hyper::StatusCode;
#[derive(Debug, thiserror::Error)]
#[error("S3Error({code}): {message}")]
@@ -105,14 +103,4 @@ impl S3Error {
self.code, self.message
)
}
/// Render this error as an S3-style XML HTTP response.
///
/// The body is the XML produced by `to_xml()`; the `x-amz-request-id`
/// header echoes the caller-supplied request id so clients can
/// correlate a failure with server logs.
///
/// NOTE(review): assumes `request_id` is a valid HTTP header value
/// (no CR/LF etc.) — confirm it is always server-generated.
pub fn to_response(&self, request_id: &str) -> Response<Full<Bytes>> {
    let xml = self.to_xml();
    Response::builder()
        .status(self.status)
        .header("content-type", "application/xml")
        .header("x-amz-request-id", request_id)
        .body(Full::new(Bytes::from(xml)))
        // Status and header names above are statically valid, so the
        // builder can only fail on a malformed request id (see NOTE).
        .expect("error response parts are valid")
}
}

View File

@@ -28,7 +28,6 @@ use crate::xml_response;
/// A running S3-compatible server together with the handles needed to
/// signal shutdown and join its background task.
pub struct S3Server {
/// Backing object store shared with request handlers.
store: Arc<FileStore>,
/// Server configuration (contents defined elsewhere in the crate).
config: S3Config,
/// Watch channel sender; presumably flips to `true` to request
/// shutdown of the accept loop — confirm against the run loop.
shutdown_tx: watch::Sender<bool>,
/// Join handle for the spawned server task.
server_handle: tokio::task::JoinHandle<()>,
}
@@ -110,7 +109,6 @@ impl S3Server {
Ok(Self {
store,
config,
shutdown_tx,
server_handle,
})

View File

@@ -17,12 +17,10 @@ use crate::s3_error::S3Error;
// ============================
/// Outcome of a successful object write.
pub struct PutResult {
/// Number of bytes stored.
pub size: u64,
/// Hex-encoded MD5 digest of the stored bytes.
pub md5: String,
}
pub struct GetResult {
pub key: String,
pub size: u64,
pub last_modified: DateTime<Utc>,
pub md5: String,
@@ -32,7 +30,6 @@ pub struct GetResult {
}
pub struct HeadResult {
pub key: String,
pub size: u64,
pub last_modified: DateTime<Utc>,
pub md5: String,
@@ -40,7 +37,6 @@ pub struct HeadResult {
}
/// Outcome of a server-side object copy.
pub struct CopyResult {
/// Size in bytes of the destination object (from file metadata).
pub size: u64,
/// Hex-encoded MD5 digest of the object (set by the copy routine).
pub md5: String,
/// Modification time of the destination file.
pub last_modified: DateTime<Utc>,
}
@@ -69,14 +65,12 @@ pub struct BucketInfo {
/// Descriptor for an in-progress multipart upload.
pub struct MultipartUploadInfo {
/// Server-assigned identifier for this upload.
pub upload_id: String,
/// Target bucket name.
pub bucket: String,
/// Target object key.
pub key: String,
/// When the upload was initiated.
pub initiated: DateTime<Utc>,
}
/// Outcome of completing a multipart upload.
pub struct CompleteMultipartResult {
/// ETag of the assembled object.
pub etag: String,
/// Total size in bytes of the assembled object (sum of its parts).
pub size: u64,
}
// ============================
@@ -126,10 +120,6 @@ impl FileStore {
self.root_dir.join(".policies")
}
/// Filesystem location of the stored policy document for `bucket`.
pub fn policy_path(&self, bucket: &str) -> PathBuf {
    let file_name = format!("{}.policy.json", bucket);
    self.policies_dir().join(file_name)
}
pub async fn reset(&self) -> Result<()> {
if self.root_dir.exists() {
fs::remove_dir_all(&self.root_dir).await?;
@@ -220,7 +210,6 @@ impl FileStore {
let file = fs::File::create(&object_path).await?;
let mut writer = BufWriter::new(file);
let mut hasher = Md5::new();
let mut total_size: u64 = 0;
// Stream body frames directly to file
let mut body = body;
@@ -229,7 +218,6 @@ impl FileStore {
Some(Ok(frame)) => {
if let Ok(data) = frame.into_data() {
hasher.update(&data);
total_size += data.len() as u64;
writer.write_all(&data).await?;
}
}
@@ -255,44 +243,6 @@ impl FileStore {
fs::write(&metadata_path, metadata_json).await?;
Ok(PutResult {
size: total_size,
md5: md5_hex,
})
}
/// Store `data` under `bucket`/`key`, along with its MD5 and metadata
/// sidecar files.
///
/// Returns the byte count written and the hex-encoded MD5 digest.
/// Fails with `NoSuchBucket` when the target bucket does not exist;
/// I/O or serialization failures propagate via `?`.
pub async fn put_object_bytes(
    &self,
    bucket: &str,
    key: &str,
    data: &[u8],
    metadata: HashMap<String, String>,
) -> Result<PutResult> {
    // Reject writes into buckets that were never created.
    if !self.bucket_exists(bucket).await {
        return Err(S3Error::no_such_bucket().into());
    }

    // Hex MD5 of the full payload, computed up front.
    let digest = {
        let mut hasher = Md5::new();
        hasher.update(data);
        format!("{:x}", hasher.finalize())
    };

    // Ensure the object's parent directory exists, then write the bytes.
    let object_path = self.object_path(bucket, key);
    if let Some(parent) = object_path.parent() {
        fs::create_dir_all(parent).await?;
    }
    fs::write(&object_path, data).await?;

    // MD5 sidecar next to the object.
    fs::write(format!("{}.md5", object_path.display()), &digest).await?;

    // Metadata sidecar (pretty-printed JSON).
    let metadata_json = serde_json::to_string_pretty(&metadata)?;
    fs::write(format!("{}.metadata.json", object_path.display()), metadata_json).await?;

    Ok(PutResult {
        size: data.len() as u64,
        md5: digest,
    })
}
@@ -326,7 +276,6 @@ impl FileStore {
};
Ok(GetResult {
key: key.to_string(),
size,
last_modified,
md5,
@@ -352,7 +301,6 @@ impl FileStore {
let metadata = self.read_metadata(&object_path).await;
Ok(HeadResult {
key: key.to_string(),
size,
last_modified,
md5,
@@ -439,7 +387,6 @@ impl FileStore {
let last_modified: DateTime<Utc> = file_meta.modified()?.into();
Ok(CopyResult {
size: file_meta.len(),
md5,
last_modified,
})
@@ -672,7 +619,6 @@ impl FileStore {
let dest_file = fs::File::create(&object_path).await?;
let mut writer = BufWriter::new(dest_file);
let mut hasher = Md5::new();
let mut total_size: u64 = 0;
for (part_number, _etag) in parts {
let part_path = upload_dir.join(format!("part-{}", part_number));
@@ -689,7 +635,6 @@ impl FileStore {
}
hasher.update(&buf[..n]);
writer.write_all(&buf[..n]).await?;
total_size += n as u64;
}
}
@@ -712,7 +657,6 @@ impl FileStore {
Ok(CompleteMultipartResult {
etag,
size: total_size,
})
}
@@ -752,7 +696,6 @@ impl FileStore {
uploads.push(MultipartUploadInfo {
upload_id: meta.upload_id,
bucket: meta.bucket,
key: meta.key,
initiated,
});

View File

@@ -132,15 +132,6 @@ pub fn list_objects_v2_xml(bucket: &str, result: &ListObjectsResult) -> String {
xml
}
/// Build a minimal `<Error>` XML document carrying `code` and `message`.
/// Both values are XML-escaped before interpolation.
pub fn error_xml(code: &str, message: &str) -> String {
    let escaped_code = xml_escape(code);
    let escaped_message = xml_escape(message);
    format!(
        "{}\n<Error><Code>{}</Code><Message>{}</Message></Error>",
        XML_DECL, escaped_code, escaped_message
    )
}
pub fn copy_object_result_xml(etag: &str, last_modified: &str) -> String {
format!(
"{}\n<CopyObjectResult>\