feat(auth,policy): add AWS SigV4 authentication and S3 bucket policy support
This commit is contained in:
45
rust/Cargo.lock
generated
45
rust/Cargo.lock
generated
@@ -197,6 +197,15 @@ version = "0.8.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b"
|
||||
|
||||
[[package]]
|
||||
name = "cpufeatures"
|
||||
version = "0.2.17"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280"
|
||||
dependencies = [
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crypto-common"
|
||||
version = "0.1.7"
|
||||
@@ -215,6 +224,7 @@ checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292"
|
||||
dependencies = [
|
||||
"block-buffer",
|
||||
"crypto-common",
|
||||
"subtle",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -302,6 +312,21 @@ version = "0.5.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
|
||||
|
||||
[[package]]
|
||||
name = "hex"
|
||||
version = "0.4.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
|
||||
|
||||
[[package]]
|
||||
name = "hmac"
|
||||
version = "0.12.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e"
|
||||
dependencies = [
|
||||
"digest",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "http"
|
||||
version = "1.4.0"
|
||||
@@ -748,6 +773,8 @@ dependencies = [
|
||||
"chrono",
|
||||
"clap",
|
||||
"futures-core",
|
||||
"hex",
|
||||
"hmac",
|
||||
"http-body-util",
|
||||
"hyper",
|
||||
"hyper-util",
|
||||
@@ -756,6 +783,7 @@ dependencies = [
|
||||
"quick-xml",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"sha2",
|
||||
"thiserror",
|
||||
"tokio",
|
||||
"tokio-util",
|
||||
@@ -820,6 +848,17 @@ dependencies = [
|
||||
"zmij",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "sha2"
|
||||
version = "0.10.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"cpufeatures",
|
||||
"digest",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "sharded-slab"
|
||||
version = "0.1.7"
|
||||
@@ -873,6 +912,12 @@ version = "0.11.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f"
|
||||
|
||||
[[package]]
|
||||
name = "subtle"
|
||||
version = "2.6.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292"
|
||||
|
||||
[[package]]
|
||||
name = "syn"
|
||||
version = "2.0.115"
|
||||
|
||||
@@ -28,3 +28,6 @@ percent-encoding = "2"
|
||||
url = "2"
|
||||
chrono = { version = "0.4", features = ["serde"] }
|
||||
futures-core = "0.3"
|
||||
hmac = "0.12"
|
||||
sha2 = "0.10"
|
||||
hex = "0.4"
|
||||
|
||||
172
rust/src/action.rs
Normal file
172
rust/src/action.rs
Normal file
@@ -0,0 +1,172 @@
|
||||
use hyper::body::Incoming;
|
||||
use hyper::{Method, Request};
|
||||
use std::collections::HashMap;
|
||||
|
||||
/// S3 actions that map to IAM permission strings.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum S3Action {
    ListAllMyBuckets,
    CreateBucket,
    DeleteBucket,
    HeadBucket,
    ListBucket,
    GetObject,
    HeadObject,
    PutObject,
    DeleteObject,
    CopyObject,
    ListBucketMultipartUploads,
    AbortMultipartUpload,
    InitiateMultipartUpload,
    UploadPart,
    CompleteMultipartUpload,
    GetBucketPolicy,
    PutBucketPolicy,
    DeleteBucketPolicy,
}

impl S3Action {
    /// Return the IAM-style action string (e.g. "s3:GetObject").
    ///
    /// Several wire-level operations are governed by the same IAM permission,
    /// so arms are grouped by the permission string they map to.
    pub fn iam_action(&self) -> &'static str {
        match self {
            S3Action::ListAllMyBuckets => "s3:ListAllMyBuckets",
            S3Action::CreateBucket => "s3:CreateBucket",
            S3Action::DeleteBucket => "s3:DeleteBucket",
            // HEAD Bucket is authorized by the ListBucket permission.
            S3Action::HeadBucket | S3Action::ListBucket => "s3:ListBucket",
            // HEAD Object is authorized by the GetObject permission.
            S3Action::GetObject | S3Action::HeadObject => "s3:GetObject",
            // All object-write variants fall under PutObject.
            S3Action::PutObject
            | S3Action::CopyObject
            | S3Action::InitiateMultipartUpload
            | S3Action::UploadPart
            | S3Action::CompleteMultipartUpload => "s3:PutObject",
            S3Action::DeleteObject => "s3:DeleteObject",
            S3Action::ListBucketMultipartUploads => "s3:ListBucketMultipartUploads",
            S3Action::AbortMultipartUpload => "s3:AbortMultipartUpload",
            S3Action::GetBucketPolicy => "s3:GetBucketPolicy",
            S3Action::PutBucketPolicy => "s3:PutBucketPolicy",
            S3Action::DeleteBucketPolicy => "s3:DeleteBucketPolicy",
        }
    }
}
|
||||
|
||||
/// Context extracted from a request, used for policy evaluation.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct RequestContext {
|
||||
pub action: S3Action,
|
||||
pub bucket: Option<String>,
|
||||
pub key: Option<String>,
|
||||
}
|
||||
|
||||
impl RequestContext {
|
||||
/// Build the ARN for this request's resource.
|
||||
pub fn resource_arn(&self) -> String {
|
||||
match (&self.bucket, &self.key) {
|
||||
(Some(bucket), Some(key)) => format!("arn:aws:s3:::{}/{}", bucket, key),
|
||||
(Some(bucket), None) => format!("arn:aws:s3:::{}", bucket),
|
||||
_ => "arn:aws:s3:::*".to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Resolve the S3 action from an incoming HTTP request.
|
||||
pub fn resolve_action(req: &Request<Incoming>) -> RequestContext {
|
||||
let method = req.method().clone();
|
||||
let path = req.uri().path().to_string();
|
||||
let query_string = req.uri().query().unwrap_or("").to_string();
|
||||
let query = parse_query_simple(&query_string);
|
||||
|
||||
let segments: Vec<&str> = path
|
||||
.trim_start_matches('/')
|
||||
.splitn(2, '/')
|
||||
.filter(|s| !s.is_empty())
|
||||
.collect();
|
||||
|
||||
match segments.len() {
|
||||
0 => {
|
||||
// Root: GET / -> ListBuckets
|
||||
RequestContext {
|
||||
action: S3Action::ListAllMyBuckets,
|
||||
bucket: None,
|
||||
key: None,
|
||||
}
|
||||
}
|
||||
1 => {
|
||||
let bucket = percent_decode(segments[0]);
|
||||
let has_policy = query.contains_key("policy");
|
||||
let has_uploads = query.contains_key("uploads");
|
||||
|
||||
let action = match (&method, has_policy, has_uploads) {
|
||||
(&Method::GET, true, _) => S3Action::GetBucketPolicy,
|
||||
(&Method::PUT, true, _) => S3Action::PutBucketPolicy,
|
||||
(&Method::DELETE, true, _) => S3Action::DeleteBucketPolicy,
|
||||
(&Method::GET, _, true) => S3Action::ListBucketMultipartUploads,
|
||||
(&Method::GET, _, _) => S3Action::ListBucket,
|
||||
(&Method::PUT, _, _) => S3Action::CreateBucket,
|
||||
(&Method::DELETE, _, _) => S3Action::DeleteBucket,
|
||||
(&Method::HEAD, _, _) => S3Action::HeadBucket,
|
||||
_ => S3Action::ListBucket,
|
||||
};
|
||||
|
||||
RequestContext {
|
||||
action,
|
||||
bucket: Some(bucket),
|
||||
key: None,
|
||||
}
|
||||
}
|
||||
2 => {
|
||||
let bucket = percent_decode(segments[0]);
|
||||
let key = percent_decode(segments[1]);
|
||||
|
||||
let has_copy_source = req.headers().contains_key("x-amz-copy-source");
|
||||
let has_part_number = query.contains_key("partNumber");
|
||||
let has_upload_id = query.contains_key("uploadId");
|
||||
let has_uploads = query.contains_key("uploads");
|
||||
|
||||
let action = match &method {
|
||||
&Method::PUT if has_part_number && has_upload_id => S3Action::UploadPart,
|
||||
&Method::PUT if has_copy_source => S3Action::CopyObject,
|
||||
&Method::PUT => S3Action::PutObject,
|
||||
&Method::GET => S3Action::GetObject,
|
||||
&Method::HEAD => S3Action::HeadObject,
|
||||
&Method::DELETE if has_upload_id => S3Action::AbortMultipartUpload,
|
||||
&Method::DELETE => S3Action::DeleteObject,
|
||||
&Method::POST if has_uploads => S3Action::InitiateMultipartUpload,
|
||||
&Method::POST if has_upload_id => S3Action::CompleteMultipartUpload,
|
||||
_ => S3Action::GetObject,
|
||||
};
|
||||
|
||||
RequestContext {
|
||||
action,
|
||||
bucket: Some(bucket),
|
||||
key: Some(key),
|
||||
}
|
||||
}
|
||||
_ => RequestContext {
|
||||
action: S3Action::ListAllMyBuckets,
|
||||
bucket: None,
|
||||
key: None,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/// Parse a raw query string into a key -> value map.
///
/// No percent-decoding is performed; a parameter without '=' maps to "".
fn parse_query_simple(query_string: &str) -> HashMap<String, String> {
    if query_string.is_empty() {
        return HashMap::new();
    }
    query_string
        .split('&')
        .map(|pair| {
            let (key, value) = pair.split_once('=').unwrap_or((pair, ""));
            (key.to_string(), value.to_string())
        })
        .collect()
}
|
||||
|
||||
/// Percent-decode a path segment; invalid UTF-8 sequences are replaced
/// with U+FFFD via `decode_utf8_lossy`.
fn percent_decode(s: &str) -> String {
    percent_encoding::percent_decode_str(s)
        .decode_utf8_lossy()
        .to_string()
}
|
||||
310
rust/src/auth.rs
Normal file
310
rust/src/auth.rs
Normal file
@@ -0,0 +1,310 @@
|
||||
use hmac::{Hmac, Mac};
|
||||
use hyper::body::Incoming;
|
||||
use hyper::Request;
|
||||
use sha2::{Digest, Sha256};
|
||||
use std::collections::HashMap;
|
||||
|
||||
use crate::config::{Credential, S3Config};
|
||||
use crate::s3_error::S3Error;
|
||||
|
||||
type HmacSha256 = Hmac<Sha256>;
|
||||
|
||||
/// The identity of an authenticated caller.
#[derive(Debug, Clone)]
pub struct AuthenticatedIdentity {
    /// The access key ID that signed the request; used downstream for
    /// policy principal matching.
    pub access_key_id: String,
}
|
||||
|
||||
/// Parsed components of an AWS4-HMAC-SHA256 Authorization header.
struct SigV4Header {
    /// From the Credential scope: KEY/...
    access_key_id: String,
    /// Scope date, YYYYMMDD.
    date_stamp: String,
    /// Scope region (taken from the header, not validated against config).
    region: String,
    /// Lowercased SignedHeaders names, in the order the client listed them.
    signed_headers: Vec<String>,
    /// Hex-encoded signature claimed by the client.
    signature: String,
}
|
||||
|
||||
/// Verify the request's SigV4 signature. Returns the caller identity on success.
///
/// Steps: parse the Authorization header, look up the credential, enforce the
/// 15-minute clock-skew window, rebuild the canonical request and
/// string-to-sign, re-derive the signature with the stored secret, and
/// compare against the claimed signature in constant time.
///
/// # Errors
/// - `authorization_header_malformed` for SigV2 ("AWS ") or unparseable headers
/// - `invalid_access_key_id` when the access key is unknown
/// - `missing_security_header` when neither x-amz-date nor date is present
/// - `request_time_too_skewed` when the timestamp is off by more than 15 min
/// - `signature_does_not_match` when the signatures differ
pub fn verify_request(
    req: &Request<Incoming>,
    config: &S3Config,
) -> Result<AuthenticatedIdentity, S3Error> {
    let auth_header = req
        .headers()
        .get("authorization")
        .and_then(|v| v.to_str().ok())
        .unwrap_or("");

    // Reject legacy SigV2 ("AWS access_key:signature") outright.
    if auth_header.starts_with("AWS ") {
        return Err(S3Error::authorization_header_malformed());
    }

    if !auth_header.starts_with("AWS4-HMAC-SHA256") {
        return Err(S3Error::authorization_header_malformed());
    }

    let parsed = parse_auth_header(auth_header)?;

    // Look up the credential for the claimed access key.
    let credential = find_credential(&parsed.access_key_id, config)
        .ok_or_else(S3Error::invalid_access_key_id)?;

    // Timestamp: prefer x-amz-date, fall back to the standard Date header.
    let amz_date = req
        .headers()
        .get("x-amz-date")
        .and_then(|v| v.to_str().ok())
        .or_else(|| {
            req.headers()
                .get("date")
                .and_then(|v| v.to_str().ok())
        })
        .ok_or_else(|| S3Error::missing_security_header("Missing x-amz-date header"))?;

    // Enforce 15-min clock skew.
    check_clock_skew(amz_date)?;

    // Payload hash. NOTE(review): a missing x-amz-content-sha256 header is
    // treated as UNSIGNED-PAYLOAD, which is more permissive than AWS S3
    // (which rejects such requests) — confirm this is intentional.
    let content_sha256 = req
        .headers()
        .get("x-amz-content-sha256")
        .and_then(|v| v.to_str().ok())
        .unwrap_or("UNSIGNED-PAYLOAD");

    // Build canonical request.
    let canonical_request = build_canonical_request(req, &parsed.signed_headers, content_sha256);

    // Build string to sign: algorithm \n timestamp \n scope \n hash(canonical).
    let scope = format!(
        "{}/{}/s3/aws4_request",
        parsed.date_stamp, parsed.region
    );
    let canonical_hash = hex::encode(Sha256::digest(canonical_request.as_bytes()));
    let string_to_sign = format!(
        "AWS4-HMAC-SHA256\n{}\n{}\n{}",
        amz_date, scope, canonical_hash
    );

    // Derive the signing key from the stored secret and the credential scope.
    // Note: the scope's date/region come from the client header, unvalidated.
    let signing_key = derive_signing_key(
        &credential.secret_access_key,
        &parsed.date_stamp,
        &parsed.region,
    );

    // Compute the expected signature.
    let computed = hmac_sha256(&signing_key, string_to_sign.as_bytes());
    let computed_hex = hex::encode(&computed);

    // Constant-time comparison to avoid leaking how many bytes matched.
    if !constant_time_eq(computed_hex.as_bytes(), parsed.signature.as_bytes()) {
        return Err(S3Error::signature_does_not_match());
    }

    Ok(AuthenticatedIdentity {
        access_key_id: parsed.access_key_id,
    })
}
|
||||
|
||||
/// Parse the Authorization header into its components.
|
||||
fn parse_auth_header(header: &str) -> Result<SigV4Header, S3Error> {
|
||||
// Format: AWS4-HMAC-SHA256 Credential=KEY/YYYYMMDD/region/s3/aws4_request, SignedHeaders=h1;h2, Signature=hex
|
||||
let after_algo = header
|
||||
.strip_prefix("AWS4-HMAC-SHA256")
|
||||
.ok_or_else(S3Error::authorization_header_malformed)?
|
||||
.trim();
|
||||
|
||||
let mut credential_str = None;
|
||||
let mut signed_headers_str = None;
|
||||
let mut signature_str = None;
|
||||
|
||||
for part in after_algo.split(',') {
|
||||
let part = part.trim();
|
||||
if let Some(val) = part.strip_prefix("Credential=") {
|
||||
credential_str = Some(val.trim());
|
||||
} else if let Some(val) = part.strip_prefix("SignedHeaders=") {
|
||||
signed_headers_str = Some(val.trim());
|
||||
} else if let Some(val) = part.strip_prefix("Signature=") {
|
||||
signature_str = Some(val.trim());
|
||||
}
|
||||
}
|
||||
|
||||
let credential_str = credential_str
|
||||
.ok_or_else(S3Error::authorization_header_malformed)?;
|
||||
let signed_headers_str = signed_headers_str
|
||||
.ok_or_else(S3Error::authorization_header_malformed)?;
|
||||
let signature = signature_str
|
||||
.ok_or_else(S3Error::authorization_header_malformed)?
|
||||
.to_string();
|
||||
|
||||
// Parse credential: KEY/YYYYMMDD/region/s3/aws4_request
|
||||
let cred_parts: Vec<&str> = credential_str.splitn(5, '/').collect();
|
||||
if cred_parts.len() < 5 {
|
||||
return Err(S3Error::authorization_header_malformed());
|
||||
}
|
||||
|
||||
let access_key_id = cred_parts[0].to_string();
|
||||
let date_stamp = cred_parts[1].to_string();
|
||||
let region = cred_parts[2].to_string();
|
||||
|
||||
let signed_headers: Vec<String> = signed_headers_str
|
||||
.split(';')
|
||||
.map(|s| s.trim().to_lowercase())
|
||||
.collect();
|
||||
|
||||
Ok(SigV4Header {
|
||||
access_key_id,
|
||||
date_stamp,
|
||||
region,
|
||||
signed_headers,
|
||||
signature,
|
||||
})
|
||||
}
|
||||
|
||||
/// Find a credential by access key ID.
|
||||
fn find_credential<'a>(access_key_id: &str, config: &'a S3Config) -> Option<&'a Credential> {
|
||||
config
|
||||
.auth
|
||||
.credentials
|
||||
.iter()
|
||||
.find(|c| c.access_key_id == access_key_id)
|
||||
}
|
||||
|
||||
/// Check clock skew (15 minutes max).
|
||||
fn check_clock_skew(amz_date: &str) -> Result<(), S3Error> {
|
||||
// Parse ISO 8601 basic format: YYYYMMDDTHHMMSSZ
|
||||
let parsed = chrono::NaiveDateTime::parse_from_str(amz_date, "%Y%m%dT%H%M%SZ")
|
||||
.map_err(|_| S3Error::authorization_header_malformed())?;
|
||||
|
||||
let request_time = chrono::DateTime::<chrono::Utc>::from_naive_utc_and_offset(parsed, chrono::Utc);
|
||||
let now = chrono::Utc::now();
|
||||
let diff = (now - request_time).num_seconds().unsigned_abs();
|
||||
|
||||
if diff > 15 * 60 {
|
||||
return Err(S3Error::request_time_too_skewed());
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Build the canonical request string.
|
||||
fn build_canonical_request(
|
||||
req: &Request<Incoming>,
|
||||
signed_headers: &[String],
|
||||
payload_hash: &str,
|
||||
) -> String {
|
||||
let method = req.method().as_str();
|
||||
let uri_path = req.uri().path();
|
||||
|
||||
// Canonical URI: the path, already percent-encoded by the client
|
||||
let canonical_uri = if uri_path.is_empty() { "/" } else { uri_path };
|
||||
|
||||
// Canonical query string: sorted key=value pairs
|
||||
let canonical_query = build_canonical_query(req.uri().query().unwrap_or(""));
|
||||
|
||||
// Canonical headers: sorted by lowercase header name
|
||||
let canonical_headers = build_canonical_headers(req, signed_headers);
|
||||
|
||||
// Signed headers string
|
||||
let signed_headers_str = signed_headers.join(";");
|
||||
|
||||
// Payload hash — accept UNSIGNED-PAYLOAD and STREAMING-AWS4-HMAC-SHA256-PAYLOAD as-is
|
||||
let effective_payload_hash = if payload_hash == "UNSIGNED-PAYLOAD"
|
||||
|| payload_hash == "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
|
||||
{
|
||||
payload_hash.to_string()
|
||||
} else {
|
||||
payload_hash.to_string()
|
||||
};
|
||||
|
||||
format!(
|
||||
"{}\n{}\n{}\n{}\n{}\n{}",
|
||||
method,
|
||||
canonical_uri,
|
||||
canonical_query,
|
||||
canonical_headers,
|
||||
signed_headers_str,
|
||||
effective_payload_hash
|
||||
)
|
||||
}
|
||||
|
||||
/// Build the canonical query string: pairs sorted by key (then value),
/// joined with '&'. A bare key without '=' becomes "key=".
fn build_canonical_query(query: &str) -> String {
    if query.is_empty() {
        return String::new();
    }

    // Split into (key, value) pairs, then sort lexicographically.
    let mut pairs: Vec<(String, String)> = query
        .split('&')
        .map(|pair| {
            let (key, value) = pair.split_once('=').unwrap_or((pair, ""));
            (key.to_string(), value.to_string())
        })
        .collect();
    pairs.sort();

    let rendered: Vec<String> = pairs
        .into_iter()
        .map(|(k, v)| format!("{}={}", k, v))
        .collect();
    rendered.join("&")
}
|
||||
|
||||
/// Build the canonical headers string: one "name:value\n" line for each
/// entry of `signed_headers`, in the order that slice gives them.
/// Multi-valued headers are joined with ','; a signed-but-absent header
/// yields an empty value.
///
/// NOTE(review): values are trimmed but internal runs of spaces are not
/// collapsed, which strict SigV4 canonicalization calls for — confirm the
/// clients used here never send such values.
fn build_canonical_headers(req: &Request<Incoming>, signed_headers: &[String]) -> String {
    // Gather values of every signed header, keyed by lowercase name.
    let mut header_map: HashMap<String, Vec<String>> = HashMap::new();

    for (name, value) in req.headers() {
        let name_lower = name.as_str().to_lowercase();
        if signed_headers.contains(&name_lower) {
            // Non-UTF-8 header values are silently skipped.
            if let Ok(val) = value.to_str() {
                header_map
                    .entry(name_lower)
                    .or_default()
                    .push(val.trim().to_string());
            }
        }
    }

    // Emit lines following the SignedHeaders order, not HeaderMap order.
    let mut result = String::new();
    for header_name in signed_headers {
        let values = header_map
            .get(header_name)
            .map(|v| v.join(","))
            .unwrap_or_default();
        result.push_str(header_name);
        result.push(':');
        result.push_str(&values);
        result.push('\n');
    }
    result
}
|
||||
|
||||
/// Derive the SigV4 signing key via the 4-step HMAC chain:
/// HMAC("AWS4" + secret, date) -> region -> service ("s3") -> "aws4_request".
fn derive_signing_key(secret_key: &str, date_stamp: &str, region: &str) -> Vec<u8> {
    let k_secret = format!("AWS4{}", secret_key);
    let k_date = hmac_sha256(k_secret.as_bytes(), date_stamp.as_bytes());
    let k_region = hmac_sha256(&k_date, region.as_bytes());
    let k_service = hmac_sha256(&k_region, b"s3");
    hmac_sha256(&k_service, b"aws4_request")
}
|
||||
|
||||
/// Compute HMAC-SHA256 of `data` with `key`, returning the raw 32-byte MAC.
fn hmac_sha256(key: &[u8], data: &[u8]) -> Vec<u8> {
    // HMAC accepts keys of any length, so new_from_slice cannot fail here.
    let mut mac = HmacSha256::new_from_slice(key).expect("HMAC key length is always valid");
    mac.update(data);
    mac.finalize().into_bytes().to_vec()
}
|
||||
|
||||
/// Constant-time byte comparison for equal-length inputs.
///
/// The length check itself short-circuits, but the signatures compared here
/// are fixed-length hex strings, so that branch leaks nothing useful.
fn constant_time_eq(a: &[u8], b: &[u8]) -> bool {
    a.len() == b.len()
        && a.iter()
            .zip(b.iter())
            .fold(0u8, |acc, (x, y)| acc | (x ^ y))
            == 0
}
|
||||
@@ -18,6 +18,12 @@ pub struct ServerConfig {
|
||||
pub port: u16,
|
||||
pub address: String,
|
||||
pub silent: bool,
|
||||
#[serde(default = "default_region")]
|
||||
pub region: String,
|
||||
}
|
||||
|
||||
/// Serde default for `ServerConfig::region`.
fn default_region() -> String {
    String::from("us-east-1")
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
|
||||
@@ -1,5 +1,8 @@
|
||||
mod action;
|
||||
mod auth;
|
||||
mod config;
|
||||
mod management;
|
||||
mod policy;
|
||||
mod s3_error;
|
||||
mod server;
|
||||
mod storage;
|
||||
|
||||
429
rust/src/policy.rs
Normal file
429
rust/src/policy.rs
Normal file
@@ -0,0 +1,429 @@
|
||||
use serde::{Deserialize, Deserializer, Serialize};
|
||||
use std::collections::HashMap;
|
||||
use std::path::PathBuf;
|
||||
use tokio::fs;
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
use crate::action::RequestContext;
|
||||
use crate::auth::AuthenticatedIdentity;
|
||||
use crate::s3_error::S3Error;
|
||||
|
||||
// ============================
|
||||
// Policy data model
|
||||
// ============================
|
||||
|
||||
/// An S3 bucket policy document (IAM policy language, JSON).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BucketPolicy {
    /// Policy language version; validation accepts only "2012-10-17".
    #[serde(rename = "Version")]
    pub version: String,
    /// Policy statements; during evaluation any matching Deny wins.
    #[serde(rename = "Statement")]
    pub statements: Vec<PolicyStatement>,
}
|
||||
|
||||
/// One policy statement: who (Principal) may or may not (Effect) perform
/// which actions (Action) on which resources (Resource).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PolicyStatement {
    /// Optional statement identifier; omitted from serialized JSON when None.
    #[serde(rename = "Sid", default, skip_serializing_if = "Option::is_none")]
    pub sid: Option<String>,
    #[serde(rename = "Effect")]
    pub effect: PolicyEffect,
    /// Accepts "*" or a {"AWS": ...} map on input.
    #[serde(rename = "Principal", deserialize_with = "deserialize_principal")]
    pub principal: Principal,
    /// A single action string or a list; normalized to a Vec on input.
    #[serde(rename = "Action", deserialize_with = "deserialize_string_or_vec")]
    pub action: Vec<String>,
    /// A single resource ARN or a list; normalized to a Vec on input.
    #[serde(rename = "Resource", deserialize_with = "deserialize_string_or_vec")]
    pub resource: Vec<String>,
}
|
||||
|
||||
/// A statement's effect; serialized as "Allow" / "Deny".
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum PolicyEffect {
    Allow,
    Deny,
}
|
||||
|
||||
/// Statement principal.
#[derive(Debug, Clone)]
pub enum Principal {
    /// JSON "*": matches any caller (see `principal_matches`).
    Wildcard,
    /// JSON {"AWS": ...}: one or more AWS principal identifiers.
    Aws(Vec<String>),
}
|
||||
|
||||
impl Serialize for Principal {
    /// Serialize back to the same JSON shapes accepted on input:
    /// `Wildcard` -> "*"; `Aws` -> {"AWS": "id"} for a single id,
    /// {"AWS": [ids]} for several.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        match self {
            Principal::Wildcard => serializer.serialize_str("*"),
            Principal::Aws(ids) => {
                use serde::ser::SerializeMap;
                let mut map = serializer.serialize_map(Some(1))?;
                // A single id round-trips as a plain string, not a
                // one-element list, matching common policy JSON style.
                if ids.len() == 1 {
                    map.serialize_entry("AWS", &ids[0])?;
                } else {
                    map.serialize_entry("AWS", ids)?;
                }
                map.end()
            }
        }
    }
}
|
||||
|
||||
/// Deserialize a Principal from either the string "*" or a map with an
/// "AWS" key holding a string or a list of strings.
fn deserialize_principal<'de, D>(deserializer: D) -> Result<Principal, D::Error>
where
    D: Deserializer<'de>,
{
    // Untagged helper: serde tries the string form first, then the map form.
    #[derive(Deserialize)]
    #[serde(untagged)]
    enum PrincipalRaw {
        Star(String),
        Map(HashMap<String, StringOrVec>),
    }

    let raw = PrincipalRaw::deserialize(deserializer)?;
    match raw {
        PrincipalRaw::Star(s) if s == "*" => Ok(Principal::Wildcard),
        // Any bare string other than "*" is rejected.
        PrincipalRaw::Star(_) => Err(serde::de::Error::custom(
            "Principal string must be \"*\"",
        )),
        PrincipalRaw::Map(map) => {
            // Only the "AWS" key is supported; other keys are ignored when
            // "AWS" is present.
            if let Some(aws) = map.get("AWS") {
                Ok(Principal::Aws(aws.clone().into_vec()))
            } else {
                Err(serde::de::Error::custom("Principal map must contain \"AWS\" key"))
            }
        }
    }
}
|
||||
|
||||
/// Helper for JSON fields that may be a single string or a list of strings.
#[derive(Debug, Clone, Deserialize)]
#[serde(untagged)]
enum StringOrVec {
    Single(String),
    Multiple(Vec<String>),
}

impl StringOrVec {
    /// Normalize to a Vec: a single string becomes a one-element list.
    fn into_vec(self) -> Vec<String> {
        match self {
            StringOrVec::Single(s) => vec![s],
            StringOrVec::Multiple(v) => v,
        }
    }
}
|
||||
|
||||
/// serde adapter: deserialize a string-or-list field into a `Vec<String>`.
fn deserialize_string_or_vec<'de, D>(deserializer: D) -> Result<Vec<String>, D::Error>
where
    D: Deserializer<'de>,
{
    let raw = StringOrVec::deserialize(deserializer)?;
    Ok(raw.into_vec())
}
|
||||
|
||||
// ============================
|
||||
// Policy evaluation
|
||||
// ============================
|
||||
|
||||
/// Outcome of evaluating a policy against one request.
#[derive(Debug, Clone, PartialEq)]
pub enum PolicyDecision {
    /// At least one Allow statement matched and no Deny did.
    Allow,
    /// An explicit Deny statement matched.
    Deny,
    /// No statement matched; the caller applies its own default.
    NoOpinion,
}
|
||||
|
||||
/// Evaluate a bucket policy against a request context and caller identity.
|
||||
pub fn evaluate_policy(
|
||||
policy: &BucketPolicy,
|
||||
ctx: &RequestContext,
|
||||
identity: Option<&AuthenticatedIdentity>,
|
||||
) -> PolicyDecision {
|
||||
let resource_arn = ctx.resource_arn();
|
||||
let iam_action = ctx.action.iam_action();
|
||||
let mut has_allow = false;
|
||||
|
||||
for stmt in &policy.statements {
|
||||
// Check principal match
|
||||
if !principal_matches(&stmt.principal, identity) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Check action match
|
||||
if !action_matches(&stmt.action, iam_action) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Check resource match
|
||||
if !resource_matches(&stmt.resource, &resource_arn, ctx.bucket.as_deref()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Statement matches — apply effect
|
||||
match stmt.effect {
|
||||
PolicyEffect::Deny => return PolicyDecision::Deny,
|
||||
PolicyEffect::Allow => has_allow = true,
|
||||
}
|
||||
}
|
||||
|
||||
if has_allow {
|
||||
PolicyDecision::Allow
|
||||
} else {
|
||||
PolicyDecision::NoOpinion
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if the principal matches the caller.
|
||||
fn principal_matches(principal: &Principal, identity: Option<&AuthenticatedIdentity>) -> bool {
|
||||
match principal {
|
||||
Principal::Wildcard => true,
|
||||
Principal::Aws(ids) => {
|
||||
if let Some(id) = identity {
|
||||
ids.iter().any(|arn| {
|
||||
// Match against full ARN or just the access key ID
|
||||
arn == "*" || arn.ends_with(&id.access_key_id)
|
||||
})
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Check whether any policy action matches the request action.
///
/// Supports "*" and "s3:*" wildcards, exact case-insensitive matches, and
/// trailing-'*' prefix patterns such as "s3:Get*" (also case-insensitive).
fn action_matches(policy_actions: &[String], request_action: &str) -> bool {
    // Lowercased once, reused for every prefix-wildcard comparison.
    let request_lower = request_action.to_lowercase();
    policy_actions.iter().any(|pa| {
        if pa == "*" || pa == "s3:*" || pa.eq_ignore_ascii_case(request_action) {
            return true;
        }
        pa.strip_suffix('*')
            .map_or(false, |prefix| request_lower.starts_with(&prefix.to_lowercase()))
    })
}
|
||||
|
||||
/// Check if the resource matches. Supports wildcard patterns.
|
||||
fn resource_matches(policy_resources: &[String], request_arn: &str, bucket: Option<&str>) -> bool {
|
||||
for pr in policy_resources {
|
||||
if pr == "*" {
|
||||
return true;
|
||||
}
|
||||
if arn_pattern_matches(pr, request_arn) {
|
||||
return true;
|
||||
}
|
||||
// Also check bucket-level ARN if the request is for an object
|
||||
if let Some(b) = bucket {
|
||||
let bucket_arn = format!("arn:aws:s3:::{}", b);
|
||||
if arn_pattern_matches(pr, &bucket_arn) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
|
||||
/// Simple ARN pattern matching with `*` and `?` wildcards.
|
||||
fn arn_pattern_matches(pattern: &str, value: &str) -> bool {
|
||||
// Handle trailing /* specifically: arn:aws:s3:::bucket/* matches arn:aws:s3:::bucket/anything
|
||||
if pattern.ends_with("/*") {
|
||||
let prefix = &pattern[..pattern.len() - 1]; // Remove trailing *
|
||||
if value.starts_with(prefix) {
|
||||
return true;
|
||||
}
|
||||
// Also match exact bucket without trailing /
|
||||
let bucket_only = &pattern[..pattern.len() - 2];
|
||||
if value == bucket_only {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
simple_wildcard_match(pattern, value)
|
||||
}
|
||||
|
||||
/// Glob-style byte match: `*` matches any run of bytes (including empty),
/// `?` matches exactly one byte. Classic two-pointer scan that backtracks
/// to the most recent `*` on mismatch.
fn simple_wildcard_match(pattern: &str, value: &str) -> bool {
    let pat = pattern.as_bytes();
    let val = value.as_bytes();
    let (mut p, mut v) = (0usize, 0usize);
    // Index of the last '*' seen, and the value position its match restarts from.
    let mut last_star = usize::MAX;
    let mut backtrack_v = 0usize;

    while v < val.len() {
        if p < pat.len() && (pat[p] == b'?' || pat[p] == val[v]) {
            // Direct match: advance both cursors.
            p += 1;
            v += 1;
        } else if p < pat.len() && pat[p] == b'*' {
            // Record the star and tentatively let it match the empty string.
            last_star = p;
            backtrack_v = v;
            p += 1;
        } else if last_star != usize::MAX {
            // Mismatch: grow the last star's match by one byte and retry.
            p = last_star + 1;
            backtrack_v += 1;
            v = backtrack_v;
        } else {
            return false;
        }
    }

    // Only trailing stars may remain unconsumed in the pattern.
    while p < pat.len() && pat[p] == b'*' {
        p += 1;
    }
    p == pat.len()
}
|
||||
|
||||
// ============================
|
||||
// Policy validation
|
||||
// ============================
|
||||
|
||||
const MAX_POLICY_SIZE: usize = 20 * 1024; // 20 KB
|
||||
|
||||
pub fn validate_policy(json: &str) -> Result<BucketPolicy, S3Error> {
|
||||
if json.len() > MAX_POLICY_SIZE {
|
||||
return Err(S3Error::malformed_policy("Policy exceeds maximum size of 20KB"));
|
||||
}
|
||||
|
||||
let policy: BucketPolicy =
|
||||
serde_json::from_str(json).map_err(|e| S3Error::malformed_policy(&e.to_string()))?;
|
||||
|
||||
if policy.version != "2012-10-17" {
|
||||
return Err(S3Error::malformed_policy(
|
||||
"Policy version must be \"2012-10-17\"",
|
||||
));
|
||||
}
|
||||
|
||||
if policy.statements.is_empty() {
|
||||
return Err(S3Error::malformed_policy(
|
||||
"Policy must contain at least one statement",
|
||||
));
|
||||
}
|
||||
|
||||
for (i, stmt) in policy.statements.iter().enumerate() {
|
||||
if stmt.action.is_empty() {
|
||||
return Err(S3Error::malformed_policy(&format!(
|
||||
"Statement {} has no actions",
|
||||
i
|
||||
)));
|
||||
}
|
||||
for action in &stmt.action {
|
||||
if action != "*" && !action.starts_with("s3:") {
|
||||
return Err(S3Error::malformed_policy(&format!(
|
||||
"Action \"{}\" must start with \"s3:\"",
|
||||
action
|
||||
)));
|
||||
}
|
||||
}
|
||||
if stmt.resource.is_empty() {
|
||||
return Err(S3Error::malformed_policy(&format!(
|
||||
"Statement {} has no resources",
|
||||
i
|
||||
)));
|
||||
}
|
||||
for resource in &stmt.resource {
|
||||
if resource != "*" && !resource.starts_with("arn:aws:s3:::") {
|
||||
return Err(S3Error::malformed_policy(&format!(
|
||||
"Resource \"{}\" must start with \"arn:aws:s3:::\"",
|
||||
resource
|
||||
)));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(policy)
|
||||
}
|
||||
|
||||
// ============================
|
||||
// PolicyStore — in-memory cache + disk
|
||||
// ============================
|
||||
|
||||
pub struct PolicyStore {
|
||||
policies: RwLock<HashMap<String, BucketPolicy>>,
|
||||
policies_dir: PathBuf,
|
||||
}
|
||||
|
||||
impl PolicyStore {
|
||||
pub fn new(policies_dir: PathBuf) -> Self {
|
||||
Self {
|
||||
policies: RwLock::new(HashMap::new()),
|
||||
policies_dir,
|
||||
}
|
||||
}
|
||||
|
||||
/// Load all policies from disk into cache.
|
||||
pub async fn load_from_disk(&self) -> anyhow::Result<()> {
|
||||
let dir = &self.policies_dir;
|
||||
if !dir.exists() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let mut entries = fs::read_dir(dir).await?;
|
||||
let mut policies = HashMap::new();
|
||||
|
||||
while let Some(entry) = entries.next_entry().await? {
|
||||
let name = entry.file_name().to_string_lossy().to_string();
|
||||
if let Some(bucket) = name.strip_suffix(".policy.json") {
|
||||
match fs::read_to_string(entry.path()).await {
|
||||
Ok(json) => match serde_json::from_str::<BucketPolicy>(&json) {
|
||||
Ok(policy) => {
|
||||
tracing::info!("Loaded policy for bucket: {}", bucket);
|
||||
policies.insert(bucket.to_string(), policy);
|
||||
}
|
||||
Err(e) => {
|
||||
tracing::warn!("Failed to parse policy for {}: {}", bucket, e);
|
||||
}
|
||||
},
|
||||
Err(e) => {
|
||||
tracing::warn!("Failed to read policy file {}: {}", name, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let mut cache = self.policies.write().await;
|
||||
*cache = policies;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Get a policy for a bucket.
|
||||
pub async fn get_policy(&self, bucket: &str) -> Option<BucketPolicy> {
|
||||
let cache = self.policies.read().await;
|
||||
cache.get(bucket).cloned()
|
||||
}
|
||||
|
||||
/// Store a policy for a bucket (atomic write + cache update).
|
||||
pub async fn put_policy(&self, bucket: &str, policy: BucketPolicy) -> anyhow::Result<()> {
|
||||
let json = serde_json::to_string_pretty(&policy)?;
|
||||
|
||||
// Atomic write: temp file + rename
|
||||
let policy_path = self.policies_dir.join(format!("{}.policy.json", bucket));
|
||||
let temp_path = self
|
||||
.policies_dir
|
||||
.join(format!("{}.policy.json.tmp", bucket));
|
||||
|
||||
fs::write(&temp_path, &json).await?;
|
||||
fs::rename(&temp_path, &policy_path).await?;
|
||||
|
||||
// Update cache
|
||||
let mut cache = self.policies.write().await;
|
||||
cache.insert(bucket.to_string(), policy);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Delete a policy for a bucket.
|
||||
pub async fn delete_policy(&self, bucket: &str) -> anyhow::Result<()> {
|
||||
let policy_path = self.policies_dir.join(format!("{}.policy.json", bucket));
|
||||
let _ = fs::remove_file(&policy_path).await;
|
||||
|
||||
let mut cache = self.policies.write().await;
|
||||
cache.remove(bucket);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -51,6 +51,54 @@ impl S3Error {
|
||||
Self::new("InvalidRequest", msg, StatusCode::BAD_REQUEST)
|
||||
}
|
||||
|
||||
pub fn signature_does_not_match() -> Self {
|
||||
Self::new(
|
||||
"SignatureDoesNotMatch",
|
||||
"The request signature we calculated does not match the signature you provided.",
|
||||
StatusCode::FORBIDDEN,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn invalid_access_key_id() -> Self {
|
||||
Self::new(
|
||||
"InvalidAccessKeyId",
|
||||
"The AWS Access Key Id you provided does not exist in our records.",
|
||||
StatusCode::FORBIDDEN,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn request_time_too_skewed() -> Self {
|
||||
Self::new(
|
||||
"RequestTimeTooSkewed",
|
||||
"The difference between the request time and the current time is too large.",
|
||||
StatusCode::FORBIDDEN,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn authorization_header_malformed() -> Self {
|
||||
Self::new(
|
||||
"AuthorizationHeaderMalformed",
|
||||
"The authorization header is malformed.",
|
||||
StatusCode::BAD_REQUEST,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn missing_security_header(msg: &str) -> Self {
|
||||
Self::new("MissingSecurityHeader", msg, StatusCode::BAD_REQUEST)
|
||||
}
|
||||
|
||||
pub fn no_such_bucket_policy() -> Self {
|
||||
Self::new(
|
||||
"NoSuchBucketPolicy",
|
||||
"The bucket policy does not exist.",
|
||||
StatusCode::NOT_FOUND,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn malformed_policy(msg: &str) -> Self {
|
||||
Self::new("MalformedPolicy", msg, StatusCode::BAD_REQUEST)
|
||||
}
|
||||
|
||||
pub fn to_xml(&self) -> String {
|
||||
format!(
|
||||
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<Error><Code>{}</Code><Message>{}</Message></Error>",
|
||||
|
||||
@@ -18,7 +18,10 @@ use tokio::sync::watch;
|
||||
use tokio_util::io::ReaderStream;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::action::{self, RequestContext, S3Action};
|
||||
use crate::auth::{self, AuthenticatedIdentity};
|
||||
use crate::config::S3Config;
|
||||
use crate::policy::{self, PolicyDecision, PolicyStore};
|
||||
use crate::s3_error::S3Error;
|
||||
use crate::storage::FileStore;
|
||||
use crate::xml_response;
|
||||
@@ -41,6 +44,10 @@ impl S3Server {
|
||||
store.initialize().await?;
|
||||
}
|
||||
|
||||
// Initialize policy store
|
||||
let policy_store = Arc::new(PolicyStore::new(store.policies_dir()));
|
||||
policy_store.load_from_disk().await?;
|
||||
|
||||
let addr: SocketAddr = format!("{}:{}", config.address(), config.server.port)
|
||||
.parse()?;
|
||||
|
||||
@@ -49,6 +56,7 @@ impl S3Server {
|
||||
|
||||
let server_store = store.clone();
|
||||
let server_config = config.clone();
|
||||
let server_policy_store = policy_store.clone();
|
||||
|
||||
let server_handle = tokio::spawn(async move {
|
||||
loop {
|
||||
@@ -61,13 +69,15 @@ impl S3Server {
|
||||
let io = TokioIo::new(stream);
|
||||
let store = server_store.clone();
|
||||
let cfg = server_config.clone();
|
||||
let ps = server_policy_store.clone();
|
||||
|
||||
tokio::spawn(async move {
|
||||
let svc = service_fn(move |req: Request<Incoming>| {
|
||||
let store = store.clone();
|
||||
let cfg = cfg.clone();
|
||||
let ps = ps.clone();
|
||||
async move {
|
||||
handle_request(req, store, cfg).await
|
||||
handle_request(req, store, cfg, ps).await
|
||||
}
|
||||
});
|
||||
|
||||
@@ -198,6 +208,7 @@ async fn handle_request(
|
||||
req: Request<Incoming>,
|
||||
store: Arc<FileStore>,
|
||||
config: S3Config,
|
||||
policy_store: Arc<PolicyStore>,
|
||||
) -> Result<Response<BoxBody>, std::convert::Infallible> {
|
||||
let request_id = Uuid::new_v4().to_string();
|
||||
let method = req.method().clone();
|
||||
@@ -210,16 +221,41 @@ async fn handle_request(
|
||||
return Ok(resp);
|
||||
}
|
||||
|
||||
// Auth check
|
||||
// Step 1: Resolve S3 action from request
|
||||
let request_ctx = action::resolve_action(&req);
|
||||
|
||||
// Step 2: Auth + policy pipeline
|
||||
if config.auth.enabled {
|
||||
if let Err(e) = check_auth(&req, &config) {
|
||||
tracing::warn!("Auth failed: {}", e.message);
|
||||
// Attempt authentication
|
||||
let identity = {
|
||||
let has_auth_header = req
|
||||
.headers()
|
||||
.get("authorization")
|
||||
.and_then(|v| v.to_str().ok())
|
||||
.map(|s| !s.is_empty())
|
||||
.unwrap_or(false);
|
||||
|
||||
if has_auth_header {
|
||||
match auth::verify_request(&req, &config) {
|
||||
Ok(id) => Some(id),
|
||||
Err(e) => {
|
||||
tracing::warn!("Auth failed: {}", e.message);
|
||||
return Ok(s3_error_response(&e, &request_id));
|
||||
}
|
||||
}
|
||||
} else {
|
||||
None // Anonymous request
|
||||
}
|
||||
};
|
||||
|
||||
// Step 3: Authorization (policy evaluation)
|
||||
if let Err(e) = authorize_request(&request_ctx, identity.as_ref(), &policy_store).await {
|
||||
return Ok(s3_error_response(&e, &request_id));
|
||||
}
|
||||
}
|
||||
|
||||
// Route and handle
|
||||
let mut response = match route_request(req, store, &config, &request_id).await {
|
||||
let mut response = match route_request(req, store, &config, &request_id, &policy_store).await {
|
||||
Ok(resp) => resp,
|
||||
Err(err) => {
|
||||
if let Some(s3err) = err.downcast_ref::<S3Error>() {
|
||||
@@ -249,6 +285,42 @@ async fn handle_request(
|
||||
Ok(response)
|
||||
}
|
||||
|
||||
/// Authorize a request based on bucket policies and authentication state.
|
||||
async fn authorize_request(
|
||||
ctx: &RequestContext,
|
||||
identity: Option<&AuthenticatedIdentity>,
|
||||
policy_store: &PolicyStore,
|
||||
) -> Result<(), S3Error> {
|
||||
// ListAllMyBuckets requires authentication (no bucket to apply policy to)
|
||||
if ctx.action == S3Action::ListAllMyBuckets {
|
||||
if identity.is_none() {
|
||||
return Err(S3Error::access_denied());
|
||||
}
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// If there's a bucket, check its policy
|
||||
if let Some(ref bucket) = ctx.bucket {
|
||||
if let Some(bucket_policy) = policy_store.get_policy(bucket).await {
|
||||
let decision = policy::evaluate_policy(&bucket_policy, ctx, identity);
|
||||
match decision {
|
||||
PolicyDecision::Deny => return Err(S3Error::access_denied()),
|
||||
PolicyDecision::Allow => return Ok(()),
|
||||
PolicyDecision::NoOpinion => {
|
||||
// Fall through to default behavior
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Default: authenticated users get full access, anonymous denied
|
||||
if identity.is_none() {
|
||||
return Err(S3Error::access_denied());
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// ============================
|
||||
// Routing
|
||||
// ============================
|
||||
@@ -258,6 +330,7 @@ async fn route_request(
|
||||
store: Arc<FileStore>,
|
||||
_config: &S3Config,
|
||||
request_id: &str,
|
||||
policy_store: &Arc<PolicyStore>,
|
||||
) -> Result<Response<BoxBody>> {
|
||||
let method = req.method().clone();
|
||||
let path = req.uri().path().to_string();
|
||||
@@ -282,6 +355,17 @@ async fn route_request(
|
||||
1 => {
|
||||
// Bucket level: /{bucket}
|
||||
let bucket = percent_decode(segments[0]);
|
||||
|
||||
// Check for ?policy query parameter
|
||||
if query.contains_key("policy") {
|
||||
return match method {
|
||||
Method::GET => handle_get_bucket_policy(policy_store, &bucket, request_id).await,
|
||||
Method::PUT => handle_put_bucket_policy(req, &store, policy_store, &bucket, request_id).await,
|
||||
Method::DELETE => handle_delete_bucket_policy(policy_store, &bucket, request_id).await,
|
||||
_ => Ok(empty_response(StatusCode::METHOD_NOT_ALLOWED, request_id)),
|
||||
};
|
||||
}
|
||||
|
||||
match method {
|
||||
Method::GET => {
|
||||
if query.contains_key("uploads") {
|
||||
@@ -291,7 +375,7 @@ async fn route_request(
|
||||
}
|
||||
}
|
||||
Method::PUT => handle_create_bucket(store, &bucket, request_id).await,
|
||||
Method::DELETE => handle_delete_bucket(store, &bucket, request_id).await,
|
||||
Method::DELETE => handle_delete_bucket(store, &bucket, request_id, policy_store).await,
|
||||
Method::HEAD => handle_head_bucket(store, &bucket, request_id).await,
|
||||
_ => Ok(empty_response(StatusCode::METHOD_NOT_ALLOWED, request_id)),
|
||||
}
|
||||
@@ -369,8 +453,11 @@ async fn handle_delete_bucket(
|
||||
store: Arc<FileStore>,
|
||||
bucket: &str,
|
||||
request_id: &str,
|
||||
policy_store: &Arc<PolicyStore>,
|
||||
) -> Result<Response<BoxBody>> {
|
||||
store.delete_bucket(bucket).await?;
|
||||
// Clean up bucket policy on deletion
|
||||
let _ = policy_store.delete_policy(bucket).await;
|
||||
Ok(empty_response(StatusCode::NO_CONTENT, request_id))
|
||||
}
|
||||
|
||||
@@ -577,6 +664,70 @@ async fn handle_copy_object(
|
||||
Ok(xml_response(StatusCode::OK, xml, request_id))
|
||||
}
|
||||
|
||||
// ============================
|
||||
// Policy handlers
|
||||
// ============================
|
||||
|
||||
async fn handle_get_bucket_policy(
|
||||
policy_store: &Arc<PolicyStore>,
|
||||
bucket: &str,
|
||||
request_id: &str,
|
||||
) -> Result<Response<BoxBody>> {
|
||||
match policy_store.get_policy(bucket).await {
|
||||
Some(p) => {
|
||||
let json = serde_json::to_string_pretty(&p)?;
|
||||
let resp = Response::builder()
|
||||
.status(StatusCode::OK)
|
||||
.header("content-type", "application/json")
|
||||
.header("x-amz-request-id", request_id)
|
||||
.body(full_body(json))
|
||||
.unwrap();
|
||||
Ok(resp)
|
||||
}
|
||||
None => Err(S3Error::no_such_bucket_policy().into()),
|
||||
}
|
||||
}
|
||||
|
||||
async fn handle_put_bucket_policy(
|
||||
req: Request<Incoming>,
|
||||
store: &Arc<FileStore>,
|
||||
policy_store: &Arc<PolicyStore>,
|
||||
bucket: &str,
|
||||
request_id: &str,
|
||||
) -> Result<Response<BoxBody>> {
|
||||
// Verify bucket exists
|
||||
if !store.bucket_exists(bucket).await {
|
||||
return Err(S3Error::no_such_bucket().into());
|
||||
}
|
||||
|
||||
// Read body
|
||||
let body_bytes = req.collect().await.map_err(|e| anyhow::anyhow!("Body error: {}", e))?.to_bytes();
|
||||
let body_str = String::from_utf8_lossy(&body_bytes);
|
||||
|
||||
// Validate and parse
|
||||
let validated_policy = policy::validate_policy(&body_str)?;
|
||||
|
||||
// Store
|
||||
policy_store
|
||||
.put_policy(bucket, validated_policy)
|
||||
.await
|
||||
.map_err(|e| S3Error::internal_error(&e.to_string()))?;
|
||||
|
||||
Ok(empty_response(StatusCode::NO_CONTENT, request_id))
|
||||
}
|
||||
|
||||
async fn handle_delete_bucket_policy(
|
||||
policy_store: &Arc<PolicyStore>,
|
||||
bucket: &str,
|
||||
request_id: &str,
|
||||
) -> Result<Response<BoxBody>> {
|
||||
policy_store
|
||||
.delete_policy(bucket)
|
||||
.await
|
||||
.map_err(|e| S3Error::internal_error(&e.to_string()))?;
|
||||
Ok(empty_response(StatusCode::NO_CONTENT, request_id))
|
||||
}
|
||||
|
||||
// ============================
|
||||
// Multipart handlers
|
||||
// ============================
|
||||
@@ -820,46 +971,3 @@ fn add_cors_headers(headers: &mut hyper::HeaderMap, config: &S3Config) {
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// ============================
|
||||
// Auth
|
||||
// ============================
|
||||
|
||||
fn check_auth(req: &Request<Incoming>, config: &S3Config) -> Result<(), S3Error> {
|
||||
let auth_header = req
|
||||
.headers()
|
||||
.get("authorization")
|
||||
.and_then(|v| v.to_str().ok())
|
||||
.unwrap_or("");
|
||||
|
||||
if auth_header.is_empty() {
|
||||
return Err(S3Error::access_denied());
|
||||
}
|
||||
|
||||
// Extract access key from AWS v2 or v4 signature
|
||||
let access_key = if auth_header.starts_with("AWS4-HMAC-SHA256") {
|
||||
// v4: AWS4-HMAC-SHA256 Credential=KEY/date/region/s3/aws4_request, ...
|
||||
auth_header
|
||||
.split("Credential=")
|
||||
.nth(1)
|
||||
.and_then(|s| s.split('/').next())
|
||||
} else if auth_header.starts_with("AWS ") {
|
||||
// v2: AWS KEY:signature
|
||||
auth_header
|
||||
.strip_prefix("AWS ")
|
||||
.and_then(|s| s.split(':').next())
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let access_key = access_key.unwrap_or("");
|
||||
|
||||
// Check against configured credentials
|
||||
for cred in &config.auth.credentials {
|
||||
if cred.access_key_id == access_key {
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
|
||||
Err(S3Error::access_denied())
|
||||
}
|
||||
|
||||
@@ -118,14 +118,24 @@ impl FileStore {
|
||||
|
||||
pub async fn initialize(&self) -> Result<()> {
|
||||
fs::create_dir_all(&self.root_dir).await?;
|
||||
fs::create_dir_all(self.policies_dir()).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn policies_dir(&self) -> PathBuf {
|
||||
self.root_dir.join(".policies")
|
||||
}
|
||||
|
||||
pub fn policy_path(&self, bucket: &str) -> PathBuf {
|
||||
self.policies_dir().join(format!("{}.policy.json", bucket))
|
||||
}
|
||||
|
||||
pub async fn reset(&self) -> Result<()> {
|
||||
if self.root_dir.exists() {
|
||||
fs::remove_dir_all(&self.root_dir).await?;
|
||||
}
|
||||
fs::create_dir_all(&self.root_dir).await?;
|
||||
fs::create_dir_all(self.policies_dir()).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
||||
Reference in New Issue
Block a user