From ec57091d44690a5275d5f1e026798ef21bc678cd Mon Sep 17 00:00:00 2001
From: Trinity Pointard <trinity.pointard@gmail.com>
Date: Sun, 28 Nov 2021 21:45:22 +0100
Subject: [PATCH] wire new request router in garage

---
 src/api/api_server.rs | 289 ++++++++++++++++++++----------------------
 src/api/s3_list.rs    |  28 +---
 src/api/s3_put.rs     |   7 +-
 src/api/s3_router.rs  | 225 ++++++++++++++++++++------------
 4 files changed, 282 insertions(+), 267 deletions(-)
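[Illustrative sketch -- not part of the commit. The wiring below turns every
incoming request into a typed `Endpoint` before any handler runs, and the key
check collapses into a three-way classification. A minimal sketch of the
resulting flow, assuming hyper's `Request<Body>` and the crate's `Error` type
as used in this diff; `Key` stands in for the api-key type that
`allow_read`/`allow_write` are defined on:

    use hyper::{Body, Request};

    fn authorize(req: &Request<Body>, api_key: &Key) -> Result<bool, Error> {
        // Path-style addressing: passing None makes the router split the
        // bucket out of the URI path itself.
        let endpoint = Endpoint::from_request(req, None)?;
        Ok(match endpoint.authorization_type() {
            Authorization::None => true, // e.g. ListBuckets
            Authorization::Read(bucket) => api_key.allow_read(bucket),
            Authorization::Write(bucket) => api_key.allow_write(bucket),
        })
    }
]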
diff --git a/src/api/api_server.rs b/src/api/api_server.rs
index 240ffca9..24b5ee71 100644
--- a/src/api/api_server.rs
+++ b/src/api/api_server.rs
@@ -1,4 +1,3 @@
-use std::collections::HashMap;
 use std::net::SocketAddr;
 use std::sync::Arc;
 
@@ -6,7 +5,7 @@ use futures::future::Future;
 use hyper::header;
 use hyper::server::conn::AddrStream;
 use hyper::service::{make_service_fn, service_fn};
-use hyper::{Body, Method, Request, Response, Server};
+use hyper::{Body, Request, Response, Server};
 
 use garage_util::error::Error as GarageError;
 
@@ -22,6 +21,7 @@ use crate::s3_delete::*;
 use crate::s3_get::*;
 use crate::s3_list::*;
 use crate::s3_put::*;
+use crate::s3_router::{Authorization, Endpoint};
 
 /// Run the S3 API server
 pub async fn run_api_server(
@@ -86,8 +86,6 @@ async fn handler(
 }
 
 async fn handler_inner(garage: Arc<Garage>, req: Request<Body>) -> Result<Response<Body>, Error> {
-	let path = req.uri().path().to_string();
-	let path = percent_encoding::percent_decode_str(&path).decode_utf8()?;
 	let (api_key, content_sha256) = check_signature(&garage, &req).await?;
 
 	let authority = req
@@ -105,166 +103,151 @@ async fn handler_inner(garage: Arc<Garage>, req: Request<Body>) -> Result<Response<Body>, Error>
 
-	let allowed = match req.method() {
-		&Method::HEAD | &Method::GET => api_key.allow_read(bucket),
-		_ => api_key.allow_write(bucket),
+	let endpoint = Endpoint::from_request(&req, bucket.map(ToOwned::to_owned))?;
+	let allowed = match endpoint.authorization_type() {
+		Authorization::None => true,
+		Authorization::Read(bucket) => api_key.allow_read(bucket),
+		Authorization::Write(bucket) => api_key.allow_write(bucket),
 	};
+
 	if !allowed {
 		return Err(Error::Forbidden(
 			"Operation is not allowed for this key.".to_string(),
 		));
 	}
 
-	let mut params = HashMap::new();
-	if let Some(query) = req.uri().query() {
-		let query_pairs = url::form_urlencoded::parse(query.as_bytes());
-		for (key, val) in query_pairs {
-			params.insert(key.to_lowercase(), val.to_string());
+	match endpoint {
+		Endpoint::ListBuckets => handle_list_buckets(&api_key),
+		Endpoint::HeadObject { bucket, key, .. } => handle_head(garage, &req, &bucket, &key).await,
+		Endpoint::GetObject { bucket, key, .. } => handle_get(garage, &req, &bucket, &key).await,
+		Endpoint::UploadPart {
+			bucket,
+			key,
+			part_number,
+			upload_id,
+		} => {
+			handle_put_part(
+				garage,
+				req,
+				&bucket,
+				&key,
+				part_number,
+				&upload_id,
+				content_sha256,
+			)
+			.await
 		}
-	}
-
-	if let Some(key) = key {
-		match *req.method() {
-			Method::HEAD => {
-				// HeadObject query
-				Ok(handle_head(garage, &req, bucket, key).await?)
+		Endpoint::CopyObject { bucket, key } => {
+			let copy_source = req.headers().get("x-amz-copy-source").unwrap().to_str()?;
+			let copy_source = percent_encoding::percent_decode_str(copy_source).decode_utf8()?;
+			let (source_bucket, source_key) = parse_bucket_key(&copy_source, None)?;
+			if !api_key.allow_read(source_bucket) {
+				return Err(Error::Forbidden(format!(
+					"Reading from bucket {} not allowed for this key",
+					source_bucket
+				)));
 			}
-			Method::GET => {
-				// GetObject query
-				Ok(handle_get(garage, &req, bucket, key).await?)
-			}
-			Method::PUT => {
-				if params.contains_key(&"partnumber".to_string())
-					&& params.contains_key(&"uploadid".to_string())
-				{
-					// UploadPart query
-					let part_number = params.get("partnumber").unwrap();
-					let upload_id = params.get("uploadid").unwrap();
-					Ok(handle_put_part(
-						garage,
-						req,
-						bucket,
-						key,
-						part_number,
-						upload_id,
-						content_sha256,
-					)
-					.await?)
-				} else if req.headers().contains_key("x-amz-copy-source") {
-					// CopyObject query
-					let copy_source = req.headers().get("x-amz-copy-source").unwrap().to_str()?;
-					let copy_source =
-						percent_encoding::percent_decode_str(copy_source).decode_utf8()?;
-					let (source_bucket, source_key) = parse_bucket_key(&copy_source, None)?;
-					if !api_key.allow_read(source_bucket) {
-						return Err(Error::Forbidden(format!(
-							"Reading from bucket {} not allowed for this key",
-							source_bucket
-						)));
-					}
-					let source_key = source_key.ok_or_bad_request("No source key specified")?;
-					Ok(handle_copy(garage, &req, bucket, key, source_bucket, source_key).await?)
-				} else {
-					// PutObject query
-					Ok(handle_put(garage, req, bucket, key, content_sha256).await?)
-				}
-			}
-			Method::DELETE => {
-				if params.contains_key(&"uploadid".to_string()) {
-					// AbortMultipartUpload query
-					let upload_id = params.get("uploadid").unwrap();
-					Ok(handle_abort_multipart_upload(garage, bucket, key, upload_id).await?)
-				} else {
-					// DeleteObject query
-					Ok(handle_delete(garage, bucket, key).await?)
-				}
-			}
-			Method::POST => {
-				if params.contains_key(&"uploads".to_string()) {
-					// CreateMultipartUpload call
-					Ok(handle_create_multipart_upload(garage, &req, bucket, key).await?)
-				} else if params.contains_key(&"uploadid".to_string()) {
-					// CompleteMultipartUpload call
-					let upload_id = params.get("uploadid").unwrap();
-					Ok(handle_complete_multipart_upload(
-						garage,
-						req,
-						bucket,
-						key,
-						upload_id,
-						content_sha256,
-					)
-					.await?)
-				} else {
-					Err(Error::BadRequest(
-						"Not a CreateMultipartUpload call, what is it?".to_string(),
-					))
-				}
-			}
-			_ => Err(Error::BadRequest("Invalid method".to_string())),
+			let source_key = source_key.ok_or_bad_request("No source key specified")?;
+			handle_copy(garage, &req, &bucket, &key, source_bucket, source_key).await
 		}
-	} else {
-		match *req.method() {
-			Method::PUT => {
-				// CreateBucket
-				// If we're here, the bucket already exists, so just answer ok
-				debug!(
-					"Body: {}",
-					std::str::from_utf8(&hyper::body::to_bytes(req.into_body()).await?)
-						.unwrap_or("<invalid utf8>")
-				);
-				let empty_body: Body = Body::from(vec![]);
-				let response = Response::builder()
-					.header("Location", format!("/{}", bucket))
-					.body(empty_body)
-					.unwrap();
-				Ok(response)
-			}
-			Method::HEAD => {
-				// HeadBucket
-				let empty_body: Body = Body::from(vec![]);
-				let response = Response::builder().body(empty_body).unwrap();
-				Ok(response)
-			}
-			Method::DELETE => {
-				// DeleteBucket query
-				Err(Error::Forbidden(
-					"Cannot delete buckets using S3 api, please talk to Garage directly".into(),
-				))
-			}
-			Method::GET => {
-				if params.contains_key("location") {
-					// GetBucketLocation call
-					Ok(handle_get_bucket_location(garage)?)
-				} else if params.contains_key("versioning") {
-					// GetBucketVersioning
-					Ok(handle_get_bucket_versioning()?)
-				} else {
-					// ListObjects or ListObjectsV2 query
-					let q = parse_list_objects_query(bucket, &params)?;
-					Ok(handle_list(garage, &q).await?)
-				}
-			}
-			Method::POST => {
-				if params.contains_key(&"delete".to_string()) {
-					// DeleteObjects
-					Ok(handle_delete_objects(garage, bucket, req, content_sha256).await?)
-				} else {
-					debug!(
-						"Body: {}",
-						std::str::from_utf8(&hyper::body::to_bytes(req.into_body()).await?)
-							.unwrap_or("<invalid utf8>")
-					);
-					Err(Error::BadRequest("Unsupported call".to_string()))
-				}
-			}
-			_ => Err(Error::BadRequest("Invalid method".to_string())),
+		Endpoint::PutObject { bucket, key } => {
+			handle_put(garage, req, &bucket, &key, content_sha256).await
+		}
+		Endpoint::AbortMultipartUpload {
+			bucket,
+			key,
+			upload_id,
+		} => handle_abort_multipart_upload(garage, &bucket, &key, &upload_id).await,
+		Endpoint::DeleteObject { bucket, key, .. } => handle_delete(garage, &bucket, &key).await,
+		Endpoint::CreateMultipartUpload { bucket, key } => {
+			handle_create_multipart_upload(garage, &req, &bucket, &key).await
+		}
+		Endpoint::CompleteMultipartUpload {
+			bucket,
+			key,
+			upload_id,
+		} => {
+			handle_complete_multipart_upload(garage, req, &bucket, &key, &upload_id, content_sha256)
+				.await
+		}
+		Endpoint::CreateBucket { bucket } => {
+			debug!(
+				"Body: {}",
+				std::str::from_utf8(&hyper::body::to_bytes(req.into_body()).await?)
+					.unwrap_or("<invalid utf8>")
+			);
+			let empty_body: Body = Body::from(vec![]);
+			let response = Response::builder()
+				.header("Location", format!("/{}", bucket))
+				.body(empty_body)
+				.unwrap();
+			Ok(response)
+		}
+		Endpoint::HeadBucket { .. } => {
+			let empty_body: Body = Body::from(vec![]);
+			let response = Response::builder().body(empty_body).unwrap();
+			Ok(response)
+		}
+		Endpoint::DeleteBucket { .. } => Err(Error::Forbidden(
+			"Cannot delete buckets using S3 api, please talk to Garage directly".into(),
+		)),
+		Endpoint::GetBucketLocation { .. } => handle_get_bucket_location(garage),
+		Endpoint::GetBucketVersioning { .. } => handle_get_bucket_versioning(),
+		Endpoint::ListObjects {
+			bucket,
+			delimiter,
+			encoding_type,
+			marker,
+			max_keys,
+			prefix,
+		} => {
+			handle_list(
+				garage,
+				&ListObjectsQuery {
+					is_v2: false,
+					bucket,
+					delimiter: delimiter.map(|d| d.to_string()),
+					max_keys: max_keys.unwrap_or(1000),
+					prefix: prefix.unwrap_or_default(),
+					marker,
+					continuation_token: None,
+					start_after: None,
+					urlencode_resp: encoding_type.map(|e| e == "url").unwrap_or(false),
+				},
+			)
+			.await
+		}
+		Endpoint::ListObjectsV2 {
+			bucket,
+			delimiter,
+			encoding_type,
+			max_keys,
+			prefix,
+			continuation_token,
+			start_after,
+			list_type,
+			..
+		} => {
+			handle_list(
+				garage,
+				&ListObjectsQuery {
+					is_v2: list_type == "v2",
+					bucket,
+					delimiter: delimiter.map(|d| d.to_string()),
+					max_keys: max_keys.unwrap_or(1000),
+					prefix: prefix.unwrap_or_default(),
+					marker: None,
+					continuation_token,
+					start_after,
+					urlencode_resp: encoding_type.map(|e| e == "url").unwrap_or(false),
+				},
+			)
+			.await
+		}
+		Endpoint::DeleteObjects { bucket } => {
+			handle_delete_objects(garage, &bucket, req, content_sha256).await
+		}
+		_ => Err(Error::BadRequest("Unsupported call".to_string())),
 	}
 }
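[Aside, illustrative only -- not part of the commit. `parse_list_objects_query`
below becomes dead code because the router's converters already produce typed
values. Schematically, an `opt_parse` parameter such as `max-keys` behaves like
this stand-alone stand-in (the real version lives inside the `s3_match!` macro
in s3_router.rs and returns the crate's `Error`):

    fn opt_parse_max_keys(raw: Option<&str>) -> Result<Option<usize>, String> {
        // An absent parameter stays None; a present-but-unparsable one is an
        // error, so `GET /bucket?max-keys=abc` is rejected in the router,
        // before any handler runs.
        raw.map(|v| v.parse())
            .transpose()
            .map_err(|_| "Failed to parse query parameter".to_owned())
    }

    assert_eq!(opt_parse_max_keys(Some("50")), Ok(Some(50)));
    assert_eq!(opt_parse_max_keys(None), Ok(None));
    assert!(opt_parse_max_keys(Some("abc")).is_err());
]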
+ } => { + handle_list( + garage, + &ListObjectsQuery { + is_v2: list_type == "v2", + bucket, + delimiter: delimiter.map(|d| d.to_string()), + max_keys: max_keys.unwrap_or(1000), + prefix: prefix.unwrap_or_default(), + marker: None, + continuation_token, + start_after, + urlencode_resp: encoding_type.map(|e| e == "url").unwrap_or(false), + }, + ) + .await + } + Endpoint::DeleteObjects { bucket } => { + handle_delete_objects(garage, &bucket, req, content_sha256).await + } + _ => Err(Error::BadRequest("Unsupported call".to_string())), } } diff --git a/src/api/s3_list.rs b/src/api/s3_list.rs index a4de388d..df9c3e6b 100644 --- a/src/api/s3_list.rs +++ b/src/api/s3_list.rs @@ -1,4 +1,4 @@ -use std::collections::{BTreeMap, BTreeSet, HashMap}; +use std::collections::{BTreeMap, BTreeSet}; use std::sync::Arc; use hyper::{Body, Response}; @@ -35,32 +35,6 @@ struct ListResultInfo { etag: String, } -pub fn parse_list_objects_query( - bucket: &str, - params: &HashMap, -) -> Result { - Ok(ListObjectsQuery { - is_v2: params.get("list-type").map(|x| x == "2").unwrap_or(false), - bucket: bucket.to_string(), - delimiter: params.get("delimiter").filter(|x| !x.is_empty()).cloned(), - max_keys: params - .get("max-keys") - .map(|x| { - x.parse::() - .ok_or_bad_request("Invalid value for max-keys") - }) - .unwrap_or(Ok(1000))?, - prefix: params.get("prefix").cloned().unwrap_or_default(), - marker: params.get("marker").cloned(), - continuation_token: params.get("continuation-token").cloned(), - start_after: params.get("start-after").cloned(), - urlencode_resp: params - .get("encoding-type") - .map(|x| x == "url") - .unwrap_or(false), - }) -} - pub async fn handle_list( garage: Arc, query: &ListObjectsQuery, diff --git a/src/api/s3_put.rs b/src/api/s3_put.rs index 5eae3bf5..f63e8307 100644 --- a/src/api/s3_put.rs +++ b/src/api/s3_put.rs @@ -354,15 +354,10 @@ pub async fn handle_put_part( req: Request, bucket: &str, key: &str, - part_number_str: &str, + part_number: u64, upload_id: &str, content_sha256: Option, ) -> Result, Error> { - // Check parameters - let part_number = part_number_str - .parse::() - .ok_or_bad_request("Invalid part number")?; - let version_uuid = decode_upload_id(upload_id)?; let content_md5 = match req.headers().get("content-md5") { diff --git a/src/api/s3_router.rs b/src/api/s3_router.rs index d0988ccd..69348c4d 100644 --- a/src/api/s3_router.rs +++ b/src/api/s3_router.rs @@ -3,7 +3,7 @@ use crate::error::{Error, OkOrBadRequest}; use std::borrow::Cow; use hyper::header::HeaderValue; -use hyper::{HeaderMap, Method, Uri}; +use hyper::{HeaderMap, Method, Request}; macro_rules! s3_match { (@extract $enum:expr , $param:ident, [ $($endpoint:ident,)* ]) => {{ @@ -16,8 +16,8 @@ macro_rules! s3_match { } }}; (@gen_parser ($keyword:expr, $key:expr, $bucket:expr, $query:expr, $header:expr), - key: [$($kw_k:ident $(if $required_k:ident)? $(header $header_k:expr)? => $api_k:ident $(($($conv_k:ident :: $what_k:ident),*))?,)*], - no_key: [$($kw_nk:ident $(if $required_nk:ident)? $(if_header $header_nk:expr)? => $api_nk:ident $(($($conv_nk:ident :: $what_nk:ident),*))?,)*]) => {{ + key: [$($kw_k:ident $(if $required_k:ident)? $(header $header_k:expr)? => $api_k:ident $(($($conv_k:ident :: $param_k:ident),*))?,)*], + no_key: [$($kw_nk:ident $(if $required_nk:ident)? $(if_header $header_nk:expr)? => $api_nk:ident $(($($conv_nk:ident :: $param_nk:ident),*))?,)*]) => {{ use Endpoint::*; use keywords::*; match ($keyword, !$key.is_empty()){ @@ -26,7 +26,7 @@ macro_rules! 
diff --git a/src/api/s3_router.rs b/src/api/s3_router.rs
index d0988ccd..69348c4d 100644
--- a/src/api/s3_router.rs
+++ b/src/api/s3_router.rs
@@ -3,7 +3,7 @@ use crate::error::{Error, OkOrBadRequest};
 use std::borrow::Cow;
 
 use hyper::header::HeaderValue;
-use hyper::{HeaderMap, Method, Uri};
+use hyper::{HeaderMap, Method, Request};
 
 macro_rules! s3_match {
 	(@extract $enum:expr , $param:ident, [ $($endpoint:ident,)* ]) => {{
@@ -16,8 +16,8 @@ macro_rules! s3_match {
 		}
 	}};
 	(@gen_parser ($keyword:expr, $key:expr, $bucket:expr, $query:expr, $header:expr),
-		key: [$($kw_k:ident $(if $required_k:ident)? $(header $header_k:expr)? => $api_k:ident $(($($conv_k:ident :: $what_k:ident),*))?,)*],
-		no_key: [$($kw_nk:ident $(if $required_nk:ident)? $(if_header $header_nk:expr)? => $api_nk:ident $(($($conv_nk:ident :: $what_nk:ident),*))?,)*]) => {{
+		key: [$($kw_k:ident $(if $required_k:ident)? $(header $header_k:expr)? => $api_k:ident $(($($conv_k:ident :: $param_k:ident),*))?,)*],
+		no_key: [$($kw_nk:ident $(if $required_nk:ident)? $(if_header $header_nk:expr)? => $api_nk:ident $(($($conv_nk:ident :: $param_nk:ident),*))?,)*]) => {{
 		use Endpoint::*;
 		use keywords::*;
 		match ($keyword, !$key.is_empty()){
@@ -26,7 +26,7 @@ macro_rules! s3_match {
 					bucket: $bucket,
 					key: $key,
 					$($(
-						$what_k: s3_match!(@@parse_param $query, $conv_k, $what_k),
+						$param_k: s3_match!(@@parse_param $query, $conv_k, $param_k),
 					)*)?
 				}),
 			)*
@@ -34,11 +34,11 @@ macro_rules! s3_match {
 			($kw_nk, false) $(if $query.$required_nk.is_some())? $(if $header.contains($header_nk))? => Ok($api_nk {
 				bucket: $bucket,
 				$($(
-					$what_nk: s3_match!(@@parse_param $query, $conv_nk, $what_nk),
+					$param_nk: s3_match!(@@parse_param $query, $conv_nk, $param_nk),
 				)*)?
 			}),
 		)*
-		_ => Err(Error::BadRequest("Invalid endpoint".to_string())),
+		_ => Err(Error::BadRequest("Invalid endpoint".to_owned())),
 		}
 	}};
 
@@ -46,24 +46,24 @@ macro_rules! s3_match {
 		$query.$param.take().map(|param| param.into_owned())
 	}};
 	(@@parse_param $query:expr, query, $param:ident) => {{
-		$query.$param.take().ok_or_bad_request("Invalid endpoint")?.into_owned()
+		$query.$param.take().ok_or_bad_request("Missing argument for endpoint")?.into_owned()
 	}};
 	(@@parse_param $query:expr, opt_parse, $param:ident) => {{
 		$query.$param
 			.take()
 			.map(|param| param.parse())
 			.transpose()
-			.map_err(|_| Error::BadRequest("Failed to parse query parameter".to_string()))?
+			.map_err(|_| Error::BadRequest("Failed to parse query parameter".to_owned()))?
 	}};
 	(@@parse_param $query:expr, parse, $param:ident) => {{
-		$query.$param.take().ok_or_bad_request("Invalid endpoint")?
+		$query.$param.take().ok_or_bad_request("Missing argument for endpoint")?
 			.parse()
-			.map_err(|_| Error::BadRequest("Failed to parse query parameter".to_string()))?
+			.map_err(|_| Error::BadRequest("Failed to parse query parameter".to_owned()))?
 	}};
 }
 
 /// List of all S3 API endpoints.
-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, PartialEq, Eq)]
 pub enum Endpoint {
 	AbortMultipartUpload {
 		bucket: String,
@@ -214,7 +214,7 @@ pub enum Endpoint {
 	GetObject {
 		bucket: String,
 		key: String,
-		part_number: Option<u16>,
+		part_number: Option<u64>,
 		version_id: Option<String>,
 	},
 	GetObjectAcl {
@@ -253,7 +253,7 @@ pub enum Endpoint {
 	HeadObject {
 		bucket: String,
 		key: String,
-		part_number: Option<u16>,
+		part_number: Option<u64>,
 		version_id: Option<String>,
 	},
 	ListBucketAnalyticsConfigurations {
@@ -278,7 +278,7 @@ pub enum Endpoint {
 	ListMultipartUploads {
 		bucket: String,
 		delimiter: Option<char>,
 		encoding_type: Option<String>,
 		key_marker: Option<String>,
-		max_uploads: Option<u16>,
+		max_uploads: Option<usize>,
 		prefix: Option<String>,
 		upload_id_marker: Option<String>,
 	},
@@ -287,7 +287,7 @@ pub enum Endpoint {
 	ListObjects {
 		bucket: String,
 		delimiter: Option<char>,
 		encoding_type: Option<String>,
 		marker: Option<String>,
-		max_keys: Option<u16>,
+		max_keys: Option<usize>,
 		prefix: Option<String>,
 	},
@@ -297,7 +297,7 @@ pub enum Endpoint {
 	ListObjectsV2 {
 		bucket: String,
 		list_type: String,
 		continuation_token: Option<String>,
 		delimiter: Option<char>,
 		encoding_type: Option<String>,
 		fetch_owner: Option<bool>,
-		max_keys: Option<u16>,
+		max_keys: Option<usize>,
 		prefix: Option<String>,
 		start_after: Option<String>,
 	},
@@ -306,15 +306,15 @@ pub enum Endpoint {
 	ListObjectVersions {
 		bucket: String,
 		delimiter: Option<char>,
 		encoding_type: Option<String>,
 		key_marker: Option<String>,
-		max_keys: Option<u16>,
+		max_keys: Option<usize>,
 		prefix: Option<String>,
 		version_id_marker: Option<String>,
 	},
 	ListParts {
 		bucket: String,
 		key: String,
-		max_parts: Option<u16>,
-		part_number_marker: Option<u16>,
+		max_parts: Option<u64>,
+		part_number_marker: Option<u64>,
 		upload_id: String,
 	},
 	PutBucketAccelerateConfiguration {
@@ -418,56 +418,52 @@ pub enum Endpoint {
 	UploadPart {
 		bucket: String,
 		key: String,
-		part_number: u16,
+		part_number: u64,
 		upload_id: String,
 	},
 	UploadPartCopy {
 		bucket: String,
 		key: String,
-		part_number: u16,
+		part_number: u64,
 		upload_id: String,
 	},
 }
 
 impl Endpoint {
-	pub fn from_request(
-		bucket: Option<String>,
-		uri: &Uri,
-		method: &Method,
-		headers: &HeaderMap<HeaderValue>,
-	) -> Result<Self, Error> {
+	pub fn from_request<T>(req: &Request<T>, bucket: Option<String>) -> Result<Self, Error> {
+		let uri = req.uri();
 		let path = uri.path().trim_start_matches('/');
 		let query = uri.query();
 		if bucket.is_none() && path.is_empty() {
 			if query.is_none() {
 				return Ok(Self::ListBuckets);
 			} else {
-				return Err(Error::BadRequest("Invalid endpoint".to_string()));
+				return Err(Error::BadRequest("Invalid ListBuckets query".to_owned()));
 			}
 		}
 
 		let (bucket, key) = if let Some(bucket) = bucket {
-			(bucket, path.to_string())
+			(bucket, path.to_owned())
 		} else {
 			path.split_once('/')
-				.map(|(b, p)| (b.to_string(), p.trim_start_matches('/').to_string()))
-				.unwrap_or((path.to_string(), String::new()))
+				.map(|(b, p)| (b.to_owned(), p.trim_start_matches('/').to_owned()))
+				.unwrap_or((path.to_owned(), String::new()))
 		};
 
 		let mut query = QueryParameters::from_query(query.unwrap_or_default())?;
 
-		let res = match method {
-			&Method::GET => Self::from_get(bucket, key, &mut query)?,
-			&Method::HEAD => Self::from_head(bucket, key, &mut query)?,
-			&Method::POST => Self::from_post(bucket, key, &mut query)?,
-			&Method::PUT => Self::from_put(bucket, key, &mut query, headers)?,
-			&Method::DELETE => Self::from_delete(bucket, key, &mut query)?,
-			_ => return Err(Error::BadRequest("Invalid endpoint".to_string())),
+		let res = match *req.method() {
+			Method::GET => Self::from_get(bucket, key, &mut query)?,
+			Method::HEAD => Self::from_head(bucket, key, &mut query)?,
+			Method::POST => Self::from_post(bucket, key, &mut query)?,
+			Method::PUT => Self::from_put(bucket, key, &mut query, req.headers())?,
+			Method::DELETE => Self::from_delete(bucket, key, &mut query)?,
+			_ => return Err(Error::BadRequest("Unknown method".to_owned())),
 		};
 
 		if let Some(message) = query.nonempty_message() {
 			// maybe this should just be a warn! ?
-			Err(Error::BadRequest(message.to_string()))
+			Err(Error::BadRequest(message.to_owned()))
 		} else {
 			Ok(res)
 		}
@@ -542,11 +538,11 @@ impl Endpoint {
 			@gen_parser
 			(query.keyword.take().unwrap_or_default().as_ref(), key, bucket, query, None),
 			key: [
-				EMPTY => HeadObject(opt_parse::part_number, query_opt::version_id),
+				EMPTY => HeadObject(opt_parse::part_number, query_opt::version_id),
 			],
 			no_key: [
 				EMPTY => HeadBucket,
-			]
+			]
 		}
 	}
 
@@ -565,8 +561,8 @@ impl Endpoint {
 				UPLOADS => CreateMultipartUpload,
 			],
 			no_key: [
-				DELETE => DeleteObjects,
-			]
+				DELETE => DeleteObjects,
+			]
 		}
 	}
 
@@ -574,45 +570,45 @@ impl Endpoint {
 	fn from_put(
 		bucket: String,
 		key: String,
 		query: &mut QueryParameters<'_>,
-		headers: &HeaderMap<HeaderValue>,
+		headers: &HeaderMap<HeaderValue>,
 	) -> Result<Self, Error> {
 		s3_match! {
 			@gen_parser
 			(query.keyword.take().unwrap_or_default().as_ref(), key, bucket, query, headers),
 			key: [
-				EMPTY if part_number header "x-amz-copy-source" => UploadPartCopy (parse::part_number, query::upload_id),
+				EMPTY if part_number header "x-amz-copy-source" => UploadPartCopy (parse::part_number, query::upload_id),
 				EMPTY header "x-amz-copy-source" => CopyObject,
-				EMPTY if part_number => UploadPart (parse::part_number, query::upload_id),
+				EMPTY if part_number => UploadPart (parse::part_number, query::upload_id),
 				EMPTY => PutObject,
-				ACL => PutObjectAcl (query_opt::version_id),
-				LEGAL_HOLD => PutObjectLegalHold (query_opt::version_id),
-				RETENTION => PutObjectRetention (query_opt::version_id),
-				TAGGING => PutObjectTagging (query_opt::version_id),
+				ACL => PutObjectAcl (query_opt::version_id),
+				LEGAL_HOLD => PutObjectLegalHold (query_opt::version_id),
+				RETENTION => PutObjectRetention (query_opt::version_id),
+				TAGGING => PutObjectTagging (query_opt::version_id),
 			],
 			no_key: [
-				EMPTY => CreateBucket,
-				ACCELERATE => PutBucketAccelerateConfiguration,
-				ACL => PutBucketAcl,
-				ANALYTICS => PutBucketAnalyticsConfiguration (query::id),
-				CORS => PutBucketCors,
-				ENCRYPTION => PutBucketEncryption,
-				INTELLIGENT_TIERING => PutBucketIntelligentTieringConfiguration(query::id),
-				INVENTORY => PutBucketInventoryConfiguration(query::id),
-				LIFECYCLE => PutBucketLifecycleConfiguration,
-				LOGGING => PutBucketLogging,
-				METRICS => PutBucketMetricsConfiguration(query::id),
-				NOTIFICATION => PutBucketNotificationConfiguration,
-				OBJECT_LOCK => PutObjectLockConfiguration,
-				OWNERSHIP_CONTROLS => PutBucketOwnershipControls,
-				POLICY => PutBucketPolicy,
-				PUBLIC_ACCESS_BLOCK => PutPublicAccessBlock,
-				REPLICATION => PutBucketReplication,
-				REQUEST_PAYMENT => PutBucketRequestPayment,
-				TAGGING => PutBucketTagging,
-				VERSIONING => PutBucketVersioning,
-				WEBSITE => PutBucketWebsite,
-			]
+				EMPTY => CreateBucket,
+				ACCELERATE => PutBucketAccelerateConfiguration,
+				ACL => PutBucketAcl,
+				ANALYTICS => PutBucketAnalyticsConfiguration (query::id),
+				CORS => PutBucketCors,
+				ENCRYPTION => PutBucketEncryption,
+				INTELLIGENT_TIERING => PutBucketIntelligentTieringConfiguration(query::id),
+				INVENTORY => PutBucketInventoryConfiguration(query::id),
+				LIFECYCLE => PutBucketLifecycleConfiguration,
+				LOGGING => PutBucketLogging,
+				METRICS => PutBucketMetricsConfiguration(query::id),
+				NOTIFICATION => PutBucketNotificationConfiguration,
+				OBJECT_LOCK => PutObjectLockConfiguration,
+				OWNERSHIP_CONTROLS => PutBucketOwnershipControls,
+				POLICY => PutBucketPolicy,
+				PUBLIC_ACCESS_BLOCK => PutPublicAccessBlock,
+				REPLICATION => PutBucketReplication,
+				REQUEST_PAYMENT => PutBucketRequestPayment,
+				TAGGING => PutBucketTagging,
+				VERSIONING => PutBucketVersioning,
+				WEBSITE => PutBucketWebsite,
+			]
 		}
 	}
 
@@ -748,6 +744,7 @@ impl Endpoint {
 		}
 	}
 
+	#[allow(dead_code)]
 	pub fn get_key(&self) -> Option<&str> {
 		s3_match! {
 			@extract
@@ -780,6 +777,73 @@ impl Endpoint {
 			]
 		}
 	}
+
+	pub fn authorization_type(&self) -> Authorization<'_> {
+		let bucket = if let Some(bucket) = self.get_bucket() {
+			bucket
+		} else {
+			return Authorization::None;
+		};
+		let readonly = s3_match! {
+			@extract
+			self,
+			bucket,
+			[
+				GetBucketAccelerateConfiguration,
+				GetBucketAcl,
+				GetBucketAnalyticsConfiguration,
+				GetBucketCors,
+				GetBucketEncryption,
+				GetBucketIntelligentTieringConfiguration,
+				GetBucketInventoryConfiguration,
+				GetBucketLifecycleConfiguration,
+				GetBucketLocation,
+				GetBucketLogging,
+				GetBucketMetricsConfiguration,
+				GetBucketNotificationConfiguration,
+				GetBucketOwnershipControls,
+				GetBucketPolicy,
+				GetBucketPolicyStatus,
+				GetBucketReplication,
+				GetBucketRequestPayment,
+				GetBucketTagging,
+				GetBucketVersioning,
+				GetBucketWebsite,
+				GetObject,
+				GetObjectAcl,
+				GetObjectLegalHold,
+				GetObjectLockConfiguration,
+				GetObjectRetention,
+				GetObjectTagging,
+				GetObjectTorrent,
+				GetPublicAccessBlock,
+				HeadBucket,
+				HeadObject,
+				ListBucketAnalyticsConfigurations,
+				ListBucketIntelligentTieringConfigurations,
+				ListBucketInventoryConfigurations,
+				ListBucketMetricsConfigurations,
+				ListMultipartUploads,
+				ListObjects,
+				ListObjectsV2,
+				ListObjectVersions,
+				ListParts,
+				SelectObjectContent,
+			]
+		}
+		.is_some();
+		if readonly {
+			Authorization::Read(bucket)
+		} else {
+			Authorization::Write(bucket)
+		}
+	}
+}
+
+pub enum Authorization<'a> {
+	None,
+	Read(&'a str),
+	Write(&'a str),
 }
 
 macro_rules! generateQueryParameters {
@@ -797,19 +861,18 @@ macro_rules! generateQueryParameters {
 		fn from_query(query: &'a str) -> Result<Self, Error> {
 			let mut res: Self = Default::default();
 			for (k, v) in url::form_urlencoded::parse(query.as_bytes()) {
-				if v.as_ref().is_empty() {
-					if res.keyword.replace(k).is_some() {
-						return Err(Error::BadRequest("Multiple keywords".to_string()));
-					}
-					continue;
-				}
 				let repeated = match k.as_ref() {
 					$(
-						$rest => res.$name.replace(v).is_none(),
+						$rest => res.$name.replace(v).is_some(),
 					)*
 					_ => {
 						if k.starts_with("response-") {
-							true
+							false
+						} else if v.as_ref().is_empty() {
+							if res.keyword.replace(k).is_some() {
+								return Err(Error::BadRequest("Multiple keywords".to_owned()));
+							}
+							continue;
 						} else {
 							return Err(Error::BadRequest(format!(
 								"Unknown query parameter '{}'",
@@ -873,7 +936,7 @@ mod keywords {
 	pub const ACL: &str = "acl";
 	pub const ANALYTICS: &str = "analytics";
 	pub const CORS: &str = "cors";
-	pub const DELETE: &str = "delete";
+	pub const DELETE: &str = "delete";
 	pub const ENCRYPTION: &str = "encryption";
 	pub const INTELLIGENT_TIERING: &str = "intelligent-tiering";
 	pub const INVENTORY: &str = "inventory";
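[Aside, illustrative only -- not part of the commit. The reordering in
`from_query` above changes the per-parameter decision sequence; a plain
restatement of the new order, not the macro itself:

    enum ParamAction {
        Store,   // known name: keep the value, error only if it repeats
        Ignore,  // response-* overrides are passed through silently
        Keyword, // empty value: ?acl, ?uploads, ?delete, ...
        Reject,  // anything else: "Unknown query parameter"
    }

    fn classify(known: bool, k: &str, v: &str) -> ParamAction {
        if known {
            ParamAction::Store
        } else if k.starts_with("response-") {
            ParamAction::Ignore
        } else if v.is_empty() {
            ParamAction::Keyword
        } else {
            ParamAction::Reject
        }
    }

A known name with an empty value (e.g. `?marker=`) is therefore now stored as
a parameter instead of being mistaken for the endpoint keyword.]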