add proper request router for s3 api #163

Merged
lx merged 5 commits from trinity-1686a/garage:s3-router into main 2021-12-06 14:17:47 +00:00
4 changed files with 282 additions and 267 deletions
Showing only changes of commit ec57091d44

View file

@@ -1,4 +1,3 @@
-use std::collections::HashMap;
 use std::net::SocketAddr;
 use std::sync::Arc;
@@ -6,7 +5,7 @@ use futures::future::Future;
 use hyper::header;
 use hyper::server::conn::AddrStream;
 use hyper::service::{make_service_fn, service_fn};
-use hyper::{Body, Method, Request, Response, Server};
+use hyper::{Body, Request, Response, Server};
 use garage_util::error::Error as GarageError;
@@ -22,6 +21,7 @@ use crate::s3_delete::*;
 use crate::s3_get::*;
 use crate::s3_list::*;
 use crate::s3_put::*;
+use crate::s3_router::{Authorization, Endpoint};
 /// Run the S3 API server
 pub async fn run_api_server(
@@ -86,8 +86,6 @@ async fn handler(
 }
 async fn handler_inner(garage: Arc<Garage>, req: Request<Body>) -> Result<Response<Body>, Error> {
-    let path = req.uri().path().to_string();
-    let path = percent_encoding::percent_decode_str(&path).decode_utf8()?;
     let (api_key, content_sha256) = check_signature(&garage, &req).await?;
     let authority = req
@@ -105,166 +103,151 @@ async fn handler_inner(garage: Arc<Garage>, req: Request<Body>) -> Result<Response<Body>, Error> {
         .as_ref()
         .and_then(|root_domain| host_to_bucket(&host, root_domain));
-    if path == "/" && bucket.is_none() {
-        return handle_list_buckets(&api_key);
-    }
-    let (bucket, key) = parse_bucket_key(&path, bucket)?;
-    let allowed = match req.method() {
-        &Method::HEAD | &Method::GET => api_key.allow_read(bucket),
-        _ => api_key.allow_write(bucket),
+    let endpoint = Endpoint::from_request(&req, bucket.map(ToOwned::to_owned))?;
+    let allowed = match endpoint.authorization_type() {
+        Authorization::None => true,
+        Authorization::Read(bucket) => api_key.allow_read(bucket),
+        Authorization::Write(bucket) => api_key.allow_write(bucket),
     };
     if !allowed {
         return Err(Error::Forbidden(
             "Operation is not allowed for this key.".to_string(),
         ));
     }
-    let mut params = HashMap::new();
-    if let Some(query) = req.uri().query() {
-        let query_pairs = url::form_urlencoded::parse(query.as_bytes());
-        for (key, val) in query_pairs {
-            params.insert(key.to_lowercase(), val.to_string());
-        }
-    }
-    if let Some(key) = key {
-        match *req.method() {
-            Method::HEAD => {
-                // HeadObject query
-                Ok(handle_head(garage, &req, bucket, key).await?)
-            }
-            Method::GET => {
-                // GetObject query
-                Ok(handle_get(garage, &req, bucket, key).await?)
-            }
-            Method::PUT => {
-                if params.contains_key(&"partnumber".to_string())
-                    && params.contains_key(&"uploadid".to_string())
-                {
-                    // UploadPart query
-                    let part_number = params.get("partnumber").unwrap();
-                    let upload_id = params.get("uploadid").unwrap();
-                    Ok(handle_put_part(
-                        garage,
-                        req,
-                        bucket,
-                        key,
-                        part_number,
-                        upload_id,
-                        content_sha256,
-                    )
-                    .await?)
-                } else if req.headers().contains_key("x-amz-copy-source") {
-                    // CopyObject query
-                    let copy_source = req.headers().get("x-amz-copy-source").unwrap().to_str()?;
-                    let copy_source =
-                        percent_encoding::percent_decode_str(copy_source).decode_utf8()?;
-                    let (source_bucket, source_key) = parse_bucket_key(&copy_source, None)?;
-                    if !api_key.allow_read(source_bucket) {
-                        return Err(Error::Forbidden(format!(
-                            "Reading from bucket {} not allowed for this key",
-                            source_bucket
-                        )));
-                    }
-                    let source_key = source_key.ok_or_bad_request("No source key specified")?;
-                    Ok(handle_copy(garage, &req, bucket, key, source_bucket, source_key).await?)
-                } else {
-                    // PutObject query
-                    Ok(handle_put(garage, req, bucket, key, content_sha256).await?)
-                }
-            }
-            Method::DELETE => {
-                if params.contains_key(&"uploadid".to_string()) {
-                    // AbortMultipartUpload query
-                    let upload_id = params.get("uploadid").unwrap();
-                    Ok(handle_abort_multipart_upload(garage, bucket, key, upload_id).await?)
-                } else {
-                    // DeleteObject query
-                    Ok(handle_delete(garage, bucket, key).await?)
-                }
-            }
-            Method::POST => {
-                if params.contains_key(&"uploads".to_string()) {
-                    // CreateMultipartUpload call
-                    Ok(handle_create_multipart_upload(garage, &req, bucket, key).await?)
-                } else if params.contains_key(&"uploadid".to_string()) {
-                    // CompleteMultipartUpload call
-                    let upload_id = params.get("uploadid").unwrap();
-                    Ok(handle_complete_multipart_upload(
-                        garage,
-                        req,
-                        bucket,
-                        key,
-                        upload_id,
-                        content_sha256,
-                    )
-                    .await?)
-                } else {
-                    Err(Error::BadRequest(
-                        "Not a CreateMultipartUpload call, what is it?".to_string(),
-                    ))
-                }
-            }
-            _ => Err(Error::BadRequest("Invalid method".to_string())),
-        }
-    } else {
-        match *req.method() {
-            Method::PUT => {
-                // CreateBucket
-                // If we're here, the bucket already exists, so just answer ok
-                debug!(
-                    "Body: {}",
-                    std::str::from_utf8(&hyper::body::to_bytes(req.into_body()).await?)
-                        .unwrap_or("<invalid utf8>")
-                );
-                let empty_body: Body = Body::from(vec![]);
-                let response = Response::builder()
-                    .header("Location", format!("/{}", bucket))
-                    .body(empty_body)
-                    .unwrap();
-                Ok(response)
-            }
-            Method::HEAD => {
-                // HeadBucket
-                let empty_body: Body = Body::from(vec![]);
-                let response = Response::builder().body(empty_body).unwrap();
-                Ok(response)
-            }
-            Method::DELETE => {
-                // DeleteBucket query
-                Err(Error::Forbidden(
-                    "Cannot delete buckets using S3 api, please talk to Garage directly".into(),
-                ))
-            }
-            Method::GET => {
-                if params.contains_key("location") {
-                    // GetBucketLocation call
-                    Ok(handle_get_bucket_location(garage)?)
-                } else if params.contains_key("versioning") {
-                    // GetBucketVersioning
-                    Ok(handle_get_bucket_versioning()?)
-                } else {
-                    // ListObjects or ListObjectsV2 query
-                    let q = parse_list_objects_query(bucket, &params)?;
-                    Ok(handle_list(garage, &q).await?)
-                }
-            }
-            Method::POST => {
-                if params.contains_key(&"delete".to_string()) {
-                    // DeleteObjects
-                    Ok(handle_delete_objects(garage, bucket, req, content_sha256).await?)
-                } else {
-                    debug!(
-                        "Body: {}",
-                        std::str::from_utf8(&hyper::body::to_bytes(req.into_body()).await?)
-                            .unwrap_or("<invalid utf8>")
-                    );
-                    Err(Error::BadRequest("Unsupported call".to_string()))
-                }
-            }
-            _ => Err(Error::BadRequest("Invalid method".to_string())),
-        }
+    match endpoint {
+        Endpoint::ListBuckets => handle_list_buckets(&api_key),
+        Endpoint::HeadObject { bucket, key, .. } => handle_head(garage, &req, &bucket, &key).await,
+        Endpoint::GetObject { bucket, key, .. } => handle_get(garage, &req, &bucket, &key).await,
+        Endpoint::UploadPart {
+            bucket,
+            key,
+            part_number,
+            upload_id,
+        } => {
+            handle_put_part(
+                garage,
+                req,
+                &bucket,
+                &key,
+                part_number,
+                &upload_id,
+                content_sha256,
+            )
+            .await
+        }
+        Endpoint::CopyObject { bucket, key } => {
+            let copy_source = req.headers().get("x-amz-copy-source").unwrap().to_str()?;
+            let copy_source = percent_encoding::percent_decode_str(copy_source).decode_utf8()?;
+            let (source_bucket, source_key) = parse_bucket_key(&copy_source, None)?;
+            if !api_key.allow_read(source_bucket) {
+                return Err(Error::Forbidden(format!(
+                    "Reading from bucket {} not allowed for this key",
+                    source_bucket
+                )));
+            }
+            let source_key = source_key.ok_or_bad_request("No source key specified")?;
+            handle_copy(garage, &req, &bucket, &key, source_bucket, source_key).await
+        }
+        Endpoint::PutObject { bucket, key } => {
+            handle_put(garage, req, &bucket, &key, content_sha256).await
+        }
+        Endpoint::AbortMultipartUpload {
+            bucket,
+            key,
+            upload_id,
+        } => handle_abort_multipart_upload(garage, &bucket, &key, &upload_id).await,
+        Endpoint::DeleteObject { bucket, key, .. } => handle_delete(garage, &bucket, &key).await,
+        Endpoint::CreateMultipartUpload { bucket, key } => {
+            handle_create_multipart_upload(garage, &req, &bucket, &key).await
+        }
+        Endpoint::CompleteMultipartUpload {
+            bucket,
+            key,
+            upload_id,
+        } => {
+            handle_complete_multipart_upload(garage, req, &bucket, &key, &upload_id, content_sha256)
+                .await
+        }
+        Endpoint::CreateBucket { bucket } => {
+            debug!(
+                "Body: {}",
+                std::str::from_utf8(&hyper::body::to_bytes(req.into_body()).await?)
+                    .unwrap_or("<invalid utf8>")
+            );
+            let empty_body: Body = Body::from(vec![]);
+            let response = Response::builder()
+                .header("Location", format!("/{}", bucket))
+                .body(empty_body)
+                .unwrap();
+            Ok(response)
+        }
+        Endpoint::HeadBucket { .. } => {
+            let empty_body: Body = Body::from(vec![]);
+            let response = Response::builder().body(empty_body).unwrap();
+            Ok(response)
+        }
+        Endpoint::DeleteBucket { .. } => Err(Error::Forbidden(
+            "Cannot delete buckets using S3 api, please talk to Garage directly".into(),
+        )),
+        Endpoint::GetBucketLocation { .. } => handle_get_bucket_location(garage),
+        Endpoint::GetBucketVersioning { .. } => handle_get_bucket_versioning(),
+        Endpoint::ListObjects {
+            bucket,
+            delimiter,
+            encoding_type,
+            marker,
+            max_keys,
+            prefix,
+        } => {
+            handle_list(
+                garage,
+                &ListObjectsQuery {
+                    is_v2: false,
+                    bucket,
+                    delimiter: delimiter.map(|d| d.to_string()),
+                    max_keys: max_keys.unwrap_or(1000),
+                    prefix: prefix.unwrap_or_default(),
+                    marker,
+                    continuation_token: None,
+                    start_after: None,
+                    urlencode_resp: encoding_type.map(|e| e == "url").unwrap_or(false),
+                },
+            )
+            .await
+        }
+        Endpoint::ListObjectsV2 {
+            bucket,
+            delimiter,
+            encoding_type,
+            max_keys,
+            prefix,
+            continuation_token,
+            start_after,
+            list_type,
+            ..
+        } => {
+            handle_list(
+                garage,
+                &ListObjectsQuery {
+                    is_v2: list_type == "v2",
Review comment (marked outdated):

> At this point we should probably return 400 bad request if `list_type` is not equal to `"v2"`. It doesn't make much sense calling `handle_list` with `is_v2 = false` here because the arguments we are giving are those of ListObjectsV2: `continuation_token`, `start_after`, etc.
>
> (question: what happens on AWS S3 if we give a `list_type` value that is not "v2"?)
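A minimal sketch of the guard this comment suggests (illustrative only, not part of the commit; `check_list_type` is a hypothetical helper, and it assumes the crate's `Error::BadRequest` variant, which is rendered as an HTTP 400 response):

```rust
// Hypothetical helper sketching the reviewer's suggestion: reject any
// list-type other than "v2" instead of silently falling back to a v1 listing.
fn check_list_type(list_type: &str) -> Result<(), Error> {
    if list_type != "v2" {
        return Err(Error::BadRequest(format!(
            "Invalid list-type {}, only v2 is supported",
            list_type
        )));
    }
    Ok(())
}
```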
+                    bucket,
+                    delimiter: delimiter.map(|d| d.to_string()),
+                    max_keys: max_keys.unwrap_or(1000),
+                    prefix: prefix.unwrap_or_default(),
+                    marker: None,
+                    continuation_token,
+                    start_after,
+                    urlencode_resp: encoding_type.map(|e| e == "url").unwrap_or(false),
+                },
+            )
+            .await
+        }
+        Endpoint::DeleteObjects { bucket } => {
+            handle_delete_objects(garage, &bucket, req, content_sha256).await
+        }
+        _ => Err(Error::BadRequest("Unsupported call".to_string())),
Review comment (marked outdated):

> TODO (not necessarily in this PR): add a new error type so that we return HTTP 501
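A sketch of what that TODO could look like (hypothetical, not part of this PR): a dedicated error variant that maps to HTTP 501 Not Implemented instead of 400. `ApiError` is an illustrative stand-in for the crate's error type:

```rust
use hyper::StatusCode;

// Hypothetical error type: unimplemented endpoints answer 501 Not Implemented
// rather than 400 Bad Request.
#[derive(Debug)]
pub enum ApiError {
    BadRequest(String),
    NotImplemented(&'static str),
}

impl ApiError {
    // Status code used when turning the error into an HTTP response.
    pub fn http_status_code(&self) -> StatusCode {
        match self {
            ApiError::BadRequest(_) => StatusCode::BAD_REQUEST,
            ApiError::NotImplemented(_) => StatusCode::NOT_IMPLEMENTED,
        }
    }
}
```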
     }
 }

View file

@@ -1,4 +1,4 @@
-use std::collections::{BTreeMap, BTreeSet, HashMap};
+use std::collections::{BTreeMap, BTreeSet};
 use std::sync::Arc;
 use hyper::{Body, Response};
@@ -35,32 +35,6 @@ struct ListResultInfo {
     etag: String,
 }
-pub fn parse_list_objects_query(
-    bucket: &str,
-    params: &HashMap<String, String>,
-) -> Result<ListObjectsQuery, Error> {
-    Ok(ListObjectsQuery {
-        is_v2: params.get("list-type").map(|x| x == "2").unwrap_or(false),
-        bucket: bucket.to_string(),
-        delimiter: params.get("delimiter").filter(|x| !x.is_empty()).cloned(),
-        max_keys: params
-            .get("max-keys")
-            .map(|x| {
-                x.parse::<usize>()
-                    .ok_or_bad_request("Invalid value for max-keys")
-            })
-            .unwrap_or(Ok(1000))?,
-        prefix: params.get("prefix").cloned().unwrap_or_default(),
-        marker: params.get("marker").cloned(),
-        continuation_token: params.get("continuation-token").cloned(),
-        start_after: params.get("start-after").cloned(),
-        urlencode_resp: params
-            .get("encoding-type")
-            .map(|x| x == "url")
-            .unwrap_or(false),
-    })
-}
 pub async fn handle_list(
     garage: Arc<Garage>,
     query: &ListObjectsQuery,

View file

@@ -354,15 +354,10 @@ pub async fn handle_put_part(
     req: Request<Body>,
     bucket: &str,
     key: &str,
-    part_number_str: &str,
+    part_number: u64,
     upload_id: &str,
     content_sha256: Option<Hash>,
 ) -> Result<Response<Body>, Error> {
-    // Check parameters
-    let part_number = part_number_str
-        .parse::<u64>()
-        .ok_or_bad_request("Invalid part number")?;
     let version_uuid = decode_upload_id(upload_id)?;
     let content_md5 = match req.headers().get("content-md5") {

View file

@@ -3,7 +3,7 @@ use crate::error::{Error, OkOrBadRequest};
 use std::borrow::Cow;
 use hyper::header::HeaderValue;
-use hyper::{HeaderMap, Method, Uri};
+use hyper::{HeaderMap, Method, Request};
 macro_rules! s3_match {
     (@extract $enum:expr , $param:ident, [ $($endpoint:ident,)* ]) => {{
@@ -16,8 +16,8 @@ macro_rules! s3_match {
         }
     }};
     (@gen_parser ($keyword:expr, $key:expr, $bucket:expr, $query:expr, $header:expr),
-        key: [$($kw_k:ident $(if $required_k:ident)? $(header $header_k:expr)? => $api_k:ident $(($($conv_k:ident :: $what_k:ident),*))?,)*],
-        no_key: [$($kw_nk:ident $(if $required_nk:ident)? $(if_header $header_nk:expr)? => $api_nk:ident $(($($conv_nk:ident :: $what_nk:ident),*))?,)*]) => {{
+        key: [$($kw_k:ident $(if $required_k:ident)? $(header $header_k:expr)? => $api_k:ident $(($($conv_k:ident :: $param_k:ident),*))?,)*],
+        no_key: [$($kw_nk:ident $(if $required_nk:ident)? $(if_header $header_nk:expr)? => $api_nk:ident $(($($conv_nk:ident :: $param_nk:ident),*))?,)*]) => {{
         use Endpoint::*;
         use keywords::*;
         match ($keyword, !$key.is_empty()){
@@ -26,7 +26,7 @@ macro_rules! s3_match {
                 bucket: $bucket,
                 key: $key,
                 $($(
-                    $what_k: s3_match!(@@parse_param $query, $conv_k, $what_k),
+                    $param_k: s3_match!(@@parse_param $query, $conv_k, $param_k),
                 )*)?
             }),
             )*
@@ -34,11 +34,11 @@ macro_rules! s3_match {
             ($kw_nk, false) $(if $query.$required_nk.is_some())? $(if $header.contains($header_nk))? => Ok($api_nk {
                 bucket: $bucket,
                 $($(
-                    $what_nk: s3_match!(@@parse_param $query, $conv_nk, $what_nk),
+                    $param_nk: s3_match!(@@parse_param $query, $conv_nk, $param_nk),
                 )*)?
             }),
             )*
-            _ => Err(Error::BadRequest("Invalid endpoint".to_string())),
+            _ => Err(Error::BadRequest("Invalid endpoint".to_owned())),
         }
     }};
@@ -46,24 +46,24 @@ macro_rules! s3_match {
         $query.$param.take().map(|param| param.into_owned())
     }};
     (@@parse_param $query:expr, query, $param:ident) => {{
-        $query.$param.take().ok_or_bad_request("Invalid endpoint")?.into_owned()
+        $query.$param.take().ok_or_bad_request("Missing argument for endpoint")?.into_owned()
     }};
     (@@parse_param $query:expr, opt_parse, $param:ident) => {{
         $query.$param
             .take()
             .map(|param| param.parse())
             .transpose()
-            .map_err(|_| Error::BadRequest("Failed to parse query parameter".to_string()))?
+            .map_err(|_| Error::BadRequest("Failed to parse query parameter".to_owned()))?
     }};
     (@@parse_param $query:expr, parse, $param:ident) => {{
-        $query.$param.take().ok_or_bad_request("Invalid endpoint")?
+        $query.$param.take().ok_or_bad_request("Missing argument for endpoint")?
             .parse()
-            .map_err(|_| Error::BadRequest("Failed to parse query parameter".to_string()))?
+            .map_err(|_| Error::BadRequest("Failed to parse query parameter".to_owned()))?
     }};
 }
 /// List of all S3 API endpoints.
-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, PartialEq, Eq)]
 pub enum Endpoint {
     AbortMultipartUpload {
         bucket: String,
@@ -214,7 +214,7 @@ pub enum Endpoint {
     GetObject {
         bucket: String,
         key: String,
-        part_number: Option<u16>,
+        part_number: Option<u64>,
         version_id: Option<String>,
     },
     GetObjectAcl {
@@ -253,7 +253,7 @@ pub enum Endpoint {
     HeadObject {
         bucket: String,
         key: String,
-        part_number: Option<u16>,
+        part_number: Option<u64>,
         version_id: Option<String>,
     },
     ListBucketAnalyticsConfigurations {
@@ -278,7 +278,7 @@ pub enum Endpoint {
         delimiter: Option<char>,
         encoding_type: Option<String>,
         key_marker: Option<String>,
-        max_uploads: Option<u16>,
+        max_uploads: Option<u64>,
         prefix: Option<String>,
         upload_id_marker: Option<String>,
     },
@@ -287,7 +287,7 @@ pub enum Endpoint {
         delimiter: Option<char>,
         encoding_type: Option<String>,
         marker: Option<String>,
-        max_keys: Option<u16>,
+        max_keys: Option<usize>,
         prefix: Option<String>,
     },
     ListObjectsV2 {
@@ -297,7 +297,7 @@ pub enum Endpoint {
         delimiter: Option<char>,
         encoding_type: Option<String>,
         fetch_owner: Option<bool>,
-        max_keys: Option<u16>,
+        max_keys: Option<usize>,
         prefix: Option<String>,
         start_after: Option<String>,
     },
@@ -306,15 +306,15 @@ pub enum Endpoint {
         delimiter: Option<char>,
         encoding_type: Option<String>,
         key_marker: Option<String>,
-        max_keys: Option<u16>,
+        max_keys: Option<u64>,
         prefix: Option<String>,
         version_id_marker: Option<String>,
     },
     ListParts {
         bucket: String,
         key: String,
-        max_parts: Option<u16>,
-        part_number_marker: Option<u16>,
+        max_parts: Option<u64>,
+        part_number_marker: Option<u64>,
         upload_id: String,
     },
     PutBucketAccelerateConfiguration {
@@ -418,56 +418,52 @@ pub enum Endpoint {
     UploadPart {
         bucket: String,
         key: String,
-        part_number: u16,
+        part_number: u64,
         upload_id: String,
     },
     UploadPartCopy {
         bucket: String,
         key: String,
-        part_number: u16,
+        part_number: u64,
         upload_id: String,
     },
 }
 impl Endpoint {
-    pub fn from_request(
-        bucket: Option<String>,
-        uri: &Uri,
-        method: &Method,
-        headers: &HeaderMap<HeaderValue>,
-    ) -> Result<Self, Error> {
+    pub fn from_request<T>(req: &Request<T>, bucket: Option<String>) -> Result<Self, Error> {
+        let uri = req.uri();
         let path = uri.path().trim_start_matches('/');
         let query = uri.query();
         if bucket.is_none() && path.is_empty() {
             if query.is_none() {
                 return Ok(Self::ListBuckets);
             } else {
-                return Err(Error::BadRequest("Invalid endpoint".to_string()));
+                return Err(Error::BadRequest("Invalid ListBuckets query".to_owned()));
             }
         }
         let (bucket, key) = if let Some(bucket) = bucket {
-            (bucket, path.to_string())
+            (bucket, path.to_owned())
         } else {
             path.split_once('/')
-                .map(|(b, p)| (b.to_string(), p.trim_start_matches('/').to_string()))
-                .unwrap_or((path.to_string(), String::new()))
+                .map(|(b, p)| (b.to_owned(), p.trim_start_matches('/').to_owned()))
+                .unwrap_or((path.to_owned(), String::new()))
         };
         let mut query = QueryParameters::from_query(query.unwrap_or_default())?;
-        let res = match method {
-            &Method::GET => Self::from_get(bucket, key, &mut query)?,
-            &Method::HEAD => Self::from_head(bucket, key, &mut query)?,
-            &Method::POST => Self::from_post(bucket, key, &mut query)?,
-            &Method::PUT => Self::from_put(bucket, key, &mut query, headers)?,
-            &Method::DELETE => Self::from_delete(bucket, key, &mut query)?,
-            _ => return Err(Error::BadRequest("Invalid endpoint".to_string())),
+        let res = match *req.method() {
+            Method::GET => Self::from_get(bucket, key, &mut query)?,
+            Method::HEAD => Self::from_head(bucket, key, &mut query)?,
+            Method::POST => Self::from_post(bucket, key, &mut query)?,
+            Method::PUT => Self::from_put(bucket, key, &mut query, req.headers())?,
+            Method::DELETE => Self::from_delete(bucket, key, &mut query)?,
+            _ => return Err(Error::BadRequest("Unknown method".to_owned())),
         };
         if let Some(message) = query.nonempty_message() {
             // maybe this should just be a warn! ?
-            Err(Error::BadRequest(message.to_string()))
+            Err(Error::BadRequest(message.to_owned()))
         } else {
             Ok(res)
         }
@@ -542,11 +538,11 @@ impl Endpoint {
             @gen_parser
             (query.keyword.take().unwrap_or_default().as_ref(), key, bucket, query, None),
             key: [
                 EMPTY => HeadObject(opt_parse::part_number, query_opt::version_id),
             ],
             no_key: [
                 EMPTY => HeadBucket,
             ]
         }
     }
@@ -565,8 +561,8 @@ impl Endpoint {
                 UPLOADS => CreateMultipartUpload,
             ],
             no_key: [
                 DELETE => DeleteObjects,
             ]
         }
     }
@@ -574,45 +570,45 @@ impl Endpoint {
         bucket: String,
         key: String,
         query: &mut QueryParameters<'_>,
         headers: &HeaderMap<HeaderValue>,
     ) -> Result<Self, Error> {
         s3_match! {
             @gen_parser
             (query.keyword.take().unwrap_or_default().as_ref(), key, bucket, query, headers),
             key: [
                 EMPTY if part_number header "x-amz-copy-source" => UploadPartCopy (parse::part_number, query::upload_id),
                 EMPTY header "x-amz-copy-source" => CopyObject,
                 EMPTY if part_number => UploadPart (parse::part_number, query::upload_id),
                 EMPTY => PutObject,
                 ACL => PutObjectAcl (query_opt::version_id),
                 LEGAL_HOLD => PutObjectLegalHold (query_opt::version_id),
                 RETENTION => PutObjectRetention (query_opt::version_id),
                 TAGGING => PutObjectTagging (query_opt::version_id),
             ],
             no_key: [
                 EMPTY => CreateBucket,
                 ACCELERATE => PutBucketAccelerateConfiguration,
                 ACL => PutBucketAcl,
                 ANALYTICS => PutBucketAnalyticsConfiguration (query::id),
                 CORS => PutBucketCors,
                 ENCRYPTION => PutBucketEncryption,
                 INTELLIGENT_TIERING => PutBucketIntelligentTieringConfiguration(query::id),
                 INVENTORY => PutBucketInventoryConfiguration(query::id),
                 LIFECYCLE => PutBucketLifecycleConfiguration,
                 LOGGING => PutBucketLogging,
                 METRICS => PutBucketMetricsConfiguration(query::id),
                 NOTIFICATION => PutBucketNotificationConfiguration,
                 OBJECT_LOCK => PutObjectLockConfiguration,
                 OWNERSHIP_CONTROLS => PutBucketOwnershipControls,
                 POLICY => PutBucketPolicy,
                 PUBLIC_ACCESS_BLOCK => PutPublicAccessBlock,
                 REPLICATION => PutBucketReplication,
                 REQUEST_PAYMENT => PutBucketRequestPayment,
                 TAGGING => PutBucketTagging,
                 VERSIONING => PutBucketVersioning,
                 WEBSITE => PutBucketWebsite,
             ]
         }
     }
@@ -748,6 +744,7 @@ impl Endpoint {
         }
     }
+    #[allow(dead_code)]
     pub fn get_key(&self) -> Option<&str> {
         s3_match! {
             @extract
@@ -780,6 +777,73 @@ impl Endpoint {
             ]
         }
     }
+    pub fn authorization_type(&self) -> Authorization<'_> {
+        let bucket = if let Some(bucket) = self.get_bucket() {
+            bucket
+        } else {
+            return Authorization::None;
+        };
+        let readonly = s3_match! {
+            @extract
+            self,
+            bucket,
+            [
+                GetBucketAccelerateConfiguration,
+                GetBucketAcl,
+                GetBucketAnalyticsConfiguration,
+                GetBucketCors,
+                GetBucketEncryption,
+                GetBucketIntelligentTieringConfiguration,
+                GetBucketInventoryConfiguration,
+                GetBucketLifecycleConfiguration,
+                GetBucketLocation,
+                GetBucketLogging,
+                GetBucketMetricsConfiguration,
+                GetBucketNotificationConfiguration,
+                GetBucketOwnershipControls,
+                GetBucketPolicy,
+                GetBucketPolicyStatus,
+                GetBucketReplication,
+                GetBucketRequestPayment,
+                GetBucketTagging,
+                GetBucketVersioning,
+                GetBucketWebsite,
+                GetObject,
+                GetObjectAcl,
+                GetObjectLegalHold,
+                GetObjectLockConfiguration,
+                GetObjectRetention,
+                GetObjectTagging,
+                GetObjectTorrent,
+                GetPublicAccessBlock,
+                HeadBucket,
+                HeadObject,
+                ListBucketAnalyticsConfigurations,
+                ListBucketIntelligentTieringConfigurations,
+                ListBucketInventoryConfigurations,
+                ListBucketMetricsConfigurations,
+                ListMultipartUploads,
+                ListObjects,
+                ListObjectsV2,
+                ListObjectVersions,
+                ListParts,
+                SelectObjectContent,
+            ]
+        }
+        .is_some();
+        if readonly {
+            Authorization::Read(bucket)
+        } else {
+            Authorization::Write(bucket)
+        }
+    }
 }
+pub enum Authorization<'a> {
+    None,
+    Read(&'a str),
+    Write(&'a str),
+}
 macro_rules! generateQueryParameters {
@@ -797,19 +861,18 @@ macro_rules! generateQueryParameters {
         fn from_query(query: &'a str) -> Result<Self, Error> {
             let mut res: Self = Default::default();
             for (k, v) in url::form_urlencoded::parse(query.as_bytes()) {
-                if v.as_ref().is_empty() {
-                    if res.keyword.replace(k).is_some() {
-                        return Err(Error::BadRequest("Multiple keywords".to_string()));
-                    }
-                    continue;
-                }
                 let repeated = match k.as_ref() {
                     $(
-                        $rest => res.$name.replace(v).is_none(),
+                        $rest => res.$name.replace(v).is_some(),
                     )*
                     _ => {
                         if k.starts_with("response-") {
-                            true
+                            false
+                        } else if v.as_ref().is_empty() {
+                            if res.keyword.replace(k).is_some() {
+                                return Err(Error::BadRequest("Multiple keywords".to_owned()));
+                            }
+                            continue;
                         } else {
                             return Err(Error::BadRequest(format!(
                                 "Unknown query parameter '{}'",
@@ -873,7 +936,7 @@ mod keywords {
     pub const ACL: &str = "acl";
     pub const ANALYTICS: &str = "analytics";
     pub const CORS: &str = "cors";
     pub const DELETE: &str = "delete";
     pub const ENCRYPTION: &str = "encryption";
     pub const INTELLIGENT_TIERING: &str = "intelligent-tiering";
     pub const INVENTORY: &str = "inventory";