forked from Deuxfleurs/garage
fix clippy warnings on api
parent 4a1e079e8f
commit 84856e84e5
8 changed files with 85 additions and 81 deletions
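
The diff below applies a handful of recurring clippy fixes across the API crate (e.g. clippy::useless_format, clippy::match_ref_pats, clippy::filter_next, clippy::len_zero, clippy::or_fun_call, clippy::match_like_matches_macro, plus an explicit allow for clippy::upper_case_acronyms). As a quick orientation, here is an illustrative, self-contained sketch of the before/after shapes, using hypothetical helper functions rather than code from this commit:

// Illustrative sketch only: hypothetical helpers, not code from this commit.
// Each body shows the accepted fix; the flagged form is kept as a comment.

fn useless_format_fix() -> Result<(), String> {
    // was: Err(format!("No bucket specified"))          (clippy::useless_format)
    Err("No bucket specified".to_string())
}

fn filter_next_fix(versions: &[u32]) -> Option<&u32> {
    // was: versions.iter().filter(|v| **v > 0).next()   (clippy::filter_next)
    versions.iter().find(|v| **v > 0)
}

fn len_zero_fix(bucket: &str) -> bool {
    // was: bucket.len() == 0                            (clippy::len_zero)
    bucket.is_empty()
}

fn or_fun_call_fix(prefix: Option<String>, default: &str) -> String {
    // was: prefix.unwrap_or(default.to_string())        (clippy::or_fun_call)
    prefix.unwrap_or_else(|| default.to_string())
}

fn main() {
    assert!(useless_format_fix().is_err());
    assert_eq!(filter_next_fix(&[0, 1, 2]), Some(&1));
    assert!(len_zero_fix(""));
    assert_eq!(or_fun_call_fix(None, "photos/"), "photos/");
}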
@@ -92,9 +92,9 @@ async fn handler_inner(garage: Arc<Garage>, req: Request<Body>) -> Result<Respon
 _ => api_key.allow_write(&bucket),
 };
 if !allowed {
-return Err(Error::Forbidden(format!(
-"Operation is not allowed for this key."
-)));
+return Err(Error::Forbidden(
+"Operation is not allowed for this key.".to_string(),
+));
 }

 let mut params = HashMap::new();
@@ -106,16 +106,16 @@ async fn handler_inner(garage: Arc<Garage>, req: Request<Body>) -> Result<Respon
 }

 if let Some(key) = key {
-match req.method() {
-&Method::HEAD => {
+match *req.method() {
+Method::HEAD => {
 // HeadObject query
 Ok(handle_head(garage, &req, &bucket, &key).await?)
 }
-&Method::GET => {
+Method::GET => {
 // GetObject query
 Ok(handle_get(garage, &req, &bucket, &key).await?)
 }
-&Method::PUT => {
+Method::PUT => {
 if params.contains_key(&"partnumber".to_string())
 && params.contains_key(&"uploadid".to_string())
 {
@@ -154,7 +154,7 @@ async fn handler_inner(garage: Arc<Garage>, req: Request<Body>) -> Result<Respon
 Ok(handle_put(garage, req, &bucket, &key, content_sha256).await?)
 }
 }
-&Method::DELETE => {
+Method::DELETE => {
 if params.contains_key(&"uploadid".to_string()) {
 // AbortMultipartUpload query
 let upload_id = params.get("uploadid").unwrap();
@@ -164,7 +164,7 @@ async fn handler_inner(garage: Arc<Garage>, req: Request<Body>) -> Result<Respon
 Ok(handle_delete(garage, &bucket, &key).await?)
 }
 }
-&Method::POST => {
+Method::POST => {
 if params.contains_key(&"uploads".to_string()) {
 // CreateMultipartUpload call
 Ok(handle_create_multipart_upload(garage, &req, &bucket, &key).await?)
@@ -181,16 +181,16 @@ async fn handler_inner(garage: Arc<Garage>, req: Request<Body>) -> Result<Respon
 )
 .await?)
 } else {
-Err(Error::BadRequest(format!(
-"Not a CreateMultipartUpload call, what is it?"
-)))
+Err(Error::BadRequest(
+"Not a CreateMultipartUpload call, what is it?".to_string(),
+))
 }
 }
-_ => Err(Error::BadRequest(format!("Invalid method"))),
+_ => Err(Error::BadRequest("Invalid method".to_string())),
 }
 } else {
-match req.method() {
-&Method::PUT => {
+match *req.method() {
+Method::PUT => {
 // CreateBucket
 // If we're here, the bucket already exists, so just answer ok
 debug!(
@@ -205,19 +205,19 @@ async fn handler_inner(garage: Arc<Garage>, req: Request<Body>) -> Result<Respon
 .unwrap();
 Ok(response)
 }
-&Method::HEAD => {
+Method::HEAD => {
 // HeadBucket
 let empty_body: Body = Body::from(vec![]);
 let response = Response::builder().body(empty_body).unwrap();
 Ok(response)
 }
-&Method::DELETE => {
+Method::DELETE => {
 // DeleteBucket query
 Err(Error::Forbidden(
 "Cannot delete buckets using S3 api, please talk to Garage directly".into(),
 ))
 }
-&Method::GET => {
+Method::GET => {
 if params.contains_key("location") {
 // GetBucketLocation call
 Ok(handle_get_bucket_location(garage)?)
@@ -227,7 +227,7 @@ async fn handler_inner(garage: Arc<Garage>, req: Request<Body>) -> Result<Respon
 Ok(handle_list(garage, &q).await?)
 }
 }
-&Method::POST => {
+Method::POST => {
 if params.contains_key(&"delete".to_string()) {
 // DeleteObjects
 Ok(handle_delete_objects(garage, bucket, req, content_sha256).await?)
@@ -237,10 +237,10 @@ async fn handler_inner(garage: Arc<Garage>, req: Request<Body>) -> Result<Respon
 std::str::from_utf8(&hyper::body::to_bytes(req.into_body()).await?)
 .unwrap_or("<invalid utf8>")
 );
-Err(Error::BadRequest(format!("Unsupported call")))
+Err(Error::BadRequest("Unsupported call".to_string()))
 }
 }
-_ => Err(Error::BadRequest(format!("Invalid method"))),
+_ => Err(Error::BadRequest("Invalid method".to_string())),
 }
 }
 }
@@ -255,7 +255,7 @@ fn parse_bucket_key(path: &str) -> Result<(&str, Option<&str>), Error> {
 let (bucket, key) = match path.find('/') {
 Some(i) => {
 let key = &path[i + 1..];
-if key.len() > 0 {
+if !key.is_empty() {
 (&path[..i], Some(key))
 } else {
 (&path[..i], None)
@@ -263,8 +263,8 @@ fn parse_bucket_key(path: &str) -> Result<(&str, Option<&str>), Error> {
 }
 None => (path, None),
 };
-if bucket.len() == 0 {
-return Err(Error::BadRequest(format!("No bucket specified")));
+if bucket.is_empty() {
+return Err(Error::BadRequest("No bucket specified".to_string()));
 }
 Ok((bucket, key))
 }
@@ -8,6 +8,7 @@ use garage_util::error::Error as GarageError;
 use crate::encoding::*;

 /// Errors of this crate
+#[allow(clippy::upper_case_acronyms)]
 #[derive(Debug, Error)]
 pub enum Error {
 // Category: internal error
@@ -140,7 +141,7 @@ impl<T> OkOrBadRequest for Option<T> {
 fn ok_or_bad_request(self, reason: &'static str) -> Result<T, Error> {
 match self {
 Some(x) => Ok(x),
-None => Err(Error::BadRequest(format!("{}", reason))),
+None => Err(Error::BadRequest(reason.to_string())),
 }
 }
 }
@@ -172,10 +173,9 @@ impl<T> OkOrInternalError for Option<T> {
 fn ok_or_internal_error(self, reason: &'static str) -> Result<T, Error> {
 match self {
 Some(x) => Ok(x),
-None => Err(Error::InternalError(GarageError::Message(format!(
-"{}",
-reason
-)))),
+None => Err(Error::InternalError(GarageError::Message(
+reason.to_string(),
+))),
 }
 }
 }
@@ -33,8 +33,7 @@ pub async fn handle_copy(
 .versions()
 .iter()
 .rev()
-.filter(|v| v.is_complete())
-.next()
+.find(|v| v.is_complete())
 .ok_or(Error::NotFound)?;

 let source_last_state = match &source_last_v.state {
@@ -24,10 +24,12 @@ async fn handle_delete_internal(
 .await?
 .ok_or(Error::NotFound)?; // No need to delete

-let interesting_versions = object.versions().iter().filter(|v| match v.state {
-ObjectVersionState::Aborted => false,
-ObjectVersionState::Complete(ObjectVersionData::DeleteMarker) => false,
-_ => true,
+let interesting_versions = object.versions().iter().filter(|v| {
+!matches!(
+v.state,
+ObjectVersionState::Aborted
+| ObjectVersionState::Complete(ObjectVersionData::DeleteMarker)
+)
 });

 let mut version_to_delete = None;
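
The rewrite above follows clippy's match_like_matches_macro suggestion: a match whose arms only produce true or false can be collapsed into a (here negated) matches! expression. A minimal standalone sketch of the same shape, using an illustrative enum rather than Garage's ObjectVersionState:

enum State {
    Aborted,
    DeleteMarker,
    Ready,
}

// old shape: exhaustive match returning booleans
fn keep_old(s: &State) -> bool {
    match s {
        State::Aborted => false,
        State::DeleteMarker => false,
        _ => true,
    }
}

// new shape: negated matches! over the uninteresting variants
fn keep_new(s: &State) -> bool {
    !matches!(s, State::Aborted | State::DeleteMarker)
}

fn main() {
    assert_eq!(keep_old(&State::Ready), keep_new(&State::Ready));
    assert_eq!(keep_old(&State::Aborted), keep_new(&State::Aborted));
    assert!(!keep_new(&State::DeleteMarker));
}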
@@ -28,7 +28,7 @@ fn object_headers(
 version_meta.headers.content_type.to_string(),
 )
 .header("Last-Modified", date_str)
-.header("Accept-Ranges", format!("bytes"));
+.header("Accept-Ranges", "bytes".to_string());

 if !version_meta.etag.is_empty() {
 resp = resp.header("ETag", format!("\"{}\"", version_meta.etag));
@@ -97,8 +97,7 @@ pub async fn handle_head(
 .versions()
 .iter()
 .rev()
-.filter(|v| v.is_data())
-.next()
+.find(|v| v.is_data())
 .ok_or(Error::NotFound)?;

 let version_meta = match &version.state {
@@ -137,8 +136,7 @@ pub async fn handle_get(
 .versions()
 .iter()
 .rev()
-.filter(|v| v.is_complete())
-.next()
+.find(|v| v.is_complete())
 .ok_or(Error::NotFound)?;

 let last_v_data = match &last_v.state {
@@ -160,7 +158,9 @@ pub async fn handle_get(
 let range_str = range.to_str()?;
 let mut ranges = http_range::HttpRange::parse(range_str, last_v_meta.size)?;
 if ranges.len() > 1 {
-return Err(Error::BadRequest(format!("Multiple ranges not supported")));
+return Err(Error::BadRequest(
+"Multiple ranges not supported".to_string(),
+));
 } else {
 ranges.pop()
 }
@@ -236,7 +236,7 @@ async fn handle_get_range(
 end: u64,
 ) -> Result<Response<Body>, Error> {
 if end > version_meta.size {
-return Err(Error::BadRequest(format!("Range not included in file")));
+return Err(Error::BadRequest("Range not included in file".to_string()));
 }

 let resp_builder = object_headers(version, version_meta)
@@ -282,7 +282,7 @@ async fn handle_get_range(
 }
 // Keep only blocks that have an intersection with the requested range
 if true_offset < end && true_offset + b.size > begin {
-blocks.push((b.clone(), true_offset));
+blocks.push((*b, true_offset));
 }
 true_offset += b.size;
 }
@@ -303,9 +303,9 @@ async fn handle_get_range(
 } else {
 end - true_offset
 };
-Result::<Bytes, Error>::Ok(Bytes::from(
+Result::<Bytes, Error>::Ok(
 data.slice(start_in_block as usize..end_in_block as usize),
-))
+)
 }
 })
 .buffered(2);
@@ -50,7 +50,7 @@ pub fn parse_list_objects_query(
 .ok_or_bad_request("Invalid value for max-keys")
 })
 .unwrap_or(Ok(1000))?,
-prefix: params.get("prefix").cloned().unwrap_or(String::new()),
+prefix: params.get("prefix").cloned().unwrap_or_default(),
 marker: params.get("marker").cloned(),
 continuation_token: params.get("continuation-token").cloned(),
 start_after: params.get("start-after").cloned(),
@@ -72,10 +72,13 @@ pub async fn handle_list(
 if let Some(ct) = &query.continuation_token {
 String::from_utf8(base64::decode(ct.as_bytes())?)?
 } else {
-query.start_after.clone().unwrap_or(query.prefix.clone())
+query
+.start_after
+.clone()
+.unwrap_or_else(|| query.prefix.clone())
 }
 } else {
-query.marker.clone().unwrap_or(query.prefix.clone())
+query.marker.clone().unwrap_or_else(|| query.prefix.clone())
 };

 debug!(
@@ -155,7 +158,7 @@ pub async fn handle_list(
 truncated = None;
 break 'query_loop;
 }
-if objects.len() > 0 {
+if !objects.is_empty() {
 next_chunk_start = objects[objects.len() - 1].key.clone();
 }
 }
@@ -46,7 +46,7 @@ pub async fn handle_put(
 let body = req.into_body();

 let mut chunker = BodyChunker::new(body, garage.config.block_size);
-let first_block = chunker.next().await?.unwrap_or(vec![]);
+let first_block = chunker.next().await?.unwrap_or_default();

 // If body is small enough, store it directly in the object table
 // as "inline data". We can then return immediately.
@@ -160,16 +160,18 @@ fn ensure_checksum_matches(
 ) -> Result<(), Error> {
 if let Some(expected_sha256) = content_sha256 {
 if expected_sha256 != data_sha256sum {
-return Err(Error::BadRequest(format!(
-"Unable to validate x-amz-content-sha256"
-)));
+return Err(Error::BadRequest(
+"Unable to validate x-amz-content-sha256".to_string(),
+));
 } else {
 trace!("Successfully validated x-amz-content-sha256");
 }
 }
 if let Some(expected_md5) = content_md5 {
 if expected_md5.trim_matches('"') != base64::encode(data_md5sum) {
-return Err(Error::BadRequest(format!("Unable to validate content-md5")));
+return Err(Error::BadRequest(
+"Unable to validate content-md5".to_string(),
+));
 } else {
 trace!("Successfully validated content-md5");
 }
@@ -291,7 +293,7 @@ impl BodyChunker {
 self.read_all = true;
 }
 }
-if self.buf.len() == 0 {
+if self.buf.is_empty() {
 Ok(None)
 } else if self.buf.len() <= self.block_size {
 let block = self.buf.drain(..).collect::<Vec<u8>>();
@@ -387,8 +389,8 @@ pub async fn handle_put_part(
 futures::try_join!(garage.object_table.get(&bucket, &key), chunker.next(),)?;

 // Check object is valid and multipart block can be accepted
-let first_block = first_block.ok_or(Error::BadRequest(format!("Empty body")))?;
-let object = object.ok_or(Error::BadRequest(format!("Object not found")))?;
+let first_block = first_block.ok_or_else(|| Error::BadRequest("Empty body".to_string()))?;
+let object = object.ok_or_else(|| Error::BadRequest("Object not found".to_string()))?;

 if !object
 .versions()
@@ -462,21 +464,21 @@ pub async fn handle_complete_multipart_upload(
 garage.version_table.get(&version_uuid, &EmptyKey),
 )?;

-let object = object.ok_or(Error::BadRequest(format!("Object not found")))?;
+let object = object.ok_or_else(|| Error::BadRequest("Object not found".to_string()))?;
 let mut object_version = object
 .versions()
 .iter()
 .find(|v| v.uuid == version_uuid && v.is_uploading())
 .cloned()
-.ok_or(Error::BadRequest(format!("Version not found")))?;
+.ok_or_else(|| Error::BadRequest("Version not found".to_string()))?;

-let version = version.ok_or(Error::BadRequest(format!("Version not found")))?;
-if version.blocks.len() == 0 {
-return Err(Error::BadRequest(format!("No data was uploaded")));
+let version = version.ok_or_else(|| Error::BadRequest("Version not found".to_string()))?;
+if version.blocks.is_empty() {
+return Err(Error::BadRequest("No data was uploaded".to_string()));
 }

 let headers = match object_version.state {
-ObjectVersionState::Uploading(headers) => headers.clone(),
+ObjectVersionState::Uploading(headers) => headers,
 _ => unreachable!(),
 };

@@ -493,7 +495,9 @@ pub async fn handle_complete_multipart_upload(
 .map(|x| (&x.part_number, &x.etag))
 .eq(parts);
 if !same_parts {
-return Err(Error::BadRequest(format!("We don't have the same parts")));
+return Err(Error::BadRequest(
+"We don't have the same parts".to_string(),
+));
 }

 // Calculate etag of final object
@@ -557,7 +561,7 @@ pub async fn handle_abort_multipart_upload(
 .object_table
 .get(&bucket.to_string(), &key.to_string())
 .await?;
-let object = object.ok_or(Error::BadRequest(format!("Object not found")))?;
+let object = object.ok_or_else(|| Error::BadRequest("Object not found".to_string()))?;

 let object_version = object
 .versions()
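
Several of the changes above and below swap ok_or(...) for ok_or_else(|| ...) (and unwrap_or for unwrap_or_else), which clippy flags as or_fun_call: with the closure form, the String for the error message is only allocated on the error path. An illustrative sketch using a hypothetical error type, not the crate's own:

// Hypothetical stand-in for the crate's error type.
#[derive(Debug)]
enum Error {
    BadRequest(String),
}

fn lookup(objects: &[(&str, u32)], key: &str) -> Option<u32> {
    objects.iter().find(|(k, _)| *k == key).map(|(_, v)| *v)
}

// old shape: the error String is built even when the Option is Some
fn get_old(objects: &[(&str, u32)], key: &str) -> Result<u32, Error> {
    lookup(objects, key).ok_or(Error::BadRequest(format!("Object not found")))
}

// new shape: the closure runs only when the Option is None
fn get_new(objects: &[(&str, u32)], key: &str) -> Result<u32, Error> {
    lookup(objects, key).ok_or_else(|| Error::BadRequest("Object not found".to_string()))
}

fn main() {
    let objects = [("a", 1), ("b", 2)];
    assert_eq!(get_new(&objects, "a").unwrap(), 1);
    assert!(get_old(&objects, "missing").is_err());
}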
@@ -43,13 +43,12 @@ pub async fn check_signature(
 let date = headers
 .get("x-amz-date")
 .ok_or_bad_request("Missing X-Amz-Date field")?;
-let date: NaiveDateTime = NaiveDateTime::parse_from_str(date, LONG_DATETIME)
-.ok_or_bad_request("Invalid date")?
-.into();
+let date: NaiveDateTime =
+NaiveDateTime::parse_from_str(date, LONG_DATETIME).ok_or_bad_request("Invalid date")?;
 let date: DateTime<Utc> = DateTime::from_utc(date, Utc);

 if Utc::now() - date > Duration::hours(24) {
-return Err(Error::BadRequest(format!("Date is too old")));
+return Err(Error::BadRequest("Date is too old".to_string()));
 }

 let scope = format!(
@@ -66,10 +65,7 @@ pub async fn check_signature(
 .get(&EmptyKey, &authorization.key_id)
 .await?
 .filter(|k| !k.deleted.get())
-.ok_or(Error::Forbidden(format!(
-"No such key: {}",
-authorization.key_id
-)))?;
+.ok_or_else(|| Error::Forbidden(format!("No such key: {}", authorization.key_id)))?;

 let canonical_request = canonical_request(
 request.method(),
@@ -95,7 +91,7 @@ pub async fn check_signature(
 trace!("Canonical request: ``{}``", canonical_request);
 trace!("String to sign: ``{}``", string_to_sign);
 trace!("Expected: {}, got: {}", signature, authorization.signature);
-return Err(Error::Forbidden(format!("Invalid signature")));
+return Err(Error::Forbidden("Invalid signature".to_string()));
 }

 let content_sha256 = if authorization.content_sha256 == "UNSIGNED-PAYLOAD" {
@@ -105,7 +101,7 @@ pub async fn check_signature(
 .ok_or_bad_request("Invalid content sha256 hash")?;
 Some(
 Hash::try_from(&bytes[..])
-.ok_or(Error::BadRequest(format!("Invalid content sha256 hash")))?,
+.ok_or_else(|| Error::BadRequest("Invalid content sha256 hash".to_string()))?,
 )
 };

@@ -173,9 +169,9 @@ fn parse_query_authorization(headers: &HashMap<String, String>) -> Result<Author
 .get("x-amz-algorithm")
 .ok_or_bad_request("X-Amz-Algorithm not found in query parameters")?;
 if algo != "AWS4-HMAC-SHA256" {
-return Err(Error::BadRequest(format!(
-"Unsupported authorization method"
-)));
+return Err(Error::BadRequest(
+"Unsupported authorization method".to_string(),
+));
 }

 let cred = headers
@@ -293,9 +289,9 @@ pub fn verify_signed_content(content_sha256: Option<Hash>, body: &[u8]) -> Resul
 let expected_sha256 =
 content_sha256.ok_or_bad_request("Request content hash not signed, aborting.")?;
 if expected_sha256 != sha256sum(body) {
-return Err(Error::BadRequest(format!(
-"Request content hash does not match signed hash"
-)));
+return Err(Error::BadRequest(
+"Request content hash does not match signed hash".to_string(),
+));
 }
 Ok(())
 }