use std::collections::{BTreeMap, BTreeSet};
use std::iter::{Iterator, Peekable};
use std::sync::Arc;

use base64::prelude::*;
use hyper::{Body, Response};

use garage_util::data::*;
use garage_util::error::Error as GarageError;
use garage_util::time::*;

use garage_model::garage::Garage;
use garage_model::s3::mpu_table::*;
use garage_model::s3::object_table::*;

use garage_table::EnumerationOrder;

use crate::encoding::*;
use crate::helpers::key_after_prefix;
use crate::s3::error::*;
use crate::s3::multipart as s3_multipart;
use crate::s3::xml as s3_xml;

const DUMMY_NAME: &str = "Dummy Key";
const DUMMY_KEY: &str = "GKDummyKey";
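
/// Parameters shared by ListObjects, ListObjectsV2 and ListMultipartUploads:
/// the target bucket, the page size, the prefix/delimiter filters, and whether
/// values in the response must be URL-encoded.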
#[derive(Debug)]
pub struct ListQueryCommon {
	pub bucket_name: String,
	pub bucket_id: Uuid,
	pub delimiter: Option<String>,
	pub page_size: usize,
	pub prefix: String,
	pub urlencode_resp: bool,
}
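
/// Query parameters for ListObjects (v1) and ListObjectsV2.
/// `marker` is only meaningful in v1; `continuation_token` and `start_after`
/// are only meaningful in v2.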
#[derive(Debug)]
pub struct ListObjectsQuery {
	pub is_v2: bool,
	pub marker: Option<String>,
	pub continuation_token: Option<String>,
	pub start_after: Option<String>,
	pub common: ListQueryCommon,
}
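
/// Query parameters for ListMultipartUploads: listing resumes after the given
/// key marker and upload id marker.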
#[derive(Debug)]
pub struct ListMultipartUploadsQuery {
	pub key_marker: Option<String>,
	pub upload_id_marker: Option<String>,
	pub common: ListQueryCommon,
}
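
/// Query parameters for ListParts on a single multipart upload.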
#[derive(Debug)]
pub struct ListPartsQuery {
	pub bucket_name: String,
	pub bucket_id: Uuid,
	pub key: String,
	pub upload_id: String,
	pub part_number_marker: Option<u64>,
	pub max_parts: u64,
}
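
/// Handle ListObjects and ListObjectsV2: fetch matching objects page by page
/// and serialize them into a ListBucketResult XML response.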
pub async fn handle_list(
	garage: Arc<Garage>,
	query: &ListObjectsQuery,
) -> Result<Response<Body>, Error> {
	let io = |bucket, key, count| {
		let t = &garage.object_table;
		async move {
			t.get_range(
				&bucket,
				key,
				Some(ObjectFilter::IsData),
				count,
				EnumerationOrder::Forward,
			)
			.await
		}
	};

	debug!("ListObjects {:?}", query);

	let mut acc = query.build_accumulator();
	let pagination = fetch_list_entries(&query.common, query.begin()?, &mut acc, &io).await?;

	let result = s3_xml::ListBucketResult {
		xmlns: (),
		// Sending back request information
		name: s3_xml::Value(query.common.bucket_name.to_string()),
		prefix: uriencode_maybe(&query.common.prefix, query.common.urlencode_resp),
		max_keys: s3_xml::IntValue(query.common.page_size as i64),
		delimiter: query
			.common
			.delimiter
			.as_ref()
			.map(|x| uriencode_maybe(x, query.common.urlencode_resp)),
		encoding_type: match query.common.urlencode_resp {
			true => Some(s3_xml::Value("url".to_string())),
			false => None,
		},
		marker: match (!query.is_v2, &query.marker) {
			(true, Some(k)) => Some(uriencode_maybe(k, query.common.urlencode_resp)),
			_ => None,
		},
		start_after: match (query.is_v2, &query.start_after) {
			(true, Some(sa)) => Some(uriencode_maybe(sa, query.common.urlencode_resp)),
			_ => None,
		},
		continuation_token: match (query.is_v2, &query.continuation_token) {
			(true, Some(ct)) => Some(s3_xml::Value(ct.to_string())),
			_ => None,
		},

		// Pagination
		is_truncated: s3_xml::Value(format!("{}", pagination.is_some())),
		key_count: Some(s3_xml::IntValue(
			acc.keys.len() as i64 + acc.common_prefixes.len() as i64,
		)),
		next_marker: match (!query.is_v2, &pagination) {
			(true, Some(RangeBegin::AfterKey { key: k }))
			| (
				true,
				Some(RangeBegin::IncludingKey {
					fallback_key: Some(k),
					..
				}),
			) => Some(uriencode_maybe(k, query.common.urlencode_resp)),
			_ => None,
		},
		next_continuation_token: match (query.is_v2, &pagination) {
			(true, Some(RangeBegin::AfterKey { key })) => Some(s3_xml::Value(format!(
				"]{}",
				BASE64_STANDARD.encode(key.as_bytes())
			))),
			(true, Some(RangeBegin::IncludingKey { key, .. })) => Some(s3_xml::Value(format!(
				"[{}",
				BASE64_STANDARD.encode(key.as_bytes())
			))),
			_ => None,
		},

		// Body
		contents: acc
			.keys
			.iter()
			.map(|(key, info)| s3_xml::ListBucketItem {
				key: uriencode_maybe(key, query.common.urlencode_resp),
				last_modified: s3_xml::Value(msec_to_rfc3339(info.last_modified)),
				size: s3_xml::IntValue(info.size as i64),
				etag: s3_xml::Value(format!("\"{}\"", info.etag)),
				storage_class: s3_xml::Value("STANDARD".to_string()),
			})
			.collect(),
		common_prefixes: acc
			.common_prefixes
			.iter()
			.map(|pfx| s3_xml::CommonPrefix {
				prefix: uriencode_maybe(pfx, query.common.urlencode_resp),
			})
			.collect(),
	};

	let xml = s3_xml::to_xml_with_header(&result)?;
	Ok(Response::builder()
		.header("Content-Type", "application/xml")
		.body(Body::from(xml.into_bytes()))?)
}
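
/// Handle ListMultipartUploads: list objects that have an ongoing multipart
/// upload and serialize them into a ListMultipartUploadsResult XML response.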
pub async fn handle_list_multipart_upload(
	garage: Arc<Garage>,
	query: &ListMultipartUploadsQuery,
) -> Result<Response<Body>, Error> {
	let io = |bucket, key, count| {
		let t = &garage.object_table;
		async move {
			t.get_range(
				&bucket,
				key,
				Some(ObjectFilter::IsUploading {
					check_multipart: Some(true),
				}),
				count,
				EnumerationOrder::Forward,
			)
			.await
		}
	};

	debug!("ListMultipartUploads {:?}", query);

	let mut acc = query.build_accumulator();
	let pagination = fetch_list_entries(&query.common, query.begin()?, &mut acc, &io).await?;

	let result = s3_xml::ListMultipartUploadsResult {
		xmlns: (),

		// Sending back some information about the request
		bucket: s3_xml::Value(query.common.bucket_name.to_string()),
		prefix: uriencode_maybe(&query.common.prefix, query.common.urlencode_resp),
		delimiter: query
			.common
			.delimiter
			.as_ref()
			.map(|d| uriencode_maybe(d, query.common.urlencode_resp)),
		max_uploads: s3_xml::IntValue(query.common.page_size as i64),
		key_marker: query
			.key_marker
			.as_ref()
			.map(|m| uriencode_maybe(m, query.common.urlencode_resp)),
		upload_id_marker: query
			.upload_id_marker
			.as_ref()
			.map(|m| s3_xml::Value(m.to_string())),
		encoding_type: match query.common.urlencode_resp {
			true => Some(s3_xml::Value("url".to_string())),
			false => None,
		},

		// Handling pagination
		is_truncated: s3_xml::Value(format!("{}", pagination.is_some())),
		next_key_marker: match &pagination {
			None => None,
			Some(RangeBegin::AfterKey { key })
			| Some(RangeBegin::AfterUpload { key, .. })
			| Some(RangeBegin::IncludingKey { key, .. }) => {
				Some(uriencode_maybe(key, query.common.urlencode_resp))
			}
		},
		next_upload_id_marker: match pagination {
			Some(RangeBegin::AfterUpload { upload, .. }) => {
				Some(s3_xml::Value(hex::encode(upload)))
			}
			Some(RangeBegin::IncludingKey { .. }) => Some(s3_xml::Value("include".to_string())),
			_ => None,
		},

		// Result body
		upload: acc
			.keys
			.iter()
			.map(|(uuid, info)| s3_xml::ListMultipartItem {
				initiated: s3_xml::Value(msec_to_rfc3339(info.timestamp)),
				key: uriencode_maybe(&info.key, query.common.urlencode_resp),
				upload_id: s3_xml::Value(hex::encode(uuid)),
				storage_class: s3_xml::Value("STANDARD".to_string()),
				initiator: s3_xml::Initiator {
					display_name: s3_xml::Value(DUMMY_NAME.to_string()),
					id: s3_xml::Value(DUMMY_KEY.to_string()),
				},
				owner: s3_xml::Owner {
					display_name: s3_xml::Value(DUMMY_NAME.to_string()),
					id: s3_xml::Value(DUMMY_KEY.to_string()),
				},
			})
			.collect(),
		common_prefixes: acc
			.common_prefixes
			.iter()
			.map(|c| s3_xml::CommonPrefix {
				prefix: s3_xml::Value(c.to_string()),
			})
			.collect(),
	};

	let xml = s3_xml::to_xml_with_header(&result)?;

	Ok(Response::builder()
		.header("Content-Type", "application/xml")
		.body(Body::from(xml.into_bytes()))?)
}
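
/// Handle ListParts: look up the multipart upload and return the list of its
/// finished parts, paginated by part number.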
pub async fn handle_list_parts(
	garage: Arc<Garage>,
	query: &ListPartsQuery,
) -> Result<Response<Body>, Error> {
	debug!("ListParts {:?}", query);

	let upload_id = s3_multipart::decode_upload_id(&query.upload_id)?;

	let (_, _, mpu) =
		s3_multipart::get_upload(&garage, &query.bucket_id, &query.key, &upload_id).await?;

	let (info, next) = fetch_part_info(query, &mpu)?;

	let result = s3_xml::ListPartsResult {
		xmlns: (),

		// Query parameters
		bucket: s3_xml::Value(query.bucket_name.to_string()),
		key: s3_xml::Value(query.key.to_string()),
		upload_id: s3_xml::Value(query.upload_id.to_string()),
		part_number_marker: query.part_number_marker.map(|e| s3_xml::IntValue(e as i64)),
		max_parts: s3_xml::IntValue(query.max_parts as i64),

		// Result values
		next_part_number_marker: next.map(|e| s3_xml::IntValue(e as i64)),
		is_truncated: s3_xml::Value(format!("{}", next.is_some())),
		parts: info
			.iter()
			.map(|part| s3_xml::PartItem {
				etag: s3_xml::Value(format!("\"{}\"", part.etag)),
				last_modified: s3_xml::Value(msec_to_rfc3339(part.timestamp)),
				part_number: s3_xml::IntValue(part.part_number as i64),
				size: s3_xml::IntValue(part.size as i64),
			})
			.collect(),

		// Dummy result values (unsupported features)
		initiator: s3_xml::Initiator {
			display_name: s3_xml::Value(DUMMY_NAME.to_string()),
			id: s3_xml::Value(DUMMY_KEY.to_string()),
		},
		owner: s3_xml::Owner {
			display_name: s3_xml::Value(DUMMY_NAME.to_string()),
			id: s3_xml::Value(DUMMY_KEY.to_string()),
		},
		storage_class: s3_xml::Value("STANDARD".to_string()),
	};

	let xml = s3_xml::to_xml_with_header(&result)?;

	Ok(Response::builder()
		.header("Content-Type", "application/xml")
		.body(Body::from(xml.into_bytes()))?)
}

/*
 * Private enums and structs
 */

#[derive(Debug)]
struct ObjectInfo {
	last_modified: u64,
	size: u64,
	etag: String,
}

#[derive(Debug, PartialEq)]
struct UploadInfo {
	key: String,
	timestamp: u64,
}

#[derive(Debug, PartialEq)]
struct PartInfo<'a> {
	etag: &'a str,
	timestamp: u64,
	part_number: u64,
	size: u64,
}
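
/// Outcome of a single accumulator extraction step, telling the fetch loop
/// whether to continue, where to resume from, or that the page is full.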
enum ExtractionResult {
	NoMore,
	Filled,
	FilledAtUpload {
		key: String,
		upload: Uuid,
	},
	Extracted {
		key: String,
	},
	// Fallback key is used for legacy APIs that only support
	// exclusive pagination (and not an inclusive one).
	SkipTo {
		key: String,
		fallback_key: Option<String>,
	},
}
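
/// Position in the key space from which the next database range query starts.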
#[derive(PartialEq, Clone, Debug)]
enum RangeBegin {
	// Fallback key is used for legacy APIs that only support
	// exclusive pagination (and not an inclusive one).
	IncludingKey {
		key: String,
		fallback_key: Option<String>,
	},
	AfterKey {
		key: String,
	},
	AfterUpload {
		key: String,
		upload: Uuid,
	},
}
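
/// `None` when the listing is complete, `Some(cursor)` when a next page exists.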
type Pagination = Option<RangeBegin>;

/*
 * Fetch list entries
 */

async fn fetch_list_entries<R, F>(
	query: &ListQueryCommon,
	begin: RangeBegin,
	acc: &mut impl ExtractAccumulator,
	mut io: F,
) -> Result<Pagination, Error>
where
	R: futures::Future<Output = Result<Vec<Object>, GarageError>>,
	F: FnMut(Uuid, Option<String>, usize) -> R,
{
	let mut cursor = begin;
	// +1 is needed as we may need to skip the 1st key
	// (range is inclusive while most S3 requests are exclusive)
	let count = query.page_size + 1;

	loop {
		let start_key = match cursor {
			RangeBegin::AfterKey { ref key }
			| RangeBegin::AfterUpload { ref key, .. }
			| RangeBegin::IncludingKey { ref key, .. } => Some(key.clone()),
		};

		// Fetch objects
		let objects = io(query.bucket_id, start_key.clone(), count).await?;

		debug!(
			"List: get range {:?} (max {}), results: {}",
			start_key,
			count,
			objects.len()
		);
		let server_more = objects.len() >= count;

		let prev_req_cursor = cursor.clone();
		let mut iter = objects.iter().peekable();

		// Drop the first key if needed
		// Only AfterKey requires it according to the S3 spec and our implementation.
		match (&cursor, iter.peek()) {
			(RangeBegin::AfterKey { key }, Some(object)) if &object.key == key => {
				iter.next();
			}
			_ => (),
		};

		while let Some(object) = iter.peek() {
			if !object.key.starts_with(&query.prefix) {
				// If the key is not in the requested prefix, we're done
				return Ok(None);
			}

			match acc.extract(query, &cursor, &mut iter) {
				ExtractionResult::Extracted { key } => {
					cursor = RangeBegin::AfterKey { key };
				}
				ExtractionResult::SkipTo { key, fallback_key } => {
					cursor = RangeBegin::IncludingKey { key, fallback_key };
				}
				ExtractionResult::FilledAtUpload { key, upload } => {
					return Ok(Some(RangeBegin::AfterUpload { key, upload }));
				}
				ExtractionResult::Filled => {
					return Ok(Some(cursor));
				}
				ExtractionResult::NoMore => {
					return Ok(None);
				}
			};
		}

		if !server_more {
			// We did not fully fill the accumulator despite exhausting all the data we have,
			// we're done
			return Ok(None);
		}

		if prev_req_cursor == cursor {
			unreachable!("No progress has been done in the loop. This is a bug, please report it.");
		}
	}
}
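
/// Build the list of parts to return for a ListParts request, together with
/// the part number to use as the next marker if the result is truncated.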
fn fetch_part_info<'a>(
	query: &ListPartsQuery,
	mpu: &'a MultipartUpload,
) -> Result<(Vec<PartInfo<'a>>, Option<u64>), Error> {
	assert!((1..=1000).contains(&query.max_parts)); // see s3/api_server.rs

	// Parse multipart upload part list, removing parts not yet finished
	// and failed part uploads that were overwritten
	let mut parts: Vec<PartInfo<'a>> = Vec::with_capacity(mpu.parts.items().len());
	for (pk, p) in mpu.parts.items().iter() {
		if let (Some(etag), Some(size)) = (&p.etag, p.size) {
			let part_info = PartInfo {
				part_number: pk.part_number,
				timestamp: pk.timestamp,
				etag,
				size,
			};
			match parts.last_mut() {
				Some(lastpart) if lastpart.part_number == pk.part_number => {
					*lastpart = part_info;
				}
				_ => {
					parts.push(part_info);
				}
			}
		}
	}

	// Cut the beginning if we have a marker
	if let Some(marker) = &query.part_number_marker {
		let next = marker + 1;
		let part_idx = parts
			.binary_search_by(|part| part.part_number.cmp(&next))
			.unwrap_or_else(|x| x);
		parts = parts.split_off(part_idx);
	}

	// Cut the end if we have too many parts
	if parts.len() > query.max_parts as usize {
		parts.truncate(query.max_parts as usize);
		let pagination = Some(parts.last().unwrap().part_number);
		return Ok((parts, pagination));
	}

	Ok((parts, None))
}

/*
 * ListQuery logic
 */

/// Determine the key from which we want to start fetching objects from the database
///
/// We choose whether the object at this key must
/// be included or excluded from the response.
/// This key can be the prefix in the base case, or intermediate
/// points in the dataset if we are continuing a previous listing.
impl ListObjectsQuery {
	fn build_accumulator(&self) -> ObjectAccumulator {
		ObjectAccumulator::new(self.common.page_size)
	}

	fn begin(&self) -> Result<RangeBegin, Error> {
		if self.is_v2 {
			match (&self.continuation_token, &self.start_after) {
				// In V2 mode, the continuation token is defined as an opaque
				// string in the spec, so we can do whatever we want with it.
				// In our case, it is defined as either [ or ] (for include
				// or exclude), followed by a base64-encoded string
				// representing the key to start with.
				(Some(token), _) => match &token.get(..1) {
					Some("[") => Ok(RangeBegin::IncludingKey {
						key: String::from_utf8(
							BASE64_STANDARD
								.decode(token[1..].as_bytes())
								.ok_or_bad_request("Invalid continuation token")?,
						)?,
						fallback_key: None,
					}),
					Some("]") => Ok(RangeBegin::AfterKey {
						key: String::from_utf8(
							BASE64_STANDARD
								.decode(token[1..].as_bytes())
								.ok_or_bad_request("Invalid continuation token")?,
						)?,
					}),
					_ => Err(Error::bad_request("Invalid continuation token")),
				},

				// StartAfter has defined semantics in the spec:
				// start listing at the first key immediately after.
				(_, Some(key)) => Ok(RangeBegin::AfterKey {
					key: key.to_string(),
				}),

				// In the case where neither is specified, we start
				// listing at the specified prefix. If an object has this
				// exact same key, we include it. (@TODO is this correct?)
				_ => Ok(RangeBegin::IncludingKey {
					key: self.common.prefix.to_string(),
					fallback_key: None,
				}),
			}
		} else {
			match &self.marker {
				// In V1 mode, the spec defines the Marker value to mean
				// the same thing as the StartAfter value in V2 mode.
				Some(key) => Ok(RangeBegin::AfterKey {
					key: key.to_string(),
				}),
				_ => Ok(RangeBegin::IncludingKey {
					key: self.common.prefix.to_string(),
					fallback_key: None,
				}),
			}
		}
	}
}

impl ListMultipartUploadsQuery {
	fn build_accumulator(&self) -> UploadAccumulator {
		UploadAccumulator::new(self.common.page_size)
	}

	fn begin(&self) -> Result<RangeBegin, Error> {
		match (&self.upload_id_marker, &self.key_marker) {
			// If both the upload id marker and the key marker are set,
			// the spec specifies that we must start listing uploads INCLUDING the given key,
			// AFTER the specified upload id (sorted in a lexicographic order).
			// To enable some optimisations, we emulate "IncludingKey" by extending the upload id
			// semantic. We base our reasoning on the hypothesis that S3's upload ids are opaque
			// while Garage's ones are 32 bytes hex encoded, which enables us to extend this query
			// with a specific "include" upload id.
			(Some(up_marker), Some(key_marker)) => match &up_marker[..] {
				"include" => Ok(RangeBegin::IncludingKey {
					key: key_marker.to_string(),
					fallback_key: None,
				}),
				uuid => Ok(RangeBegin::AfterUpload {
					key: key_marker.to_string(),
					upload: s3_multipart::decode_upload_id(uuid)?,
				}),
			},

			// If only the key marker is specified, the spec says that we must start listing
			// uploads AFTER the specified key.
			(None, Some(key_marker)) => Ok(RangeBegin::AfterKey {
				key: key_marker.to_string(),
			}),
			_ => Ok(RangeBegin::IncludingKey {
				key: self.common.prefix.to_string(),
				fallback_key: None,
			}),
		}
	}
}

/*
 * Accumulator logic
 */

trait ExtractAccumulator {
	fn extract<'a>(
		&mut self,
		query: &ListQueryCommon,
		cursor: &RangeBegin,
		iter: &mut Peekable<impl Iterator<Item = &'a Object>>,
	) -> ExtractionResult;
}
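
/// Accumulates the entries and common prefixes of a single result page,
/// bounded by the requested page size.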
struct Accumulator<K, V> {
	common_prefixes: BTreeSet<String>,
	keys: BTreeMap<K, V>,
	max_capacity: usize,
}

type ObjectAccumulator = Accumulator<String, ObjectInfo>;
type UploadAccumulator = Accumulator<Uuid, UploadInfo>;

impl<K: std::cmp::Ord, V> Accumulator<K, V> {
	fn new(page_size: usize) -> Accumulator<K, V> {
		Accumulator {
			common_prefixes: BTreeSet::<String>::new(),
			keys: BTreeMap::<K, V>::new(),
			max_capacity: page_size,
		}
	}

	/// Observe the Object iterator and try to extract a single common prefix
	///
	/// This function can consume an arbitrary number of items as long as they share the same
	/// common prefix.
	fn extract_common_prefix<'a>(
		&mut self,
		objects: &mut Peekable<impl Iterator<Item = &'a Object>>,
		query: &ListQueryCommon,
	) -> Option<ExtractionResult> {
		// Get the next object from the iterator
		let object = objects.peek().expect("This iterator can not be empty as it is checked earlier in the code. This is a logic bug, please report it.");

		// Check if this is a common prefix (requires a passed delimiter and its value in the key)
		let pfx = match common_prefix(object, query) {
			Some(p) => p,
			None => return None,
		};
		assert!(pfx.starts_with(&query.prefix));

		// Try to register this prefix
		// If not possible, we can return early
		if !self.try_insert_common_prefix(pfx.to_string()) {
			return Some(ExtractionResult::Filled);
		}

		// We consume the whole common prefix from the iterator
		let mut last_pfx_key = &object.key;
		loop {
			match objects.peek() {
				Some(o) if o.key.starts_with(pfx) => {
					last_pfx_key = &o.key;
					objects.next();
				}
				Some(_) => {
					return Some(ExtractionResult::Extracted {
						key: last_pfx_key.to_owned(),
					})
				}
				None => {
					return match key_after_prefix(pfx) {
						Some(next) => Some(ExtractionResult::SkipTo {
							key: next,
							fallback_key: Some(last_pfx_key.to_owned()),
						}),
						None => Some(ExtractionResult::NoMore),
					}
				}
			};
		}
	}

	fn is_full(&mut self) -> bool {
		self.keys.len() + self.common_prefixes.len() >= self.max_capacity
	}

	fn try_insert_common_prefix(&mut self, key: String) -> bool {
		// If we already have an entry, we can continue
		if self.common_prefixes.contains(&key) {
			return true;
		}

		// Otherwise, we need to check if we can add it
		if self.is_full() {
			false
		} else {
			self.common_prefixes.insert(key);
			true
		}
	}

	fn try_insert_entry(&mut self, key: K, value: V) -> bool {
		// It is impossible to add the same key twice; this is an error
		assert!(!self.keys.contains_key(&key));

		if self.is_full() {
			false
		} else {
			self.keys.insert(key, value);
			true
		}
	}
}

impl ExtractAccumulator for ObjectAccumulator {
	fn extract<'a>(
		&mut self,
		query: &ListQueryCommon,
		_cursor: &RangeBegin,
		objects: &mut Peekable<impl Iterator<Item = &'a Object>>,
	) -> ExtractionResult {
		if let Some(e) = self.extract_common_prefix(objects, query) {
			return e;
		}

		let object = objects.next().expect("This iterator can not be empty as it is checked earlier in the code. This is a logic bug, please report it.");
		assert!(object.key.starts_with(&query.prefix));

		let version = match object.versions().iter().find(|x| x.is_data()) {
			Some(v) => v,
			None => unreachable!(
				"Expect to have objects having data due to earlier filtering. This is a logic bug."
			),
		};

		let meta = match &version.state {
			ObjectVersionState::Complete(ObjectVersionData::Inline(meta, _)) => meta,
			ObjectVersionState::Complete(ObjectVersionData::FirstBlock(meta, _)) => meta,
			_ => unreachable!(),
		};
		let info = ObjectInfo {
			last_modified: version.timestamp,
			size: meta.size,
			etag: meta.etag.to_string(),
		};

		match self.try_insert_entry(object.key.clone(), info) {
			true => ExtractionResult::Extracted {
				key: object.key.clone(),
			},
			false => ExtractionResult::Filled,
		}
	}
}

impl ExtractAccumulator for UploadAccumulator {
	/// Observe the iterator, process a single key, and try to extract one or more upload entries
	///
	/// This function processes a single object from the iterator that can contain an arbitrary
	/// number of versions, and thus "uploads".
	fn extract<'a>(
		&mut self,
		query: &ListQueryCommon,
		cursor: &RangeBegin,
		objects: &mut Peekable<impl Iterator<Item = &'a Object>>,
	) -> ExtractionResult {
		if let Some(e) = self.extract_common_prefix(objects, query) {
			return e;
		}

		// Get the next object from the iterator
		let object = objects.next().expect("This iterator can not be empty as it is checked earlier in the code. This is a logic bug, please report it.");

		let mut uploads_for_key = object
			.versions()
			.iter()
			.filter(|x| x.is_uploading(Some(true)))
			.collect::<Vec<&ObjectVersion>>();

		// S3 logic requires lexicographically sorted upload ids.
		uploads_for_key.sort_unstable_by_key(|e| e.uuid);

		// Skip results if an upload marker is provided
		if let RangeBegin::AfterUpload { upload, .. } = cursor {
			// Because our data are sorted, we can use a binary search to find the UUID
			// or to find where it should have been added. Once this position is found,
			// we use it to discard the first part of the array.
			let idx = match uploads_for_key.binary_search_by(|e| e.uuid.cmp(upload)) {
				// we start after the found uuid so we need to discard the pointed value.
				// In the worst case, the UUID is the last element, which leads us to an empty array,
				// but we are never out of bounds.
				Ok(i) => i + 1,
				// if the UUID is not found, the upload may have been discarded between the two requests,
				// this function returns where it could have been inserted,
				// the pointed value is thus greater than our marker and we need to keep it.
				Err(i) => i,
			};
			uploads_for_key = uploads_for_key[idx..].to_vec();
		}

		let mut iter = uploads_for_key.iter();

		// The first entry is a specific case
		// as it changes our result enum type
		let first_upload = match iter.next() {
			Some(u) => u,
			None => {
				return ExtractionResult::Extracted {
					key: object.key.clone(),
				}
			}
		};
		let first_up_info = UploadInfo {
			key: object.key.to_string(),
			timestamp: first_upload.timestamp,
		};
		if !self.try_insert_entry(first_upload.uuid, first_up_info) {
			return ExtractionResult::Filled;
		}

		// We can then collect the remaining uploads in a loop
		let mut prev_uuid = first_upload.uuid;
		for upload in iter {
			let up_info = UploadInfo {
				key: object.key.to_string(),
				timestamp: upload.timestamp,
			};

			// Insert data in our accumulator
			// If it is full, return information to paginate.
			if !self.try_insert_entry(upload.uuid, up_info) {
				return ExtractionResult::FilledAtUpload {
					key: object.key.clone(),
					upload: prev_uuid,
				};
			}
			// Update our last added UUID
			prev_uuid = upload.uuid;
		}

		// We successfully collected all the uploads
		ExtractionResult::Extracted {
			key: object.key.clone(),
		}
	}
}

/*
 * Utility functions
 */

/// Returns the common prefix of the object given the query prefix and delimiter
fn common_prefix<'a>(object: &'a Object, query: &ListQueryCommon) -> Option<&'a str> {
	match &query.delimiter {
		Some(delimiter) => object.key[query.prefix.len()..]
			.find(delimiter)
			.map(|i| &object.key[..query.prefix.len() + i + delimiter.len()]),
		None => None,
	}
}

/// URIencode a value if needed
fn uriencode_maybe(s: &str, yes: bool) -> s3_xml::Value {
	if yes {
		s3_xml::Value(uri_encode(s, true))
	} else {
		s3_xml::Value(s.to_string())
	}
}

/*
 * Unit tests of this module
 */
#[cfg(test)]
mod tests {
	use super::*;
	use garage_util::*;
	use std::iter::FromIterator;

	const TS: u64 = 1641394898314;

	fn bucket() -> Uuid {
		Uuid::from([0x42; 32])
	}

	fn query() -> ListMultipartUploadsQuery {
		ListMultipartUploadsQuery {
			common: ListQueryCommon {
				prefix: "".to_string(),
				delimiter: Some("/".to_string()),
				page_size: 1000,
				urlencode_resp: false,
				bucket_name: "a".to_string(),
				bucket_id: Uuid::from([0x00; 32]),
			},
			key_marker: None,
			upload_id_marker: None,
		}
	}

	fn objs() -> Vec<Object> {
		vec![
			Object::new(
				bucket(),
				"a/b/c".to_string(),
				vec![objup_version([0x01; 32])],
			),
			Object::new(bucket(), "d".to_string(), vec![objup_version([0x01; 32])]),
		]
	}

	fn objup_version(uuid: [u8; 32]) -> ObjectVersion {
		ObjectVersion {
			uuid: Uuid::from(uuid),
			timestamp: TS,
			state: ObjectVersionState::Uploading {
				multipart: true,
				headers: ObjectVersionHeaders {
					content_type: "text/plain".to_string(),
					other: BTreeMap::<String, String>::new(),
				},
			},
		}
	}

	#[test]
	fn test_common_prefixes() {
		let mut query = query();
		let objs = objs();

		query.common.prefix = "a/".to_string();
		assert_eq!(
			common_prefix(objs.get(0).unwrap(), &query.common),
			Some("a/b/")
		);

		query.common.prefix = "a/b/".to_string();
		assert_eq!(common_prefix(objs.get(0).unwrap(), &query.common), None);
	}

	#[test]
	fn test_extract_common_prefix() {
		let mut query = query();
		query.common.prefix = "a/".to_string();
		let objs = objs();
		let mut acc = UploadAccumulator::new(query.common.page_size);

		let mut iter = objs.iter().peekable();
		match acc.extract_common_prefix(&mut iter, &query.common) {
			Some(ExtractionResult::Extracted { key }) => assert_eq!(key, "a/b/c".to_string()),
			_ => panic!("wrong result"),
		}
		assert_eq!(acc.common_prefixes.len(), 1);
		assert_eq!(acc.common_prefixes.iter().next().unwrap(), "a/b/");
	}

	#[test]
	fn test_extract_upload() {
		let objs = vec![
			Object::new(
				bucket(),
				"b".to_string(),
				vec![
					objup_version([0x01; 32]),
					objup_version([0x80; 32]),
					objup_version([0x8f; 32]),
					objup_version([0xdd; 32]),
				],
			),
			Object::new(bucket(), "c".to_string(), vec![]),
		];

		let mut acc = UploadAccumulator::new(2);
		let mut start = RangeBegin::AfterUpload {
			key: "b".to_string(),
			upload: Uuid::from([0x01; 32]),
		};

		let mut iter = objs.iter().peekable();

		// Check the case where we skip some uploads
		match acc.extract(&(query().common), &start, &mut iter) {
			ExtractionResult::FilledAtUpload { key, upload } => {
				assert_eq!(key, "b");
				assert_eq!(upload, Uuid::from([0x8f; 32]));
			}
			_ => panic!("wrong result"),
		};

		assert_eq!(acc.keys.len(), 2);
		assert_eq!(
			acc.keys.get(&Uuid::from([0x80; 32])).unwrap(),
			&UploadInfo {
				timestamp: TS,
				key: "b".to_string()
			}
		);
		assert_eq!(
			acc.keys.get(&Uuid::from([0x8f; 32])).unwrap(),
			&UploadInfo {
				timestamp: TS,
				key: "b".to_string()
			}
		);

		acc = UploadAccumulator::new(2);
		start = RangeBegin::AfterUpload {
			key: "b".to_string(),
			upload: Uuid::from([0xff; 32]),
		};
		iter = objs.iter().peekable();

		// Check the case where we skip all the uploads
		match acc.extract(&(query().common), &start, &mut iter) {
			ExtractionResult::Extracted { key } if key.as_str() == "b" => (),
			_ => panic!("wrong result"),
		};
	}

	#[tokio::test]
	async fn test_fetch_uploads_no_result() -> Result<(), Error> {
		let query = query();
		let mut acc = query.build_accumulator();
		let page = fetch_list_entries(
			&query.common,
			query.begin()?,
			&mut acc,
			|_, _, _| async move { Ok(vec![]) },
		)
		.await?;
		assert_eq!(page, None);
		assert_eq!(acc.common_prefixes.len(), 0);
		assert_eq!(acc.keys.len(), 0);

		Ok(())
	}

	#[tokio::test]
	async fn test_fetch_uploads_basic() -> Result<(), Error> {
		let query = query();
		let mut acc = query.build_accumulator();
		let mut fake_io = |_, _, _| async move { Ok(objs()) };
		let page =
			fetch_list_entries(&query.common, query.begin()?, &mut acc, &mut fake_io).await?;
		assert_eq!(page, None);
		assert_eq!(acc.common_prefixes.len(), 1);
		assert_eq!(acc.keys.len(), 1);
		assert!(acc.common_prefixes.contains("a/"));

		Ok(())
	}

	#[tokio::test]
	async fn test_fetch_uploads_advanced() -> Result<(), Error> {
		let mut query = query();
		query.common.page_size = 2;

		let mut fake_io = |_, k: Option<String>, _| async move {
			Ok(match k.as_deref() {
				Some("") => vec![
					Object::new(bucket(), "b/a".to_string(), vec![objup_version([0x01; 32])]),
					Object::new(bucket(), "b/b".to_string(), vec![objup_version([0x01; 32])]),
					Object::new(bucket(), "b/c".to_string(), vec![objup_version([0x01; 32])]),
				],
				Some("b0") => vec![
					Object::new(bucket(), "c/a".to_string(), vec![objup_version([0x01; 32])]),
					Object::new(bucket(), "c/b".to_string(), vec![objup_version([0x01; 32])]),
					Object::new(bucket(), "c/c".to_string(), vec![objup_version([0x02; 32])]),
				],
				Some("c0") => vec![Object::new(
					bucket(),
					"d".to_string(),
					vec![objup_version([0x01; 32])],
				)],
				_ => panic!("wrong value {:?}", k),
			})
		};

		let mut acc = query.build_accumulator();
		let page =
			fetch_list_entries(&query.common, query.begin()?, &mut acc, &mut fake_io).await?;
		assert_eq!(
			page,
			Some(RangeBegin::IncludingKey {
				key: "c0".to_string(),
				fallback_key: Some("c/c".to_string())
			})
		);
		assert_eq!(acc.common_prefixes.len(), 2);
		assert_eq!(acc.keys.len(), 0);
		assert!(acc.common_prefixes.contains("b/"));
		assert!(acc.common_prefixes.contains("c/"));

		Ok(())
	}

	fn mpu() -> MultipartUpload {
		let uuid = Uuid::from([0x08; 32]);

		let parts = vec![
			(
				MpuPartKey {
					part_number: 1,
					timestamp: TS,
				},
				MpuPart {
					version: uuid,
					size: Some(3),
					etag: Some("etag1".into()),
				},
			),
			(
				MpuPartKey {
					part_number: 2,
					timestamp: TS,
				},
				MpuPart {
					version: uuid,
					size: None,
					etag: None,
				},
			),
			(
				MpuPartKey {
					part_number: 3,
					timestamp: TS,
				},
				MpuPart {
					version: uuid,
					size: Some(10),
					etag: Some("etag2".into()),
				},
			),
			(
				MpuPartKey {
					part_number: 5,
					timestamp: TS,
				},
				MpuPart {
					version: uuid,
					size: Some(7),
					etag: Some("etag3".into()),
				},
			),
			(
				MpuPartKey {
					part_number: 8,
					timestamp: TS,
				},
				MpuPart {
					version: uuid,
					size: Some(5),
					etag: Some("etag4".into()),
				},
			),
		];

		MultipartUpload {
			upload_id: uuid,
			timestamp: TS,
			deleted: false.into(),
			parts: crdt::Map::<MpuPartKey, MpuPart>::from_iter(parts),
			bucket_id: uuid,
			key: "a".into(),
		}
	}

	#[test]
	fn test_fetch_part_info() -> Result<(), Error> {
		let uuid = Uuid::from([0x08; 32]);
		let mut query = ListPartsQuery {
			bucket_name: "a".to_string(),
			bucket_id: uuid,
			key: "a".to_string(),
			upload_id: "xx".to_string(),
			part_number_marker: None,
			max_parts: 2,
		};

		let mpu = mpu();

		// Start from the beginning but with limited size to trigger pagination
		let (info, pagination) = fetch_part_info(&query, &mpu)?;
		assert_eq!(pagination.unwrap(), 3);
		assert_eq!(
			info,
			vec![
				PartInfo {
					etag: "etag1",
					timestamp: TS,
					part_number: 1,
					size: 3
				},
				PartInfo {
					etag: "etag2",
					timestamp: TS,
					part_number: 3,
					size: 10
				},
			]
		);

		// Use previous pagination to make a new request
		query.part_number_marker = Some(pagination.unwrap());
		let (info, pagination) = fetch_part_info(&query, &mpu)?;
		assert!(pagination.is_none());
		assert_eq!(
			info,
			vec![
				PartInfo {
					etag: "etag3",
					timestamp: TS,
					part_number: 5,
					size: 7
				},
				PartInfo {
					etag: "etag4",
					timestamp: TS,
					part_number: 8,
					size: 5
				},
			]
		);

		// Trying to access a part that is way larger than registered ones
		query.part_number_marker = Some(9999);
		let (info, pagination) = fetch_part_info(&query, &mpu)?;
		assert!(pagination.is_none());
		assert_eq!(info, vec![]);

		// Try without any limitation
		query.max_parts = 1000;
		query.part_number_marker = None;
		let (info, pagination) = fetch_part_info(&query, &mpu)?;
		assert!(pagination.is_none());
		assert_eq!(
			info,
			vec![
				PartInfo {
					etag: "etag1",
					timestamp: TS,
					part_number: 1,
					size: 3
				},
				PartInfo {
					etag: "etag2",
					timestamp: TS,
					part_number: 3,
					size: 10
				},
				PartInfo {
					etag: "etag3",
					timestamp: TS,
					part_number: 5,
					size: 7
				},
				PartInfo {
					etag: "etag4",
					timestamp: TS,
					part_number: 8,
					size: 5
				},
			]
		);

		Ok(())
	}
}