Merge pull request 'Fix #204 (full Multipart Uploads semantics)' (#553) from nlnet-task1 into next

Reviewed-on: Deuxfleurs/garage#553
Commit 0a06fda0da by Alex, 2023-06-09 15:34:09 +00:00
33 changed files with 2057 additions and 877 deletions

Cargo.lock (generated, 4 changed lines)

@@ -2641,9 +2641,9 @@ dependencies = [
[[package]]
name = "proc-macro-hack"
-version = "0.5.19"
+version = "0.5.20+deprecated"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5"
+checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068"
[[package]]
name = "proc-macro2"
Cargo.nix

@@ -32,7 +32,7 @@ args@{
ignoreLockHash,
}:
let
-nixifiedLockHash = "30497a698042332c229ed062aa3c4bc7d17c3e927deb3cf9d4dc12d8a0492515";
+nixifiedLockHash = "5214dc0a8455fa16df874d9a8efdaa0b99cb3b6ae4cf2ea973137c1b8a1d39d6";
workspaceSrc = if args.workspaceSrc == null then ./. else args.workspaceSrc;
currentLockHash = builtins.hashFile "sha256" (workspaceSrc + /Cargo.lock);
lockHashIgnored = if ignoreLockHash
@@ -1985,7 +1985,7 @@ in
src = fetchCratesIo { inherit name version; sha256 = "f6b0decc02f4636b9ccad390dcbe77b722a77efedfa393caf8379a51d5c61899"; };
dependencies = {
git_version_macro = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".git-version-macro."0.3.5" { profileName = "__noProfile"; }).out;
-proc_macro_hack = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".proc-macro-hack."0.5.19" { profileName = "__noProfile"; }).out;
+proc_macro_hack = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".proc-macro-hack."0.5.20+deprecated" { profileName = "__noProfile"; }).out;
};
});
@@ -1995,7 +1995,7 @@ in
registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "fe69f1cbdb6e28af2bac214e943b99ce8a0a06b447d15d3e61161b0423139f3f"; };
dependencies = {
-proc_macro_hack = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".proc-macro-hack."0.5.19" { profileName = "__noProfile"; }).out;
+proc_macro_hack = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".proc-macro-hack."0.5.20+deprecated" { profileName = "__noProfile"; }).out;
proc_macro2 = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".proc-macro2."1.0.56" { inherit profileName; }).out;
quote = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".quote."1.0.26" { inherit profileName; }).out;
syn = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".syn."1.0.94" { inherit profileName; }).out;
@@ -3695,11 +3695,11 @@ in
};
});
-"registry+https://github.com/rust-lang/crates.io-index".proc-macro-hack."0.5.19" = overridableMkRustCrate (profileName: rec {
+"registry+https://github.com/rust-lang/crates.io-index".proc-macro-hack."0.5.20+deprecated" = overridableMkRustCrate (profileName: rec {
name = "proc-macro-hack";
-version = "0.5.19";
+version = "0.5.20+deprecated";
registry = "registry+https://github.com/rust-lang/crates.io-index";
-src = fetchCratesIo { inherit name version; sha256 = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5"; };
+src = fetchCratesIo { inherit name version; sha256 = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068"; };
});
"registry+https://github.com/rust-lang/crates.io-index".proc-macro2."1.0.56" = overridableMkRustCrate (profileName: rec {

doc/drafts/admin-api.md

@@ -535,7 +535,10 @@ Example response:
],
"objects": 14827,
"bytes": 13189855625,
-"unfinshedUploads": 0,
+"unfinishedUploads": 1,
+"unfinishedMultipartUploads": 1,
+"unfinishedMultipartUploadParts": 11,
+"unfinishedMultipartUploadBytes": 41943040,
"quotas": {
"maxSize": null,
"maxObjects": null

script/test-renumbering.sh (new file, 138 lines)

@@ -0,0 +1,138 @@
#!/usr/bin/env bash
: '
This script tests part renumbering on an S3 remote (here configured for Minio).
On Minio:
The results confirm that if I upload parts with number 1, 4, 5 and 6,
they are renumbered to 1, 2, 3 and 4 after CompleteMultipartUpload.
Thus, specifying partNumber=4 on a GetObject/HeadObject should return
information on the part I originally uploaded with part number 6.
On S3: not tested
Sample output (on Minio):
f07e1404cc527d494242824ded3a616b part1
78974cd4d0f622eb3426ea7cd22f5a1c part4
f9cc379f8baa61645558d9ba7e6351fa part5
1bd2383eebbac1f8e7143575ba5b1f4a part6
Upload ID: 6838b813-d0ca-400b-9d28-ec8b2b5cd004
PART 1 ETag: "f07e1404cc527d494242824ded3a616b"
PART 4 ETag: "78974cd4d0f622eb3426ea7cd22f5a1c"
PART 5 ETag: "f9cc379f8baa61645558d9ba7e6351fa"
PART 6 ETag: "1bd2383eebbac1f8e7143575ba5b1f4a"
======================================== LIST ====
{
"Parts": [
{
"PartNumber": 1,
"LastModified": "2023-04-25T10:21:54.350000+00:00",
"ETag": "\"f07e1404cc527d494242824ded3a616b\"",
"Size": 20971520
},
{
"PartNumber": 4,
"LastModified": "2023-04-25T10:21:54.350000+00:00",
"ETag": "\"78974cd4d0f622eb3426ea7cd22f5a1c\"",
"Size": 20971520
},
{
"PartNumber": 5,
"LastModified": "2023-04-25T10:21:54.350000+00:00",
"ETag": "\"f9cc379f8baa61645558d9ba7e6351fa\"",
"Size": 20971520
},
{
"PartNumber": 6,
"LastModified": "2023-04-25T10:21:54.350000+00:00",
"ETag": "\"1bd2383eebbac1f8e7143575ba5b1f4a\"",
"Size": 20971520
}
],
"ChecksumAlgorithm": "",
"Initiator": {
"ID": "02d6176db174dc93cb1b899f7c6078f08654445fe8cf1b6ce98d8855f66bdbf4",
"DisplayName": "02d6176db174dc93cb1b899f7c6078f08654445fe8cf1b6ce98d8855f66bdbf4"
},
"Owner": {
"DisplayName": "02d6176db174dc93cb1b899f7c6078f08654445fe8cf1b6ce98d8855f66bdbf4",
"ID": "02d6176db174dc93cb1b899f7c6078f08654445fe8cf1b6ce98d8855f66bdbf4"
},
"StorageClass": "STANDARD"
}
======================================== COMPLETE ====
{
"Location": "http://localhost:9000/test/upload",
"Bucket": "test",
"Key": "upload",
"ETag": "\"8e817c8ccd442f9a79c77b58fe808c43-4\""
}
======================================== LIST ====
An error occurred (NoSuchUpload) when calling the ListParts operation: The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.
======================================== GET PART 4 ====
{
"AcceptRanges": "bytes",
"LastModified": "2023-04-25T10:21:59+00:00",
"ContentLength": 20971520,
"ETag": "\"8e817c8ccd442f9a79c77b58fe808c43-4\"",
"ContentRange": "bytes 62914560-83886079/83886080",
"ContentType": "binary/octet-stream",
"Metadata": {},
"PartsCount": 4
}
1bd2383eebbac1f8e7143575ba5b1f4a get-part4
Conclusions:
- Parts are indeed renumbered with consecutive numbers
- ListParts only applies to multipart uploads in progress,
it cannot be used once the multipart upload has been completed
'
export AWS_ACCESS_KEY_ID=1D8Pk2k4oQSoh1BU
export AWS_SECRET_ACCESS_KEY=4B46SR8U7FUgY0raB8Zuxg1NLyLTvbNV
function aws { command aws --endpoint-url http://localhost:9000 $@ ; }
aws --version
aws s3 mb s3://test
for NUM in 1 4 5 6; do
dd if=/dev/urandom of=part$NUM bs=1M count=10
done
md5sum part*
UPLOAD=$(aws s3api create-multipart-upload --bucket test --key 'upload' | jq -r ".UploadId")
echo "Upload ID: $UPLOAD"
PARTS=""
for NUM in 1 4 5 6; do
ETAG=$(aws s3api upload-part --bucket test --key 'upload' --part-number $NUM \
--body "part$NUM" --upload-id "$UPLOAD" | jq -r ".ETag")
echo "PART $NUM ETag: $ETAG"
if [ -n "$PARTS" ]; then
PARTS="$PARTS,"
fi
PARTS="$PARTS {\"ETag\":$ETAG,\"PartNumber\":$NUM}"
done
echo "======================================== LIST ===="
aws s3api list-parts --bucket test --key upload --upload-id "$UPLOAD" | jq
echo "======================================== COMPLETE ===="
echo "{\"Parts\":[$PARTS]}" > mpu
aws s3api complete-multipart-upload --multipart-upload file://mpu \
--bucket test --key 'upload' --upload-id "$UPLOAD"
echo "======================================== LIST ===="
aws s3api list-parts --bucket test --key upload --upload-id "$UPLOAD" | jq
echo "======================================== GET PART 4 ===="
aws s3api get-object --bucket test --key upload --part-number 4 get-part4
md5sum get-part4
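The script assumes a Minio instance on localhost:9000 answering to the hard-coded credentials above. One way to provide one (standard Minio Docker usage, not part of this PR):

# throwaway Minio matching the script's credentials
docker run --rm -d -p 9000:9000 \
-e MINIO_ROOT_USER=1D8Pk2k4oQSoh1BU \
-e MINIO_ROOT_PASSWORD=4B46SR8U7FUgY0raB8Zuxg1NLyLTvbNV \
minio/minio server /data
./script/test-renumbering.sh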

script/test-skip-part.sh (new file, 103 lines)

@@ -0,0 +1,103 @@
#!/usr/bin/env bash
: '
This script tests whether uploaded parts can be skipped in a
CompleteMultipartUpload
On Minio: yes, parts can be skipped
On S3: not tested
Sample output (on Minio):
f23911bcd1230f5ebe8887cbf5bc396e part1
a2657143167eaf647c40473e78a091dc part4
72f72c02c5163bc81024b28ac818c5e0 part5
e29cf500d20498218904b8df8806caa2 part6
Upload ID: e8fe7b83-9800-46fb-ae90-9d7ccd42fe76
PART 1 ETag: "f23911bcd1230f5ebe8887cbf5bc396e"
PART 4 ETag: "a2657143167eaf647c40473e78a091dc"
PART 5 ETag: "72f72c02c5163bc81024b28ac818c5e0"
PART 6 ETag: "e29cf500d20498218904b8df8806caa2"
======================================== COMPLETE ====
{
"Location": "http://localhost:9000/test/upload",
"Bucket": "test",
"Key": "upload",
"ETag": "\"48246e44d4b38bdc2f3c10ee25b1af17-3\""
}
======================================== GET FULL ====
{
"AcceptRanges": "bytes",
"LastModified": "2023-04-25T10:54:35+00:00",
"ContentLength": 31457280,
"ETag": "\"48246e44d4b38bdc2f3c10ee25b1af17-3\"",
"ContentType": "binary/octet-stream",
"Metadata": {}
}
97fb904da7ad310699a6afab0eb6e061 get-full
97fb904da7ad310699a6afab0eb6e061 -
======================================== GET PART 3 ====
{
"AcceptRanges": "bytes",
"LastModified": "2023-04-25T10:54:35+00:00",
"ContentLength": 10485760,
"ETag": "\"48246e44d4b38bdc2f3c10ee25b1af17-3\"",
"ContentRange": "bytes 20971520-31457279/31457280",
"ContentType": "binary/octet-stream",
"Metadata": {},
"PartsCount": 3
}
e29cf500d20498218904b8df8806caa2 get-part3
Conclusions:
- Skipping a part in a CompleteMultipartUpload call is OK
- The part is simply not included in the stored object
- Sequential part renumbering counts only non-skipped parts
'
export AWS_ACCESS_KEY_ID=1D8Pk2k4oQSoh1BU
export AWS_SECRET_ACCESS_KEY=4B46SR8U7FUgY0raB8Zuxg1NLyLTvbNV
function aws { command aws --endpoint-url http://localhost:9000 $@ ; }
aws --version
aws s3 mb s3://test
for NUM in 1 4 5 6; do
dd if=/dev/urandom of=part$NUM bs=1M count=10
done
md5sum part*
UPLOAD=$(aws s3api create-multipart-upload --bucket test --key 'upload' | jq -r ".UploadId")
echo "Upload ID: $UPLOAD"
PARTS=""
for NUM in 1 4 5 6; do
ETAG=$(aws s3api upload-part --bucket test --key 'upload' --part-number $NUM \
--body "part$NUM" --upload-id "$UPLOAD" | jq -r ".ETag")
echo "PART $NUM ETag: $ETAG"
if [ "$NUM" != "5" ]; then
if [ -n "$PARTS" ]; then
PARTS="$PARTS,"
fi
PARTS="$PARTS {\"ETag\":$ETAG,\"PartNumber\":$NUM}"
fi
done
echo "======================================== COMPLETE ===="
echo "{\"Parts\":[$PARTS]}" > mpu
aws s3api complete-multipart-upload --multipart-upload file://mpu \
--bucket test --key 'upload' --upload-id "$UPLOAD"
echo "======================================== GET FULL ===="
aws s3api get-object --bucket test --key upload get-full
md5sum get-full
cat part1 part4 part6 | md5sum
echo "======================================== GET PART 3 ===="
aws s3api get-object --bucket test --key upload --part-number 3 get-part3
md5sum get-part3
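On AWS-style remotes such as Minio, the composite ETag in the sample output is the MD5 of the concatenated binary digests of the retained parts, suffixed with the part count (the usual AWS multipart ETag scheme). A sketch that recomputes it from the three part files kept by this test; it should print 48246e44d4b38bdc2f3c10ee25b1af17-3 for the sample run above:

# md5 of the concatenated binary part digests, then "-<part count>"
for f in part1 part4 part6; do
md5sum "$f" | cut -d ' ' -f 1 | xxd -r -p
done | md5sum | cut -d ' ' -f 1 | sed 's/$/-3/'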

script/test-smoke.sh

@@ -31,6 +31,11 @@ dd if=/dev/urandom of=/tmp/garage.1.rnd bs=1k count=2 # No multipart, inline sto
dd if=/dev/urandom of=/tmp/garage.2.rnd bs=1M count=5 # No multipart but file will be chunked
dd if=/dev/urandom of=/tmp/garage.3.rnd bs=1M count=10 # by default, AWS starts using multipart at 8MB
+dd if=/dev/urandom of=/tmp/garage.part1.rnd bs=1M count=5
+dd if=/dev/urandom of=/tmp/garage.part2.rnd bs=1M count=5
+dd if=/dev/urandom of=/tmp/garage.part3.rnd bs=1M count=5
+dd if=/dev/urandom of=/tmp/garage.part4.rnd bs=1M count=5
# data of lower entropy, to test compression
dd if=/dev/urandom bs=1k count=2 | base64 -w0 > /tmp/garage.1.b64
dd if=/dev/urandom bs=1M count=5 | base64 -w0 > /tmp/garage.2.b64
@@ -40,7 +45,7 @@ echo "🧪 S3 API testing..."
# AWS
if [ -z "$SKIP_AWS" ]; then
-echo "🛠️ Testing with awscli"
+echo "🛠️ Testing with awscli (aws s3)"
source ${SCRIPT_FOLDER}/dev-env-aws.sh
aws s3 ls
for idx in {1..3}.{rnd,b64}; do
@@ -51,7 +56,35 @@ if [ -z "$SKIP_AWS" ]; then
rm /tmp/garage.$idx.dl
aws s3 rm "s3://eprouvette/&+-é\"/garage.$idx.aws"
done
+echo "🛠️ Testing multipart uploads with awscli (aws s3api)"
+UPLOAD=$(aws s3api create-multipart-upload --bucket eprouvette --key 'upload' | jq -r ".UploadId")
+echo "Upload ID: $UPLOAD"
+ETAG3=$(aws s3api upload-part --bucket eprouvette --key 'upload' \
+--part-number 3 --body "/tmp/garage.part1.rnd" --upload-id "$UPLOAD" \
+| jq -r ".ETag")
+ETAG2=$(aws s3api upload-part --bucket eprouvette --key 'upload' \
+--part-number 2 --body "/tmp/garage.part2.rnd" --upload-id "$UPLOAD" \
+| jq -r ".ETag")
+ETAG3=$(aws s3api upload-part --bucket eprouvette --key 'upload' \
+--part-number 3 --body "/tmp/garage.part3.rnd" --upload-id "$UPLOAD" \
+| jq -r ".ETag")
+ETAG6=$(aws s3api upload-part --bucket eprouvette --key 'upload' \
+--part-number 6 --body "/tmp/garage.part4.rnd" --upload-id "$UPLOAD" \
+| jq -r ".ETag")
+MPU="{\"Parts\":[{\"PartNumber\":2,\"ETag\":$ETAG2}, {\"PartNumber\":3,\"ETag\":$ETAG3}, {\"PartNumber\":6,\"ETag\":$ETAG6}]}"
+echo $MPU > /tmp/garage.mpu.json
+aws s3api complete-multipart-upload --multipart-upload file:///tmp/garage.mpu.json \
+--bucket eprouvette --key 'upload' --upload-id "$UPLOAD"
+aws s3api get-object --bucket eprouvette --key upload /tmp/garage.mpu.get
+if [ "$(md5sum /tmp/garage.mpu.get | cut -d ' ' -f 1)" != "$(cat /tmp/garage.part{2,3,4}.rnd | md5sum | cut -d ' ' -f 1)" ]; then
+echo "Invalid multipart upload"
+exit 1
+fi
fi
+echo "OK!!"
+exit 0

# S3CMD
if [ -z "$SKIP_S3CMD" ]; then
@@ -141,6 +174,7 @@ rm eprouvette/winscp
EOF
fi

+rm /tmp/garage.part{1..4}.rnd
rm /tmp/garage.{1..3}.{rnd,b64}

echo "🏁 Teardown"

src/api/admin/bucket.rs

@@ -14,6 +14,7 @@ use garage_model::bucket_alias_table::*;
use garage_model::bucket_table::*;
use garage_model::garage::Garage;
use garage_model::permission::*;
+use garage_model::s3::mpu_table;
use garage_model::s3::object_table::*;

use crate::admin::error::*;
@@ -124,6 +125,14 @@ async fn bucket_info_results(
.map(|x| x.filtered_values(&garage.system.ring.borrow()))
.unwrap_or_default();

+let mpu_counters = garage
+.mpu_counter_table
+.table
+.get(&bucket_id, &EmptyKey)
+.await?
+.map(|x| x.filtered_values(&garage.system.ring.borrow()))
+.unwrap_or_default();
+
let mut relevant_keys = HashMap::new();
for (k, _) in bucket
.state
@@ -183,8 +192,8 @@ async fn bucket_info_results(
}
}),
keys: relevant_keys
-.into_iter()
-.map(|(_, key)| {
+.into_values()
+.map(|key| {
let p = key.state.as_option().unwrap();
GetBucketInfoKey {
access_key_id: key.key_id,
@@ -208,12 +217,12 @@ async fn bucket_info_results(
}
})
.collect::<Vec<_>>(),
-objects: counters.get(OBJECTS).cloned().unwrap_or_default(),
-bytes: counters.get(BYTES).cloned().unwrap_or_default(),
-unfinished_uploads: counters
-.get(UNFINISHED_UPLOADS)
-.cloned()
-.unwrap_or_default(),
+objects: *counters.get(OBJECTS).unwrap_or(&0),
+bytes: *counters.get(BYTES).unwrap_or(&0),
+unfinished_uploads: *counters.get(UNFINISHED_UPLOADS).unwrap_or(&0),
+unfinished_multipart_uploads: *mpu_counters.get(mpu_table::UPLOADS).unwrap_or(&0),
+unfinished_multipart_upload_parts: *mpu_counters.get(mpu_table::PARTS).unwrap_or(&0),
+unfinished_multipart_upload_bytes: *mpu_counters.get(mpu_table::BYTES).unwrap_or(&0),
quotas: ApiBucketQuotas {
max_size: quotas.max_size,
max_objects: quotas.max_objects,
@@ -235,6 +244,9 @@ struct GetBucketInfoResult {
objects: i64,
bytes: i64,
unfinished_uploads: i64,
+unfinished_multipart_uploads: i64,
+unfinished_multipart_upload_parts: i64,
+unfinished_multipart_upload_bytes: i64,
quotas: ApiBucketQuotas,
}

src/api/admin/key.rs

@@ -183,8 +183,8 @@ async fn key_info_results(garage: &Arc<Garage>, key: Key) -> Result<Response<Bod
create_bucket: *key_state.allow_create_bucket.get(),
},
buckets: relevant_buckets
-.into_iter()
-.map(|(_, bucket)| {
+.into_values()
+.map(|bucket| {
let state = bucket.state.as_option().unwrap();
KeyInfoBucketResult {
id: hex::encode(bucket.id),

src/api/s3/api_server.rs

@@ -27,6 +27,7 @@ use crate::s3::cors::*;
use crate::s3::delete::*;
use crate::s3::get::*;
use crate::s3::list::*;
+use crate::s3::multipart::*;
use crate::s3::post_object::handle_post_object;
use crate::s3::put::*;
use crate::s3::router::Endpoint;
@@ -256,7 +257,7 @@ impl ApiHandler for S3ApiServer {
bucket_name,
bucket_id,
delimiter: delimiter.map(|d| d.to_string()),
-page_size: max_keys.map(|p| p.clamp(1, 1000)).unwrap_or(1000),
+page_size: max_keys.unwrap_or(1000).clamp(1, 1000),
prefix: prefix.unwrap_or_default(),
urlencode_resp: encoding_type.map(|e| e == "url").unwrap_or(false),
},
@@ -286,7 +287,7 @@ impl ApiHandler for S3ApiServer {
bucket_name,
bucket_id,
delimiter: delimiter.map(|d| d.to_string()),
-page_size: max_keys.map(|p| p.clamp(1, 1000)).unwrap_or(1000),
+page_size: max_keys.unwrap_or(1000).clamp(1, 1000),
urlencode_resp: encoding_type.map(|e| e == "url").unwrap_or(false),
prefix: prefix.unwrap_or_default(),
},
@@ -319,7 +320,7 @@ impl ApiHandler for S3ApiServer {
bucket_name,
bucket_id,
delimiter: delimiter.map(|d| d.to_string()),
-page_size: max_uploads.map(|p| p.clamp(1, 1000)).unwrap_or(1000),
+page_size: max_uploads.unwrap_or(1000).clamp(1, 1000),
prefix: prefix.unwrap_or_default(),
urlencode_resp: encoding_type.map(|e| e == "url").unwrap_or(false),
},
@@ -343,7 +344,7 @@ impl ApiHandler for S3ApiServer {
key,
upload_id,
part_number_marker: part_number_marker.map(|p| p.clamp(1, 10000)),
-max_parts: max_parts.map(|p| p.clamp(1, 1000)).unwrap_or(1000),
+max_parts: max_parts.unwrap_or(1000).clamp(1, 1000),
},
)
.await
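The old and new page_size expressions are equivalent here, since the default of 1000 already lies inside 1..=1000; the rewrite just reads better. Either way the server caps one page at 1000 entries, which a client can observe, for instance (bucket name illustrative):

# asking for 5000 keys still yields at most 1000 per page,
# with IsTruncated set when more keys exist
aws s3api list-objects-v2 --bucket eprouvette --max-keys 5000 --no-paginate | jq '.KeyCount'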

src/api/s3/copy.rs

@@ -2,7 +2,7 @@ use std::pin::Pin;
use std::sync::Arc;
use std::time::{Duration, SystemTime, UNIX_EPOCH};

-use futures::{stream, stream::Stream, StreamExt, TryFutureExt};
+use futures::{stream, stream::Stream, StreamExt};
use md5::{Digest as Md5Digest, Md5};

use bytes::Bytes;
@@ -18,12 +18,14 @@ use garage_util::time::*;
use garage_model::garage::Garage;
use garage_model::key_table::Key;
use garage_model::s3::block_ref_table::*;
+use garage_model::s3::mpu_table::*;
use garage_model::s3::object_table::*;
use garage_model::s3::version_table::*;

use crate::helpers::parse_bucket_key;
use crate::s3::error::*;
-use crate::s3::put::{decode_upload_id, get_headers};
+use crate::s3::multipart;
+use crate::s3::put::get_headers;
use crate::s3::xml::{self as s3_xml, xmlns_tag};

pub async fn handle_copy(
@@ -92,7 +94,10 @@ pub async fn handle_copy(
let tmp_dest_object_version = ObjectVersion {
uuid: new_uuid,
timestamp: new_timestamp,
-state: ObjectVersionState::Uploading(new_meta.headers.clone()),
+state: ObjectVersionState::Uploading {
+headers: new_meta.headers.clone(),
+multipart: false,
+},
};
let tmp_dest_object = Object::new(
dest_bucket_id,
@@ -105,8 +110,14 @@ pub async fn handle_copy(
// this means that the BlockRef entries linked to this version cannot be
// marked as deleted (they are marked as deleted only if the Version
// doesn't exist or is marked as deleted).
-let mut dest_version =
-Version::new(new_uuid, dest_bucket_id, dest_key.to_string(), false);
+let mut dest_version = Version::new(
+new_uuid,
+VersionBacklink::Object {
+bucket_id: dest_bucket_id,
+key: dest_key.to_string(),
+},
+false,
+);
garage.version_table.insert(&dest_version).await?;

// Fill in block list for version and insert block refs
@@ -179,17 +190,13 @@ pub async fn handle_upload_part_copy(
) -> Result<Response<Body>, Error> {
let copy_precondition = CopyPreconditionHeaders::parse(req)?;

-let dest_version_uuid = decode_upload_id(upload_id)?;
+let dest_upload_id = multipart::decode_upload_id(upload_id)?;
let dest_key = dest_key.to_string();
-let (source_object, dest_object) = futures::try_join!(
+let (source_object, (_, _, mut dest_mpu)) = futures::try_join!(
get_copy_source(&garage, api_key, req),
-garage
-.object_table
-.get(&dest_bucket_id, &dest_key)
-.map_err(Error::from),
+multipart::get_upload(&garage, &dest_bucket_id, &dest_key, &dest_upload_id)
)?;
-let dest_object = dest_object.ok_or(Error::NoSuchKey)?;

let (source_object_version, source_version_data, source_version_meta) =
extract_source_info(&source_object)?;
@@ -217,15 +224,6 @@ pub async fn handle_upload_part_copy(
},
};

-// Check destination version is indeed in uploading state
-if !dest_object
-.versions()
-.iter()
-.any(|v| v.uuid == dest_version_uuid && v.is_uploading())
-{
-return Err(Error::NoSuchUpload);
-}
-
// Check source version is not inlined
match source_version_data {
ObjectVersionData::DeleteMarker => unreachable!(),
@@ -242,23 +240,11 @@ pub async fn handle_upload_part_copy(
-// Fetch source versin with its block list,
-// and destination version to check part hasn't yet been uploaded
-let (source_version, dest_version) = futures::try_join!(
-garage
-.version_table
-.get(&source_object_version.uuid, &EmptyKey),
-garage.version_table.get(&dest_version_uuid, &EmptyKey),
-)?;
-let source_version = source_version.ok_or(Error::NoSuchKey)?;
-
-// Check this part number hasn't yet been uploaded
-if let Some(dv) = dest_version {
-if dv.has_part_number(part_number) {
-return Err(Error::bad_request(format!(
-"Part number {} has already been uploaded",
-part_number
-)));
-}
-}
+// Fetch source version with its block list
+let source_version = garage
+.version_table
+.get(&source_object_version.uuid, &EmptyKey)
+.await?
+.ok_or(Error::NoSuchKey)?;

// We want to reuse blocks from the source version as much as possible.
// However, we still need to get the data from these blocks
@@ -299,6 +285,33 @@ pub async fn handle_upload_part_copy(
current_offset = block_end;
}

+// Calculate the identity of destination part: timestamp, version id
+let dest_version_id = gen_uuid();
+let dest_mpu_part_key = MpuPartKey {
+part_number,
+timestamp: dest_mpu.next_timestamp(part_number),
+};
+
+// Create the uploaded part
+dest_mpu.parts.clear();
+dest_mpu.parts.put(
+dest_mpu_part_key,
+MpuPart {
+version: dest_version_id,
+etag: None,
+size: None,
+},
+);
+garage.mpu_table.insert(&dest_mpu).await?;
+
+let mut dest_version = Version::new(
+dest_version_id,
+VersionBacklink::MultipartUpload {
+upload_id: dest_upload_id,
+},
+false,
+);
+
// Now, actually copy the blocks
let mut md5hasher = Md5::new();
@@ -348,8 +361,8 @@ pub async fn handle_upload_part_copy(
let must_upload = existing_block_hash.is_none();
let final_hash = existing_block_hash.unwrap_or_else(|| blake2sum(&data[..]));

-let mut version = Version::new(dest_version_uuid, dest_bucket_id, dest_key.clone(), false);
-version.blocks.put(
+dest_version.blocks.clear();
+dest_version.blocks.put(
VersionBlockKey {
part_number,
offset: current_offset,
@@ -363,7 +376,7 @@ pub async fn handle_upload_part_copy(
let block_ref = BlockRef {
block: final_hash,
-version: dest_version_uuid,
+version: dest_version_id,
deleted: false.into(),
};
@@ -378,23 +391,33 @@ pub async fn handle_upload_part_copy(
Ok(())
}
},
+async {
// Thing 2: we need to insert the block in the version
-garage.version_table.insert(&version),
+garage.version_table.insert(&dest_version).await?;
// Thing 3: we need to add a block reference
-garage.block_ref_table.insert(&block_ref),
+garage.block_ref_table.insert(&block_ref).await
+},
// Thing 4: we need to prefetch the next block
defragmenter.next(),
)?;
-next_block = res.3;
+next_block = res.2;
}

+assert_eq!(current_offset, source_range.length);
+
let data_md5sum = md5hasher.finalize();
let etag = hex::encode(data_md5sum);

// Put the part's ETag in the Versiontable
-let mut version = Version::new(dest_version_uuid, dest_bucket_id, dest_key.clone(), false);
-version.parts_etags.put(part_number, etag.clone());
-garage.version_table.insert(&version).await?;
+dest_mpu.parts.put(
+dest_mpu_part_key,
+MpuPart {
+version: dest_version_id,
+etag: Some(etag.clone()),
+size: Some(current_offset),
+},
+);
+garage.mpu_table.insert(&dest_mpu).await?;

// LGTM
let resp_xml = s3_xml::to_xml_with_header(&CopyPartResult {
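For context, the rewritten handler serves UploadPartCopy requests such as the following (bucket, key and range illustrative): the copied part now gets its own Version under a fresh uuid, linked to the destination upload through the MultipartUpload entry instead of through the destination Object.

# copy the first 5 MiB of an existing object in as part 2 of an open upload
aws s3api upload-part-copy --bucket eprouvette --key upload \
--upload-id "$UPLOAD" --part-number 2 \
--copy-source "eprouvette/source-object" \
--copy-source-range "bytes=0-5242879"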

src/api/s3/get.rs

@@ -149,7 +149,6 @@ pub async fn handle_head(
let (part_offset, part_end) =
calculate_part_bounds(&version, pn).ok_or(Error::InvalidPart)?;
-let n_parts = version.parts_etags.items().len();

Ok(object_headers(object_version, version_meta)
.header(CONTENT_LENGTH, format!("{}", part_end - part_offset))
@@ -162,7 +161,7 @@ pub async fn handle_head(
version_meta.size
),
)
-.header(X_AMZ_MP_PARTS_COUNT, format!("{}", n_parts))
+.header(X_AMZ_MP_PARTS_COUNT, format!("{}", version.n_parts()?))
.status(StatusCode::PARTIAL_CONTENT)
.body(Body::empty())?)
}
@@ -376,7 +375,6 @@ async fn handle_get_part(
let (begin, end) =
calculate_part_bounds(&version, part_number).ok_or(Error::InvalidPart)?;
-let n_parts = version.parts_etags.items().len();

let body = body_from_blocks_range(garage, version.blocks.items(), begin, end);

@@ -386,7 +384,7 @@ async fn handle_get_part(
CONTENT_RANGE,
format!("bytes {}-{}/{}", begin, end - 1, version_meta.size),
)
-.header(X_AMZ_MP_PARTS_COUNT, format!("{}", n_parts))
+.header(X_AMZ_MP_PARTS_COUNT, format!("{}", version.n_parts()?))
.body(body)?)
}
_ => unreachable!(),
@@ -443,7 +441,7 @@ fn body_from_blocks_range(
// block.part_number, which is not the same in the case of a multipart upload)
let mut blocks: Vec<(VersionBlock, u64)> = Vec::with_capacity(std::cmp::min(
all_blocks.len(),
-4 + ((end - begin) / std::cmp::max(all_blocks[0].1.size as u64, 1024)) as usize,
+4 + ((end - begin) / std::cmp::max(all_blocks[0].1.size, 1024)) as usize,
));
let mut block_offset: u64 = 0;
for (_, b) in all_blocks.iter() {
@@ -454,7 +452,7 @@ fn body_from_blocks_range(
if block_offset < end && block_offset + b.size > begin {
blocks.push((*b, block_offset));
}
-block_offset += b.size as u64;
+block_offset += b.size;
}

let order_stream = OrderTag::stream();
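With the part count now obtained through the version.n_parts() helper introduced by this PR, partNumber requests keep reporting the total through x-amz-mp-parts-count; for instance (names illustrative):

# PartsCount in the aws-cli output mirrors the x-amz-mp-parts-count header
aws s3api head-object --bucket eprouvette --key upload --part-number 2 | jq '.PartsCount'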

src/api/s3/list.rs

@@ -1,4 +1,3 @@
-use std::cmp::Ordering;
use std::collections::{BTreeMap, BTreeSet};
use std::iter::{Iterator, Peekable};
use std::sync::Arc;
@@ -11,15 +10,15 @@ use garage_util::error::Error as GarageError;
use garage_util::time::*;

use garage_model::garage::Garage;
+use garage_model::s3::mpu_table::*;
use garage_model::s3::object_table::*;
-use garage_model::s3::version_table::Version;

-use garage_table::{EmptyKey, EnumerationOrder};
+use garage_table::EnumerationOrder;

use crate::encoding::*;
use crate::helpers::key_after_prefix;
use crate::s3::error::*;
-use crate::s3::put as s3_put;
+use crate::s3::multipart as s3_multipart;
use crate::s3::xml as s3_xml;

const DUMMY_NAME: &str = "Dummy Key";
@@ -176,7 +175,9 @@ pub async fn handle_list_multipart_upload(
t.get_range(
&bucket,
key,
-Some(ObjectFilter::IsUploading),
+Some(ObjectFilter::IsUploading {
+check_multipart: Some(true),
+}),
count,
EnumerationOrder::Forward,
)
@@ -272,24 +273,26 @@ pub async fn handle_list_parts(
) -> Result<Response<Body>, Error> {
debug!("ListParts {:?}", query);

-let upload_id = s3_put::decode_upload_id(&query.upload_id)?;
+let upload_id = s3_multipart::decode_upload_id(&query.upload_id)?;

-let (object, version) = futures::try_join!(
-garage.object_table.get(&query.bucket_id, &query.key),
-garage.version_table.get(&upload_id, &EmptyKey),
-)?;
+let (_, _, mpu) =
+s3_multipart::get_upload(&garage, &query.bucket_id, &query.key, &upload_id).await?;

-let (info, next) = fetch_part_info(query, object, version, upload_id)?;
+let (info, next) = fetch_part_info(query, &mpu)?;

let result = s3_xml::ListPartsResult {
xmlns: (),
+// Query parameters
bucket: s3_xml::Value(query.bucket_name.to_string()),
key: s3_xml::Value(query.key.to_string()),
upload_id: s3_xml::Value(query.upload_id.to_string()),
part_number_marker: query.part_number_marker.map(|e| s3_xml::IntValue(e as i64)),
-next_part_number_marker: next.map(|e| s3_xml::IntValue(e as i64)),
max_parts: s3_xml::IntValue(query.max_parts as i64),
-is_truncated: s3_xml::Value(next.map(|_| "true").unwrap_or("false").to_string()),
+// Result values
+next_part_number_marker: next.map(|e| s3_xml::IntValue(e as i64)),
+is_truncated: s3_xml::Value(format!("{}", next.is_some())),
parts: info
.iter()
.map(|part| s3_xml::PartItem {
@@ -299,6 +302,8 @@ pub async fn handle_list_parts(
size: s3_xml::IntValue(part.size as i64),
})
.collect(),
+// Dummy result values (unsupported features)
initiator: s3_xml::Initiator {
display_name: s3_xml::Value(DUMMY_NAME.to_string()),
id: s3_xml::Value(DUMMY_KEY.to_string()),
@@ -335,8 +340,8 @@ struct UploadInfo {
}

#[derive(Debug, PartialEq)]
-struct PartInfo {
-etag: String,
+struct PartInfo<'a> {
+etag: &'a str,
timestamp: u64,
part_number: u64,
size: u64,
@@ -456,107 +461,51 @@
}
}

-fn fetch_part_info(
+fn fetch_part_info<'a>(
query: &ListPartsQuery,
-object: Option<Object>,
-version: Option<Version>,
-upload_id: Uuid,
-) -> Result<(Vec<PartInfo>, Option<u64>), Error> {
-// Check results
-let object = object.ok_or(Error::NoSuchKey)?;
-
-let obj_version = object
-.versions()
-.iter()
-.find(|v| v.uuid == upload_id && v.is_uploading())
-.ok_or(Error::NoSuchUpload)?;
-
-let version = version.ok_or(Error::NoSuchKey)?;
-
-// Cut the beginning of our 2 vectors if required
-let (etags, blocks) = match &query.part_number_marker {
-Some(marker) => {
-let next = marker + 1;
-
-let part_idx = into_ok_or_err(
-version
-.parts_etags
-.items()
-.binary_search_by(|(part_num, _)| part_num.cmp(&next)),
-);
-let parts = &version.parts_etags.items()[part_idx..];
-
-let block_idx = into_ok_or_err(
-version
-.blocks
-.items()
-.binary_search_by(|(vkey, _)| vkey.part_number.cmp(&next)),
-);
-let blocks = &version.blocks.items()[block_idx..];
-
-(parts, blocks)
-}
-None => (version.parts_etags.items(), version.blocks.items()),
-};
-
-// Use the block vector to compute a (part_number, size) vector
-let mut size = Vec::<(u64, u64)>::new();
-blocks.iter().for_each(|(key, val)| {
-let mut new_size = val.size;
-match size.pop() {
-Some((part_number, size)) if part_number == key.part_number => new_size += size,
-Some(v) => size.push(v),
-None => (),
-}
-size.push((key.part_number, new_size))
-});
-
-// Merge the etag vector and size vector to build a PartInfo vector
-let max_parts = query.max_parts as usize;
-let (mut etag_iter, mut size_iter) = (etags.iter().peekable(), size.iter().peekable());
-
-let mut info = Vec::<PartInfo>::with_capacity(max_parts);
-
-while info.len() < max_parts {
-match (etag_iter.peek(), size_iter.peek()) {
-(Some((ep, etag)), Some((sp, size))) => match ep.cmp(sp) {
-Ordering::Less => {
-debug!("ETag information ignored due to missing corresponding block information. Query: {:?}", query);
-etag_iter.next();
-}
-Ordering::Equal => {
-info.push(PartInfo {
-etag: etag.to_string(),
-timestamp: obj_version.timestamp,
-part_number: *ep,
-size: *size,
-});
-etag_iter.next();
-size_iter.next();
-}
-Ordering::Greater => {
-debug!("Block information ignored due to missing corresponding ETag information. Query: {:?}", query);
-size_iter.next();
-}
-},
-(None, None) => return Ok((info, None)),
-_ => {
-debug!(
-"Additional block or ETag information ignored. Query: {:?}",
-query
-);
-return Ok((info, None));
-}
-}
-}
-
-match info.last() {
-Some(part_info) => {
-let pagination = Some(part_info.part_number);
-Ok((info, pagination))
-}
-None => Ok((info, None)),
-}
-}
+mpu: &'a MultipartUpload,
+) -> Result<(Vec<PartInfo<'a>>, Option<u64>), Error> {
+assert!((1..=1000).contains(&query.max_parts)); // see s3/api_server.rs
+
+// Parse multipart upload part list, removing parts not yet finished
+// and failed part uploads that were overwritten
+let mut parts: Vec<PartInfo<'a>> = Vec::with_capacity(mpu.parts.items().len());
+for (pk, p) in mpu.parts.items().iter() {
+if let (Some(etag), Some(size)) = (&p.etag, p.size) {
+let part_info = PartInfo {
+part_number: pk.part_number,
+timestamp: pk.timestamp,
+etag,
+size,
+};
+match parts.last_mut() {
+Some(lastpart) if lastpart.part_number == pk.part_number => {
+*lastpart = part_info;
+}
+_ => {
+parts.push(part_info);
+}
+}
+}
+}
+
+// Cut the beginning if we have a marker
+if let Some(marker) = &query.part_number_marker {
+let next = marker + 1;
+let part_idx = parts
+.binary_search_by(|part| part.part_number.cmp(&next))
+.unwrap_or_else(|x| x);
+parts = parts.split_off(part_idx);
+}
+
+// Cut the end if we have too many parts
+if parts.len() > query.max_parts as usize {
+parts.truncate(query.max_parts as usize);
+let pagination = Some(parts.last().unwrap().part_number);
+return Ok((parts, pagination));
+}
+
+Ok((parts, None))
+}

/*
@@ -651,7 +600,7 @@ impl ListMultipartUploadsQuery {
}),
uuid => Ok(RangeBegin::AfterUpload {
key: key_marker.to_string(),
-upload: s3_put::decode_upload_id(uuid)?,
+upload: s3_multipart::decode_upload_id(uuid)?,
}),
},
@@ -843,7 +792,7 @@ impl ExtractAccumulator for UploadAccumulator {
let mut uploads_for_key = object
.versions()
.iter()
-.filter(|x| x.is_uploading())
+.filter(|x| x.is_uploading(Some(true)))
.collect::<Vec<&ObjectVersion>>();

// S3 logic requires lexicographically sorted upload ids.
@@ -918,14 +867,6 @@
* Utility functions
*/

-/// This is a stub for Result::into_ok_or_err that is not yet in Rust stable
-fn into_ok_or_err<T>(r: Result<T, T>) -> T {
-match r {
-Ok(r) => r,
-Err(r) => r,
-}
-}
-
/// Returns the common prefix of the object given the query prefix and delimiter
fn common_prefix<'a>(object: &'a Object, query: &ListQueryCommon) -> Option<&'a str> {
match &query.delimiter {
@@ -951,7 +892,6 @@ fn uriencode_maybe(s: &str, yes: bool) -> s3_xml::Value {
#[cfg(test)]
mod tests {
use super::*;
-use garage_model::s3::version_table::*;
use garage_util::*;

use std::iter::FromIterator;
@@ -991,10 +931,13 @@ mod tests {
ObjectVersion {
uuid: Uuid::from(uuid),
timestamp: TS,
-state: ObjectVersionState::Uploading(ObjectVersionHeaders {
+state: ObjectVersionState::Uploading {
+multipart: true,
+headers: ObjectVersionHeaders {
content_type: "text/plain".to_string(),
other: BTreeMap::<String, String>::new(),
-}),
+},
+},
}
}
@@ -1169,83 +1112,76 @@
Ok(())
}

-fn version() -> Version {
+fn mpu() -> MultipartUpload {
let uuid = Uuid::from([0x08; 32]);

-let blocks = vec![
+let parts = vec![
(
-VersionBlockKey {
+MpuPartKey {
part_number: 1,
-offset: 1,
+timestamp: TS,
},
-VersionBlock {
-hash: uuid,
-size: 3,
+MpuPart {
+version: uuid,
+size: Some(3),
+etag: Some("etag1".into()),
},
),
(
-VersionBlockKey {
-part_number: 1,
-offset: 2,
-},
-VersionBlock {
-hash: uuid,
-size: 2,
-},
-),
-(
-VersionBlockKey {
+MpuPartKey {
part_number: 2,
-offset: 1,
+timestamp: TS,
},
-VersionBlock {
-hash: uuid,
-size: 8,
+MpuPart {
+version: uuid,
+size: None,
+etag: None,
},
),
(
-VersionBlockKey {
+MpuPartKey {
+part_number: 3,
+timestamp: TS,
+},
+MpuPart {
+version: uuid,
+size: Some(10),
+etag: Some("etag2".into()),
+},
+),
+(
+MpuPartKey {
part_number: 5,
-offset: 1,
+timestamp: TS,
},
-VersionBlock {
-hash: uuid,
-size: 7,
+MpuPart {
+version: uuid,
+size: Some(7),
+etag: Some("etag3".into()),
},
),
(
-VersionBlockKey {
+MpuPartKey {
part_number: 8,
-offset: 1,
+timestamp: TS,
},
-VersionBlock {
-hash: uuid,
-size: 5,
+MpuPart {
+version: uuid,
+size: Some(5),
+etag: Some("etag4".into()),
},
),
];
-let etags = vec![
-(1, "etag1".to_string()),
-(3, "etag2".to_string()),
-(5, "etag3".to_string()),
-(8, "etag4".to_string()),
-(9, "etag5".to_string()),
-];

-Version {
-bucket_id: uuid,
-key: "a".to_string(),
-uuid,
+MultipartUpload {
+upload_id: uuid,
deleted: false.into(),
-blocks: crdt::Map::<VersionBlockKey, VersionBlock>::from_iter(blocks),
-parts_etags: crdt::Map::<u64, String>::from_iter(etags),
+parts: crdt::Map::<MpuPartKey, MpuPart>::from_iter(parts),
+bucket_id: uuid,
+key: "a".into(),
}
}

-fn obj() -> Object {
-Object::new(bucket(), "d".to_string(), vec![objup_version([0x08; 32])])
-}
-
#[test]
fn test_fetch_part_info() -> Result<(), Error> {
let uuid = Uuid::from([0x08; 32]);
@@ -1258,82 +1194,85 @@ mod tests {
max_parts: 2,
};

-assert!(
-fetch_part_info(&query, None, None, uuid).is_err(),
-"No object and version should fail"
-);
-assert!(
-fetch_part_info(&query, Some(obj()), None, uuid).is_err(),
-"No version should faild"
-);
-assert!(
-fetch_part_info(&query, None, Some(version()), uuid).is_err(),
-"No object should fail"
-);
+let mpu = mpu();

// Start from the beginning but with limited size to trigger pagination
-let (info, pagination) = fetch_part_info(&query, Some(obj()), Some(version()), uuid)?;
-assert_eq!(pagination.unwrap(), 5);
+let (info, pagination) = fetch_part_info(&query, &mpu)?;
+assert_eq!(pagination.unwrap(), 3);
assert_eq!(
info,
vec![
PartInfo {
-etag: "etag1".to_string(),
+etag: "etag1",
timestamp: TS,
part_number: 1,
-size: 5
+size: 3
},
PartInfo {
-etag: "etag3".to_string(),
+etag: "etag2",
timestamp: TS,
-part_number: 5,
-size: 7
+part_number: 3,
+size: 10
},
]
);

// Use previous pagination to make a new request
query.part_number_marker = Some(pagination.unwrap());
-let (info, pagination) = fetch_part_info(&query, Some(obj()), Some(version()), uuid)?;
+let (info, pagination) = fetch_part_info(&query, &mpu)?;
assert!(pagination.is_none());

assert_eq!(
info,
-vec![PartInfo {
-etag: "etag4".to_string(),
+vec![
+PartInfo {
+etag: "etag3",
+timestamp: TS,
+part_number: 5,
+size: 7
+},
+PartInfo {
+etag: "etag4",
timestamp: TS,
part_number: 8,
size: 5
-},]
+},
+]
);

// Trying to access a part that is way larger than registered ones
query.part_number_marker = Some(9999);
-let (info, pagination) = fetch_part_info(&query, Some(obj()), Some(version()), uuid)?;
+let (info, pagination) = fetch_part_info(&query, &mpu)?;
assert!(pagination.is_none());
assert_eq!(info, vec![]);

// Try without any limitation
query.max_parts = 1000;
query.part_number_marker = None;
-let (info, pagination) = fetch_part_info(&query, Some(obj()), Some(version()), uuid)?;
+let (info, pagination) = fetch_part_info(&query, &mpu)?;
assert!(pagination.is_none());
assert_eq!(
info,
vec![
PartInfo {
-etag: "etag1".to_string(),
+etag: "etag1",
timestamp: TS,
part_number: 1,
-size: 5
+size: 3
},
PartInfo {
-etag: "etag3".to_string(),
+etag: "etag2",
+timestamp: TS,
+part_number: 3,
+size: 10
+},
+PartInfo {
+etag: "etag3",
timestamp: TS,
part_number: 5,
size: 7
},
PartInfo {
-etag: "etag4".to_string(),
+etag: "etag4",
timestamp: TS,
part_number: 8,
size: 5
src/api/s3/mod.rs

@@ -7,6 +7,7 @@ pub mod cors;
mod delete;
pub mod get;
mod list;
+mod multipart;
mod post_object;
mod put;
mod website;

src/api/s3/multipart.rs (new file, 464 lines)

@@ -0,0 +1,464 @@
use std::collections::HashMap;
use std::sync::Arc;
use futures::prelude::*;
use hyper::body::Body;
use hyper::{Request, Response};
use md5::{Digest as Md5Digest, Md5};
use garage_table::*;
use garage_util::async_hash::*;
use garage_util::data::*;
use garage_util::time::*;
use garage_model::bucket_table::Bucket;
use garage_model::garage::Garage;
use garage_model::s3::block_ref_table::*;
use garage_model::s3::mpu_table::*;
use garage_model::s3::object_table::*;
use garage_model::s3::version_table::*;
use crate::s3::error::*;
use crate::s3::put::*;
use crate::s3::xml as s3_xml;
use crate::signature::verify_signed_content;
// ----
pub async fn handle_create_multipart_upload(
garage: Arc<Garage>,
req: &Request<Body>,
bucket_name: &str,
bucket_id: Uuid,
key: &str,
) -> Result<Response<Body>, Error> {
let upload_id = gen_uuid();
let headers = get_headers(req.headers())?;
// Create object in object table
let object_version = ObjectVersion {
uuid: upload_id,
timestamp: now_msec(),
state: ObjectVersionState::Uploading {
multipart: true,
headers,
},
};
let object = Object::new(bucket_id, key.to_string(), vec![object_version]);
garage.object_table.insert(&object).await?;
// Create multipart upload in mpu table
// This multipart upload will hold references to uploaded parts
// (which are entries in the Version table)
let mpu = MultipartUpload::new(upload_id, bucket_id, key.into(), false);
garage.mpu_table.insert(&mpu).await?;
// Send success response
let result = s3_xml::InitiateMultipartUploadResult {
xmlns: (),
bucket: s3_xml::Value(bucket_name.to_string()),
key: s3_xml::Value(key.to_string()),
upload_id: s3_xml::Value(hex::encode(upload_id)),
};
let xml = s3_xml::to_xml_with_header(&result)?;
Ok(Response::new(Body::from(xml.into_bytes())))
}
pub async fn handle_put_part(
garage: Arc<Garage>,
req: Request<Body>,
bucket_id: Uuid,
key: &str,
part_number: u64,
upload_id: &str,
content_sha256: Option<Hash>,
) -> Result<Response<Body>, Error> {
let upload_id = decode_upload_id(upload_id)?;
let content_md5 = match req.headers().get("content-md5") {
Some(x) => Some(x.to_str()?.to_string()),
None => None,
};
// Read first chunk, and at the same time try to get object to see if it exists
let key = key.to_string();
let body = req.into_body().map_err(Error::from);
let mut chunker = StreamChunker::new(body, garage.config.block_size);
let ((_, _, mut mpu), first_block) = futures::try_join!(
get_upload(&garage, &bucket_id, &key, &upload_id),
chunker.next(),
)?;
// Check object is valid and part can be accepted
let first_block = first_block.ok_or_bad_request("Empty body")?;
// Calculate part identity: timestamp, version id
let version_uuid = gen_uuid();
let mpu_part_key = MpuPartKey {
part_number,
timestamp: mpu.next_timestamp(part_number),
};
// The following consists in many steps that can each fail.
// Keep track that some cleanup will be needed if things fail
// before everything is finished (cleanup is done using the Drop trait).
let mut interrupted_cleanup = InterruptedCleanup(Some(InterruptedCleanupInner {
garage: garage.clone(),
upload_id,
version_uuid,
}));
// Create version and link version from MPU
mpu.parts.clear();
mpu.parts.put(
mpu_part_key,
MpuPart {
version: version_uuid,
etag: None,
size: None,
},
);
garage.mpu_table.insert(&mpu).await?;
let version = Version::new(
version_uuid,
VersionBacklink::MultipartUpload { upload_id },
false,
);
garage.version_table.insert(&version).await?;
// Copy data to version
let first_block_hash = async_blake2sum(first_block.clone()).await;
let (total_size, data_md5sum, data_sha256sum) = read_and_put_blocks(
&garage,
&version,
part_number,
first_block,
first_block_hash,
&mut chunker,
)
.await?;
// Verify that checksums match
ensure_checksum_matches(
data_md5sum.as_slice(),
data_sha256sum,
content_md5.as_deref(),
content_sha256,
)?;
// Store part etag in version
let data_md5sum_hex = hex::encode(data_md5sum);
mpu.parts.put(
mpu_part_key,
MpuPart {
version: version_uuid,
etag: Some(data_md5sum_hex.clone()),
size: Some(total_size),
},
);
garage.mpu_table.insert(&mpu).await?;
// We were not interrupted, everything went fine.
// We won't have to clean up on drop.
interrupted_cleanup.cancel();
let response = Response::builder()
.header("ETag", format!("\"{}\"", data_md5sum_hex))
.body(Body::empty())
.unwrap();
Ok(response)
}
struct InterruptedCleanup(Option<InterruptedCleanupInner>);
struct InterruptedCleanupInner {
garage: Arc<Garage>,
upload_id: Uuid,
version_uuid: Uuid,
}
impl InterruptedCleanup {
fn cancel(&mut self) {
drop(self.0.take());
}
}
impl Drop for InterruptedCleanup {
fn drop(&mut self) {
if let Some(info) = self.0.take() {
tokio::spawn(async move {
let version = Version::new(
info.version_uuid,
VersionBacklink::MultipartUpload {
upload_id: info.upload_id,
},
true,
);
if let Err(e) = info.garage.version_table.insert(&version).await {
warn!("Cannot cleanup after aborted UploadPart: {}", e);
}
});
}
}
}
pub async fn handle_complete_multipart_upload(
garage: Arc<Garage>,
req: Request<Body>,
bucket_name: &str,
bucket: &Bucket,
key: &str,
upload_id: &str,
content_sha256: Option<Hash>,
) -> Result<Response<Body>, Error> {
let body = hyper::body::to_bytes(req.into_body()).await?;
if let Some(content_sha256) = content_sha256 {
verify_signed_content(content_sha256, &body[..])?;
}
let body_xml = roxmltree::Document::parse(std::str::from_utf8(&body)?)?;
let body_list_of_parts = parse_complete_multipart_upload_body(&body_xml)
.ok_or_bad_request("Invalid CompleteMultipartUpload XML")?;
debug!(
"CompleteMultipartUpload list of parts: {:?}",
body_list_of_parts
);
let upload_id = decode_upload_id(upload_id)?;
// Get object and multipart upload
let key = key.to_string();
let (_, mut object_version, mpu) = get_upload(&garage, &bucket.id, &key, &upload_id).await?;
if mpu.parts.is_empty() {
return Err(Error::bad_request("No data was uploaded"));
}
let headers = match object_version.state {
ObjectVersionState::Uploading { headers, .. } => headers,
_ => unreachable!(),
};
// Check that part numbers are an increasing sequence.
// (it doesn't need to start at 1 nor to be a continuous sequence,
// see discussion in #192)
if body_list_of_parts.is_empty() {
return Err(Error::EntityTooSmall);
}
if !body_list_of_parts
.iter()
.zip(body_list_of_parts.iter().skip(1))
.all(|(p1, p2)| p1.part_number < p2.part_number)
{
return Err(Error::InvalidPartOrder);
}
// Check that the list of parts they gave us corresponds to parts we have here
debug!("Parts stored in multipart upload: {:?}", mpu.parts.items());
let mut have_parts = HashMap::new();
for (pk, pv) in mpu.parts.items().iter() {
have_parts.insert(pk.part_number, pv);
}
let mut parts = vec![];
for req_part in body_list_of_parts.iter() {
match have_parts.get(&req_part.part_number) {
Some(part) if part.etag.as_ref() == Some(&req_part.etag) && part.size.is_some() => {
parts.push(*part)
}
_ => return Err(Error::InvalidPart),
}
}
let grg = &garage;
let parts_versions = futures::future::try_join_all(parts.iter().map(|p| async move {
grg.version_table
.get(&p.version, &EmptyKey)
.await?
.ok_or_internal_error("Part version missing from version table")
}))
.await?;
// Create final version and block refs
let mut final_version = Version::new(
upload_id,
VersionBacklink::Object {
bucket_id: bucket.id,
key: key.to_string(),
},
false,
);
for (part_number, part_version) in parts_versions.iter().enumerate() {
if part_version.deleted.get() {
return Err(Error::InvalidPart);
}
for (vbk, vb) in part_version.blocks.items().iter() {
final_version.blocks.put(
VersionBlockKey {
part_number: (part_number + 1) as u64,
offset: vbk.offset,
},
*vb,
);
}
}
garage.version_table.insert(&final_version).await?;
let block_refs = final_version.blocks.items().iter().map(|(_, b)| BlockRef {
block: b.hash,
version: upload_id,
deleted: false.into(),
});
garage.block_ref_table.insert_many(block_refs).await?;
// Calculate etag of final object
// To understand how etags are calculated, read more here:
// https://teppen.io/2018/06/23/aws_s3_etags/
let mut etag_md5_hasher = Md5::new();
for part in parts.iter() {
etag_md5_hasher.update(part.etag.as_ref().unwrap().as_bytes());
}
let etag = format!(
"{}-{}",
hex::encode(etag_md5_hasher.finalize()),
parts.len()
);
// Calculate total size of final object
let total_size = parts.iter().map(|x| x.size.unwrap()).sum();
if let Err(e) = check_quotas(&garage, bucket, &key, total_size).await {
object_version.state = ObjectVersionState::Aborted;
let final_object = Object::new(bucket.id, key.clone(), vec![object_version]);
garage.object_table.insert(&final_object).await?;
return Err(e);
}
// Write final object version
object_version.state = ObjectVersionState::Complete(ObjectVersionData::FirstBlock(
ObjectVersionMeta {
headers,
size: total_size,
etag: etag.clone(),
},
final_version.blocks.items()[0].1.hash,
));
let final_object = Object::new(bucket.id, key.clone(), vec![object_version]);
garage.object_table.insert(&final_object).await?;
// Send response saying ok we're done
let result = s3_xml::CompleteMultipartUploadResult {
xmlns: (),
location: None,
bucket: s3_xml::Value(bucket_name.to_string()),
key: s3_xml::Value(key),
etag: s3_xml::Value(format!("\"{}\"", etag)),
};
let xml = s3_xml::to_xml_with_header(&result)?;
Ok(Response::new(Body::from(xml.into_bytes())))
}
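// Hedged, minimal sketch of the aggregate-ETag rule implemented above
// (the helper name multipart_etag is illustrative and not part of this
// codebase): MD5 the concatenation of the parts' hex ETag strings, then
// append "-<number of parts>". It reuses the Md5 and hex imports this
// file already depends on.
fn multipart_etag(part_etags: &[&str]) -> String {
let mut hasher = Md5::new();
for etag in part_etags {
// Hash the hex string itself, exactly as the handler above does
hasher.update(etag.as_bytes());
}
format!("{}-{}", hex::encode(hasher.finalize()), part_etags.len())
}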
pub async fn handle_abort_multipart_upload(
garage: Arc<Garage>,
bucket_id: Uuid,
key: &str,
upload_id: &str,
) -> Result<Response<Body>, Error> {
let upload_id = decode_upload_id(upload_id)?;
let (_, mut object_version, _) =
get_upload(&garage, &bucket_id, &key.to_string(), &upload_id).await?;
object_version.state = ObjectVersionState::Aborted;
let final_object = Object::new(bucket_id, key.to_string(), vec![object_version]);
garage.object_table.insert(&final_object).await?;
Ok(Response::new(Body::from(vec![])))
}
// ======== helpers ========
#[allow(clippy::ptr_arg)]
pub(crate) async fn get_upload(
garage: &Garage,
bucket_id: &Uuid,
key: &String,
upload_id: &Uuid,
) -> Result<(Object, ObjectVersion, MultipartUpload), Error> {
let (object, mpu) = futures::try_join!(
garage.object_table.get(bucket_id, key).map_err(Error::from),
garage
.mpu_table
.get(upload_id, &EmptyKey)
.map_err(Error::from),
)?;
let object = object.ok_or(Error::NoSuchUpload)?;
let mpu = mpu.ok_or(Error::NoSuchUpload)?;
let object_version = object
.versions()
.iter()
.find(|v| v.uuid == *upload_id && v.is_uploading(Some(true)))
.ok_or(Error::NoSuchUpload)?
.clone();
Ok((object, object_version, mpu))
}
pub fn decode_upload_id(id: &str) -> Result<Uuid, Error> {
let id_bin = hex::decode(id).map_err(|_| Error::NoSuchUpload)?;
if id_bin.len() != 32 {
return Err(Error::NoSuchUpload);
}
let mut uuid = [0u8; 32];
uuid.copy_from_slice(&id_bin[..]);
Ok(Uuid::from(uuid))
}
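// Hedged round-trip sketch for the upload-ID scheme above; the test
// module is illustrative and assumes the From<[u8; 32]> and
// AsRef<[u8]> impls on Uuid that this file already relies on.
#[cfg(test)]
mod upload_id_tests {
use super::*;
#[test]
fn upload_id_roundtrip() {
let uuid = Uuid::from([0x42u8; 32]);
let id = hex::encode(uuid);
assert_eq!(id.len(), 64);
assert_eq!(decode_upload_id(&id).unwrap(), uuid);
assert!(decode_upload_id("not-hex!").is_err());
assert!(decode_upload_id("abcd").is_err()); // valid hex, wrong length
}
}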
#[derive(Debug)]
struct CompleteMultipartUploadPart {
etag: String,
part_number: u64,
}
fn parse_complete_multipart_upload_body(
xml: &roxmltree::Document,
) -> Option<Vec<CompleteMultipartUploadPart>> {
let mut parts = vec![];
let root = xml.root();
let cmu = root.first_child()?;
if !cmu.has_tag_name("CompleteMultipartUpload") {
return None;
}
for item in cmu.children() {
// Only parse <Part> nodes
if !item.is_element() {
continue;
}
if item.has_tag_name("Part") {
let etag = item.children().find(|e| e.has_tag_name("ETag"))?.text()?;
let part_number = item
.children()
.find(|e| e.has_tag_name("PartNumber"))?
.text()?;
parts.push(CompleteMultipartUploadPart {
etag: etag.trim_matches('"').to_string(),
part_number: part_number.parse().ok()?,
});
} else {
return None;
}
}
Some(parts)
}
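// A well-formed body accepted by the parser above looks like the
// following (hedged example; the ETag values are placeholders).
// Whitespace between elements is ignored because non-element nodes are
// skipped, and the quotes around each ETag are trimmed during parsing:
//
// <CompleteMultipartUpload>
//   <Part><PartNumber>1</PartNumber><ETag>"aaaa..."</ETag></Part>
//   <Part><PartNumber>2</PartNumber><ETag>"bbbb..."</ETag></Part>
// </CompleteMultipartUpload>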

View file

@ -1,4 +1,4 @@
use std::collections::{BTreeMap, BTreeSet, HashMap}; use std::collections::{BTreeMap, HashMap};
use std::sync::Arc; use std::sync::Arc;
use base64::prelude::*; use base64::prelude::*;
@ -30,8 +30,6 @@ use garage_model::s3::object_table::*;
use garage_model::s3::version_table::*; use garage_model::s3::version_table::*;
use crate::s3::error::*; use crate::s3::error::*;
use crate::s3::xml as s3_xml;
use crate::signature::verify_signed_content;
pub async fn handle_put( pub async fn handle_put(
garage: Arc<Garage>, garage: Arc<Garage>,
@ -123,20 +121,23 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
// The following consists in many steps that can each fail. // The following consists in many steps that can each fail.
// Keep track that some cleanup will be needed if things fail // Keep track that some cleanup will be needed if things fail
// before everything is finished (cleanup is done using the Drop trait). // before everything is finished (cleanup is done using the Drop trait).
let mut interrupted_cleanup = InterruptedCleanup(Some(( let mut interrupted_cleanup = InterruptedCleanup(Some(InterruptedCleanupInner {
garage.clone(), garage: garage.clone(),
bucket.id, bucket_id: bucket.id,
key.into(), key: key.into(),
version_uuid, version_uuid,
version_timestamp, version_timestamp,
))); }));
// Write version identifier in object table so that we have a trace // Write version identifier in object table so that we have a trace
// that we are uploading something // that we are uploading something
let mut object_version = ObjectVersion { let mut object_version = ObjectVersion {
uuid: version_uuid, uuid: version_uuid,
timestamp: version_timestamp, timestamp: version_timestamp,
state: ObjectVersionState::Uploading(headers.clone()), state: ObjectVersionState::Uploading {
headers: headers.clone(),
multipart: false,
},
}; };
let object = Object::new(bucket.id, key.into(), vec![object_version.clone()]); let object = Object::new(bucket.id, key.into(), vec![object_version.clone()]);
garage.object_table.insert(&object).await?; garage.object_table.insert(&object).await?;
@ -145,7 +146,14 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
// Write this entry now, even with empty block list, // Write this entry now, even with empty block list,
// to prevent block_ref entries from being deleted (they can be deleted // to prevent block_ref entries from being deleted (they can be deleted
// if they reference a version that isn't found in the version table) // if they reference a version that isn't found in the version table)
let version = Version::new(version_uuid, bucket.id, key.into(), false); let version = Version::new(
version_uuid,
VersionBacklink::Object {
bucket_id: bucket.id,
key: key.into(),
},
false,
);
garage.version_table.insert(&version).await?; garage.version_table.insert(&version).await?;
// Transfer data and verify checksum // Transfer data and verify checksum
@ -192,7 +200,7 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
/// Validate MD5 sum against content-md5 header /// Validate MD5 sum against content-md5 header
/// and sha256sum against signed content-sha256 /// and sha256sum against signed content-sha256
fn ensure_checksum_matches( pub(crate) fn ensure_checksum_matches(
data_md5sum: &[u8], data_md5sum: &[u8],
data_sha256sum: garage_util::data::FixedBytes32, data_sha256sum: garage_util::data::FixedBytes32,
content_md5: Option<&str>, content_md5: Option<&str>,
@ -218,7 +226,7 @@ fn ensure_checksum_matches(
} }
/// Check that inserting this object with this size doesn't exceed bucket quotas /// Check that inserting this object with this size doesn't exceed bucket quotas
async fn check_quotas( pub(crate) async fn check_quotas(
garage: &Arc<Garage>, garage: &Arc<Garage>,
bucket: &Bucket, bucket: &Bucket,
key: &str, key: &str,
@ -275,7 +283,7 @@ async fn check_quotas(
Ok(()) Ok(())
} }
async fn read_and_put_blocks<S: Stream<Item = Result<Bytes, Error>> + Unpin>( pub(crate) async fn read_and_put_blocks<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
garage: &Garage, garage: &Garage,
version: &Version, version: &Version,
part_number: u64, part_number: u64,
@ -381,7 +389,7 @@ async fn put_block_meta(
Ok(()) Ok(())
} }
struct StreamChunker<S: Stream<Item = Result<Bytes, Error>>> { pub(crate) struct StreamChunker<S: Stream<Item = Result<Bytes, Error>>> {
stream: S, stream: S,
read_all: bool, read_all: bool,
block_size: usize, block_size: usize,
@ -389,7 +397,7 @@ struct StreamChunker<S: Stream<Item = Result<Bytes, Error>>> {
} }
impl<S: Stream<Item = Result<Bytes, Error>> + Unpin> StreamChunker<S> { impl<S: Stream<Item = Result<Bytes, Error>> + Unpin> StreamChunker<S> {
fn new(stream: S, block_size: usize) -> Self { pub(crate) fn new(stream: S, block_size: usize) -> Self {
Self { Self {
stream, stream,
read_all: false, read_all: false,
@ -398,7 +406,7 @@ impl<S: Stream<Item = Result<Bytes, Error>> + Unpin> StreamChunker<S> {
} }
} }
async fn next(&mut self) -> Result<Option<Bytes>, Error> { pub(crate) async fn next(&mut self) -> Result<Option<Bytes>, Error> {
while !self.read_all && self.buf.len() < self.block_size { while !self.read_all && self.buf.len() < self.block_size {
if let Some(block) = self.stream.next().await { if let Some(block) = self.stream.next().await {
let bytes = block?; let bytes = block?;
@ -425,7 +433,14 @@ pub fn put_response(version_uuid: Uuid, md5sum_hex: String) -> Response<Body> {
.unwrap() .unwrap()
} }
struct InterruptedCleanup(Option<(Arc<Garage>, Uuid, String, Uuid, u64)>); struct InterruptedCleanup(Option<InterruptedCleanupInner>);
struct InterruptedCleanupInner {
garage: Arc<Garage>,
bucket_id: Uuid,
key: String,
version_uuid: Uuid,
version_timestamp: u64,
}
impl InterruptedCleanup { impl InterruptedCleanup {
fn cancel(&mut self) { fn cancel(&mut self) {
@ -434,15 +449,15 @@ impl InterruptedCleanup {
} }
impl Drop for InterruptedCleanup { impl Drop for InterruptedCleanup {
fn drop(&mut self) { fn drop(&mut self) {
if let Some((garage, bucket_id, key, version_uuid, version_ts)) = self.0.take() { if let Some(info) = self.0.take() {
tokio::spawn(async move { tokio::spawn(async move {
let object_version = ObjectVersion { let object_version = ObjectVersion {
uuid: version_uuid, uuid: info.version_uuid,
timestamp: version_ts, timestamp: info.version_timestamp,
state: ObjectVersionState::Aborted, state: ObjectVersionState::Aborted,
}; };
let object = Object::new(bucket_id, key, vec![object_version]); let object = Object::new(info.bucket_id, info.key, vec![object_version]);
if let Err(e) = garage.object_table.insert(&object).await { if let Err(e) = info.garage.object_table.insert(&object).await {
warn!("Cannot cleanup after aborted PutObject: {}", e); warn!("Cannot cleanup after aborted PutObject: {}", e);
} }
}); });
@ -450,326 +465,9 @@ impl Drop for InterruptedCleanup {
} }
} }
// ---- // ============ helpers ============
pub async fn handle_create_multipart_upload( pub(crate) fn get_mime_type(headers: &HeaderMap<HeaderValue>) -> Result<String, Error> {
garage: Arc<Garage>,
req: &Request<Body>,
bucket_name: &str,
bucket_id: Uuid,
key: &str,
) -> Result<Response<Body>, Error> {
let version_uuid = gen_uuid();
let headers = get_headers(req.headers())?;
// Create object in object table
let object_version = ObjectVersion {
uuid: version_uuid,
timestamp: now_msec(),
state: ObjectVersionState::Uploading(headers),
};
let object = Object::new(bucket_id, key.to_string(), vec![object_version]);
garage.object_table.insert(&object).await?;
// Insert empty version so that block_ref entries refer to something
// (they are inserted concurrently with blocks in the version table, so
// there is the possibility that they are inserted before the version table
// is created, in which case it is allowed to delete them, e.g. in repair_*)
let version = Version::new(version_uuid, bucket_id, key.into(), false);
garage.version_table.insert(&version).await?;
// Send success response
let result = s3_xml::InitiateMultipartUploadResult {
xmlns: (),
bucket: s3_xml::Value(bucket_name.to_string()),
key: s3_xml::Value(key.to_string()),
upload_id: s3_xml::Value(hex::encode(version_uuid)),
};
let xml = s3_xml::to_xml_with_header(&result)?;
Ok(Response::new(Body::from(xml.into_bytes())))
}
pub async fn handle_put_part(
garage: Arc<Garage>,
req: Request<Body>,
bucket_id: Uuid,
key: &str,
part_number: u64,
upload_id: &str,
content_sha256: Option<Hash>,
) -> Result<Response<Body>, Error> {
let version_uuid = decode_upload_id(upload_id)?;
let content_md5 = match req.headers().get("content-md5") {
Some(x) => Some(x.to_str()?.to_string()),
None => None,
};
// Read the first chunk, and at the same time try to get the object to see if it exists
let key = key.to_string();
let body = req.into_body().map_err(Error::from);
let mut chunker = StreamChunker::new(body, garage.config.block_size);
let (object, version, first_block) = futures::try_join!(
garage
.object_table
.get(&bucket_id, &key)
.map_err(Error::from),
garage
.version_table
.get(&version_uuid, &EmptyKey)
.map_err(Error::from),
chunker.next(),
)?;
// Check object is valid and multipart block can be accepted
let first_block = first_block.ok_or_bad_request("Empty body")?;
let object = object.ok_or_bad_request("Object not found")?;
if !object
.versions()
.iter()
.any(|v| v.uuid == version_uuid && v.is_uploading())
{
return Err(Error::NoSuchUpload);
}
// Check part hasn't already been uploaded
if let Some(v) = version {
if v.has_part_number(part_number) {
return Err(Error::bad_request(format!(
"Part number {} has already been uploaded",
part_number
)));
}
}
// Copy block to store
let version = Version::new(version_uuid, bucket_id, key, false);
let first_block_hash = async_blake2sum(first_block.clone()).await;
let (_, data_md5sum, data_sha256sum) = read_and_put_blocks(
&garage,
&version,
part_number,
first_block,
first_block_hash,
&mut chunker,
)
.await?;
// Verify that checksums match
ensure_checksum_matches(
data_md5sum.as_slice(),
data_sha256sum,
content_md5.as_deref(),
content_sha256,
)?;
// Store part etag in version
let data_md5sum_hex = hex::encode(data_md5sum);
let mut version = version;
version
.parts_etags
.put(part_number, data_md5sum_hex.clone());
garage.version_table.insert(&version).await?;
let response = Response::builder()
.header("ETag", format!("\"{}\"", data_md5sum_hex))
.body(Body::empty())
.unwrap();
Ok(response)
}
pub async fn handle_complete_multipart_upload(
garage: Arc<Garage>,
req: Request<Body>,
bucket_name: &str,
bucket: &Bucket,
key: &str,
upload_id: &str,
content_sha256: Option<Hash>,
) -> Result<Response<Body>, Error> {
let body = hyper::body::to_bytes(req.into_body()).await?;
if let Some(content_sha256) = content_sha256 {
verify_signed_content(content_sha256, &body[..])?;
}
let body_xml = roxmltree::Document::parse(std::str::from_utf8(&body)?)?;
let body_list_of_parts = parse_complete_multipart_upload_body(&body_xml)
.ok_or_bad_request("Invalid CompleteMultipartUpload XML")?;
debug!(
"CompleteMultipartUpload list of parts: {:?}",
body_list_of_parts
);
let version_uuid = decode_upload_id(upload_id)?;
// Get object and version
let key = key.to_string();
let (object, version) = futures::try_join!(
garage.object_table.get(&bucket.id, &key),
garage.version_table.get(&version_uuid, &EmptyKey),
)?;
let object = object.ok_or(Error::NoSuchKey)?;
let mut object_version = object
.versions()
.iter()
.find(|v| v.uuid == version_uuid && v.is_uploading())
.cloned()
.ok_or(Error::NoSuchUpload)?;
let version = version.ok_or(Error::NoSuchKey)?;
if version.blocks.is_empty() {
return Err(Error::bad_request("No data was uploaded"));
}
let headers = match object_version.state {
ObjectVersionState::Uploading(headers) => headers,
_ => unreachable!(),
};
// Check that part numbers are an increasing sequence.
// (it doesn't need to start at 1 nor to be a continuous sequence,
// see discussion in #192)
if body_list_of_parts.is_empty() {
return Err(Error::EntityTooSmall);
}
if !body_list_of_parts
.iter()
.zip(body_list_of_parts.iter().skip(1))
.all(|(p1, p2)| p1.part_number < p2.part_number)
{
return Err(Error::InvalidPartOrder);
}
// Garage-specific restriction, see #204: part numbers must be
// consecutive starting at 1
if body_list_of_parts[0].part_number != 1
|| !body_list_of_parts
.iter()
.zip(body_list_of_parts.iter().skip(1))
.all(|(p1, p2)| p1.part_number + 1 == p2.part_number)
{
return Err(Error::NotImplemented("Garage does not support completing a Multipart upload with non-consecutive part numbers. This is a restriction of Garage's data model, which might be fixed in a future release. See issue #204 for more information on this topic.".into()));
}
// Check that the list of parts they gave us corresponds to the parts we have here
debug!("Expected parts from request: {:?}", body_list_of_parts);
debug!("Parts stored in version: {:?}", version.parts_etags.items());
let parts = version
.parts_etags
.items()
.iter()
.map(|pair| (&pair.0, &pair.1));
let same_parts = body_list_of_parts
.iter()
.map(|x| (&x.part_number, &x.etag))
.eq(parts);
if !same_parts {
return Err(Error::InvalidPart);
}
// Check that all blocks belong to one of the parts
let block_parts = version
.blocks
.items()
.iter()
.map(|(bk, _)| bk.part_number)
.collect::<BTreeSet<_>>();
let same_parts = body_list_of_parts
.iter()
.map(|x| x.part_number)
.eq(block_parts.into_iter());
if !same_parts {
return Err(Error::bad_request(
"Part numbers in block list and part list do not match. This can happen if a part was partially uploaded. Please abort the multipart upload and try again."
));
}
// Calculate etag of final object
// To understand how etags are calculated, read more here:
// https://teppen.io/2018/06/23/aws_s3_etags/
let num_parts = body_list_of_parts.len();
let mut etag_md5_hasher = Md5::new();
for (_, etag) in version.parts_etags.items().iter() {
etag_md5_hasher.update(etag.as_bytes());
}
let etag = format!("{}-{}", hex::encode(etag_md5_hasher.finalize()), num_parts);
// Calculate total size of final object
let total_size = version.blocks.items().iter().map(|x| x.1.size).sum();
if let Err(e) = check_quotas(&garage, bucket, &key, total_size).await {
object_version.state = ObjectVersionState::Aborted;
let final_object = Object::new(bucket.id, key.clone(), vec![object_version]);
garage.object_table.insert(&final_object).await?;
return Err(e);
}
// Write final object version
object_version.state = ObjectVersionState::Complete(ObjectVersionData::FirstBlock(
ObjectVersionMeta {
headers,
size: total_size,
etag: etag.clone(),
},
version.blocks.items()[0].1.hash,
));
let final_object = Object::new(bucket.id, key.clone(), vec![object_version]);
garage.object_table.insert(&final_object).await?;
// Send response saying ok we're done
let result = s3_xml::CompleteMultipartUploadResult {
xmlns: (),
location: None,
bucket: s3_xml::Value(bucket_name.to_string()),
key: s3_xml::Value(key),
etag: s3_xml::Value(format!("\"{}\"", etag)),
};
let xml = s3_xml::to_xml_with_header(&result)?;
Ok(Response::new(Body::from(xml.into_bytes())))
}
pub async fn handle_abort_multipart_upload(
garage: Arc<Garage>,
bucket_id: Uuid,
key: &str,
upload_id: &str,
) -> Result<Response<Body>, Error> {
let version_uuid = decode_upload_id(upload_id)?;
let object = garage
.object_table
.get(&bucket_id, &key.to_string())
.await?;
let object = object.ok_or(Error::NoSuchKey)?;
let object_version = object
.versions()
.iter()
.find(|v| v.uuid == version_uuid && v.is_uploading());
let mut object_version = match object_version {
None => return Err(Error::NoSuchUpload),
Some(x) => x.clone(),
};
object_version.state = ObjectVersionState::Aborted;
let final_object = Object::new(bucket_id, key.to_string(), vec![object_version]);
garage.object_table.insert(&final_object).await?;
Ok(Response::new(Body::from(vec![])))
}
fn get_mime_type(headers: &HeaderMap<HeaderValue>) -> Result<String, Error> {
Ok(headers Ok(headers
.get(hyper::header::CONTENT_TYPE) .get(hyper::header::CONTENT_TYPE)
.map(|x| x.to_str()) .map(|x| x.to_str())
@ -821,54 +519,3 @@ pub(crate) fn get_headers(headers: &HeaderMap<HeaderValue>) -> Result<ObjectVers
other, other,
}) })
} }
pub fn decode_upload_id(id: &str) -> Result<Uuid, Error> {
let id_bin = hex::decode(id).map_err(|_| Error::NoSuchUpload)?;
if id_bin.len() != 32 {
return Err(Error::NoSuchUpload);
}
let mut uuid = [0u8; 32];
uuid.copy_from_slice(&id_bin[..]);
Ok(Uuid::from(uuid))
}
#[derive(Debug)]
struct CompleteMultipartUploadPart {
etag: String,
part_number: u64,
}
fn parse_complete_multipart_upload_body(
xml: &roxmltree::Document,
) -> Option<Vec<CompleteMultipartUploadPart>> {
let mut parts = vec![];
let root = xml.root();
let cmu = root.first_child()?;
if !cmu.has_tag_name("CompleteMultipartUpload") {
return None;
}
for item in cmu.children() {
// Only parse <Part> nodes
if !item.is_element() {
continue;
}
if item.has_tag_name("Part") {
let etag = item.children().find(|e| e.has_tag_name("ETag"))?.text()?;
let part_number = item
.children()
.find(|e| e.has_tag_name("PartNumber"))?
.text()?;
parts.push(CompleteMultipartUploadPart {
etag: etag.trim_matches('"').to_string(),
part_number: part_number.parse().ok()?,
});
} else {
return None;
}
}
Some(parts)
}

View file

@ -34,6 +34,7 @@ impl AdminRpcHandler {
.get_range(&hash, None, None, 10000, Default::default()) .get_range(&hash, None, None, 10000, Default::default())
.await?; .await?;
let mut versions = vec![]; let mut versions = vec![];
let mut uploads = vec![];
for br in block_refs { for br in block_refs {
if let Some(v) = self if let Some(v) = self
.garage .garage
@ -41,6 +42,11 @@ impl AdminRpcHandler {
.get(&br.version, &EmptyKey) .get(&br.version, &EmptyKey)
.await? .await?
{ {
if let VersionBacklink::MultipartUpload { upload_id } = &v.backlink {
if let Some(u) = self.garage.mpu_table.get(upload_id, &EmptyKey).await? {
uploads.push(u);
}
}
versions.push(Ok(v)); versions.push(Ok(v));
} else { } else {
versions.push(Err(br.version)); versions.push(Err(br.version));
@ -50,6 +56,7 @@ impl AdminRpcHandler {
hash, hash,
refcount, refcount,
versions, versions,
uploads,
}) })
} }
@ -93,6 +100,7 @@ impl AdminRpcHandler {
} }
let mut obj_dels = 0; let mut obj_dels = 0;
let mut mpu_dels = 0;
let mut ver_dels = 0; let mut ver_dels = 0;
for hash in blocks { for hash in blocks {
@ -105,56 +113,80 @@ impl AdminRpcHandler {
.await?; .await?;
for br in block_refs { for br in block_refs {
let version = match self if let Some(version) = self
.garage .garage
.version_table .version_table
.get(&br.version, &EmptyKey) .get(&br.version, &EmptyKey)
.await? .await?
{ {
Some(v) => v, self.handle_block_purge_version_backlink(
None => continue, &version,
}; &mut obj_dels,
&mut mpu_dels,
if let Some(object) = self )
.garage .await?;
.object_table
.get(&version.bucket_id, &version.key)
.await?
{
let ov = object.versions().iter().rev().find(|v| v.is_complete());
if let Some(ov) = ov {
if ov.uuid == br.version {
let del_uuid = gen_uuid();
let deleted_object = Object::new(
version.bucket_id,
version.key.clone(),
vec![ObjectVersion {
uuid: del_uuid,
timestamp: ov.timestamp + 1,
state: ObjectVersionState::Complete(
ObjectVersionData::DeleteMarker,
),
}],
);
self.garage.object_table.insert(&deleted_object).await?;
obj_dels += 1;
}
}
}
if !version.deleted.get() { if !version.deleted.get() {
let deleted_version = let deleted_version = Version::new(version.uuid, version.backlink, true);
Version::new(version.uuid, version.bucket_id, version.key.clone(), true);
self.garage.version_table.insert(&deleted_version).await?; self.garage.version_table.insert(&deleted_version).await?;
ver_dels += 1; ver_dels += 1;
} }
} }
} }
}
Ok(AdminRpc::Ok(format!( Ok(AdminRpc::Ok(format!(
"{} blocks were purged: {} object deletion markers added, {} versions marked deleted", "Purged {} blocks, {} versions, {} objects, {} multipart uploads",
blocks.len(), blocks.len(),
ver_dels,
obj_dels, obj_dels,
ver_dels mpu_dels,
))) )))
} }
async fn handle_block_purge_version_backlink(
&self,
version: &Version,
obj_dels: &mut usize,
mpu_dels: &mut usize,
) -> Result<(), Error> {
let (bucket_id, key, ov_id) = match &version.backlink {
VersionBacklink::Object { bucket_id, key } => (*bucket_id, key.clone(), version.uuid),
VersionBacklink::MultipartUpload { upload_id } => {
if let Some(mut mpu) = self.garage.mpu_table.get(upload_id, &EmptyKey).await? {
if !mpu.deleted.get() {
mpu.parts.clear();
mpu.deleted.set();
self.garage.mpu_table.insert(&mpu).await?;
*mpu_dels += 1;
}
(mpu.bucket_id, mpu.key.clone(), *upload_id)
} else {
return Ok(());
}
}
};
if let Some(object) = self.garage.object_table.get(&bucket_id, &key).await? {
let ov = object.versions().iter().rev().find(|v| v.is_complete());
if let Some(ov) = ov {
if ov.uuid == ov_id {
let del_uuid = gen_uuid();
let deleted_object = Object::new(
bucket_id,
key,
vec![ObjectVersion {
uuid: del_uuid,
timestamp: ov.timestamp + 1,
state: ObjectVersionState::Complete(ObjectVersionData::DeleteMarker),
}],
);
self.garage.object_table.insert(&deleted_object).await?;
*obj_dels += 1;
}
}
}
Ok(())
}
} }

View file

@ -73,6 +73,15 @@ impl AdminRpcHandler {
.map(|x| x.filtered_values(&self.garage.system.ring.borrow())) .map(|x| x.filtered_values(&self.garage.system.ring.borrow()))
.unwrap_or_default(); .unwrap_or_default();
let mpu_counters = self
.garage
.mpu_counter_table
.table
.get(&bucket_id, &EmptyKey)
.await?
.map(|x| x.filtered_values(&self.garage.system.ring.borrow()))
.unwrap_or_default();
let mut relevant_keys = HashMap::new(); let mut relevant_keys = HashMap::new();
for (k, _) in bucket for (k, _) in bucket
.state .state
@ -112,6 +121,7 @@ impl AdminRpcHandler {
bucket, bucket,
relevant_keys, relevant_keys,
counters, counters,
mpu_counters,
}) })
} }

View file

@ -27,6 +27,7 @@ use garage_model::garage::Garage;
use garage_model::helper::error::{Error, OkOrBadRequest}; use garage_model::helper::error::{Error, OkOrBadRequest};
use garage_model::key_table::*; use garage_model::key_table::*;
use garage_model::migrate::Migrate; use garage_model::migrate::Migrate;
use garage_model::s3::mpu_table::MultipartUpload;
use garage_model::s3::version_table::Version; use garage_model::s3::version_table::Version;
use crate::cli::*; use crate::cli::*;
@ -52,6 +53,7 @@ pub enum AdminRpc {
bucket: Bucket, bucket: Bucket,
relevant_keys: HashMap<String, Key>, relevant_keys: HashMap<String, Key>,
counters: HashMap<String, i64>, counters: HashMap<String, i64>,
mpu_counters: HashMap<String, i64>,
}, },
KeyList(Vec<(String, String)>), KeyList(Vec<(String, String)>),
KeyInfo(Key, HashMap<Uuid, Bucket>), KeyInfo(Key, HashMap<Uuid, Bucket>),
@ -66,6 +68,7 @@ pub enum AdminRpc {
hash: Hash, hash: Hash,
refcount: u64, refcount: u64,
versions: Vec<Result<Version, Uuid>>, versions: Vec<Result<Version, Uuid>>,
uploads: Vec<MultipartUpload>,
}, },
} }

View file

@ -190,8 +190,9 @@ pub async fn cmd_admin(
bucket, bucket,
relevant_keys, relevant_keys,
counters, counters,
mpu_counters,
} => { } => {
print_bucket_info(&bucket, &relevant_keys, &counters); print_bucket_info(&bucket, &relevant_keys, &counters, &mpu_counters);
} }
AdminRpc::KeyList(kl) => { AdminRpc::KeyList(kl) => {
print_key_list(kl); print_key_list(kl);
@ -215,8 +216,9 @@ pub async fn cmd_admin(
hash, hash,
refcount, refcount,
versions, versions,
uploads,
} => { } => {
print_block_info(hash, refcount, versions); print_block_info(hash, refcount, versions, uploads);
} }
r => { r => {
error!("Unexpected response: {:?}", r); error!("Unexpected response: {:?}", r);

View file

@ -443,19 +443,22 @@ pub struct RepairOpt {
#[derive(Serialize, Deserialize, StructOpt, Debug, Eq, PartialEq, Clone)] #[derive(Serialize, Deserialize, StructOpt, Debug, Eq, PartialEq, Clone)]
pub enum RepairWhat { pub enum RepairWhat {
/// Only do a full sync of metadata tables /// Do a full sync of metadata tables
#[structopt(name = "tables", version = garage_version())] #[structopt(name = "tables", version = garage_version())]
Tables, Tables,
/// Only repair (resync/rebalance) the set of stored blocks /// Repair (resync/rebalance) the set of stored blocks
#[structopt(name = "blocks", version = garage_version())] #[structopt(name = "blocks", version = garage_version())]
Blocks, Blocks,
/// Only redo the propagation of object deletions to the version table (slow) /// Repropagate object deletions to the version table
#[structopt(name = "versions", version = garage_version())] #[structopt(name = "versions", version = garage_version())]
Versions, Versions,
/// Only redo the propagation of version deletions to the block ref table (extremely slow) /// Repropagate object deletions to the multipart upload table
#[structopt(name = "mpu", version = garage_version())]
MultipartUploads,
/// Repropagate version deletions to the block ref table
#[structopt(name = "block_refs", version = garage_version())] #[structopt(name = "block_refs", version = garage_version())]
BlockRefs, BlockRefs,
/// Verify integrity of all blocks on disc (extremely slow, i/o intensive) /// Verify integrity of all blocks on disc
#[structopt(name = "scrub", version = garage_version())] #[structopt(name = "scrub", version = garage_version())]
Scrub { Scrub {
#[structopt(subcommand)] #[structopt(subcommand)]

View file

@ -12,8 +12,9 @@ use garage_block::manager::BlockResyncErrorInfo;
use garage_model::bucket_table::*; use garage_model::bucket_table::*;
use garage_model::key_table::*; use garage_model::key_table::*;
use garage_model::s3::object_table::{BYTES, OBJECTS, UNFINISHED_UPLOADS}; use garage_model::s3::mpu_table::{self, MultipartUpload};
use garage_model::s3::version_table::Version; use garage_model::s3::object_table;
use garage_model::s3::version_table::*;
use crate::cli::structs::WorkerListOpt; use crate::cli::structs::WorkerListOpt;
@ -135,6 +136,7 @@ pub fn print_bucket_info(
bucket: &Bucket, bucket: &Bucket,
relevant_keys: &HashMap<String, Key>, relevant_keys: &HashMap<String, Key>,
counters: &HashMap<String, i64>, counters: &HashMap<String, i64>,
mpu_counters: &HashMap<String, i64>,
) { ) {
let key_name = |k| { let key_name = |k| {
relevant_keys relevant_keys
@ -148,7 +150,7 @@ pub fn print_bucket_info(
Deletable::Deleted => println!("Bucket is deleted."), Deletable::Deleted => println!("Bucket is deleted."),
Deletable::Present(p) => { Deletable::Present(p) => {
let size = let size =
bytesize::ByteSize::b(counters.get(BYTES).cloned().unwrap_or_default() as u64); bytesize::ByteSize::b(*counters.get(object_table::BYTES).unwrap_or(&0) as u64);
println!( println!(
"\nSize: {} ({})", "\nSize: {} ({})",
size.to_string_as(true), size.to_string_as(true),
@ -156,14 +158,22 @@ pub fn print_bucket_info(
); );
println!( println!(
"Objects: {}", "Objects: {}",
counters.get(OBJECTS).cloned().unwrap_or_default() *counters.get(object_table::OBJECTS).unwrap_or(&0)
);
println!(
"Unfinished uploads (multipart and non-multipart): {}",
*counters.get(object_table::UNFINISHED_UPLOADS).unwrap_or(&0)
); );
println!( println!(
"Unfinished multipart uploads: {}", "Unfinished multipart uploads: {}",
counters *mpu_counters.get(mpu_table::UPLOADS).unwrap_or(&0)
.get(UNFINISHED_UPLOADS) );
.cloned() let mpu_size =
.unwrap_or_default() bytesize::ByteSize::b(*mpu_counters.get(mpu_table::BYTES).unwrap_or(&0) as u64);
println!(
"Size of unfinished multipart uploads: {} ({})",
mpu_size.to_string_as(true),
mpu_size.to_string_as(false),
); );
println!("\nWebsite access: {}", p.website_config.get().is_some()); println!("\nWebsite access: {}", p.website_config.get().is_some());
@ -385,29 +395,49 @@ pub fn print_block_error_list(el: Vec<BlockResyncErrorInfo>) {
format_table(table); format_table(table);
} }
pub fn print_block_info(hash: Hash, refcount: u64, versions: Vec<Result<Version, Uuid>>) { pub fn print_block_info(
hash: Hash,
refcount: u64,
versions: Vec<Result<Version, Uuid>>,
uploads: Vec<MultipartUpload>,
) {
println!("Block hash: {}", hex::encode(hash.as_slice())); println!("Block hash: {}", hex::encode(hash.as_slice()));
println!("Refcount: {}", refcount); println!("Refcount: {}", refcount);
println!(); println!();
let mut table = vec!["Version\tBucket\tKey\tDeleted".into()]; let mut table = vec!["Version\tBucket\tKey\tMPU\tDeleted".into()];
let mut nondeleted_count = 0; let mut nondeleted_count = 0;
for v in versions.iter() { for v in versions.iter() {
match v { match v {
Ok(ver) => { Ok(ver) => {
match &ver.backlink {
VersionBacklink::Object { bucket_id, key } => {
table.push(format!( table.push(format!(
"{:?}\t{:?}\t{}\t{:?}", "{:?}\t{:?}\t{}\t\t{:?}",
ver.uuid, ver.uuid,
ver.bucket_id, bucket_id,
ver.key, key,
ver.deleted.get() ver.deleted.get()
)); ));
}
VersionBacklink::MultipartUpload { upload_id } => {
let upload = uploads.iter().find(|x| x.upload_id == *upload_id);
table.push(format!(
"{:?}\t{:?}\t{}\t{:?}\t{:?}",
ver.uuid,
upload.map(|u| u.bucket_id).unwrap_or_default(),
upload.map(|u| u.key.as_str()).unwrap_or_default(),
upload_id,
ver.deleted.get()
));
}
}
if !ver.deleted.get() { if !ver.deleted.get() {
nondeleted_count += 1; nondeleted_count += 1;
} }
} }
Err(vh) => { Err(vh) => {
table.push(format!("{:?}\t\t\tyes", vh)); table.push(format!("{:?}\t\t\t\tyes", vh));
} }
} }
} }

View file

@ -5,11 +5,16 @@ use async_trait::async_trait;
use tokio::sync::watch; use tokio::sync::watch;
use garage_block::repair::ScrubWorkerCommand; use garage_block::repair::ScrubWorkerCommand;
use garage_model::garage::Garage; use garage_model::garage::Garage;
use garage_model::s3::block_ref_table::*; use garage_model::s3::block_ref_table::*;
use garage_model::s3::mpu_table::*;
use garage_model::s3::object_table::*; use garage_model::s3::object_table::*;
use garage_model::s3::version_table::*; use garage_model::s3::version_table::*;
use garage_table::replication::*;
use garage_table::*; use garage_table::*;
use garage_util::background::*; use garage_util::background::*;
use garage_util::error::Error; use garage_util::error::Error;
use garage_util::migrate::Migrate; use garage_util::migrate::Migrate;
@ -32,11 +37,15 @@ pub async fn launch_online_repair(
} }
RepairWhat::Versions => { RepairWhat::Versions => {
info!("Repairing the versions table"); info!("Repairing the versions table");
bg.spawn_worker(RepairVersionsWorker::new(garage.clone())); bg.spawn_worker(TableRepairWorker::new(garage.clone(), RepairVersions));
}
RepairWhat::MultipartUploads => {
info!("Repairing the multipart uploads table");
bg.spawn_worker(TableRepairWorker::new(garage.clone(), RepairMpu));
} }
RepairWhat::BlockRefs => { RepairWhat::BlockRefs => {
info!("Repairing the block refs table"); info!("Repairing the block refs table");
bg.spawn_worker(RepairBlockrefsWorker::new(garage.clone())); bg.spawn_worker(TableRepairWorker::new(garage.clone(), RepairBlockRefs));
} }
RepairWhat::Blocks => { RepairWhat::Blocks => {
info!("Repairing the stored blocks"); info!("Repairing the stored blocks");
@ -67,70 +76,70 @@ pub async fn launch_online_repair(
// ---- // ----
struct RepairVersionsWorker { #[async_trait]
trait TableRepair: Send + Sync + 'static {
type T: TableSchema;
fn table(garage: &Garage) -> &Table<Self::T, TableShardedReplication>;
async fn process(
&mut self,
garage: &Garage,
entry: <<Self as TableRepair>::T as TableSchema>::E,
) -> Result<bool, Error>;
}
struct TableRepairWorker<T: TableRepair> {
garage: Arc<Garage>, garage: Arc<Garage>,
pos: Vec<u8>, pos: Vec<u8>,
counter: usize, counter: usize,
repairs: usize,
inner: T,
} }
impl RepairVersionsWorker { impl<R: TableRepair> TableRepairWorker<R> {
fn new(garage: Arc<Garage>) -> Self { fn new(garage: Arc<Garage>, inner: R) -> Self {
Self { Self {
garage, garage,
inner,
pos: vec![], pos: vec![],
counter: 0, counter: 0,
repairs: 0,
} }
} }
} }
#[async_trait] #[async_trait]
impl Worker for RepairVersionsWorker { impl<R: TableRepair> Worker for TableRepairWorker<R> {
fn name(&self) -> String { fn name(&self) -> String {
"Version repair worker".into() format!("{} repair worker", R::T::TABLE_NAME)
} }
fn status(&self) -> WorkerStatus { fn status(&self) -> WorkerStatus {
WorkerStatus { WorkerStatus {
progress: Some(self.counter.to_string()), progress: Some(format!("{} ({})", self.counter, self.repairs)),
..Default::default() ..Default::default()
} }
} }
async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> { async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> {
let (item_bytes, next_pos) = match self.garage.version_table.data.store.get_gt(&self.pos)? { let (item_bytes, next_pos) = match R::table(&self.garage).data.store.get_gt(&self.pos)? {
Some((k, v)) => (v, k), Some((k, v)) => (v, k),
None => { None => {
info!("repair_versions: finished, done {}", self.counter); info!(
"{}: finished, done {}, fixed {}",
self.name(),
self.counter,
self.repairs
);
return Ok(WorkerState::Done); return Ok(WorkerState::Done);
} }
}; };
let version = Version::decode(&item_bytes).ok_or_message("Cannot decode Version")?; let entry = <R::T as TableSchema>::E::decode(&item_bytes)
if !version.deleted.get() { .ok_or_message("Cannot decode table entry")?;
let object = self if self.inner.process(&self.garage, entry).await? {
.garage self.repairs += 1;
.object_table
.get(&version.bucket_id, &version.key)
.await?;
let version_exists = match object {
Some(o) => o
.versions()
.iter()
.any(|x| x.uuid == version.uuid && x.state != ObjectVersionState::Aborted),
None => false,
};
if !version_exists {
info!("Repair versions: marking version as deleted: {:?}", version);
self.garage
.version_table
.insert(&Version::new(
version.uuid,
version.bucket_id,
version.key,
true,
))
.await?;
}
} }
self.counter += 1; self.counter += 1;
@ -146,77 +155,124 @@ impl Worker for RepairVersionsWorker {
// ---- // ----
struct RepairBlockrefsWorker { struct RepairVersions;
garage: Arc<Garage>,
pos: Vec<u8>,
counter: usize,
}
impl RepairBlockrefsWorker {
fn new(garage: Arc<Garage>) -> Self {
Self {
garage,
pos: vec![],
counter: 0,
}
}
}
#[async_trait] #[async_trait]
impl Worker for RepairBlockrefsWorker { impl TableRepair for RepairVersions {
fn name(&self) -> String { type T = VersionTable;
"Block refs repair worker".into()
fn table(garage: &Garage) -> &Table<Self::T, TableShardedReplication> {
&garage.version_table
} }
fn status(&self) -> WorkerStatus { async fn process(&mut self, garage: &Garage, version: Version) -> Result<bool, Error> {
WorkerStatus { if !version.deleted.get() {
progress: Some(self.counter.to_string()), let ref_exists = match &version.backlink {
..Default::default() VersionBacklink::Object { bucket_id, key } => garage
} .object_table
} .get(bucket_id, key)
.await?
async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> { .map(|o| {
let (item_bytes, next_pos) = o.versions().iter().any(|x| {
match self.garage.block_ref_table.data.store.get_gt(&self.pos)? { x.uuid == version.uuid && x.state != ObjectVersionState::Aborted
Some((k, v)) => (v, k), })
None => { })
info!("repair_block_ref: finished, done {}", self.counter); .unwrap_or(false),
return Ok(WorkerState::Done); VersionBacklink::MultipartUpload { upload_id } => garage
} .mpu_table
.get(upload_id, &EmptyKey)
.await?
.map(|u| !u.deleted.get())
.unwrap_or(false),
}; };
let block_ref = BlockRef::decode(&item_bytes).ok_or_message("Cannot decode BlockRef")?; if !ref_exists {
info!("Repair versions: marking version as deleted: {:?}", version);
garage
.version_table
.insert(&Version::new(version.uuid, version.backlink, true))
.await?;
return Ok(true);
}
}
Ok(false)
}
}
// ----
struct RepairBlockRefs;
#[async_trait]
impl TableRepair for RepairBlockRefs {
type T = BlockRefTable;
fn table(garage: &Garage) -> &Table<Self::T, TableShardedReplication> {
&garage.block_ref_table
}
async fn process(&mut self, garage: &Garage, mut block_ref: BlockRef) -> Result<bool, Error> {
if !block_ref.deleted.get() { if !block_ref.deleted.get() {
let version = self let ref_exists = garage
.garage
.version_table .version_table
.get(&block_ref.version, &EmptyKey) .get(&block_ref.version, &EmptyKey)
.await?; .await?
// The version might not exist if it has been GC'ed .map(|v| !v.deleted.get())
let ref_exists = version.map(|v| !v.deleted.get()).unwrap_or(false); .unwrap_or(false);
if !ref_exists { if !ref_exists {
info!( info!(
"Repair block ref: marking block_ref as deleted: {:?}", "Repair block ref: marking block_ref as deleted: {:?}",
block_ref block_ref
); );
self.garage block_ref.deleted.set();
.block_ref_table garage.block_ref_table.insert(&block_ref).await?;
.insert(&BlockRef { return Ok(true);
block: block_ref.block, }
version: block_ref.version, }
deleted: true.into(),
Ok(false)
}
}
// ----
struct RepairMpu;
#[async_trait]
impl TableRepair for RepairMpu {
type T = MultipartUploadTable;
fn table(garage: &Garage) -> &Table<Self::T, TableShardedReplication> {
&garage.mpu_table
}
async fn process(&mut self, garage: &Garage, mut mpu: MultipartUpload) -> Result<bool, Error> {
if !mpu.deleted.get() {
let ref_exists = garage
.object_table
.get(&mpu.bucket_id, &mpu.key)
.await?
.map(|o| {
o.versions()
.iter()
.any(|x| x.uuid == mpu.upload_id && x.is_uploading(Some(true)))
}) })
.await?; .unwrap_or(false);
if !ref_exists {
info!(
"Repair multipart uploads: marking mpu as deleted: {:?}",
mpu
);
mpu.parts.clear();
mpu.deleted.set();
garage.mpu_table.insert(&mpu).await?;
return Ok(true);
} }
} }
self.counter += 1; Ok(false)
self.pos = next_pos;
Ok(WorkerState::Busy)
}
async fn wait_for_work(&mut self) -> WorkerState {
unreachable!()
} }
} }

View file

@ -5,6 +5,190 @@ use aws_sdk_s3::types::ByteStream;
const SZ_5MB: usize = 5 * 1024 * 1024; const SZ_5MB: usize = 5 * 1024 * 1024;
const SZ_10MB: usize = 10 * 1024 * 1024; const SZ_10MB: usize = 10 * 1024 * 1024;
#[tokio::test]
async fn test_multipart_upload() {
let ctx = common::context();
let bucket = ctx.create_bucket("testmpu");
let u1 = vec![0x11; SZ_5MB];
let u2 = vec![0x22; SZ_5MB];
let u3 = vec![0x33; SZ_5MB];
let u4 = vec![0x44; SZ_5MB];
let u5 = vec![0x55; SZ_5MB];
let up = ctx
.client
.create_multipart_upload()
.bucket(&bucket)
.key("a")
.send()
.await
.unwrap();
assert!(up.upload_id.is_some());
let uid = up.upload_id.as_ref().unwrap();
let p3 = ctx
.client
.upload_part()
.bucket(&bucket)
.key("a")
.upload_id(uid)
.part_number(3)
.body(ByteStream::from(u3.clone()))
.send()
.await
.unwrap();
let _p1 = ctx
.client
.upload_part()
.bucket(&bucket)
.key("a")
.upload_id(uid)
.part_number(1)
.body(ByteStream::from(u1))
.send()
.await
.unwrap();
let _p4 = ctx
.client
.upload_part()
.bucket(&bucket)
.key("a")
.upload_id(uid)
.part_number(4)
.body(ByteStream::from(u4))
.send()
.await
.unwrap();
let p1bis = ctx
.client
.upload_part()
.bucket(&bucket)
.key("a")
.upload_id(uid)
.part_number(1)
.body(ByteStream::from(u2.clone()))
.send()
.await
.unwrap();
let p6 = ctx
.client
.upload_part()
.bucket(&bucket)
.key("a")
.upload_id(uid)
.part_number(6)
.body(ByteStream::from(u5.clone()))
.send()
.await
.unwrap();
{
let r = ctx
.client
.list_parts()
.bucket(&bucket)
.key("a")
.upload_id(uid)
.send()
.await
.unwrap();
assert_eq!(r.parts.unwrap().len(), 4);
}
let cmp = CompletedMultipartUpload::builder()
.parts(
CompletedPart::builder()
.part_number(1)
.e_tag(p1bis.e_tag.unwrap())
.build(),
)
.parts(
CompletedPart::builder()
.part_number(3)
.e_tag(p3.e_tag.unwrap())
.build(),
)
.parts(
CompletedPart::builder()
.part_number(6)
.e_tag(p6.e_tag.unwrap())
.build(),
)
.build();
ctx.client
.complete_multipart_upload()
.bucket(&bucket)
.key("a")
.upload_id(uid)
.multipart_upload(cmp)
.send()
.await
.unwrap();
// The multipart upload must not appear anymore
assert!(ctx
.client
.list_parts()
.bucket(&bucket)
.key("a")
.upload_id(uid)
.send()
.await
.is_err());
{
// The object must appear as a regular object
let r = ctx
.client
.head_object()
.bucket(&bucket)
.key("a")
.send()
.await
.unwrap();
assert_eq!(r.content_length, (SZ_5MB * 3) as i64);
}
{
let o = ctx
.client
.get_object()
.bucket(&bucket)
.key("a")
.send()
.await
.unwrap();
assert_bytes_eq!(o.body, &[&u2[..], &u3[..], &u5[..]].concat());
}
{
for (part_number, data) in [(1, &u2), (2, &u3), (3, &u5)] {
let o = ctx
.client
.get_object()
.bucket(&bucket)
.key("a")
.part_number(part_number)
.send()
.await
.unwrap();
eprintln!("get_object with part_number = {}", part_number);
assert_eq!(o.content_length, SZ_5MB as i64);
assert_bytes_eq!(o.body, data);
}
}
}
#[tokio::test] #[tokio::test]
async fn test_uploadlistpart() { async fn test_uploadlistpart() {
let ctx = common::context(); let ctx = common::context();
@ -65,7 +249,8 @@ async fn test_uploadlistpart() {
let ps = r.parts.unwrap(); let ps = r.parts.unwrap();
assert_eq!(ps.len(), 1); assert_eq!(ps.len(), 1);
let fp = ps.iter().find(|x| x.part_number == 2).unwrap(); assert_eq!(ps[0].part_number, 2);
let fp = &ps[0];
assert!(fp.last_modified.is_some()); assert!(fp.last_modified.is_some());
assert_eq!( assert_eq!(
fp.e_tag.as_ref().unwrap(), fp.e_tag.as_ref().unwrap(),
@ -100,13 +285,24 @@ async fn test_uploadlistpart() {
let ps = r.parts.unwrap(); let ps = r.parts.unwrap();
assert_eq!(ps.len(), 2); assert_eq!(ps.len(), 2);
let fp = ps.iter().find(|x| x.part_number == 1).unwrap();
assert_eq!(ps[0].part_number, 1);
let fp = &ps[0];
assert!(fp.last_modified.is_some()); assert!(fp.last_modified.is_some());
assert_eq!( assert_eq!(
fp.e_tag.as_ref().unwrap(), fp.e_tag.as_ref().unwrap(),
"\"3c484266f9315485694556e6c693bfa2\"" "\"3c484266f9315485694556e6c693bfa2\""
); );
assert_eq!(fp.size, SZ_5MB as i64); assert_eq!(fp.size, SZ_5MB as i64);
assert_eq!(ps[1].part_number, 2);
let sp = &ps[1];
assert!(sp.last_modified.is_some());
assert_eq!(
sp.e_tag.as_ref().unwrap(),
"\"3366bb9dcf710d6801b5926467d02e19\""
);
assert_eq!(sp.size, SZ_5MB as i64);
} }
{ {
@ -123,12 +319,19 @@ async fn test_uploadlistpart() {
.unwrap(); .unwrap();
assert!(r.part_number_marker.is_none()); assert!(r.part_number_marker.is_none());
assert!(r.next_part_number_marker.is_some()); assert_eq!(r.next_part_number_marker.as_deref(), Some("1"));
assert_eq!(r.max_parts, 1_i32); assert_eq!(r.max_parts, 1_i32);
assert!(r.is_truncated); assert!(r.is_truncated);
assert_eq!(r.key.unwrap(), "a"); assert_eq!(r.key.unwrap(), "a");
assert_eq!(r.upload_id.unwrap().as_str(), uid.as_str()); assert_eq!(r.upload_id.unwrap().as_str(), uid.as_str());
assert_eq!(r.parts.unwrap().len(), 1); let parts = r.parts.unwrap();
assert_eq!(parts.len(), 1);
let fp = &parts[0];
assert_eq!(fp.part_number, 1);
assert_eq!(
fp.e_tag.as_ref().unwrap(),
"\"3c484266f9315485694556e6c693bfa2\""
);
let r2 = ctx let r2 = ctx
.client .client
@ -147,10 +350,18 @@ async fn test_uploadlistpart() {
r.next_part_number_marker.as_ref().unwrap() r.next_part_number_marker.as_ref().unwrap()
); );
assert_eq!(r2.max_parts, 1_i32); assert_eq!(r2.max_parts, 1_i32);
assert!(r2.is_truncated);
assert_eq!(r2.key.unwrap(), "a"); assert_eq!(r2.key.unwrap(), "a");
assert_eq!(r2.upload_id.unwrap().as_str(), uid.as_str()); assert_eq!(r2.upload_id.unwrap().as_str(), uid.as_str());
assert_eq!(r2.parts.unwrap().len(), 1); let parts = r2.parts.unwrap();
assert_eq!(parts.len(), 1);
let fp = &parts[0];
assert_eq!(fp.part_number, 2);
assert_eq!(
fp.e_tag.as_ref().unwrap(),
"\"3366bb9dcf710d6801b5926467d02e19\""
);
//assert!(r2.is_truncated); // WHY? (this was the test before)
assert!(!r2.is_truncated);
} }
let cmp = CompletedMultipartUpload::builder() let cmp = CompletedMultipartUpload::builder()

View file

@ -17,6 +17,7 @@ use garage_table::replication::TableShardedReplication;
use garage_table::*; use garage_table::*;
use crate::s3::block_ref_table::*; use crate::s3::block_ref_table::*;
use crate::s3::mpu_table::*;
use crate::s3::object_table::*; use crate::s3::object_table::*;
use crate::s3::version_table::*; use crate::s3::version_table::*;
@ -57,6 +58,10 @@ pub struct Garage {
pub object_table: Arc<Table<ObjectTable, TableShardedReplication>>, pub object_table: Arc<Table<ObjectTable, TableShardedReplication>>,
/// Counting table containing object counters /// Counting table containing object counters
pub object_counter_table: Arc<IndexCounter<Object>>, pub object_counter_table: Arc<IndexCounter<Object>>,
/// Table containing S3 multipart uploads
pub mpu_table: Arc<Table<MultipartUploadTable, TableShardedReplication>>,
/// Counting table containing multipart object counters
pub mpu_counter_table: Arc<IndexCounter<MultipartUpload>>,
/// Table containing S3 object versions /// Table containing S3 object versions
pub version_table: Arc<Table<VersionTable, TableShardedReplication>>, pub version_table: Arc<Table<VersionTable, TableShardedReplication>>,
/// Table containing S3 block references (not blocks themselves) /// Table containing S3 block references (not blocks themselves)
@ -261,6 +266,20 @@ impl Garage {
&db, &db,
); );
info!("Initialize multipart upload counter table...");
let mpu_counter_table = IndexCounter::new(system.clone(), meta_rep_param.clone(), &db);
info!("Initialize multipart upload table...");
let mpu_table = Table::new(
MultipartUploadTable {
version_table: version_table.clone(),
mpu_counter_table: mpu_counter_table.clone(),
},
meta_rep_param.clone(),
system.clone(),
&db,
);
info!("Initialize object counter table..."); info!("Initialize object counter table...");
let object_counter_table = IndexCounter::new(system.clone(), meta_rep_param.clone(), &db); let object_counter_table = IndexCounter::new(system.clone(), meta_rep_param.clone(), &db);
@ -269,6 +288,7 @@ impl Garage {
let object_table = Table::new( let object_table = Table::new(
ObjectTable { ObjectTable {
version_table: version_table.clone(), version_table: version_table.clone(),
mpu_table: mpu_table.clone(),
object_counter_table: object_counter_table.clone(), object_counter_table: object_counter_table.clone(),
}, },
meta_rep_param.clone(), meta_rep_param.clone(),
@ -297,6 +317,8 @@ impl Garage {
key_table, key_table,
object_table, object_table,
object_counter_table, object_counter_table,
mpu_table,
mpu_counter_table,
version_table, version_table,
block_ref_table, block_ref_table,
#[cfg(feature = "k2v")] #[cfg(feature = "k2v")]
@ -313,6 +335,8 @@ impl Garage {
self.object_table.spawn_workers(bg); self.object_table.spawn_workers(bg);
self.object_counter_table.spawn_workers(bg); self.object_counter_table.spawn_workers(bg);
self.mpu_table.spawn_workers(bg);
self.mpu_counter_table.spawn_workers(bg);
self.version_table.spawn_workers(bg); self.version_table.spawn_workers(bg);
self.block_ref_table.spawn_workers(bg); self.block_ref_table.spawn_workers(bg);

View file

@ -478,7 +478,9 @@ impl<'a> BucketHelper<'a> {
// ---- // ----
/// Deletes all incomplete multipart uploads that are older than a certain time. /// Deletes all incomplete multipart uploads that are older than a certain time.
/// Returns the number of uploads aborted /// Returns the number of uploads aborted.
/// This will also include non-multipart uploads, which may be lingering
/// after a node crash.
pub async fn cleanup_incomplete_uploads( pub async fn cleanup_incomplete_uploads(
&self, &self,
bucket_id: &Uuid, bucket_id: &Uuid,
@ -496,7 +498,9 @@ impl<'a> BucketHelper<'a> {
.get_range( .get_range(
bucket_id, bucket_id,
start, start,
Some(ObjectFilter::IsUploading), Some(ObjectFilter::IsUploading {
check_multipart: None,
}),
1000, 1000,
EnumerationOrder::Forward, EnumerationOrder::Forward,
) )
@ -508,7 +512,7 @@ impl<'a> BucketHelper<'a> {
let aborted_versions = object let aborted_versions = object
.versions() .versions()
.iter() .iter()
.filter(|v| v.is_uploading() && v.timestamp < older_than) .filter(|v| v.is_uploading(None) && v.timestamp < older_than)
.map(|v| ObjectVersion { .map(|v| ObjectVersion {
state: ObjectVersionState::Aborted, state: ObjectVersionState::Aborted,
uuid: v.uuid, uuid: v.uuid,

View file

@ -1,3 +1,4 @@
pub mod block_ref_table; pub mod block_ref_table;
pub mod mpu_table;
pub mod object_table; pub mod object_table;
pub mod version_table; pub mod version_table;

245
src/model/s3/mpu_table.rs Normal file
View file

@ -0,0 +1,245 @@
use std::sync::Arc;
use garage_db as db;
use garage_util::crdt::Crdt;
use garage_util::data::*;
use garage_util::time::*;
use garage_table::replication::TableShardedReplication;
use garage_table::*;
use crate::index_counter::*;
use crate::s3::version_table::*;
pub const UPLOADS: &str = "uploads";
pub const PARTS: &str = "parts";
pub const BYTES: &str = "bytes";
mod v09 {
use garage_util::crdt;
use garage_util::data::Uuid;
use serde::{Deserialize, Serialize};
/// A part of a multipart upload
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct MultipartUpload {
/// Partition key = Upload id = UUID of the object version
pub upload_id: Uuid,
/// Is this multipart upload deleted
/// The MultipartUpload is marked as deleted as soon as the
/// multipart upload is either completed or aborted
pub deleted: crdt::Bool,
/// List of uploaded parts, key = (part number, timestamp)
/// In case of retries, all versions for each part are kept
/// Everything is cleaned up only once the MultipartUpload is marked deleted
pub parts: crdt::Map<MpuPartKey, MpuPart>,
// Back link to bucket+key so that we can find the object this mpu
// belongs to and check whether it is still valid
/// Bucket in which the related object is stored
pub bucket_id: Uuid,
/// Key in which the related object is stored
pub key: String,
}
#[derive(PartialEq, Eq, Clone, Copy, Debug, Serialize, Deserialize)]
pub struct MpuPartKey {
/// Number of the part
pub part_number: u64,
/// Timestamp of part upload
pub timestamp: u64,
}
/// The version of an uploaded part
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct MpuPart {
/// Links to a Version in VersionTable
pub version: Uuid,
/// ETag of the content of this part (known only once done uploading)
pub etag: Option<String>,
/// Size of this part (known only once done uploading)
pub size: Option<u64>,
}
impl garage_util::migrate::InitialFormat for MultipartUpload {
const VERSION_MARKER: &'static [u8] = b"G09s3mpu";
}
}
pub use v09::*;
impl Ord for MpuPartKey {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
self.part_number
.cmp(&other.part_number)
.then(self.timestamp.cmp(&other.timestamp))
}
}
impl PartialOrd for MpuPartKey {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}
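// Illustrative ordering (values made up): (part 1, t = 10) <
// (part 1, t = 20) < (part 2, t = 5). Keys sort by part number first
// and timestamp second, so all retries of a given part sit next to
// each other in the CRDT map.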
impl MultipartUpload {
pub fn new(upload_id: Uuid, bucket_id: Uuid, key: String, deleted: bool) -> Self {
Self {
upload_id,
deleted: crdt::Bool::new(deleted),
parts: crdt::Map::new(),
bucket_id,
key,
}
}
pub fn next_timestamp(&self, part_number: u64) -> u64 {
std::cmp::max(
now_msec(),
1 + self
.parts
.items()
.iter()
.filter(|(x, _)| x.part_number == part_number)
.map(|(x, _)| x.timestamp)
.max()
.unwrap_or(0),
)
}
}
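// Hedged illustration of next_timestamp (not a test in this PR): if
// part 2 already has entries at timestamps {50, 70} and now_msec()
// returns 60, next_timestamp(2) yields 71. A retried part therefore
// always gets a strictly larger key than any stored attempt, even if
// the local clock runs backwards.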
impl Entry<Uuid, EmptyKey> for MultipartUpload {
fn partition_key(&self) -> &Uuid {
&self.upload_id
}
fn sort_key(&self) -> &EmptyKey {
&EmptyKey
}
fn is_tombstone(&self) -> bool {
self.deleted.get()
}
}
impl Crdt for MultipartUpload {
fn merge(&mut self, other: &Self) {
self.deleted.merge(&other.deleted);
if self.deleted.get() {
self.parts.clear();
} else {
self.parts.merge(&other.parts);
}
}
}
impl Crdt for MpuPart {
fn merge(&mut self, other: &Self) {
self.etag = match (self.etag.take(), &other.etag) {
(None, Some(_)) => other.etag.clone(),
(Some(x), Some(y)) if x < *y => other.etag.clone(),
(x, _) => x,
};
self.size = match (self.size, other.size) {
(None, Some(_)) => other.size,
(Some(x), Some(y)) if x < y => other.size,
(x, _) => x,
};
}
}
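// Illustrative merge (values made up): merging
// MpuPart { etag: None, size: None } with
// MpuPart { etag: Some("ab".into()), size: Some(5) } yields the latter;
// once both sides are Some, the larger value wins, keeping the merge
// commutative across replicas.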
pub struct MultipartUploadTable {
pub version_table: Arc<Table<VersionTable, TableShardedReplication>>,
pub mpu_counter_table: Arc<IndexCounter<MultipartUpload>>,
}
impl TableSchema for MultipartUploadTable {
const TABLE_NAME: &'static str = "multipart_upload";
type P = Uuid;
type S = EmptyKey;
type E = MultipartUpload;
type Filter = DeletedFilter;
fn updated(
&self,
tx: &mut db::Transaction,
old: Option<&Self::E>,
new: Option<&Self::E>,
) -> db::TxOpResult<()> {
// 1. Count
let counter_res = self.mpu_counter_table.count(tx, old, new);
if let Err(e) = db::unabort(counter_res)? {
error!(
"Unable to update multipart object part counter: {}. Index values will be wrong!",
e
);
}
// 2. Propagate deletions to version table
if let (Some(old_mpu), Some(new_mpu)) = (old, new) {
if new_mpu.deleted.get() && !old_mpu.deleted.get() {
let deleted_versions = old_mpu.parts.items().iter().map(|(_k, p)| {
Version::new(
p.version,
VersionBacklink::MultipartUpload {
upload_id: old_mpu.upload_id,
},
true,
)
});
for version in deleted_versions {
let res = self.version_table.queue_insert(tx, &version);
if let Err(e) = db::unabort(res)? {
error!("Unable to enqueue version deletion propagation: {}. A repair will be needed.", e);
}
}
}
}
Ok(())
}
fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool {
filter.apply(entry.is_tombstone())
}
}
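// Hedged summary of the flow implied by the hook above: completing or
// aborting an upload marks the MultipartUpload as deleted; this hook
// then enqueues a tombstone Version for every recorded part, and the
// version table's own update hook propagates those tombstones to the
// block_ref table, releasing the parts' blocks.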
impl CountedItem for MultipartUpload {
const COUNTER_TABLE_NAME: &'static str = "bucket_mpu_counter";
// Partition key = bucket id
type CP = Uuid;
// Sort key = nothing
type CS = EmptyKey;
fn counter_partition_key(&self) -> &Uuid {
&self.bucket_id
}
fn counter_sort_key(&self) -> &EmptyKey {
&EmptyKey
}
fn counts(&self) -> Vec<(&'static str, i64)> {
let uploads = if self.deleted.get() { 0 } else { 1 };
let mut parts = self
.parts
.items()
.iter()
.map(|(k, _)| k.part_number)
.collect::<Vec<_>>();
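// items() is ordered by (part_number, timestamp), so equal part
// numbers are adjacent and dedup() suffices to count distinct parts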
parts.dedup();
let bytes = self
.parts
.items()
.iter()
.map(|(_, p)| p.size.unwrap_or(0))
.sum::<u64>();
vec![
(UPLOADS, uploads),
(PARTS, parts.len() as i64),
(BYTES, bytes as i64),
]
}
}
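// Worked example (hedged, values made up): an upload whose parts map
// holds {(1, t1), (1, t2), (2, t3)} with sizes {None, Some(5), Some(7)}
// counts as UPLOADS = 1, PARTS = 2 (distinct part numbers) and
// BYTES = 12 (unknown sizes count as 0).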

View file

@ -10,6 +10,7 @@ use garage_table::replication::TableShardedReplication;
use garage_table::*; use garage_table::*;
use crate::index_counter::*; use crate::index_counter::*;
use crate::s3::mpu_table::*;
use crate::s3::version_table::*; use crate::s3::version_table::*;
pub const OBJECTS: &str = "objects"; pub const OBJECTS: &str = "objects";
@ -130,7 +131,86 @@ mod v08 {
} }
} }
pub use v08::*; mod v09 {
use garage_util::data::Uuid;
use serde::{Deserialize, Serialize};
use super::v08;
pub use v08::{ObjectVersionData, ObjectVersionHeaders, ObjectVersionMeta};
/// An object
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct Object {
/// The bucket in which the object is stored, used as partition key
pub bucket_id: Uuid,
/// The key at which the object is stored in its bucket, used as sorting key
pub key: String,
/// The list of currently stored versions of the object
pub(super) versions: Vec<ObjectVersion>,
}
/// Informations about a version of an object
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct ObjectVersion {
/// Id of the version
pub uuid: Uuid,
/// Timestamp of when the object was created
pub timestamp: u64,
/// State of the version
pub state: ObjectVersionState,
}
/// State of an object version
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub enum ObjectVersionState {
/// The version is being received
Uploading {
/// Indicates whether this is a multipart upload
multipart: bool,
/// Headers to be included in the final object
headers: ObjectVersionHeaders,
},
/// The version is fully received
Complete(ObjectVersionData),
/// The version uploaded containded errors or the upload was explicitly aborted
Aborted,
}
impl garage_util::migrate::Migrate for Object {
const VERSION_MARKER: &'static [u8] = b"G09s3o";
type Previous = v08::Object;
fn migrate(old: v08::Object) -> Object {
let versions = old
.versions
.into_iter()
.map(|x| ObjectVersion {
uuid: x.uuid,
timestamp: x.timestamp,
state: match x.state {
v08::ObjectVersionState::Uploading(h) => ObjectVersionState::Uploading {
multipart: false,
headers: h,
},
v08::ObjectVersionState::Complete(d) => ObjectVersionState::Complete(d),
v08::ObjectVersionState::Aborted => ObjectVersionState::Aborted,
},
})
.collect();
Object {
bucket_id: old.bucket_id,
key: old.key,
versions,
}
}
}
}
pub use v09::*;
impl Object { impl Object {
/// Initialize an Object struct from parts /// Initialize an Object struct from parts
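
The migration from v08 is mechanical except for one judgment call: v0.8 carried no multipart flag, so any version still Uploading at migration time is classified as a plain upload (multipart: false). A standalone sketch of that rule, with simplified stand-in types (hypothetical, the Complete variant is omitted):

    struct HeadersV08; // stand-in for ObjectVersionHeaders

    enum StateV08 {
        Uploading(HeadersV08),
        Aborted,
    }

    enum StateV09 {
        Uploading { multipart: bool, headers: HeadersV08 },
        Aborted,
    }

    fn migrate_state(old: StateV08) -> StateV09 {
        match old {
            // v0.8 had no multipart flag: in-flight uploads are migrated
            // as plain, non-multipart uploads.
            StateV08::Uploading(h) => StateV09::Uploading {
                multipart: false,
                headers: h,
            },
            StateV08::Aborted => StateV09::Aborted,
        }
    }

    fn main() {
        let migrated = migrate_state(StateV08::Uploading(HeadersV08));
        assert!(matches!(
            migrated,
            StateV09::Uploading { multipart: false, .. }
        ));
    }
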
@@ -180,11 +260,11 @@ impl Crdt for ObjectVersionState {
 				Complete(a) => {
 					a.merge(b);
 				}
-				Uploading(_) => {
+				Uploading { .. } => {
 					*self = Complete(b.clone());
 				}
 			},
-			Uploading(_) => {}
+			Uploading { .. } => {}
 		}
 	}
 }
@@ -199,8 +279,17 @@ impl ObjectVersion {
 	}

 	/// Is the object version currently being uploaded
-	pub fn is_uploading(&self) -> bool {
-		matches!(self.state, ObjectVersionState::Uploading(_))
+	///
+	/// matches only multipart uploads if check_multipart is Some(true)
+	/// matches only non-multipart uploads if check_multipart is Some(false)
+	/// matches both if check_multipart is None
+	pub fn is_uploading(&self, check_multipart: Option<bool>) -> bool {
+		match &self.state {
+			ObjectVersionState::Uploading { multipart, .. } => {
+				check_multipart.map(|x| x == *multipart).unwrap_or(true)
+			}
+			_ => false,
+		}
 	}

 	/// Is the object version completely received
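
The check_multipart argument is a tri-state filter; its core reduces to the small predicate below (standalone sketch, hypothetical function name):

    fn is_uploading_match(check_multipart: Option<bool>, multipart: bool) -> bool {
        check_multipart.map(|x| x == multipart).unwrap_or(true)
    }

    fn main() {
        assert!(is_uploading_match(None, true)); // None matches both kinds
        assert!(is_uploading_match(None, false));
        assert!(is_uploading_match(Some(true), true)); // only multipart uploads
        assert!(!is_uploading_match(Some(true), false));
        assert!(!is_uploading_match(Some(false), true)); // only regular uploads
        assert!(is_uploading_match(Some(false), false));
    }
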
@@ -267,13 +356,20 @@ impl Crdt for Object {

 pub struct ObjectTable {
 	pub version_table: Arc<Table<VersionTable, TableShardedReplication>>,
+	pub mpu_table: Arc<Table<MultipartUploadTable, TableShardedReplication>>,
 	pub object_counter_table: Arc<IndexCounter<Object>>,
 }

 #[derive(Clone, Copy, Debug, Serialize, Deserialize)]
 pub enum ObjectFilter {
+	/// Is the object version available (received and not a tombstone)
 	IsData,
-	IsUploading,
+	/// Is the object version currently being uploaded
+	///
+	/// matches only multipart uploads if check_multipart is Some(true)
+	/// matches only non-multipart uploads if check_multipart is Some(false)
+	/// matches both if check_multipart is None
+	IsUploading { check_multipart: Option<bool> },
 }

 impl TableSchema for ObjectTable {
@@ -301,21 +397,28 @@ impl TableSchema for ObjectTable {

 		// 2. Enqueue propagation deletions to version table
 		if let (Some(old_v), Some(new_v)) = (old, new) {
-			// Propagate deletion of old versions
 			for v in old_v.versions.iter() {
-				let newly_deleted = match new_v
+				let new_v_id = new_v
 					.versions
-					.binary_search_by(|nv| nv.cmp_key().cmp(&v.cmp_key()))
-				{
+					.binary_search_by(|nv| nv.cmp_key().cmp(&v.cmp_key()));
+
+				// Propagate deletion of old versions to the Version table
+				let delete_version = match new_v_id {
 					Err(_) => true,
 					Ok(i) => {
 						new_v.versions[i].state == ObjectVersionState::Aborted
 							&& v.state != ObjectVersionState::Aborted
 					}
 				};
-				if newly_deleted {
-					let deleted_version =
-						Version::new(v.uuid, old_v.bucket_id, old_v.key.clone(), true);
+				if delete_version {
+					let deleted_version = Version::new(
+						v.uuid,
+						VersionBacklink::Object {
+							bucket_id: old_v.bucket_id,
+							key: old_v.key.clone(),
+						},
+						true,
+					);
 					let res = self.version_table.queue_insert(tx, &deleted_version);
 					if let Err(e) = db::unabort(res)? {
 						error!(
@@ -324,6 +427,34 @@ impl TableSchema for ObjectTable {
 						);
 					}
 				}
+
+				// After abortion or completion of multipart uploads, delete MPU table entry
+				if matches!(
+					v.state,
+					ObjectVersionState::Uploading {
+						multipart: true,
+						..
+					}
+				) {
+					let delete_mpu = match new_v_id {
+						Err(_) => true,
+						Ok(i) => !matches!(
+							new_v.versions[i].state,
+							ObjectVersionState::Uploading { .. }
+						),
+					};
+
+					if delete_mpu {
+						let deleted_mpu =
+							MultipartUpload::new(v.uuid, old_v.bucket_id, old_v.key.clone(), true);
+						let res = self.mpu_table.queue_insert(tx, &deleted_mpu);
+						if let Err(e) = db::unabort(res)? {
+							error!(
+								"Unable to enqueue multipart upload deletion propagation: {}. A repair will be needed.",
+								e
+							);
+						}
+					}
+				}
 			}
 		}
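
The rule encoded above: a bucket's MPU-table entry must exist exactly as long as its version is in the multipart Uploading state, so a deletion is queued when the version disappears or moves to any other state. A standalone sketch of that decision, with simplified types (not the real ones):

    enum State {
        Uploading,
        Complete,
        Aborted,
    }

    // Keep the MPU entry only while the corresponding version is still Uploading.
    fn delete_mpu(new_version_state: Option<&State>) -> bool {
        !matches!(new_version_state, Some(State::Uploading))
    }

    fn main() {
        assert!(delete_mpu(None)); // version gone from the new object entirely
        assert!(delete_mpu(Some(&State::Complete))); // CompleteMultipartUpload ran
        assert!(delete_mpu(Some(&State::Aborted))); // AbortMultipartUpload ran
        assert!(!delete_mpu(Some(&State::Uploading))); // still in progress
    }
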
@@ -333,7 +464,10 @@ impl TableSchema for ObjectTable {
 	fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool {
 		match filter {
 			ObjectFilter::IsData => entry.versions.iter().any(|v| v.is_data()),
-			ObjectFilter::IsUploading => entry.versions.iter().any(|v| v.is_uploading()),
+			ObjectFilter::IsUploading { check_multipart } => entry
+				.versions
+				.iter()
+				.any(|v| v.is_uploading(*check_multipart)),
 		}
 	}
 }
@@ -360,10 +494,7 @@ impl CountedItem for Object {
 		} else {
 			0
 		};
-		let n_unfinished_uploads = versions
-			.iter()
-			.filter(|v| matches!(v.state, ObjectVersionState::Uploading(_)))
-			.count();
+		let n_unfinished_uploads = versions.iter().filter(|v| v.is_uploading(None)).count();
 		let n_bytes = versions
 			.iter()
 			.map(|v| match &v.state {

View file

@@ -3,6 +3,7 @@ use std::sync::Arc;

 use garage_db as db;
 use garage_util::data::*;
+use garage_util::error::*;

 use garage_table::crdt::*;
 use garage_table::replication::TableShardedReplication;
@@ -66,6 +67,8 @@ mod v08 {

 	use super::v05;

+	pub use v05::{VersionBlock, VersionBlockKey};
+
 	/// A version of an object
 	#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
 	pub struct Version {
@@ -90,8 +93,6 @@ mod v08 {
 		pub key: String,
 	}

-	pub use v05::{VersionBlock, VersionBlockKey};
-
 	impl garage_util::migrate::Migrate for Version {
 		type Previous = v05::Version;
@@ -110,32 +111,94 @@ mod v08 {
 	}
 }

-pub use v08::*;
+pub(crate) mod v09 {
+	use garage_util::crdt;
+	use garage_util::data::Uuid;
+	use serde::{Deserialize, Serialize};
+
+	use super::v08;
+
+	pub use v08::{VersionBlock, VersionBlockKey};
+
+	/// A version of an object
+	#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
+	pub struct Version {
+		/// UUID of the version, used as partition key
+		pub uuid: Uuid,
+
+		// Actual data: the blocks for this version
+		// In the case of a multipart upload, also store the etags
+		// of individual parts and check them when doing CompleteMultipartUpload
+		/// Is this version deleted
+		pub deleted: crdt::Bool,
+		/// list of blocks of data composing the version
+		pub blocks: crdt::Map<VersionBlockKey, VersionBlock>,
+
+		// Back link to owner of this version (either an object or a multipart
+		// upload), used to find whether it has been deleted and this version
+		// should in turn be deleted (see versions repair procedure)
+		pub backlink: VersionBacklink,
+	}
+
+	#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
+	pub enum VersionBacklink {
+		Object {
+			/// Bucket in which the related object is stored
+			bucket_id: Uuid,
+			/// Key in which the related object is stored
+			key: String,
+		},
+		MultipartUpload {
+			upload_id: Uuid,
+		},
+	}
+
+	impl garage_util::migrate::Migrate for Version {
+		const VERSION_MARKER: &'static [u8] = b"G09s3v";
+
+		type Previous = v08::Version;
+
+		fn migrate(old: v08::Version) -> Version {
+			Version {
+				uuid: old.uuid,
+				deleted: old.deleted,
+				blocks: old.blocks,
+				backlink: VersionBacklink::Object {
+					bucket_id: old.bucket_id,
+					key: old.key,
+				},
+			}
+		}
+	}
+}
+
+pub use v09::*;

 impl Version {
-	pub fn new(uuid: Uuid, bucket_id: Uuid, key: String, deleted: bool) -> Self {
+	pub fn new(uuid: Uuid, backlink: VersionBacklink, deleted: bool) -> Self {
 		Self {
 			uuid,
 			deleted: deleted.into(),
 			blocks: crdt::Map::new(),
-			parts_etags: crdt::Map::new(),
-			bucket_id,
-			key,
+			backlink,
 		}
 	}

 	pub fn has_part_number(&self, part_number: u64) -> bool {
-		let case1 = self
-			.parts_etags
-			.items()
-			.binary_search_by(|(k, _)| k.cmp(&part_number))
-			.is_ok();
-		let case2 = self
-			.blocks
+		self.blocks
 			.items()
-			.binary_search_by(|(k, _)| k.part_number.cmp(&part_number))
-			.is_ok();
-		case1 || case2
+			.binary_search_by(|(k, _)| k.part_number.cmp(&part_number))
+			.is_ok()
 	}
+
+	pub fn n_parts(&self) -> Result<u64, Error> {
+		Ok(self
+			.blocks
+			.items()
+			.last()
+			.ok_or_message("version has no parts")?
+			.0
+			.part_number)
+	}
 }
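
Note that n_parts() takes the part number of the last block in the sorted blocks map, i.e. the highest part number present, rather than counting entries. A standalone sketch of that behavior, with made-up part numbers:

    fn n_parts(part_numbers_sorted: &[u64]) -> Option<u64> {
        part_numbers_sorted.last().copied()
    }

    fn main() {
        assert_eq!(n_parts(&[1, 1, 2, 5]), Some(5)); // highest part number, not a count
        assert_eq!(n_parts(&[]), None); // no blocks yet: the real method errors out
    }
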
@@ -175,10 +238,8 @@ impl Crdt for Version {

 		if self.deleted.get() {
 			self.blocks.clear();
-			self.parts_etags.clear();
 		} else {
 			self.blocks.merge(&other.blocks);
-			self.parts_etags.merge(&other.parts_etags);
 		}
 	}
 }

View file

@@ -119,7 +119,7 @@ mod v09 {
 	}

 	impl garage_util::migrate::Migrate for ClusterLayout {
-		const VERSION_MARKER: &'static [u8] = b"Glayout09";
+		const VERSION_MARKER: &'static [u8] = b"G09layout";

 		type Previous = v08::ClusterLayout;

View file

@@ -347,9 +347,7 @@ impl<F: TableSchema, R: TableReplication> TableData<F, R> {
 	// ---- Utility functions ----

 	pub fn tree_key(&self, p: &F::P, s: &F::S) -> Vec<u8> {
-		let mut ret = p.hash().to_vec();
-		ret.extend(s.sort_key());
-		ret
+		[p.hash().as_slice(), s.sort_key()].concat()
 	}

 	pub fn decode_entry(&self, bytes: &[u8]) -> Result<F::E, Error> {
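
The tree_key() rewrite is behavior-preserving: concat() over a slice of byte slices yields the same hash-then-sort-key bytes, just in one correctly-sized allocation. A standalone check, with arbitrary sample bytes:

    fn main() {
        let hash = [0xabu8; 32];
        let sort_key = b"sort-key".as_slice();

        // Old form: allocate, then grow.
        let mut old = hash.to_vec();
        old.extend(sort_key);

        // New form: one allocation of the final size.
        let new = [hash.as_slice(), sort_key].concat();

        assert_eq!(old, new);
    }
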

View file

@@ -6,6 +6,8 @@ use garage_util::migrate::Migrate;

 use crate::crdt::Crdt;

+// =================================== PARTITION KEYS
+
 /// Trait for field used to partition data
 pub trait PartitionKey:
 	Clone + PartialEq + Serialize + for<'de> Deserialize<'de> + Send + Sync + 'static
@@ -29,6 +31,8 @@ impl PartitionKey for FixedBytes32 {
 	}
 }

+// =================================== SORT KEYS
+
 /// Trait for field used to sort data
 pub trait SortKey: Clone + Serialize + for<'de> Deserialize<'de> + Send + Sync + 'static {
 	/// Get the key used to sort
@@ -47,6 +51,8 @@ impl SortKey for FixedBytes32 {
 	}
 }

+// =================================== SCHEMA
+
 /// Trait for an entry in a table. It must be sortable and partitionable.
 pub trait Entry<P: PartitionKey, S: SortKey>:
 	Crdt + PartialEq + Clone + Migrate + Send + Sync + 'static

View file

@@ -106,7 +106,7 @@ impl WebServer {
 		addr: SocketAddr,
 	) -> Result<Response<Body>, Infallible> {
 		if let Ok(forwarded_for_ip_addr) =
-			forwarded_headers::handle_forwarded_for_headers(&req.headers())
+			forwarded_headers::handle_forwarded_for_headers(req.headers())
 		{
 			info!(
 				"{} (via {}) {} {}",