Implement ListMultipartUploads (#171)

Implement ListMultipartUploads, also refactor ListObjects and ListObjectsV2.

It took me some time because I wanted to propose the following things:
  - Using an iterator instead of the loop+goto pattern. I find it easier to read and it should enable some optimizations: for example, when consuming the keys of a common prefix, we currently do many [redundant checks](https://git.deuxfleurs.fr/Deuxfleurs/garage/src/branch/main/src/api/s3_list.rs#L125-L156) while the only thing to do is to [check whether the following key is still part of the common prefix](https://git.deuxfleurs.fr/Deuxfleurs/garage/src/branch/feature/s3-multipart-compat/src/api/s3_list.rs#L476). A sketch of the pattern follows this list.
  - Trying to name things (see the ExtractionResult and RangeBegin enums) and to separate concerns (see ListQuery and Accumulator).
  - An IO closure to make unit tests possible.
  - Unit tests, to track regressions and document how to interact with the code.
  - Integration tests with `s3api`. In the future, I would like to rewrite them in Rust with the AWS Rust SDK.
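
To make the first three points concrete, here is a minimal, self-contained sketch of the pattern. It is only an illustration under simplifying assumptions: `Accumulator`, `ExtractionResult`, `list` and the `fetch` closure below mirror the ideas named above but are not Garage's actual types or API.

```rust
// Hedged sketch, not Garage's actual code.

/// What the accumulator decided to do with one incoming key.
#[derive(Debug)]
enum ExtractionResult {
    /// The key was stored as a regular entry; keep iterating normally.
    Inserted,
    /// The key was folded into a common prefix; resume listing at this
    /// position instead of re-checking every key under the prefix.
    SkippedTo(String),
    /// The page is full; the value is the token for the next page.
    Filled(String),
}

#[derive(Default)]
struct Accumulator {
    keys: Vec<String>,
    common_prefixes: Vec<String>,
}

impl Accumulator {
    fn insert(&mut self, key: &str, prefix: &str, delimiter: char, page_size: usize) -> ExtractionResult {
        if self.keys.len() + self.common_prefixes.len() >= page_size {
            return ExtractionResult::Filled(key.to_string());
        }
        // Everything between the query prefix and the first delimiter forms a common prefix.
        if let Some(pos) = key[prefix.len()..].find(delimiter) {
            let common = key[..prefix.len() + pos + delimiter.len_utf8()].to_string();
            // Ask the caller to resume right after the whole common prefix.
            let resume = format!("{}{}", common, char::MAX);
            if self.common_prefixes.last() != Some(&common) {
                self.common_prefixes.push(common);
            }
            return ExtractionResult::SkippedTo(resume);
        }
        self.keys.push(key.to_string());
        ExtractionResult::Inserted
    }
}

/// `fetch` is the IO closure: unit tests feed it from a plain Vec, production
/// code would run a range query against the object table starting after `after`.
fn list(
    fetch: impl Fn(&str) -> Vec<String>,
    prefix: &str,
    delimiter: char,
    page_size: usize,
) -> Accumulator {
    let mut acc = Accumulator::default();
    let mut cursor = prefix.to_string();
    loop {
        let batch = fetch(&cursor);
        if batch.is_empty() {
            return acc;
        }
        for key in batch {
            if !key.starts_with(prefix) {
                return acc; // past the end of the requested prefix
            }
            match acc.insert(&key, prefix, delimiter, page_size) {
                ExtractionResult::Inserted => cursor = key,
                ExtractionResult::SkippedTo(next) => {
                    cursor = next;
                    break; // skip the rest of the common prefix in one jump
                }
                // A real implementation would hand this token back to the client.
                ExtractionResult::Filled(_) => return acc,
            }
        }
    }
}

fn main() {
    let data = vec!["a", "a/a", "a/b", "a/c", "a/d/a", "b", "c"];
    // The test IO closure: return keys strictly after `after`, in small batches.
    let fetch = |after: &str| {
        data.iter()
            .filter(|k| **k > after)
            .take(2)
            .map(|k| k.to_string())
            .collect::<Vec<String>>()
    };
    let acc = list(fetch, "", '/', 1000);
    assert_eq!(acc.keys, vec!["a", "b", "c"]);
    assert_eq!(acc.common_prefixes, vec!["a/"]);
    println!("keys={:?}, common_prefixes={:?}", acc.keys, acc.common_prefixes);
}
```

Passing the IO as a closure is what makes the test in `main` possible: it feeds keys from a plain vector, while production code would plug in a range query against the object table.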

Merging the logic of ListMultipartUploads and ListObjects was not a goal in itself, but a consequence of the previous modifications.

Some points that we might want to discuss:
  - ListObjectsV1, when using pagination and delimiters, behaves oddly with `aws s3api` (it lists the same prefix multiple times) because it cannot use our optimization to skip the whole prefix. This is independent of my refactor and can be reproduced with the commented `s3api` tests in `test-smoke.sh`. The official AWS S3 implementation probably shows the same odd behavior.
  - For ListMultipartUploads, I had to "abuse" the upload ID marker to support prefix skipping: I send an `upload-id-marker` with the hardcoded value `include` to emulate an inclusive ("including") token (a sketch of this follows the list).
  - Some ways to test ListMultipartUploads with existing software (my tests are limited to s3api for now).
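
To illustrate the `include` trick, here is a hedged sketch, not Garage's actual code: `RangeBegin` and `resume_point` are illustrative names showing how a (key-marker, upload-id-marker) pair could be decoded into either an inclusive or an exclusive resume position.

```rust
// Hedged sketch, not Garage's actual implementation.

#[derive(Debug, PartialEq)]
enum RangeBegin {
    /// Start at this key, including the key itself
    /// (used when a whole common prefix was skipped on the previous page).
    IncludingKey { key: String },
    /// Start strictly after this (key, upload id) pair, the normal S3 semantics.
    AfterUpload { key: String, upload_id: String },
}

fn resume_point(key_marker: &str, upload_id_marker: &str) -> RangeBegin {
    match upload_id_marker {
        // Hardcoded sentinel handed back as NextUploadIdMarker when the server
        // wants the next page to begin *at* key_marker rather than after an upload.
        "include" => RangeBegin::IncludingKey {
            key: key_marker.to_string(),
        },
        uid => RangeBegin::AfterUpload {
            key: key_marker.to_string(),
            upload_id: uid.to_string(),
        },
    }
}

fn main() {
    assert_eq!(
        resume_point("photos/", "include"),
        RangeBegin::IncludingKey { key: "photos/".into() }
    );
    assert_eq!(
        resume_point("photos/cat.jpg", "f00dcafe"),
        RangeBegin::AfterUpload {
            key: "photos/cat.jpg".into(),
            upload_id: "f00dcafe".into(),
        }
    );
}
```

A regular upload ID in the marker keeps the standard "strictly after" semantics, while the sentinel lets a common-prefix skip land exactly on the next key.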

Co-authored-by: Quentin Dufour <quentin@deuxfleurs.fr>
Reviewed-on: #171
Co-authored-by: Quentin <quentin@dufour.io>
Co-committed-by: Quentin <quentin@dufour.io>
Quentin 2022-01-12 19:04:55 +01:00 committed by Alex
parent 9cb2e9e57c
commit b4592a00fe
17 changed files with 1285 additions and 283 deletions


@@ -24,6 +24,11 @@ in let
rustChannel = pkgs.rustPlatform.rust;
overrides = pkgs.buildPackages.rustBuilder.overrides.all ++ [
/*
We want to inject the git version while keeping the build deterministic.
As we do not want to consider the .git folder as part of the input source,
we ask the user (the CI often) to pass the value to Nix.
*/
(pkgs.rustBuilder.rustLib.makeOverride {
name = "garage";
overrideAttrs = drv: if git_version != null then {
@@ -33,6 +38,21 @@ in let
'';
} else {};
})
/*
On a sandbox pure NixOS environment, /usr/bin/file is not available.
This is a known problem: https://github.com/NixOS/nixpkgs/issues/98440
We simply patch the file as suggested
*/
/*(pkgs.rustBuilder.rustLib.makeOverride {
name = "libsodium-sys";
overrideAttrs = drv: {
preConfigure = ''
${drv.preConfigure or ""}
sed -i 's,/usr/bin/file,${file}/bin/file,g' ./configure
'';
}
})*/
];
packageFun = import ./Cargo.nix;


@@ -43,6 +43,8 @@ All APIs that are not mentionned are not implemented and will return a 400 bad r
| ListBuckets | Implemented |
| ListObjects | Implemented, bugs? (see below) |
| ListObjectsV2 | Implemented |
| ListMultipartUpload | Implemented |
| ListParts | Missing |
| PutObject | Implemented |
| PutBucketWebsite | Partially implemented (see below)|
| UploadPart | Implemented |


@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
set -ex


@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
set -ex


@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
set -e


@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
set -ex


@@ -1,5 +1,3 @@
#!/bin/bash
export AWS_ACCESS_KEY_ID=`cat /tmp/garage.s3 |cut -d' ' -f1`
export AWS_SECRET_ACCESS_KEY=`cat /tmp/garage.s3 |cut -d' ' -f2`
export AWS_DEFAULT_REGION='garage'


@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
set -ex
@@ -10,6 +10,7 @@ GARAGE_DEBUG="${REPO_FOLDER}/target/debug/"
GARAGE_RELEASE="${REPO_FOLDER}/target/release/"
NIX_RELEASE="${REPO_FOLDER}/result/bin/"
PATH="${GARAGE_DEBUG}:${GARAGE_RELEASE}:${NIX_RELEASE}:$PATH"
CMDOUT=/tmp/garage.cmd.tmp
# @FIXME Duck is not ready for testing, we have a bug
SKIP_DUCK=1
@@ -130,6 +131,142 @@ if [ -z "$SKIP_AWS" ]; then
rm /tmp/garage-index.html
fi
if [ -z "$SKIP_AWS" ]; then
echo "🔌 Test S3API"
echo "Test Objects"
aws s3api put-object --bucket eprouvette --key a
aws s3api put-object --bucket eprouvette --key a/a
aws s3api put-object --bucket eprouvette --key a/b
aws s3api put-object --bucket eprouvette --key a/c
aws s3api put-object --bucket eprouvette --key a/d/a
aws s3api put-object --bucket eprouvette --key a/é
aws s3api put-object --bucket eprouvette --key b
aws s3api put-object --bucket eprouvette --key c
aws s3api list-objects-v2 --bucket eprouvette >$CMDOUT
[ $(jq '.Contents | length' $CMDOUT) == 8 ]
[ $(jq '.CommonPrefixes | length' $CMDOUT) == 0 ]
aws s3api list-objects-v2 --bucket eprouvette --page-size 0 >$CMDOUT
[ $(jq '.Contents | length' $CMDOUT) == 8 ]
[ $(jq '.CommonPrefixes | length' $CMDOUT) == 0 ]
aws s3api list-objects-v2 --bucket eprouvette --page-size 999999999999999 >$CMDOUT
[ $(jq '.Contents | length' $CMDOUT) == 8 ]
[ $(jq '.CommonPrefixes | length' $CMDOUT) == 0 ]
aws s3api list-objects-v2 --bucket eprouvette --page-size 1 >$CMDOUT
[ $(jq '.Contents | length' $CMDOUT) == 8 ]
[ $(jq '.CommonPrefixes | length' $CMDOUT) == 0 ]
aws s3api list-objects-v2 --bucket eprouvette --delimiter '/' >$CMDOUT
[ $(jq '.Contents | length' $CMDOUT) == 3 ]
[ $(jq '.CommonPrefixes | length' $CMDOUT) == 1 ]
aws s3api list-objects-v2 --bucket eprouvette --delimiter '/' --page-size 1 >$CMDOUT
[ $(jq '.Contents | length' $CMDOUT) == 3 ]
[ $(jq '.CommonPrefixes | length' $CMDOUT) == 1 ]
aws s3api list-objects-v2 --bucket eprouvette --prefix 'a/' >$CMDOUT
[ $(jq '.Contents | length' $CMDOUT) == 5 ]
[ $(jq '.CommonPrefixes | length' $CMDOUT) == 0 ]
aws s3api list-objects-v2 --bucket eprouvette --prefix 'a/' --delimiter '/' >$CMDOUT
[ $(jq '.Contents | length' $CMDOUT) == 4 ]
[ $(jq '.CommonPrefixes | length' $CMDOUT) == 1 ]
aws s3api list-objects-v2 --bucket eprouvette --prefix 'a/' --page-size 1 >$CMDOUT
[ $(jq '.Contents | length' $CMDOUT) == 5 ]
[ $(jq '.CommonPrefixes | length' $CMDOUT) == 0 ]
aws s3api list-objects-v2 --bucket eprouvette --prefix 'a/' --delimiter '/' --page-size 1 >$CMDOUT
[ $(jq '.Contents | length' $CMDOUT) == 4 ]
[ $(jq '.CommonPrefixes | length' $CMDOUT) == 1 ]
aws s3api list-objects-v2 --bucket eprouvette --start-after 'Z' >$CMDOUT
[ $(jq '.Contents | length' $CMDOUT) == 8 ]
[ $(jq '.CommonPrefixes | length' $CMDOUT) == 0 ]
aws s3api list-objects-v2 --bucket eprouvette --start-after 'c' >$CMDOUT
! [ -s $CMDOUT ]
aws s3api list-objects --bucket eprouvette >$CMDOUT
[ $(jq '.Contents | length' $CMDOUT) == 8 ]
[ $(jq '.CommonPrefixes | length' $CMDOUT) == 0 ]
aws s3api list-objects --bucket eprouvette --page-size 1 >$CMDOUT
[ $(jq '.Contents | length' $CMDOUT) == 8 ]
[ $(jq '.CommonPrefixes | length' $CMDOUT) == 0 ]
aws s3api list-objects --bucket eprouvette --delimiter '/' >$CMDOUT
[ $(jq '.Contents | length' $CMDOUT) == 3 ]
[ $(jq '.CommonPrefixes | length' $CMDOUT) == 1 ]
# @FIXME it does not work as expected but might be a limitation of aws s3api
# The problem is the conjunction of a delimiter + pagination + v1 of listobjects
#aws s3api list-objects --bucket eprouvette --delimiter '/' --page-size 1 >$CMDOUT
#[ $(jq '.Contents | length' $CMDOUT) == 3 ]
#[ $(jq '.CommonPrefixes | length' $CMDOUT) == 1 ]
aws s3api list-objects --bucket eprouvette --prefix 'a/' >$CMDOUT
[ $(jq '.Contents | length' $CMDOUT) == 5 ]
[ $(jq '.CommonPrefixes | length' $CMDOUT) == 0 ]
aws s3api list-objects --bucket eprouvette --prefix 'a/' --delimiter '/' >$CMDOUT
[ $(jq '.Contents | length' $CMDOUT) == 4 ]
[ $(jq '.CommonPrefixes | length' $CMDOUT) == 1 ]
aws s3api list-objects --bucket eprouvette --prefix 'a/' --page-size 1 >$CMDOUT
[ $(jq '.Contents | length' $CMDOUT) == 5 ]
[ $(jq '.CommonPrefixes | length' $CMDOUT) == 0 ]
# @FIXME idem
#aws s3api list-objects --bucket eprouvette --prefix 'a/' --delimiter '/' --page-size 1 >$CMDOUT
#[ $(jq '.Contents | length' $CMDOUT) == 4 ]
#[ $(jq '.CommonPrefixes | length' $CMDOUT) == 1 ]
aws s3api list-objects --bucket eprouvette --starting-token 'Z' >$CMDOUT
[ $(jq '.Contents | length' $CMDOUT) == 8 ]
[ $(jq '.CommonPrefixes | length' $CMDOUT) == 0 ]
aws s3api list-objects --bucket eprouvette --starting-token 'c' >$CMDOUT
! [ -s $CMDOUT ]
aws s3api list-objects-v2 --bucket eprouvette | \
jq -c '. | {Objects: [.Contents[] | {Key: .Key}], Quiet: true}' | \
aws s3api delete-objects --bucket eprouvette --delete file:///dev/stdin
echo "Test Multipart Upload"
aws s3api create-multipart-upload --bucket eprouvette --key a
aws s3api create-multipart-upload --bucket eprouvette --key a
aws s3api create-multipart-upload --bucket eprouvette --key c
aws s3api create-multipart-upload --bucket eprouvette --key c/a
aws s3api create-multipart-upload --bucket eprouvette --key c/b
aws s3api list-multipart-uploads --bucket eprouvette >$CMDOUT
[ $(jq '.Uploads | length' $CMDOUT) == 5 ]
[ $(jq '.CommonPrefixes | length' $CMDOUT) == 0 ]
aws s3api list-multipart-uploads --bucket eprouvette --page-size 1 >$CMDOUT
[ $(jq '.Uploads | length' $CMDOUT) == 5 ]
[ $(jq '.CommonPrefixes | length' $CMDOUT) == 0 ]
aws s3api list-multipart-uploads --bucket eprouvette --delimiter '/' >$CMDOUT
[ $(jq '.Uploads | length' $CMDOUT) == 3 ]
[ $(jq '.CommonPrefixes | length' $CMDOUT) == 1 ]
aws s3api list-multipart-uploads --bucket eprouvette --delimiter '/' --page-size 1 >$CMDOUT
[ $(jq '.Uploads | length' $CMDOUT) == 3 ]
[ $(jq '.CommonPrefixes | length' $CMDOUT) == 1 ]
aws s3api list-multipart-uploads --bucket eprouvette --prefix 'c' >$CMDOUT
[ $(jq '.Uploads | length' $CMDOUT) == 3 ]
[ $(jq '.CommonPrefixes | length' $CMDOUT) == 0 ]
aws s3api list-multipart-uploads --bucket eprouvette --prefix 'c' --page-size 1 >$CMDOUT
[ $(jq '.Uploads | length' $CMDOUT) == 3 ]
[ $(jq '.CommonPrefixes | length' $CMDOUT) == 0 ]
aws s3api list-multipart-uploads --bucket eprouvette --prefix 'c' --delimiter '/' >$CMDOUT
[ $(jq '.Uploads | length' $CMDOUT) == 1 ]
[ $(jq '.CommonPrefixes | length' $CMDOUT) == 1 ]
aws s3api list-multipart-uploads --bucket eprouvette --prefix 'c' --delimiter '/' --page-size 1 >$CMDOUT
[ $(jq '.Uploads | length' $CMDOUT) == 1 ]
[ $(jq '.CommonPrefixes | length' $CMDOUT) == 1 ]
aws s3api list-multipart-uploads --bucket eprouvette --starting-token 'ZZZZZ' >$CMDOUT
[ $(jq '.Uploads | length' $CMDOUT) == 5 ]
[ $(jq '.CommonPrefixes | length' $CMDOUT) == 0 ]
aws s3api list-multipart-uploads --bucket eprouvette --starting-token 'd' >$CMDOUT
! [ -s $CMDOUT ]
aws s3api list-multipart-uploads --bucket eprouvette | \
jq -r '.Uploads[] | "\(.Key) \(.UploadId)"' | \
while read r; do
key=$(echo $r|cut -d' ' -f 1);
uid=$(echo $r|cut -d' ' -f 2);
aws s3api abort-multipart-upload --bucket eprouvette --key $key --upload-id $uid;
echo "Deleted ${key}:${uid}"
done
fi
if [ -z "$SKIP_AWS" ]; then
echo "🪣 Test bucket logic "
AWS_ACCESS_KEY_ID=`cat /tmp/garage.s3 |cut -d' ' -f1`
@@ -151,5 +288,6 @@ AWS_SECRET_ACCESS_KEY=`cat /tmp/garage.s3 |cut -d' ' -f2`
garage -c /tmp/config.1.toml bucket deny --read --write eprouvette --key $AWS_ACCESS_KEY_ID
garage -c /tmp/config.1.toml bucket delete --yes eprouvette
garage -c /tmp/config.1.toml key delete --yes $AWS_ACCESS_KEY_ID
exec 3>&-
echo "✅ Success"


@@ -83,6 +83,7 @@ function refresh_toolchain {
pkgs.which
pkgs.openssl
pkgs.curl
pkgs.jq
] else [])
++
(if release then [


@@ -1,3 +1,4 @@
use std::cmp::{max, min};
use std::net::SocketAddr;
use std::sync::Arc;
@@ -217,16 +218,18 @@ async fn handler_inner(garage: Arc<Garage>, req: Request<Body>) -> Result<Respon
handle_list(
garage,
&ListObjectsQuery {
common: ListQueryCommon {
bucket_name: bucket,
bucket_id,
delimiter: delimiter.map(|d| d.to_string()),
page_size: max_keys.map(|p| min(1000, max(1, p))).unwrap_or(1000),
prefix: prefix.unwrap_or_default(),
urlencode_resp: encoding_type.map(|e| e == "url").unwrap_or(false),
},
is_v2: false,
bucket_name: bucket,
bucket_id,
delimiter: delimiter.map(|d| d.to_string()),
max_keys: max_keys.unwrap_or(1000),
prefix: prefix.unwrap_or_default(),
marker,
continuation_token: None,
start_after: None,
urlencode_resp: encoding_type.map(|e| e == "url").unwrap_or(false),
},
)
.await
@@ -246,16 +249,18 @@ async fn handler_inner(garage: Arc<Garage>, req: Request<Body>) -> Result<Respon
handle_list(
garage,
&ListObjectsQuery {
common: ListQueryCommon {
bucket_name: bucket,
bucket_id,
delimiter: delimiter.map(|d| d.to_string()),
page_size: max_keys.map(|p| min(1000, max(1, p))).unwrap_or(1000),
urlencode_resp: encoding_type.map(|e| e == "url").unwrap_or(false),
prefix: prefix.unwrap_or_default(),
},
is_v2: true,
bucket_name: bucket,
bucket_id,
delimiter: delimiter.map(|d| d.to_string()),
max_keys: max_keys.unwrap_or(1000),
prefix: prefix.unwrap_or_default(),
marker: None,
continuation_token,
start_after,
urlencode_resp: encoding_type.map(|e| e == "url").unwrap_or(false),
},
)
.await
@@ -266,6 +271,32 @@ async fn handler_inner(garage: Arc<Garage>, req: Request<Body>) -> Result<Respon
)))
}
}
Endpoint::ListMultipartUploads {
bucket,
delimiter,
encoding_type,
key_marker,
max_uploads,
prefix,
upload_id_marker,
} => {
handle_list_multipart_upload(
garage,
&ListMultipartUploadsQuery {
common: ListQueryCommon {
bucket_name: bucket,
bucket_id,
delimiter: delimiter.map(|d| d.to_string()),
page_size: max_uploads.map(|p| min(1000, max(1, p))).unwrap_or(1000),
prefix: prefix.unwrap_or_default(),
urlencode_resp: encoding_type.map(|e| e == "url").unwrap_or(false),
},
key_marker,
upload_id_marker,
},
)
.await
}
Endpoint::DeleteObjects { .. } => {
handle_delete_objects(garage, bucket_id, req, content_sha256).await
}


@@ -7,6 +7,7 @@ use garage_model::bucket_alias_table::*;
use garage_model::bucket_table::Bucket;
use garage_model::garage::Garage;
use garage_model::key_table::Key;
use garage_model::object_table::ObjectFilter;
use garage_model::permission::BucketKeyPerm;
use garage_table::util::*;
use garage_util::crdt::*;
@@ -226,7 +227,7 @@ pub async fn handle_delete_bucket(
// Check bucket is empty
let objects = garage
.object_table
.get_range(&bucket_id, None, Some(DeletedFilter::NotDeleted), 10)
.get_range(&bucket_id, None, Some(ObjectFilter::IsData), 10)
.await?;
if !objects.is_empty() {
return Err(Error::BucketNotEmpty);

File diff suppressed because it is too large


@@ -610,7 +610,7 @@ pub(crate) fn get_headers(req: &Request<Body>) -> Result<ObjectVersionHeaders, E
})
}
fn decode_upload_id(id: &str) -> Result<Uuid, Error> {
pub fn decode_upload_id(id: &str) -> Result<Uuid, Error> {
let id_bin = hex::decode(id).map_err(|_| Error::NoSuchUpload)?;
if id_bin.len() != 32 {
return Err(Error::NoSuchUpload);


@@ -350,7 +350,7 @@ pub enum Endpoint {
delimiter: Option<char>,
encoding_type: Option<String>,
key_marker: Option<String>,
max_uploads: Option<u64>,
max_uploads: Option<usize>,
prefix: Option<String>,
upload_id_marker: Option<String>,
},


@@ -141,6 +141,60 @@ pub struct CompleteMultipartUploadResult {
pub etag: Value,
}
#[derive(Debug, Serialize, PartialEq)]
pub struct Initiator {
#[serde(rename = "DisplayName")]
pub display_name: Value,
#[serde(rename = "ID")]
pub id: Value,
}
#[derive(Debug, Serialize, PartialEq)]
pub struct ListMultipartItem {
#[serde(rename = "Initiated")]
pub initiated: Value,
#[serde(rename = "Initiator")]
pub initiator: Initiator,
#[serde(rename = "Key")]
pub key: Value,
#[serde(rename = "UploadId")]
pub upload_id: Value,
#[serde(rename = "Owner")]
pub owner: Owner,
#[serde(rename = "StorageClass")]
pub storage_class: Value,
}
#[derive(Debug, Serialize, PartialEq)]
pub struct ListMultipartUploadsResult {
#[serde(serialize_with = "xmlns_tag")]
pub xmlns: (),
#[serde(rename = "Bucket")]
pub bucket: Value,
#[serde(rename = "KeyMarker")]
pub key_marker: Option<Value>,
#[serde(rename = "UploadIdMarker")]
pub upload_id_marker: Option<Value>,
#[serde(rename = "NextKeyMarker")]
pub next_key_marker: Option<Value>,
#[serde(rename = "NextUploadIdMarker")]
pub next_upload_id_marker: Option<Value>,
#[serde(rename = "Prefix")]
pub prefix: Value,
#[serde(rename = "Delimiter")]
pub delimiter: Option<Value>,
#[serde(rename = "MaxUploads")]
pub max_uploads: IntValue,
#[serde(rename = "IsTruncated")]
pub is_truncated: Value,
#[serde(rename = "Upload")]
pub upload: Vec<ListMultipartItem>,
#[serde(rename = "CommonPrefixes")]
pub common_prefixes: Vec<CommonPrefix>,
#[serde(rename = "EncodingType")]
pub encoding_type: Option<Value>,
}
#[derive(Debug, Serialize, PartialEq)]
pub struct ListBucketItem {
#[serde(rename = "Key")]
@@ -432,6 +486,58 @@ mod tests {
Ok(())
}
#[test]
fn list_multipart_uploads_result() -> Result<(), ApiError> {
let result = ListMultipartUploadsResult {
xmlns: (),
bucket: Value("example-bucket".to_string()),
key_marker: None,
next_key_marker: None,
upload_id_marker: None,
encoding_type: None,
next_upload_id_marker: None,
upload: vec![],
delimiter: Some(Value("/".to_string())),
prefix: Value("photos/2006/".to_string()),
max_uploads: IntValue(1000),
is_truncated: Value("false".to_string()),
common_prefixes: vec![
CommonPrefix {
prefix: Value("photos/2006/February/".to_string()),
},
CommonPrefix {
prefix: Value("photos/2006/January/".to_string()),
},
CommonPrefix {
prefix: Value("photos/2006/March/".to_string()),
},
],
};
assert_eq!(
to_xml_with_header(&result)?,
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
<ListMultipartUploadsResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
<Bucket>example-bucket</Bucket>\
<Prefix>photos/2006/</Prefix>\
<Delimiter>/</Delimiter>\
<MaxUploads>1000</MaxUploads>\
<IsTruncated>false</IsTruncated>\
<CommonPrefixes>\
<Prefix>photos/2006/February/</Prefix>\
</CommonPrefixes>\
<CommonPrefixes>\
<Prefix>photos/2006/January/</Prefix>\
</CommonPrefixes>\
<CommonPrefixes>\
<Prefix>photos/2006/March/</Prefix>\
</CommonPrefixes>\
</ListMultipartUploadsResult>"
);
Ok(())
}
#[test]
fn list_objects_v1_1() -> Result<(), ApiError> {
let result = ListBucketResult {


@@ -21,6 +21,7 @@ use garage_model::garage::Garage;
use garage_model::helper::error::{Error, OkOrBadRequest};
use garage_model::key_table::*;
use garage_model::migrate::Migrate;
use garage_model::object_table::ObjectFilter;
use garage_model::permission::*;
use crate::cli::*;
@@ -209,7 +210,7 @@ impl AdminRpcHandler {
let objects = self
.garage
.object_table
.get_range(&bucket_id, None, Some(DeletedFilter::NotDeleted), 10)
.get_range(&bucket_id, None, Some(ObjectFilter::IsData), 10)
.await?;
if !objects.is_empty() {
return Err(Error::BadRequest(format!(


@@ -218,13 +218,19 @@ pub struct ObjectTable {
pub version_table: Arc<Table<VersionTable, TableShardedReplication>>,
}
#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
pub enum ObjectFilter {
IsData,
IsUploading,
}
impl TableSchema for ObjectTable {
const TABLE_NAME: &'static str = "object";
type P = Uuid;
type S = String;
type E = Object;
type Filter = DeletedFilter;
type Filter = ObjectFilter;
fn updated(&self, old: Option<Self::E>, new: Option<Self::E>) {
let version_table = self.version_table.clone();
@@ -254,8 +260,10 @@ impl TableSchema for ObjectTable {
}
fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool {
let deleted = !entry.versions.iter().any(|v| v.is_data());
filter.apply(deleted)
match filter {
ObjectFilter::IsData => entry.versions.iter().any(|v| v.is_data()),
ObjectFilter::IsUploading => entry.versions.iter().any(|v| v.is_uploading()),
}
}
fn try_migrate(bytes: &[u8]) -> Option<Self::E> {