New model for buckets #172

Cargo.lock (generated)

@@ -382,17 +382,17 @@ dependencies = [
[[package]]
|
||||
name = "garage"
|
||||
version = "0.5.0"
|
||||
version = "0.6.0"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"bytes 1.1.0",
|
||||
"futures",
|
||||
"futures-util",
|
||||
"garage_api",
|
||||
"garage_model",
|
||||
"garage_rpc",
|
||||
"garage_table",
|
||||
"garage_util",
|
||||
"garage_model 0.6.0",
|
||||
"garage_rpc 0.6.0",
|
||||
"garage_table 0.6.0",
|
||||
"garage_util 0.6.0",
|
||||
"garage_web",
|
||||
"git-version",
|
||||
"hex",
|
||||
|
@@ -411,7 +411,7 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "garage_api"
|
||||
version = "0.5.0"
|
||||
version = "0.6.0"
|
||||
dependencies = [
|
||||
"base64",
|
||||
"bytes 1.1.0",
|
||||
|
@@ -420,9 +420,9 @@ dependencies = [
|
|||
"err-derive 0.3.0",
|
||||
"futures",
|
||||
"futures-util",
|
||||
"garage_model",
|
||||
"garage_table",
|
||||
"garage_util",
|
||||
"garage_model 0.6.0",
|
||||
"garage_table 0.6.0",
|
||||
"garage_util 0.6.0",
|
||||
"hex",
|
||||
"hmac",
|
||||
"http",
|
||||
|
@@ -444,14 +444,39 @@ dependencies = [
|
|||
[[package]]
|
||||
name = "garage_model"
|
||||
version = "0.5.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c56150ee02bc26c77996b19fee0851f7d53cf42ae80370a8cf3a5dd5bb0bba76"
|
||||
dependencies = [
|
||||
"arc-swap",
|
||||
"async-trait",
|
||||
"futures",
|
||||
"futures-util",
|
||||
"garage_rpc",
|
||||
"garage_table",
|
||||
"garage_util",
|
||||
"garage_rpc 0.5.0",
|
||||
"garage_table 0.5.0",
|
||||
"garage_util 0.5.0",
|
||||
"hex",
|
||||
"log",
|
||||
"netapp",
|
||||
"rand",
|
||||
"rmp-serde 0.15.5",
|
||||
"serde",
|
||||
"serde_bytes",
|
||||
"sled",
|
||||
"tokio",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "garage_model"
|
||||
version = "0.6.0"
|
||||
dependencies = [
|
||||
"arc-swap",
|
||||
"async-trait",
|
||||
"futures",
|
||||
"futures-util",
|
||||
"garage_model 0.5.0",
|
||||
"garage_rpc 0.6.0",
|
||||
"garage_table 0.6.0",
|
||||
"garage_util 0.6.0",
|
||||
"hex",
|
||||
"log",
|
||||
"netapp",
|
||||
|
@@ -467,13 +492,40 @@ dependencies = [
|
|||
[[package]]
|
||||
name = "garage_rpc"
|
||||
version = "0.5.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3c5743c49f616b260f548454ff52b81d10372593d4c4bc01d516ee3c3c4e515a"
|
||||
dependencies = [
|
||||
"arc-swap",
|
||||
"async-trait",
|
||||
"bytes 1.1.0",
|
||||
"futures",
|
||||
"futures-util",
|
||||
"garage_util",
|
||||
"garage_util 0.5.0",
|
||||
"gethostname",
|
||||
"hex",
|
||||
"hyper",
|
||||
"kuska-sodiumoxide",
|
||||
"log",
|
||||
"netapp",
|
||||
"rand",
|
||||
"rmp-serde 0.15.5",
|
||||
"serde",
|
||||
"serde_bytes",
|
||||
"serde_json",
|
||||
"tokio",
|
||||
"tokio-stream",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "garage_rpc"
|
||||
version = "0.6.0"
|
||||
dependencies = [
|
||||
"arc-swap",
|
||||
"async-trait",
|
||||
"bytes 1.1.0",
|
||||
"futures",
|
||||
"futures-util",
|
||||
"garage_util 0.6.0",
|
||||
"gethostname",
|
||||
"hex",
|
||||
"hyper",
|
||||
|
@@ -492,13 +544,35 @@ dependencies = [
|
|||
[[package]]
|
||||
name = "garage_table"
|
||||
version = "0.5.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "378ffd69e8fd084e0817dc64a23a1692b58ffc86509ac2cadc64aa2d83c3e1e0"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"bytes 1.1.0",
|
||||
"futures",
|
||||
"futures-util",
|
||||
"garage_rpc",
|
||||
"garage_util",
|
||||
"garage_rpc 0.5.0",
|
||||
"garage_util 0.5.0",
|
||||
"hexdump",
|
||||
"log",
|
||||
"rand",
|
||||
"rmp-serde 0.15.5",
|
||||
"serde",
|
||||
"serde_bytes",
|
||||
"sled",
|
||||
"tokio",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "garage_table"
|
||||
version = "0.6.0"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"bytes 1.1.0",
|
||||
"futures",
|
||||
"futures-util",
|
||||
"garage_rpc 0.6.0",
|
||||
"garage_util 0.6.0",
|
||||
"hexdump",
|
||||
"log",
|
||||
"rand",
|
||||
|
@@ -512,6 +586,32 @@ dependencies = [
|
|||
[[package]]
|
||||
name = "garage_util"
|
||||
version = "0.5.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5282e613b4da5ecca5bfec8c48ce9f25226cc1f35fbc439ed5fc698cce1aa549"
|
||||
dependencies = [
|
||||
"blake2",
|
||||
"chrono",
|
||||
"err-derive 0.3.0",
|
||||
"futures",
|
||||
"hex",
|
||||
"http",
|
||||
"hyper",
|
||||
"log",
|
||||
"netapp",
|
||||
"rand",
|
||||
"rmp-serde 0.15.5",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"sha2",
|
||||
"sled",
|
||||
"tokio",
|
||||
"toml",
|
||||
"xxhash-rust",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "garage_util"
|
||||
version = "0.6.0"
|
||||
dependencies = [
|
||||
"blake2",
|
||||
"chrono",
|
||||
|
@@ -535,14 +635,14 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "garage_web"
|
||||
version = "0.5.0"
|
||||
version = "0.6.0"
|
||||
dependencies = [
|
||||
"err-derive 0.3.0",
|
||||
"futures",
|
||||
"garage_api",
|
||||
"garage_model",
|
||||
"garage_table",
|
||||
"garage_util",
|
||||
"garage_model 0.6.0",
|
||||
"garage_table 0.6.0",
|
||||
"garage_util 0.6.0",
|
||||
"http",
|
||||
"hyper",
|
||||
"log",
|
||||
|
|
|
@@ -1,6 +1,6 @@
 [package]
 name = "garage_api"
-version = "0.5.0"
+version = "0.6.0"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -14,9 +14,9 @@ path = "lib.rs"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

 [dependencies]
-garage_model = { version = "0.5.0", path = "../model" }
-garage_table = { version = "0.5.0", path = "../table" }
-garage_util = { version = "0.5.0", path = "../util" }
+garage_model = { version = "0.6.0", path = "../model" }
+garage_table = { version = "0.6.0", path = "../table" }
+garage_util = { version = "0.6.0", path = "../util" }

 base64 = "0.13"
 bytes = "1.0"
@@ -7,9 +7,12 @@ use hyper::server::conn::AddrStream;
|
|||
use hyper::service::{make_service_fn, service_fn};
|
||||
use hyper::{Body, Request, Response, Server};
|
||||
|
||||
use garage_util::crdt;
|
||||
use garage_util::data::*;
|
||||
use garage_util::error::Error as GarageError;
|
||||
|
||||
use garage_model::garage::Garage;
|
||||
use garage_model::key_table::Key;
|
||||
|
||||
use crate::error::*;
|
||||
use crate::signature::check_signature;
|
||||
|
@@ -105,10 +108,20 @@ async fn handler_inner(garage: Arc<Garage>, req: Request<Body>) -> Result<Respon
|
|||
.and_then(|root_domain| host_to_bucket(&host, root_domain));
|
||||
|
||||
let endpoint = Endpoint::from_request(&req, bucket.map(ToOwned::to_owned))?;
|
||||
|
||||
let bucket_name = match endpoint.authorization_type() {
|
||||
|
||||
Authorization::None => {
|
||||
return handle_request_without_bucket(garage, req, api_key, endpoint).await
|
||||
}
|
||||
Authorization::Read(bucket) | Authorization::Write(bucket) => bucket.to_string(),
|
||||
};
|
||||
|
||||
let bucket_id = resolve_bucket(&garage, &bucket_name, &api_key).await?;
|
||||
|
||||
let allowed = match endpoint.authorization_type() {
|
||||
Authorization::None => true,
|
||||
Authorization::Read(bucket) => api_key.allow_read(bucket),
|
||||
Authorization::Write(bucket) => api_key.allow_write(bucket),
|
||||
Authorization::Read(_) => api_key.allow_read(&bucket_id),
|
||||
Authorization::Write(_) => api_key.allow_write(&bucket_id),
|
||||
_ => unreachable!(),
|
||||
};
|
||||
|
||||
if !allowed {
|
||||
|
@@ -118,19 +131,18 @@ async fn handler_inner(garage: Arc<Garage>, req: Request<Body>) -> Result<Respon
|
|||
}
|
||||
|
||||
match endpoint {
|
||||
Endpoint::ListBuckets => handle_list_buckets(&api_key),
|
||||
Endpoint::HeadObject { bucket, key, .. } => handle_head(garage, &req, &bucket, &key).await,
|
||||
Endpoint::GetObject { bucket, key, .. } => handle_get(garage, &req, &bucket, &key).await,
|
||||
Endpoint::HeadObject { key, .. } => handle_head(garage, &req, bucket_id, &key).await,
|
||||
Endpoint::GetObject { key, .. } => handle_get(garage, &req, bucket_id, &key).await,
|
||||
Endpoint::UploadPart {
|
||||
bucket,
|
||||
key,
|
||||
part_number,
|
||||
upload_id,
|
||||
..
|
||||
} => {
|
||||
handle_put_part(
|
||||
garage,
|
||||
req,
|
||||
&bucket,
|
||||
bucket_id,
|
||||
&key,
|
||||
part_number,
|
||||
&upload_id,
|
||||
|
@@ -138,38 +150,46 @@ async fn handler_inner(garage: Arc<Garage>, req: Request<Body>) -> Result<Respon
|
|||
)
|
||||
.await
|
||||
}
|
||||
Endpoint::CopyObject { bucket, key } => {
|
||||
Endpoint::CopyObject { key, .. } => {
|
||||
let copy_source = req.headers().get("x-amz-copy-source").unwrap().to_str()?;
|
||||
let copy_source = percent_encoding::percent_decode_str(copy_source).decode_utf8()?;
|
||||
let (source_bucket, source_key) = parse_bucket_key(©_source, None)?;
|
||||
if !api_key.allow_read(source_bucket) {
|
||||
let source_bucket_id =
|
||||
resolve_bucket(&garage, &source_bucket.to_string(), &api_key).await?;
|
||||
if !api_key.allow_read(&source_bucket_id) {
|
||||
return Err(Error::Forbidden(format!(
|
||||
"Reading from bucket {} not allowed for this key",
|
||||
source_bucket
|
||||
)));
|
||||
}
|
||||
let source_key = source_key.ok_or_bad_request("No source key specified")?;
|
||||
handle_copy(garage, &req, &bucket, &key, source_bucket, source_key).await
|
||||
handle_copy(garage, &req, bucket_id, &key, source_bucket_id, source_key).await
|
||||
}
|
||||
Endpoint::PutObject { bucket, key } => {
|
||||
handle_put(garage, req, &bucket, &key, content_sha256).await
|
||||
Endpoint::PutObject { key, .. } => {
|
||||
handle_put(garage, req, bucket_id, &key, content_sha256).await
|
||||
}
|
||||
Endpoint::AbortMultipartUpload {
|
||||
bucket,
|
||||
key,
|
||||
upload_id,
|
||||
} => handle_abort_multipart_upload(garage, &bucket, &key, &upload_id).await,
|
||||
Endpoint::DeleteObject { bucket, key, .. } => handle_delete(garage, &bucket, &key).await,
|
||||
Endpoint::AbortMultipartUpload { key, upload_id, .. } => {
|
||||
handle_abort_multipart_upload(garage, bucket_id, &key, &upload_id).await
|
||||
}
|
||||
Endpoint::DeleteObject { key, .. } => handle_delete(garage, bucket_id, &key).await,
|
||||
Endpoint::CreateMultipartUpload { bucket, key } => {
|
||||
handle_create_multipart_upload(garage, &req, &bucket, &key).await
|
||||
handle_create_multipart_upload(garage, &req, &bucket, bucket_id, &key).await
|
||||
}
|
||||
Endpoint::CompleteMultipartUpload {
|
||||
bucket,
|
||||
key,
|
||||
upload_id,
|
||||
} => {
|
||||
handle_complete_multipart_upload(garage, req, &bucket, &key, &upload_id, content_sha256)
|
||||
.await
|
||||
handle_complete_multipart_upload(
|
||||
garage,
|
||||
req,
|
||||
&bucket,
|
||||
bucket_id,
|
||||
&key,
|
||||
&upload_id,
|
||||
content_sha256,
|
||||
)
|
||||
.await
|
||||
}
|
||||
Endpoint::CreateBucket { bucket } => {
|
||||
debug!(
|
||||
|
@@ -206,7 +226,8 @@ async fn handler_inner(garage: Arc<Garage>, req: Request<Body>) -> Result<Respon
|
|||
garage,
|
||||
&ListObjectsQuery {
|
||||
is_v2: false,
|
||||
bucket,
|
||||
bucket_name: bucket,
|
||||
bucket_id,
|
||||
delimiter: delimiter.map(|d| d.to_string()),
|
||||
max_keys: max_keys.unwrap_or(1000),
|
||||
prefix: prefix.unwrap_or_default(),
|
||||
|
@@ -234,7 +255,8 @@ async fn handler_inner(garage: Arc<Garage>, req: Request<Body>) -> Result<Respon
|
|||
garage,
|
||||
&ListObjectsQuery {
|
||||
is_v2: true,
|
||||
bucket,
|
||||
bucket_name: bucket,
|
||||
bucket_id,
|
||||
delimiter: delimiter.map(|d| d.to_string()),
|
||||
max_keys: max_keys.unwrap_or(1000),
|
||||
prefix: prefix.unwrap_or_default(),
|
||||
|
@@ -252,8 +274,8 @@ async fn handler_inner(garage: Arc<Garage>, req: Request<Body>) -> Result<Respon
|
|||
)))
|
||||
}
|
||||
}
|
||||
Endpoint::DeleteObjects { bucket } => {
|
||||
handle_delete_objects(garage, &bucket, req, content_sha256).await
|
||||
Endpoint::DeleteObjects { .. } => {
|
||||
handle_delete_objects(garage, bucket_id, req, content_sha256).await
|
||||
}
|
||||
Endpoint::PutBucketWebsite { bucket } => {
|
||||
handle_put_website(garage, bucket, req, content_sha256).await
|
||||
|
@@ -263,6 +285,41 @@ async fn handler_inner(garage: Arc<Garage>, req: Request<Body>) -> Result<Respon
|
|||
}
|
||||
}
|
||||
|
||||
async fn handle_request_without_bucket(
|
||||
garage: Arc<Garage>,
|
||||
_req: Request<Body>,
|
||||
api_key: Key,
|
||||
endpoint: Endpoint,
|
||||
) -> Result<Response<Body>, Error> {
|
||||
match endpoint {
|
||||
Endpoint::ListBuckets => handle_list_buckets(&garage, &api_key).await,
|
||||
endpoint => Err(Error::NotImplemented(endpoint.name().to_owned())),
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(clippy::ptr_arg)]
|
||||
async fn resolve_bucket(
|
||||
garage: &Garage,
|
||||
bucket_name: &String,
|
||||
api_key: &Key,
|
||||
) -> Result<Uuid, Error> {
|
||||
let api_key_params = api_key
|
||||
.state
|
||||
.as_option()
|
||||
.ok_or_else(|| Error::Forbidden("Operation is not allowed for this key.".to_string()))?;
|
||||
|
||||
if let Some(crdt::Deletable::Present(bucket_id)) = api_key_params.local_aliases.get(bucket_name)
|
||||
{
|
||||
Ok(*bucket_id)
|
||||
} else {
|
||||
Ok(garage
|
||||
.bucket_helper()
|
||||
.resolve_global_bucket_name(bucket_name)
|
||||
.await?
|
||||
.ok_or(Error::NotFound)?)
|
||||
}
|
||||
}
|
||||
|
||||
/// Extract the bucket name and the key name from an HTTP path and possibly a bucket provided in
|
||||
/// the host header of the request
|
||||
///
|
||||
|
|
|
@@ -1,9 +1,12 @@
|
|||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use hyper::{Body, Response};
|
||||
|
||||
use garage_model::garage::Garage;
|
||||
use garage_model::key_table::Key;
|
||||
use garage_table::util::EmptyKey;
|
||||
use garage_util::crdt::*;
|
||||
use garage_util::time::*;
|
||||
|
||||
use crate::error::*;
|
||||
|
@@ -34,20 +37,65 @@ pub fn handle_get_bucket_versioning() -> Result<Response<Body>, Error> {
|
|||
.body(Body::from(xml.into_bytes()))?)
|
||||
}
|
||||
|
||||
pub fn handle_list_buckets(api_key: &Key) -> Result<Response<Body>, Error> {
|
||||
pub async fn handle_list_buckets(garage: &Garage, api_key: &Key) -> Result<Response<Body>, Error> {
|
||||
let key_state = api_key.state.as_option().ok_or_internal_error(
|
||||
"Key should not be in deleted state at this point (internal error)",
|
||||
)?;
|
||||
|
||||
// Collect buckets user has access to
|
||||
let ids = api_key
|
||||
.state
|
||||
.as_option()
|
||||
.unwrap()
|
||||
.authorized_buckets
|
||||
.items()
|
||||
.iter()
|
||||
.filter(|(_, perms)| perms.allow_read || perms.allow_write)
|
||||
trinity-1686a commented (resolved, outdated): would it make sense to be bucket owner without read nor write permission? (A hypothetical sketch of such a filter follows the statement below.)
|
||||
.map(|(id, _)| *id)
|
||||
.collect::<Vec<_>>();
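In reply to trinity-1686a's question above: as written, the filter only surfaces buckets the key can read or write. The sketch below is hypothetical and not part of this diff; `BucketKeyPerm` in this PR only has `allow_read` and `allow_write`, so the `allow_owner` flag is invented here purely to illustrate what also listing owned-but-unreadable buckets would look like (it reuses `key_state` bound above).

```rust
// Hypothetical sketch, NOT part of this PR: BucketKeyPerm has no `allow_owner`
// field in this diff. If such a flag existed, ListBuckets could also surface
// buckets a key owns without being able to read or write them:
let ids = key_state
    .authorized_buckets
    .items()
    .iter()
    .filter(|(_, perms)| perms.allow_read || perms.allow_write || perms.allow_owner)
    .map(|(id, _)| *id)
    .collect::<Vec<_>>();
```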
|
||||
|
||||
let mut buckets_by_id = HashMap::new();
|
||||
let mut aliases = HashMap::new();
|
||||
|
||||
for bucket_id in ids.iter() {
|
||||
let bucket = garage.bucket_table.get(bucket_id, &EmptyKey).await?;
|
||||
if let Some(bucket) = bucket {
|
||||
if let Deletable::Present(param) = bucket.state {
|
||||
for (alias, _, active) in param.aliases.items() {
|
||||
if *active {
|
||||
let alias_ent = garage.bucket_alias_table.get(&EmptyKey, alias).await?;
|
||||
if let Some(alias_ent) = alias_ent {
|
||||
if let Some(alias_p) = alias_ent.state.get().as_option() {
|
||||
if alias_p.bucket_id == *bucket_id {
|
||||
aliases.insert(alias_ent.name.clone(), *bucket_id);
|
||||
lx marked this conversation as resolved
Outdated
trinity-1686a
commented
this is a lot of indentation. It may be possible to remove a few layers by using Option::and_then, and doing this is a lot of indentation. It may be possible to remove a few layers by using Option::and_then, and doing `param.aliases.items().filter(|(_,_,active)| active)`
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
buckets_by_id.insert(bucket_id, param);
|
||||
}
|
||||
}
|
||||
}
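Following up on trinity-1686a's suggestion above, here is a rough sketch (mine, not the code that was merged) of how the loop could shed a few indentation levels: early `continue` for the missing/deleted cases, the suggested `filter` on active aliases, and `and_then` to collapse the nested `Option` checks. Names and types are taken from the surrounding diff, so treat it as an illustration only.

```rust
// Sketch only: same behaviour as the loop above, with the nesting flattened.
for bucket_id in ids.iter() {
    // Skip buckets that are missing or deleted instead of nesting `if let`s.
    let param = match garage.bucket_table.get(bucket_id, &EmptyKey).await? {
        Some(bucket) => match bucket.state {
            Deletable::Present(param) => param,
            _ => continue,
        },
        None => continue,
    };

    // Only walk aliases that are still active, as suggested.
    for (alias, _, _) in param.aliases.items().iter().filter(|(_, _, active)| *active) {
        // `and_then` collapses the Option layers that caused the deep indentation.
        let points_back = garage
            .bucket_alias_table
            .get(&EmptyKey, alias)
            .await?
            .and_then(|ent| ent.state.get().as_option().map(|p| p.bucket_id))
            .map_or(false, |id| id == *bucket_id);
        if points_back {
            // Assumes the alias entry's name equals the alias key, as in the diff.
            aliases.insert(alias.clone(), *bucket_id);
        }
    }

    buckets_by_id.insert(bucket_id, param);
}
```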
|
||||
|
||||
for (alias, _, id) in key_state.local_aliases.items() {
|
||||
if let Some(id) = id.as_option() {
|
||||
aliases.insert(alias.clone(), *id);
|
||||
}
|
||||
}
|
||||
|
||||
// Generate response
|
||||
let list_buckets = s3_xml::ListAllMyBucketsResult {
|
||||
owner: s3_xml::Owner {
|
||||
display_name: s3_xml::Value(api_key.name.get().to_string()),
|
||||
id: s3_xml::Value(api_key.key_id.to_string()),
|
||||
},
|
||||
buckets: s3_xml::BucketList {
|
||||
entries: api_key
|
||||
.authorized_buckets
|
||||
.items()
|
||||
entries: aliases
|
||||
.iter()
|
||||
.filter(|(_, _, perms)| perms.allow_read || perms.allow_write)
|
||||
.map(|(name, ts, _)| s3_xml::Bucket {
|
||||
creation_date: s3_xml::Value(msec_to_rfc3339(*ts)),
|
||||
.filter_map(|(name, id)| buckets_by_id.get(id).map(|p| (name, id, p)))
|
||||
.map(|(name, _id, param)| s3_xml::Bucket {
|
||||
creation_date: s3_xml::Value(msec_to_rfc3339(param.creation_date)),
|
||||
name: s3_xml::Value(name.to_string()),
|
||||
})
|
||||
.collect(),
|
||||
|
|
|
@@ -18,14 +18,14 @@ use crate::s3_xml;
|
|||
pub async fn handle_copy(
|
||||
garage: Arc<Garage>,
|
||||
req: &Request<Body>,
|
||||
dest_bucket: &str,
|
||||
dest_bucket_id: Uuid,
|
||||
dest_key: &str,
|
||||
source_bucket: &str,
|
||||
source_bucket_id: Uuid,
|
||||
source_key: &str,
|
||||
) -> Result<Response<Body>, Error> {
|
||||
let source_object = garage
|
||||
.object_table
|
||||
.get(&source_bucket.to_string(), &source_key.to_string())
|
||||
.get(&source_bucket_id, &source_key.to_string())
|
||||
.await?
|
||||
.ok_or(Error::NotFound)?;
|
||||
|
||||
|
@@ -76,7 +76,7 @@ pub async fn handle_copy(
|
|||
)),
|
||||
};
|
||||
let dest_object = Object::new(
|
||||
dest_bucket.to_string(),
|
||||
dest_bucket_id,
|
||||
dest_key.to_string(),
|
||||
vec![dest_object_version],
|
||||
);
|
||||
|
@@ -99,7 +99,7 @@ pub async fn handle_copy(
|
|||
state: ObjectVersionState::Uploading(new_meta.headers.clone()),
|
||||
};
|
||||
let tmp_dest_object = Object::new(
|
||||
dest_bucket.to_string(),
|
||||
dest_bucket_id,
|
||||
dest_key.to_string(),
|
||||
vec![tmp_dest_object_version],
|
||||
);
|
||||
|
@@ -109,12 +109,8 @@ pub async fn handle_copy(
|
|||
// this means that the BlockRef entries linked to this version cannot be
|
||||
// marked as deleted (they are marked as deleted only if the Version
|
||||
// doesn't exist or is marked as deleted).
|
||||
let mut dest_version = Version::new(
|
||||
new_uuid,
|
||||
dest_bucket.to_string(),
|
||||
dest_key.to_string(),
|
||||
false,
|
||||
);
|
||||
let mut dest_version =
|
||||
Version::new(new_uuid, dest_bucket_id, dest_key.to_string(), false);
|
||||
garage.version_table.insert(&dest_version).await?;
|
||||
|
||||
// Fill in block list for version and insert block refs
|
||||
|
@@ -151,7 +147,7 @@ pub async fn handle_copy(
|
|||
)),
|
||||
};
|
||||
let dest_object = Object::new(
|
||||
dest_bucket.to_string(),
|
||||
dest_bucket_id,
|
||||
dest_key.to_string(),
|
||||
vec![dest_object_version],
|
||||
);
|
||||
|
|
|
@@ -14,12 +14,12 @@ use crate::signature::verify_signed_content;
|
|||
|
||||
async fn handle_delete_internal(
|
||||
garage: &Garage,
|
||||
bucket: &str,
|
||||
bucket_id: Uuid,
|
||||
key: &str,
|
||||
) -> Result<(Uuid, Uuid), Error> {
|
||||
let object = garage
|
||||
.object_table
|
||||
.get(&bucket.to_string(), &key.to_string())
|
||||
.get(&bucket_id, &key.to_string())
|
||||
.await?
|
||||
.ok_or(Error::NotFound)?; // No need to delete
|
||||
|
||||
|
@@ -45,7 +45,7 @@ async fn handle_delete_internal(
|
|||
let version_uuid = gen_uuid();
|
||||
|
||||
let object = Object::new(
|
||||
bucket.into(),
|
||||
bucket_id,
|
||||
key.into(),
|
||||
vec![ObjectVersion {
|
||||
uuid: version_uuid,
|
||||
|
@@ -61,11 +61,11 @@
|
|||
|
||||
pub async fn handle_delete(
|
||||
garage: Arc<Garage>,
|
||||
bucket: &str,
|
||||
bucket_id: Uuid,
|
||||
key: &str,
|
||||
) -> Result<Response<Body>, Error> {
|
||||
let (_deleted_version, delete_marker_version) =
|
||||
handle_delete_internal(&garage, bucket, key).await?;
|
||||
handle_delete_internal(&garage, bucket_id, key).await?;
|
||||
|
||||
Ok(Response::builder()
|
||||
.header("x-amz-version-id", hex::encode(delete_marker_version))
|
||||
|
@@ -76,7 +76,7 @@ pub async fn handle_delete(
|
|||
|
||||
pub async fn handle_delete_objects(
|
||||
garage: Arc<Garage>,
|
||||
bucket: &str,
|
||||
bucket_id: Uuid,
|
||||
req: Request<Body>,
|
||||
content_sha256: Option<Hash>,
|
||||
) -> Result<Response<Body>, Error> {
|
||||
|
@@ -90,7 +90,7 @@ pub async fn handle_delete_objects(
|
|||
let mut ret_errors = Vec::new();
|
||||
|
||||
for obj in cmd.objects.iter() {
|
||||
match handle_delete_internal(&garage, bucket, &obj.key).await {
|
||||
match handle_delete_internal(&garage, bucket_id, &obj.key).await {
|
||||
Ok((deleted_version, delete_marker_version)) => {
|
||||
if cmd.quiet {
|
||||
continue;
|
||||
|
|
|
@@ -7,6 +7,7 @@ use hyper::body::Bytes;
|
|||
use hyper::{Body, Request, Response, StatusCode};
|
||||
|
||||
use garage_table::EmptyKey;
|
||||
use garage_util::data::*;
|
||||
|
||||
use garage_model::garage::Garage;
|
||||
use garage_model::object_table::*;
|
||||
|
@@ -84,12 +85,12 @@ fn try_answer_cached(
|
|||
pub async fn handle_head(
|
||||
garage: Arc<Garage>,
|
||||
req: &Request<Body>,
|
||||
bucket: &str,
|
||||
bucket_id: Uuid,
|
||||
key: &str,
|
||||
) -> Result<Response<Body>, Error> {
|
||||
let object = garage
|
||||
.object_table
|
||||
.get(&bucket.to_string(), &key.to_string())
|
||||
.get(&bucket_id, &key.to_string())
|
||||
.await?
|
||||
.ok_or(Error::NotFound)?;
|
||||
|
||||
|
@@ -123,12 +124,12 @@ pub async fn handle_head(
|
|||
pub async fn handle_get(
|
||||
garage: Arc<Garage>,
|
||||
req: &Request<Body>,
|
||||
bucket: &str,
|
||||
bucket_id: Uuid,
|
||||
key: &str,
|
||||
) -> Result<Response<Body>, Error> {
|
||||
let object = garage
|
||||
.object_table
|
||||
.get(&bucket.to_string(), &key.to_string())
|
||||
.get(&bucket_id, &key.to_string())
|
||||
.await?
|
||||
.ok_or(Error::NotFound)?;
|
||||
|
||||
|
|
|
@@ -3,6 +3,7 @@ use std::sync::Arc;
|
|||
|
||||
use hyper::{Body, Response};
|
||||
|
||||
use garage_util::data::*;
|
||||
use garage_util::error::Error as GarageError;
|
||||
use garage_util::time::*;
|
||||
|
||||
|
@@ -18,7 +19,8 @@ use crate::s3_xml;
|
|||
#[derive(Debug)]
|
||||
pub struct ListObjectsQuery {
|
||||
pub is_v2: bool,
|
||||
pub bucket: String,
|
||||
pub bucket_name: String,
|
||||
pub bucket_id: Uuid,
|
||||
pub delimiter: Option<String>,
|
||||
pub max_keys: usize,
|
||||
pub prefix: String,
|
||||
|
@@ -102,7 +104,7 @@ pub async fn handle_list(
|
|||
let objects = garage
|
||||
.object_table
|
||||
.get_range(
|
||||
&query.bucket,
|
||||
&query.bucket_id,
|
||||
Some(next_chunk_start.clone()),
|
||||
Some(DeletedFilter::NotDeleted),
|
||||
query.max_keys + 1,
|
||||
|
@@ -232,7 +234,7 @@ pub async fn handle_list(
|
|||
|
||||
let mut result = s3_xml::ListBucketResult {
|
||||
xmlns: (),
|
||||
name: s3_xml::Value(query.bucket.to_string()),
|
||||
name: s3_xml::Value(query.bucket_name.to_string()),
|
||||
prefix: uriencode_maybe(&query.prefix, query.urlencode_resp),
|
||||
marker: None,
|
||||
next_marker: None,
|
||||
|
|
|
@@ -24,7 +24,7 @@ use crate::signature::verify_signed_content;
|
|||
pub async fn handle_put(
|
||||
garage: Arc<Garage>,
|
||||
req: Request<Body>,
|
||||
bucket: &str,
|
||||
bucket_id: Uuid,
|
||||
key: &str,
|
||||
content_sha256: Option<Hash>,
|
||||
) -> Result<Response<Body>, Error> {
|
||||
|
@@ -77,7 +77,7 @@ pub async fn handle_put(
|
|||
)),
|
||||
};
|
||||
|
||||
let object = Object::new(bucket.into(), key.into(), vec![object_version]);
|
||||
let object = Object::new(bucket_id, key.into(), vec![object_version]);
|
||||
garage.object_table.insert(&object).await?;
|
||||
|
||||
return Ok(put_response(version_uuid, data_md5sum_hex));
|
||||
|
@@ -90,14 +90,14 @@ pub async fn handle_put(
|
|||
timestamp: version_timestamp,
|
||||
state: ObjectVersionState::Uploading(headers.clone()),
|
||||
};
|
||||
let object = Object::new(bucket.into(), key.into(), vec![object_version.clone()]);
|
||||
let object = Object::new(bucket_id, key.into(), vec![object_version.clone()]);
|
||||
garage.object_table.insert(&object).await?;
|
||||
|
||||
// Initialize corresponding entry in version table
|
||||
// Write this entry now, even with empty block list,
|
||||
// to prevent block_ref entries from being deleted (they can be deleted
|
||||
// if they reference a version that isn't found in the version table)
|
||||
let version = Version::new(version_uuid, bucket.into(), key.into(), false);
|
||||
let version = Version::new(version_uuid, bucket_id, key.into(), false);
|
||||
garage.version_table.insert(&version).await?;
|
||||
|
||||
// Transfer data and verify checksum
|
||||
|
@@ -127,7 +127,7 @@ pub async fn handle_put(
|
|||
Err(e) => {
|
||||
// Mark object as aborted, this will free the blocks further down
|
||||
object_version.state = ObjectVersionState::Aborted;
|
||||
let object = Object::new(bucket.into(), key.into(), vec![object_version.clone()]);
|
||||
let object = Object::new(bucket_id, key.into(), vec![object_version.clone()]);
|
||||
garage.object_table.insert(&object).await?;
|
||||
return Err(e);
|
||||
}
|
||||
|
@@ -143,7 +143,7 @@ pub async fn handle_put(
|
|||
},
|
||||
first_block_hash,
|
||||
));
|
||||
let object = Object::new(bucket.into(), key.into(), vec![object_version]);
|
||||
let object = Object::new(bucket_id, key.into(), vec![object_version]);
|
||||
garage.object_table.insert(&object).await?;
|
||||
|
||||
Ok(put_response(version_uuid, md5sum_hex))
|
||||
|
@@ -315,7 +315,8 @@ pub fn put_response(version_uuid: Uuid, md5sum_hex: String) -> Response<Body> {
|
|||
pub async fn handle_create_multipart_upload(
|
||||
garage: Arc<Garage>,
|
||||
req: &Request<Body>,
|
||||
bucket: &str,
|
||||
bucket_name: &str,
|
||||
bucket_id: Uuid,
|
||||
key: &str,
|
||||
) -> Result<Response<Body>, Error> {
|
||||
let version_uuid = gen_uuid();
|
||||
|
@@ -327,20 +328,20 @@ pub async fn handle_create_multipart_upload(
|
|||
timestamp: now_msec(),
|
||||
state: ObjectVersionState::Uploading(headers),
|
||||
};
|
||||
let object = Object::new(bucket.to_string(), key.to_string(), vec![object_version]);
|
||||
let object = Object::new(bucket_id, key.to_string(), vec![object_version]);
|
||||
garage.object_table.insert(&object).await?;
|
||||
|
||||
// Insert empty version so that block_ref entries refer to something
|
||||
// (they are inserted concurrently with blocks in the version table, so
|
||||
// there is the possibility that they are inserted before the version table
|
||||
// is created, in which case it is allowed to delete them, e.g. in repair_*)
|
||||
let version = Version::new(version_uuid, bucket.into(), key.into(), false);
|
||||
let version = Version::new(version_uuid, bucket_id, key.into(), false);
|
||||
garage.version_table.insert(&version).await?;
|
||||
|
||||
// Send success response
|
||||
let result = s3_xml::InitiateMultipartUploadResult {
|
||||
xmlns: (),
|
||||
bucket: s3_xml::Value(bucket.to_string()),
|
||||
bucket: s3_xml::Value(bucket_name.to_string()),
|
||||
key: s3_xml::Value(key.to_string()),
|
||||
upload_id: s3_xml::Value(hex::encode(version_uuid)),
|
||||
};
|
||||
|
@@ -352,7 +353,7 @@ pub async fn handle_create_multipart_upload(
|
|||
pub async fn handle_put_part(
|
||||
garage: Arc<Garage>,
|
||||
req: Request<Body>,
|
||||
bucket: &str,
|
||||
bucket_id: Uuid,
|
||||
key: &str,
|
||||
part_number: u64,
|
||||
upload_id: &str,
|
||||
|
@@ -366,12 +367,11 @@ pub async fn handle_put_part(
|
|||
};
|
||||
|
||||
// Read first chunk, and at the same time try to get object to see if it exists
|
||||
let bucket = bucket.to_string();
|
||||
let key = key.to_string();
|
||||
let mut chunker = BodyChunker::new(req.into_body(), garage.config.block_size);
|
||||
|
||||
let (object, first_block) =
|
||||
futures::try_join!(garage.object_table.get(&bucket, &key), chunker.next(),)?;
|
||||
futures::try_join!(garage.object_table.get(&bucket_id, &key), chunker.next(),)?;
|
||||
|
||||
// Check object is valid and multipart block can be accepted
|
||||
let first_block = first_block.ok_or_else(|| Error::BadRequest("Empty body".to_string()))?;
|
||||
|
@@ -386,7 +386,7 @@ pub async fn handle_put_part(
|
|||
}
|
||||
|
||||
// Copy block to store
|
||||
let version = Version::new(version_uuid, bucket, key, false);
|
||||
let version = Version::new(version_uuid, bucket_id, key, false);
|
||||
let first_block_hash = blake2sum(&first_block[..]);
|
||||
let (_, data_md5sum, data_sha256sum) = read_and_put_blocks(
|
||||
&garage,
|
||||
|
@@ -424,7 +424,8 @@ pub async fn handle_put_part(
|
|||
pub async fn handle_complete_multipart_upload(
|
||||
garage: Arc<Garage>,
|
||||
req: Request<Body>,
|
||||
bucket: &str,
|
||||
bucket_name: &str,
|
||||
bucket_id: Uuid,
|
||||
key: &str,
|
||||
upload_id: &str,
|
||||
content_sha256: Option<Hash>,
|
||||
|
@@ -442,10 +443,9 @@ pub async fn handle_complete_multipart_upload(
|
|||
|
||||
let version_uuid = decode_upload_id(upload_id)?;
|
||||
|
||||
let bucket = bucket.to_string();
|
||||
let key = key.to_string();
|
||||
let (object, version) = futures::try_join!(
|
||||
garage.object_table.get(&bucket, &key),
|
||||
garage.object_table.get(&bucket_id, &key),
|
||||
garage.version_table.get(&version_uuid, &EmptyKey),
|
||||
)?;
|
||||
|
||||
|
@@ -510,14 +510,14 @@ pub async fn handle_complete_multipart_upload(
|
|||
version.blocks.items()[0].1.hash,
|
||||
));
|
||||
|
||||
let final_object = Object::new(bucket.clone(), key.clone(), vec![object_version]);
|
||||
let final_object = Object::new(bucket_id, key.clone(), vec![object_version]);
|
||||
garage.object_table.insert(&final_object).await?;
|
||||
|
||||
// Send response saying ok we're done
|
||||
let result = s3_xml::CompleteMultipartUploadResult {
|
||||
xmlns: (),
|
||||
location: None,
|
||||
bucket: s3_xml::Value(bucket),
|
||||
bucket: s3_xml::Value(bucket_name.to_string()),
|
||||
key: s3_xml::Value(key),
|
||||
etag: s3_xml::Value(etag),
|
||||
};
|
||||
|
@@ -528,7 +528,7 @@ pub async fn handle_complete_multipart_upload(
|
|||
|
||||
pub async fn handle_abort_multipart_upload(
|
||||
garage: Arc<Garage>,
|
||||
bucket: &str,
|
||||
bucket_id: Uuid,
|
||||
key: &str,
|
||||
upload_id: &str,
|
||||
) -> Result<Response<Body>, Error> {
|
||||
|
@@ -536,7 +536,7 @@ pub async fn handle_abort_multipart_upload(
|
|||
|
||||
let object = garage
|
||||
.object_table
|
||||
.get(&bucket.to_string(), &key.to_string())
|
||||
.get(&bucket_id, &key.to_string())
|
||||
.await?;
|
||||
let object = object.ok_or_else(|| Error::BadRequest("Object not found".to_string()))?;
|
||||
|
||||
|
@@ -550,7 +550,7 @@ pub async fn handle_abort_multipart_upload(
|
|||
};
|
||||
|
||||
object_version.state = ObjectVersionState::Aborted;
|
||||
let final_object = Object::new(bucket.to_string(), key.to_string(), vec![object_version]);
|
||||
let final_object = Object::new(bucket_id, key.to_string(), vec![object_version]);
|
||||
garage.object_table.insert(&final_object).await?;
|
||||
|
||||
Ok(Response::new(Body::from(vec![])))
|
||||
|
|
|
@@ -7,9 +7,10 @@ use serde::{Deserialize, Serialize};
|
|||
use crate::error::*;
|
||||
use crate::s3_xml::{xmlns_tag, IntValue, Value};
|
||||
use crate::signature::verify_signed_content;
|
||||
use garage_model::bucket_table::BucketState;
|
||||
|
||||
use garage_model::garage::Garage;
|
||||
use garage_table::*;
|
||||
use garage_util::crdt;
|
||||
use garage_util::data::Hash;
|
||||
|
||||
pub async fn handle_delete_website(
|
||||
|
@@ -17,14 +18,18 @@ pub async fn handle_delete_website(
|
|||
bucket: String,
|
||||
) -> Result<Response<Body>, Error> {
|
||||
let mut bucket = garage
|
||||
.bucket_table
|
||||
.bucket_alias_table
|
||||
.get(&EmptyKey, &bucket)
|
||||
.await?
|
||||
.ok_or(Error::NotFound)?;
|
||||
|
||||
if let BucketState::Present(state) = bucket.state.get_mut() {
|
||||
state.website.update(false);
|
||||
garage.bucket_table.insert(&bucket).await?;
|
||||
if let crdt::Deletable::Present(state) = bucket.state.get_mut() {
|
||||
let mut new_param = state.clone();
|
||||
new_param.website_access = false;
|
||||
bucket.state.update(crdt::Deletable::present(new_param));
|
||||
garage.bucket_alias_table.insert(&bucket).await?;
|
||||
} else {
|
||||
unreachable!();
|
||||
}
|
||||
|
||||
Ok(Response::builder()
|
||||
|
@@ -43,7 +48,7 @@ pub async fn handle_put_website(
|
|||
verify_signed_content(content_sha256, &body[..])?;
|
||||
|
||||
let mut bucket = garage
|
||||
.bucket_table
|
||||
.bucket_alias_table
|
||||
.get(&EmptyKey, &bucket)
|
||||
.await?
|
||||
.ok_or(Error::NotFound)?;
|
||||
|
@@ -51,9 +56,13 @@ pub async fn handle_put_website(
|
|||
let conf: WebsiteConfiguration = from_reader(&body as &[u8])?;
|
||||
conf.validate()?;
|
||||
|
||||
if let BucketState::Present(state) = bucket.state.get_mut() {
|
||||
state.website.update(true);
|
||||
garage.bucket_table.insert(&bucket).await?;
|
||||
if let crdt::Deletable::Present(state) = bucket.state.get() {
|
||||
let mut new_param = state.clone();
|
||||
new_param.website_access = true;
|
||||
bucket.state.update(crdt::Deletable::present(new_param));
|
||||
garage.bucket_alias_table.insert(&bucket).await?;
|
||||
} else {
|
||||
unreachable!();
|
||||
}
|
||||
|
||||
Ok(Response::builder()
|
||||
|
|
|
@@ -64,7 +64,7 @@ pub async fn check_signature(
|
|||
.key_table
|
||||
.get(&EmptyKey, &authorization.key_id)
|
||||
.await?
|
||||
.filter(|k| !k.deleted.get())
|
||||
.filter(|k| !k.state.is_deleted())
|
||||
.ok_or_else(|| Error::Forbidden(format!("No such key: {}", authorization.key_id)))?;
|
||||
|
||||
let canonical_request = canonical_request(
|
||||
|
|
|
@@ -1,6 +1,6 @@
 [package]
 name = "garage"
-version = "0.5.0"
+version = "0.6.0"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -15,12 +15,12 @@ path = "main.rs"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

 [dependencies]
-garage_api = { version = "0.5.0", path = "../api" }
-garage_model = { version = "0.5.0", path = "../model" }
-garage_rpc = { version = "0.5.0", path = "../rpc" }
-garage_table = { version = "0.5.0", path = "../table" }
-garage_util = { version = "0.5.0", path = "../util" }
-garage_web = { version = "0.5.0", path = "../web" }
+garage_api = { version = "0.6.0", path = "../api" }
+garage_model = { version = "0.6.0", path = "../model" }
+garage_rpc = { version = "0.6.0", path = "../rpc" }
+garage_table = { version = "0.6.0", path = "../table" }
+garage_util = { version = "0.6.0", path = "../util" }
+garage_web = { version = "0.6.0", path = "../web" }

 bytes = "1.0"
 git-version = "0.3.4"
@@ -5,17 +5,21 @@ use std::sync::Arc;
|
|||
use async_trait::async_trait;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use garage_util::error::Error;
|
||||
use garage_util::crdt::*;
|
||||
use garage_util::data::*;
|
||||
use garage_util::error::*;
|
||||
use garage_util::time::*;
|
||||
|
||||
use garage_table::crdt::Crdt;
|
||||
use garage_table::replication::*;
|
||||
use garage_table::*;
|
||||
|
||||
use garage_rpc::*;
|
||||
|
||||
use garage_model::bucket_alias_table::*;
|
||||
use garage_model::bucket_table::*;
|
||||
use garage_model::garage::Garage;
|
||||
use garage_model::key_table::*;
|
||||
use garage_model::permission::*;
|
||||
|
||||
use crate::cli::*;
|
||||
use crate::repair::Repair;
|
||||
|
@@ -31,7 +35,7 @@ pub enum AdminRpc {
|
|||
|
||||
// Replies
|
||||
Ok(String),
|
||||
BucketList(Vec<String>),
|
||||
BucketList(Vec<BucketAlias>),
|
||||
BucketInfo(Bucket),
|
||||
KeyList(Vec<(String, String)>),
|
||||
KeyInfo(Key),
|
||||
|
@@ -56,203 +60,331 @@ impl AdminRpcHandler {
|
|||
|
||||
async fn handle_bucket_cmd(&self, cmd: &BucketOperation) -> Result<AdminRpc, Error> {
|
||||
match cmd {
|
||||
BucketOperation::List => {
|
||||
let bucket_names = self
|
||||
.garage
|
||||
.bucket_table
|
||||
.get_range(&EmptyKey, None, Some(DeletedFilter::NotDeleted), 10000)
|
||||
.await?
|
||||
.iter()
|
||||
.map(|b| b.name.to_string())
|
||||
.collect::<Vec<_>>();
|
||||
Ok(AdminRpc::BucketList(bucket_names))
|
||||
}
|
||||
BucketOperation::List => self.handle_list_buckets().await,
|
||||
BucketOperation::Info(query) => {
|
||||
let bucket = self.get_existing_bucket(&query.name).await?;
|
||||
let bucket_id = self
|
||||
.garage
|
||||
.bucket_helper()
|
||||
.resolve_global_bucket_name(&query.name)
|
||||
.await?
|
||||
.ok_or_message("Bucket not found")?;
|
||||
let bucket = self
|
||||
.garage
|
||||
.bucket_helper()
|
||||
.get_existing_bucket(bucket_id)
|
||||
.await?;
|
||||
Ok(AdminRpc::BucketInfo(bucket))
|
||||
}
|
||||
BucketOperation::Create(query) => {
|
||||
let bucket = match self.garage.bucket_table.get(&EmptyKey, &query.name).await? {
|
||||
Some(mut bucket) => {
|
||||
if !bucket.is_deleted() {
|
||||
return Err(Error::BadRpc(format!(
|
||||
"Bucket {} already exists",
|
||||
query.name
|
||||
)));
|
||||
}
|
||||
bucket
|
||||
.state
|
||||
.update(BucketState::Present(BucketParams::new()));
|
||||
bucket
|
||||
}
|
||||
None => Bucket::new(query.name.clone()),
|
||||
};
|
||||
self.garage.bucket_table.insert(&bucket).await?;
|
||||
Ok(AdminRpc::Ok(format!("Bucket {} was created.", query.name)))
|
||||
}
|
||||
BucketOperation::Delete(query) => {
|
||||
let mut bucket = self.get_existing_bucket(&query.name).await?;
|
||||
let objects = self
|
||||
.garage
|
||||
.object_table
|
||||
.get_range(&query.name, None, Some(DeletedFilter::NotDeleted), 10)
|
||||
.await?;
|
||||
if !objects.is_empty() {
|
||||
return Err(Error::BadRpc(format!("Bucket {} is not empty", query.name)));
|
||||
}
|
||||
if !query.yes {
|
||||
return Err(Error::BadRpc(
|
||||
"Add --yes flag to really perform this operation".to_string(),
|
||||
));
|
||||
}
|
||||
// --- done checking, now commit ---
|
||||
for (key_id, _, _) in bucket.authorized_keys() {
|
||||
if let Some(key) = self.garage.key_table.get(&EmptyKey, key_id).await? {
|
||||
if !key.deleted.get() {
|
||||
self.update_key_bucket(&key, &bucket.name, false, false)
|
||||
.await?;
|
||||
}
|
||||
} else {
|
||||
return Err(Error::Message(format!("Key not found: {}", key_id)));
|
||||
}
|
||||
}
|
||||
bucket.state.update(BucketState::Deleted);
|
||||
self.garage.bucket_table.insert(&bucket).await?;
|
||||
Ok(AdminRpc::Ok(format!("Bucket {} was deleted.", query.name)))
|
||||
}
|
||||
BucketOperation::Allow(query) => {
|
||||
let key = self.get_existing_key(&query.key_pattern).await?;
|
||||
let bucket = self.get_existing_bucket(&query.bucket).await?;
|
||||
let allow_read = query.read || key.allow_read(&query.bucket);
|
||||
let allow_write = query.write || key.allow_write(&query.bucket);
|
||||
self.update_key_bucket(&key, &query.bucket, allow_read, allow_write)
|
||||
.await?;
|
||||
self.update_bucket_key(bucket, &key.key_id, allow_read, allow_write)
|
||||
.await?;
|
||||
Ok(AdminRpc::Ok(format!(
|
||||
"New permissions for {} on {}: read {}, write {}.",
|
||||
&key.key_id, &query.bucket, allow_read, allow_write
|
||||
)))
|
||||
}
|
||||
BucketOperation::Deny(query) => {
|
||||
let key = self.get_existing_key(&query.key_pattern).await?;
|
||||
let bucket = self.get_existing_bucket(&query.bucket).await?;
|
||||
let allow_read = !query.read && key.allow_read(&query.bucket);
|
||||
let allow_write = !query.write && key.allow_write(&query.bucket);
|
||||
self.update_key_bucket(&key, &query.bucket, allow_read, allow_write)
|
||||
.await?;
|
||||
self.update_bucket_key(bucket, &key.key_id, allow_read, allow_write)
|
||||
.await?;
|
||||
Ok(AdminRpc::Ok(format!(
|
||||
"New permissions for {} on {}: read {}, write {}.",
|
||||
&key.key_id, &query.bucket, allow_read, allow_write
|
||||
)))
|
||||
}
|
||||
BucketOperation::Website(query) => {
|
||||
let mut bucket = self.get_existing_bucket(&query.bucket).await?;
|
||||
BucketOperation::Create(query) => self.handle_create_bucket(&query.name).await,
|
||||
BucketOperation::Delete(query) => self.handle_delete_bucket(query).await,
|
||||
BucketOperation::Allow(query) => self.handle_bucket_allow(query).await,
|
||||
BucketOperation::Deny(query) => self.handle_bucket_deny(query).await,
|
||||
BucketOperation::Website(query) => self.handle_bucket_website(query).await,
|
||||
}
|
||||
}
|
||||
|
||||
if !(query.allow ^ query.deny) {
|
||||
return Err(Error::Message(
|
||||
"You must specify exactly one flag, either --allow or --deny".to_string(),
|
||||
));
|
||||
}
|
||||
async fn handle_list_buckets(&self) -> Result<AdminRpc, Error> {
|
||||
let bucket_aliases = self
|
||||
.garage
|
||||
.bucket_alias_table
|
||||
.get_range(&EmptyKey, None, Some(DeletedFilter::NotDeleted), 10000)
|
||||
.await?;
|
||||
Ok(AdminRpc::BucketList(bucket_aliases))
|
||||
}
|
||||
|
||||
if let BucketState::Present(state) = bucket.state.get_mut() {
|
||||
state.website.update(query.allow);
|
||||
self.garage.bucket_table.insert(&bucket).await?;
|
||||
let msg = if query.allow {
|
||||
format!("Website access allowed for {}", &query.bucket)
|
||||
} else {
|
||||
format!("Website access denied for {}", &query.bucket)
|
||||
};
|
||||
|
||||
Ok(AdminRpc::Ok(msg))
|
||||
} else {
|
||||
unreachable!();
|
||||
#[allow(clippy::ptr_arg)]
|
||||
async fn handle_create_bucket(&self, name: &String) -> Result<AdminRpc, Error> {
|
||||
let mut bucket = Bucket::new();
|
||||
let alias = match self.garage.bucket_alias_table.get(&EmptyKey, name).await? {
|
||||
Some(mut alias) => {
|
||||
if !alias.state.get().is_deleted() {
|
||||
return Err(Error::BadRpc(format!("Bucket {} already exists", name)));
|
||||
}
|
||||
alias.state.update(Deletable::Present(AliasParams {
|
||||
bucket_id: bucket.id,
|
||||
website_access: false,
|
||||
}));
|
||||
alias
|
||||
}
|
||||
None => BucketAlias::new(name.clone(), bucket.id, false),
|
||||
};
|
||||
bucket
|
||||
.state
|
||||
.as_option_mut()
|
||||
.unwrap()
|
||||
.aliases
|
||||
.update_in_place(name.clone(), true);
|
||||
self.garage.bucket_table.insert(&bucket).await?;
|
||||
self.garage.bucket_alias_table.insert(&alias).await?;
|
||||
Ok(AdminRpc::Ok(format!("Bucket {} was created.", name)))
|
||||
}
|
||||
|
||||
async fn handle_delete_bucket(&self, query: &DeleteBucketOpt) -> Result<AdminRpc, Error> {
|
||||
let mut bucket_alias = self
|
||||
.garage
|
||||
.bucket_alias_table
|
||||
.get(&EmptyKey, &query.name)
|
||||
.await?
|
||||
.filter(|a| !a.is_deleted())
|
||||
.ok_or_message(format!("Bucket {} does not exist", query.name))?;
|
||||
|
||||
let bucket_id = bucket_alias.state.get().as_option().unwrap().bucket_id;
|
||||
|
||||
// Check bucket doesn't have other aliases
|
||||
let mut bucket = self
|
||||
.garage
|
||||
.bucket_helper()
|
||||
.get_existing_bucket(bucket_id)
|
||||
.await?;
|
||||
let bucket_state = bucket.state.as_option().unwrap();
|
||||
if bucket_state
|
||||
.aliases
|
||||
.items()
|
||||
.iter()
|
||||
.filter(|(_, _, active)| *active)
|
||||
.any(|(name, _, _)| name != &query.name)
|
||||
{
|
||||
return Err(Error::Message(format!("Bucket {} still has other global aliases. Use `bucket unalias` to delete them one by one.", query.name)));
|
||||
}
|
||||
if bucket_state
|
||||
.local_aliases
|
||||
.items()
|
||||
.iter()
|
||||
.any(|(_, _, active)| *active)
|
||||
{
|
||||
return Err(Error::Message(format!("Bucket {} still has other local aliases. Use `bucket unalias` to delete them one by one.", query.name)));
|
||||
}
|
||||
|
||||
// Check bucket is empty
|
||||
let objects = self
|
||||
.garage
|
||||
.object_table
|
||||
.get_range(&bucket_id, None, Some(DeletedFilter::NotDeleted), 10)
|
||||
.await?;
|
||||
if !objects.is_empty() {
|
||||
return Err(Error::BadRpc(format!("Bucket {} is not empty", query.name)));
|
||||
}
|
||||
|
||||
if !query.yes {
|
||||
return Err(Error::BadRpc(
|
||||
"Add --yes flag to really perform this operation".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
// --- done checking, now commit ---
|
||||
// 1. delete authorization from keys that had access
|
||||
for (key_id, _) in bucket.authorized_keys() {
|
||||
if let Some(key) = self.garage.key_table.get(&EmptyKey, key_id).await? {
|
||||
if !key.state.is_deleted() {
|
||||
self.update_key_bucket(&key, bucket.id, false, false)
|
||||
.await?;
|
||||
}
|
||||
} else {
|
||||
return Err(Error::Message(format!("Key not found: {}", key_id)));
|
||||
}
|
||||
}
|
||||
// 2. delete bucket alias
|
||||
bucket_alias.state.update(Deletable::Deleted);
|
||||
self.garage.bucket_alias_table.insert(&bucket_alias).await?;
|
||||
// 3. delete bucket alias
|
||||
bucket.state = Deletable::delete();
|
||||
self.garage.bucket_table.insert(&bucket).await?;
|
||||
|
||||
Ok(AdminRpc::Ok(format!("Bucket {} was deleted.", query.name)))
|
||||
}
|
||||
|
||||
async fn handle_bucket_allow(&self, query: &PermBucketOpt) -> Result<AdminRpc, Error> {
|
||||
let bucket_id = self
|
||||
.garage
|
||||
.bucket_helper()
|
||||
.resolve_global_bucket_name(&query.bucket)
|
||||
.await?
|
||||
.ok_or_message("Bucket not found")?;
|
||||
let bucket = self
|
||||
.garage
|
||||
.bucket_helper()
|
||||
.get_existing_bucket(bucket_id)
|
||||
.await?;
|
||||
let key = self.get_existing_key(&query.key_pattern).await?;
|
||||
|
||||
let allow_read = query.read || key.allow_read(&bucket_id);
|
||||
let allow_write = query.write || key.allow_write(&bucket_id);
|
||||
|
||||
let new_perm = self
|
||||
.update_key_bucket(&key, bucket_id, allow_read, allow_write)
|
||||
.await?;
|
||||
self.update_bucket_key(bucket, &key.key_id, new_perm)
|
||||
.await?;
|
||||
|
||||
Ok(AdminRpc::Ok(format!(
|
||||
"New permissions for {} on {}: read {}, write {}.",
|
||||
&key.key_id, &query.bucket, allow_read, allow_write
|
||||
)))
|
||||
}
|
||||
|
||||
async fn handle_bucket_deny(&self, query: &PermBucketOpt) -> Result<AdminRpc, Error> {
|
||||
let bucket_id = self
|
||||
.garage
|
||||
.bucket_helper()
|
||||
.resolve_global_bucket_name(&query.bucket)
|
||||
.await?
|
||||
.ok_or_message("Bucket not found")?;
|
||||
let bucket = self
|
||||
.garage
|
||||
.bucket_helper()
|
||||
.get_existing_bucket(bucket_id)
|
||||
.await?;
|
||||
let key = self.get_existing_key(&query.key_pattern).await?;
|
||||
|
||||
let allow_read = !query.read && key.allow_read(&bucket_id);
|
||||
let allow_write = !query.write && key.allow_write(&bucket_id);
|
||||
|
||||
let new_perm = self
|
||||
.update_key_bucket(&key, bucket_id, allow_read, allow_write)
|
||||
.await?;
|
||||
self.update_bucket_key(bucket, &key.key_id, new_perm)
|
||||
.await?;
|
||||
|
||||
Ok(AdminRpc::Ok(format!(
|
||||
"New permissions for {} on {}: read {}, write {}.",
|
||||
&key.key_id, &query.bucket, allow_read, allow_write
|
||||
)))
|
||||
}
|
||||
|
||||
async fn handle_bucket_website(&self, query: &WebsiteOpt) -> Result<AdminRpc, Error> {
|
||||
let mut bucket_alias = self
|
||||
.garage
|
||||
.bucket_alias_table
|
||||
.get(&EmptyKey, &query.bucket)
|
||||
.await?
|
||||
.filter(|a| !a.is_deleted())
|
||||
.ok_or_message(format!("Bucket {} does not exist", query.bucket))?;
|
||||
|
||||
let mut state = bucket_alias.state.get().as_option().unwrap().clone();
|
||||
|
||||
if !(query.allow ^ query.deny) {
|
||||
return Err(Error::Message(
|
||||
"You must specify exactly one flag, either --allow or --deny".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
state.website_access = query.allow;
|
||||
bucket_alias.state.update(Deletable::present(state));
|
||||
self.garage.bucket_alias_table.insert(&bucket_alias).await?;
|
||||
|
||||
let msg = if query.allow {
|
||||
format!("Website access allowed for {}", &query.bucket)
|
||||
} else {
|
||||
format!("Website access denied for {}", &query.bucket)
|
||||
};
|
||||
|
||||
Ok(AdminRpc::Ok(msg))
|
||||
}
|
||||
|
||||
async fn handle_key_cmd(&self, cmd: &KeyOperation) -> Result<AdminRpc, Error> {
|
||||
match cmd {
|
||||
KeyOperation::List => {
|
||||
let key_ids = self
|
||||
.garage
|
||||
.key_table
|
||||
.get_range(
|
||||
&EmptyKey,
|
||||
None,
|
||||
Some(KeyFilter::Deleted(DeletedFilter::NotDeleted)),
|
||||
10000,
|
||||
)
|
||||
.await?
|
||||
.iter()
|
||||
.map(|k| (k.key_id.to_string(), k.name.get().clone()))
|
||||
.collect::<Vec<_>>();
|
||||
Ok(AdminRpc::KeyList(key_ids))
|
||||
}
|
||||
KeyOperation::List => self.handle_list_keys().await,
|
||||
KeyOperation::Info(query) => {
|
||||
let key = self.get_existing_key(&query.key_pattern).await?;
|
||||
Ok(AdminRpc::KeyInfo(key))
|
||||
}
|
||||
KeyOperation::New(query) => {
|
||||
let key = Key::new(query.name.clone());
|
||||
self.garage.key_table.insert(&key).await?;
|
||||
Ok(AdminRpc::KeyInfo(key))
|
||||
}
|
||||
KeyOperation::Rename(query) => {
|
||||
let mut key = self.get_existing_key(&query.key_pattern).await?;
|
||||
key.name.update(query.new_name.clone());
|
||||
self.garage.key_table.insert(&key).await?;
|
||||
Ok(AdminRpc::KeyInfo(key))
|
||||
}
|
||||
KeyOperation::Delete(query) => {
|
||||
let key = self.get_existing_key(&query.key_pattern).await?;
|
||||
if !query.yes {
|
||||
return Err(Error::BadRpc(
|
||||
"Add --yes flag to really perform this operation".to_string(),
|
||||
));
|
||||
}
|
||||
// --- done checking, now commit ---
|
||||
for (ab_name, _, _) in key.authorized_buckets.items().iter() {
|
||||
if let Some(bucket) = self.garage.bucket_table.get(&EmptyKey, ab_name).await? {
|
||||
if !bucket.is_deleted() {
|
||||
self.update_bucket_key(bucket, &key.key_id, false, false)
|
||||
.await?;
|
||||
}
|
||||
} else {
|
||||
return Err(Error::Message(format!("Bucket not found: {}", ab_name)));
|
||||
}
|
||||
}
|
||||
let del_key = Key::delete(key.key_id.to_string());
|
||||
self.garage.key_table.insert(&del_key).await?;
|
||||
Ok(AdminRpc::Ok(format!(
|
||||
"Key {} was deleted successfully.",
|
||||
key.key_id
|
||||
)))
|
||||
}
|
||||
KeyOperation::Import(query) => {
|
||||
let prev_key = self.garage.key_table.get(&EmptyKey, &query.key_id).await?;
|
||||
if prev_key.is_some() {
|
||||
return Err(Error::Message(format!("Key {} already exists in data store. Even if it is deleted, we can't let you create a new key with the same ID. Sorry.", query.key_id)));
|
||||
}
|
||||
let imported_key = Key::import(&query.key_id, &query.secret_key, &query.name);
|
||||
self.garage.key_table.insert(&imported_key).await?;
|
||||
Ok(AdminRpc::KeyInfo(imported_key))
|
||||
}
|
||||
KeyOperation::New(query) => self.handle_create_key(query).await,
|
||||
KeyOperation::Rename(query) => self.handle_rename_key(query).await,
|
||||
KeyOperation::Delete(query) => self.handle_delete_key(query).await,
|
||||
KeyOperation::Import(query) => self.handle_import_key(query).await,
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(clippy::ptr_arg)]
|
||||
async fn get_existing_bucket(&self, bucket: &String) -> Result<Bucket, Error> {
|
||||
self.garage
|
||||
.bucket_table
|
||||
.get(&EmptyKey, bucket)
|
||||
async fn handle_list_keys(&self) -> Result<AdminRpc, Error> {
|
||||
let key_ids = self
|
||||
.garage
|
||||
.key_table
|
||||
.get_range(
|
||||
&EmptyKey,
|
||||
None,
|
||||
Some(KeyFilter::Deleted(DeletedFilter::NotDeleted)),
|
||||
10000,
|
||||
)
|
||||
.await?
|
||||
.filter(|b| !b.is_deleted())
|
||||
.map(Ok)
|
||||
.unwrap_or_else(|| Err(Error::BadRpc(format!("Bucket {} does not exist", bucket))))
|
||||
.iter()
|
||||
.map(|k| (k.key_id.to_string(), k.name.get().clone()))
|
||||
.collect::<Vec<_>>();
|
||||
Ok(AdminRpc::KeyList(key_ids))
|
||||
}
|
||||
|
||||
async fn handle_create_key(&self, query: &KeyNewOpt) -> Result<AdminRpc, Error> {
|
||||
let key = Key::new(query.name.clone());
|
||||
self.garage.key_table.insert(&key).await?;
|
||||
Ok(AdminRpc::KeyInfo(key))
|
||||
}
|
||||
|
||||
async fn handle_rename_key(&self, query: &KeyRenameOpt) -> Result<AdminRpc, Error> {
|
||||
let mut key = self.get_existing_key(&query.key_pattern).await?;
|
||||
key.name.update(query.new_name.clone());
|
||||
self.garage.key_table.insert(&key).await?;
|
||||
Ok(AdminRpc::KeyInfo(key))
|
||||
}
|
||||
|
||||
async fn handle_delete_key(&self, query: &KeyDeleteOpt) -> Result<AdminRpc, Error> {
|
||||
let mut key = self.get_existing_key(&query.key_pattern).await?;
|
||||
if !query.yes {
|
||||
return Err(Error::BadRpc(
|
||||
"Add --yes flag to really perform this operation".to_string(),
|
||||
));
|
||||
}
|
||||
let state = key.state.as_option_mut().unwrap();
|
||||
|
||||
// --- done checking, now commit ---
|
||||
// 1. Delete local aliases
|
||||
for (alias, _, to) in state.local_aliases.items().iter() {
|
||||
if let Deletable::Present(bucket_id) = to {
|
||||
if let Some(mut bucket) = self.garage.bucket_table.get(bucket_id, &EmptyKey).await?
|
||||
{
|
||||
if let Deletable::Present(bucket_state) = &mut bucket.state {
|
||||
bucket_state.local_aliases = bucket_state
|
||||
.local_aliases
|
||||
.update_mutator((key.key_id.to_string(), alias.to_string()), false);
|
||||
self.garage.bucket_table.insert(&bucket).await?;
|
||||
}
|
||||
} else {
|
||||
// ignore
|
||||
}
|
||||
}
|
||||
}
|
||||
// 2. Delete authorized buckets
|
||||
for (ab_id, auth) in state.authorized_buckets.items().iter() {
|
||||
if let Some(bucket) = self.garage.bucket_table.get(ab_id, &EmptyKey).await? {
|
||||
let new_perm = BucketKeyPerm {
|
||||
timestamp: increment_logical_clock(auth.timestamp),
|
||||
allow_read: false,
|
||||
allow_write: false,
|
||||
};
|
||||
if !bucket.is_deleted() {
|
||||
self.update_bucket_key(bucket, &key.key_id, new_perm)
|
||||
.await?;
|
||||
}
|
||||
} else {
|
||||
// ignore
|
||||
}
|
||||
}
|
||||
// 3. Actually delete key
|
||||
key.state = Deletable::delete();
|
||||
self.garage.key_table.insert(&key).await?;
|
||||
|
||||
Ok(AdminRpc::Ok(format!(
|
||||
"Key {} was deleted successfully.",
|
||||
key.key_id
|
||||
)))
|
||||
}
|
||||
|
||||
async fn handle_import_key(&self, query: &KeyImportOpt) -> Result<AdminRpc, Error> {
|
||||
let prev_key = self.garage.key_table.get(&EmptyKey, &query.key_id).await?;
|
||||
if prev_key.is_some() {
|
||||
return Err(Error::Message(format!("Key {} already exists in data store. Even if it is deleted, we can't let you create a new key with the same ID. Sorry.", query.key_id)));
|
||||
}
|
||||
let imported_key = Key::import(&query.key_id, &query.secret_key, &query.name);
|
||||
self.garage.key_table.insert(&imported_key).await?;
|
||||
Ok(AdminRpc::KeyInfo(imported_key))
|
||||
}
|
||||
|
||||
async fn get_existing_key(&self, pattern: &str) -> Result<Key, Error> {
|
||||
|
@@ -267,7 +399,7 @@ impl AdminRpcHandler {
|
|||
)
|
||||
.await?
|
||||
.into_iter()
|
||||
.filter(|k| !k.deleted.get())
|
||||
.filter(|k| !k.state.is_deleted())
|
||||
.collect::<Vec<_>>();
|
||||
if candidates.len() != 1 {
|
||||
Err(Error::Message(format!(
|
||||
|
@@ -279,54 +411,51 @@ impl AdminRpcHandler {
|
|||
}
|
||||
}
|
||||
|
||||
/// Update **key table** to inform of the new linked bucket
|
||||
async fn update_key_bucket(
|
||||
&self,
|
||||
key: &Key,
|
||||
bucket_id: Uuid,
|
||||
allow_read: bool,
|
||||
allow_write: bool,
|
||||
) -> Result<BucketKeyPerm, Error> {
|
||||
let mut key = key.clone();
|
||||
let mut key_state = key.state.as_option_mut().unwrap();
|
||||
|
||||
let perm = key_state
|
||||
.authorized_buckets
|
||||
.get(&bucket_id)
|
||||
.cloned()
|
||||
.map(|old_perm| BucketKeyPerm {
|
||||
timestamp: increment_logical_clock(old_perm.timestamp),
|
||||
allow_read,
|
||||
allow_write,
|
||||
})
|
||||
.unwrap_or(BucketKeyPerm {
|
||||
timestamp: now_msec(),
|
||||
allow_read,
|
||||
allow_write,
|
||||
});
|
||||
|
||||
key_state.authorized_buckets = Map::put_mutator(bucket_id, perm);
|
||||
|
||||
self.garage.key_table.insert(&key).await?;
|
||||
Ok(perm)
|
||||
}
|
||||
|
||||
/// Update **bucket table** to inform of the new linked key
|
||||
async fn update_bucket_key(
|
||||
&self,
|
||||
mut bucket: Bucket,
|
||||
key_id: &str,
|
||||
allow_read: bool,
|
||||
allow_write: bool,
|
||||
new_perm: BucketKeyPerm,
|
||||
) -> Result<(), Error> {
|
||||
if let BucketState::Present(params) = bucket.state.get_mut() {
|
||||
let ak = &mut params.authorized_keys;
|
||||
let old_ak = ak.take_and_clear();
|
||||
ak.merge(&old_ak.update_mutator(
|
||||
key_id.to_string(),
|
||||
PermissionSet {
|
||||
allow_read,
|
||||
allow_write,
|
||||
},
|
||||
));
|
||||
} else {
|
||||
return Err(Error::Message(
|
||||
"Bucket is deleted in update_bucket_key".to_string(),
|
||||
));
|
||||
}
|
||||
bucket.state.as_option_mut().unwrap().authorized_keys =
|
||||
Map::put_mutator(key_id.to_string(), new_perm);
|
||||
self.garage.bucket_table.insert(&bucket).await?;
|
||||
Ok(())
|
||||
}
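Both versions of `update_bucket_key` above rely on the same CRDT idiom: instead of rewriting the whole permission map, they build a delta containing only the touched entry (via `update_mutator` on the 0.5 `LwwMap`, or `put_mutator` on the 0.6 `Map`) and let the table merge propagate it. A minimal self-contained sketch of that idiom, using a simplified stand-in type rather than the real `garage_table` CRDTs:

```rust
use std::collections::BTreeMap;

// Simplified stand-in for an LWW map; names and fields are illustrative only.
#[derive(Clone)]
struct LwwMapSketch<K: Ord + Clone, V: Clone> {
	// each value carries the timestamp at which it was written
	vals: BTreeMap<K, (u64, V)>,
}

impl<K: Ord + Clone, V: Clone> LwwMapSketch<K, V> {
	// Build a single-entry delta whose timestamp is strictly newer than the
	// entry it replaces, so merging it anywhere updates only this key.
	fn update_mutator(&self, k: K, v: V, now: u64) -> Self {
		let ts = match self.vals.get(&k) {
			Some((old_ts, _)) => (*old_ts + 1).max(now),
			None => now,
		};
		let mut delta = LwwMapSketch { vals: BTreeMap::new() };
		delta.vals.insert(k, (ts, v));
		delta
	}

	// Last-writer-wins merge: for each key, keep the value with the larger timestamp.
	fn merge(&mut self, other: &Self) {
		for (k, (ts, v)) in &other.vals {
			match self.vals.get(k) {
				Some((cur_ts, _)) if cur_ts >= ts => {}
				_ => {
					self.vals.insert(k.clone(), (*ts, v.clone()));
				}
			}
		}
	}
}
```

Inserting only the delta into the table keeps the update limited to one key and commutes with concurrent updates to other keys, which is why both code paths go through a mutator before calling `insert`.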
|
||||
|
||||
/// Update **key table** to inform of the new linked bucket
|
||||
async fn update_key_bucket(
|
||||
&self,
|
||||
key: &Key,
|
||||
bucket: &str,
|
||||
allow_read: bool,
|
||||
allow_write: bool,
|
||||
) -> Result<(), Error> {
|
||||
let mut key = key.clone();
|
||||
let old_map = key.authorized_buckets.take_and_clear();
|
||||
key.authorized_buckets.merge(&old_map.update_mutator(
|
||||
bucket.to_string(),
|
||||
PermissionSet {
|
||||
allow_read,
|
||||
allow_write,
|
||||
},
|
||||
));
|
||||
self.garage.key_table.insert(&key).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn handle_launch_repair(self: &Arc<Self>, opt: RepairOpt) -> Result<AdminRpc, Error> {
|
||||
if !opt.yes {
|
||||
return Err(Error::BadRpc(
|
||||
|
|
|
@ -161,8 +161,11 @@ pub async fn cmd_admin(
|
|||
}
|
||||
AdminRpc::BucketList(bl) => {
|
||||
println!("List of buckets:");
|
||||
for bucket in bl {
|
||||
println!("{}", bucket);
|
||||
for alias in bl {
|
||||
if let Some(p) = alias.state.get().as_option() {
|
||||
let wflag = if p.website_access { "W" } else { " " };
|
||||
println!("- {} {} {:?}", wflag, alias.name, p.bucket_id);
|
||||
}
|
||||
}
|
||||
}
|
||||
AdminRpc::BucketInfo(bucket) => {
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
use garage_util::crdt::*;
|
||||
use garage_util::data::Uuid;
|
||||
use garage_util::error::*;
|
||||
|
||||
|
@ -8,26 +9,50 @@ pub fn print_key_info(key: &Key) {
|
|||
println!("Key name: {}", key.name.get());
|
||||
println!("Key ID: {}", key.key_id);
|
||||
println!("Secret key: {}", key.secret_key);
|
||||
if key.deleted.get() {
|
||||
println!("Key is deleted.");
|
||||
} else {
|
||||
println!("Authorized buckets:");
|
||||
for (b, _, perm) in key.authorized_buckets.items().iter() {
|
||||
println!("- {} R:{} W:{}", b, perm.allow_read, perm.allow_write);
|
||||
match &key.state {
|
||||
Deletable::Present(p) => {
|
||||
println!("\nKey-specific bucket aliases:");
|
||||
for (alias_name, _, alias) in p.local_aliases.items().iter() {
|
||||
if let Some(bucket_id) = alias.as_option() {
|
||||
println!("- {} {:?}", alias_name, bucket_id);
|
||||
}
|
||||
}
|
||||
println!("\nAuthorized buckets:");
|
||||
for (b, perm) in p.authorized_buckets.items().iter() {
|
||||
let rflag = if perm.allow_read { "R" } else { " " };
|
||||
let wflag = if perm.allow_write { "W" } else { " " };
|
||||
println!("- {}{} {:?}", rflag, wflag, b);
|
||||
}
|
||||
}
|
||||
Deletable::Deleted => {
|
||||
println!("\nKey is deleted.");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn print_bucket_info(bucket: &Bucket) {
|
||||
println!("Bucket name: {}", bucket.name);
|
||||
match bucket.state.get() {
|
||||
BucketState::Deleted => println!("Bucket is deleted."),
|
||||
BucketState::Present(p) => {
|
||||
println!("Authorized keys:");
|
||||
for (k, _, perm) in p.authorized_keys.items().iter() {
|
||||
println!("- {} R:{} W:{}", k, perm.allow_read, perm.allow_write);
|
||||
println!("Bucket: {}", hex::encode(bucket.id));
|
||||
match &bucket.state {
|
||||
Deletable::Deleted => println!("Bucket is deleted."),
|
||||
Deletable::Present(p) => {
|
||||
println!("\nGlobal aliases:");
|
||||
for (alias, _, active) in p.aliases.items().iter() {
|
||||
if *active {
|
||||
println!("- {}", alias);
|
||||
}
|
||||
}
|
||||
println!("\nKey-specific aliases:");
|
||||
for ((key_id, alias), _, active) in p.local_aliases.items().iter() {
|
||||
if *active {
|
||||
println!("- {} {}", key_id, alias);
|
||||
}
|
||||
}
|
||||
println!("\nAuthorized keys:");
|
||||
for (k, perm) in p.authorized_keys.items().iter() {
|
||||
let rflag = if perm.allow_read { "R" } else { " " };
|
||||
let wflag = if perm.allow_write { "W" } else { " " };
|
||||
println!("- {}{} {}", rflag, wflag, k);
|
||||
}
|
||||
println!("Website access: {}", p.website.get());
|
||||
}
|
||||
};
|
||||
}
|
||||
|
|
|
@ -77,7 +77,7 @@ impl Repair {
|
|||
let object = self
|
||||
.garage
|
||||
.object_table
|
||||
.get(&version.bucket, &version.key)
|
||||
.get(&version.bucket_id, &version.key)
|
||||
.await?;
|
||||
let version_exists = match object {
|
||||
Some(o) => o
|
||||
|
@ -92,7 +92,7 @@ impl Repair {
|
|||
.version_table
|
||||
.insert(&Version::new(
|
||||
version.uuid,
|
||||
version.bucket,
|
||||
version.bucket_id,
|
||||
version.key,
|
||||
true,
|
||||
))
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
[package]
|
||||
name = "garage_model"
|
||||
version = "0.5.0"
|
||||
version = "0.6.0"
|
||||
authors = ["Alex Auvolat <alex@adnab.me>"]
|
||||
edition = "2018"
|
||||
license = "AGPL-3.0"
|
||||
|
@ -14,9 +14,10 @@ path = "lib.rs"
|
|||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
garage_rpc = { version = "0.5.0", path = "../rpc" }
|
||||
garage_table = { version = "0.5.0", path = "../table" }
|
||||
garage_util = { version = "0.5.0", path = "../util" }
|
||||
garage_rpc = { version = "0.6.0", path = "../rpc" }
|
||||
garage_table = { version = "0.6.0", path = "../table" }
|
||||
garage_util = { version = "0.6.0", path = "../util" }
|
||||
garage_model_050 = { package = "garage_model", version = "0.5.0" }
|
||||
|
||||
async-trait = "0.1.7"
|
||||
arc-swap = "1.0"
|
||||
|
|
68
src/model/bucket_alias_table.rs
Normal file
|
@ -0,0 +1,68 @@
|
|||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use garage_table::crdt::*;
|
||||
use garage_table::*;
|
||||
use garage_util::data::*;
|
||||
|
||||
/// The bucket alias table holds the names given to buckets
|
||||
/// in the global namespace.
|
||||
#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
|
||||
pub struct BucketAlias {
|
||||
pub name: String,
|
||||
pub state: crdt::Lww<crdt::Deletable<AliasParams>>,
|
||||
}
|
||||
|
||||
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)]
|
||||
pub struct AliasParams {
|
||||
pub bucket_id: Uuid,
|
||||
pub website_access: bool,
|
||||
}
|
||||
|
||||
impl AutoCrdt for AliasParams {
|
||||
const WARN_IF_DIFFERENT: bool = true;
|
||||
}
|
||||
|
||||
impl BucketAlias {
|
||||
pub fn new(name: String, bucket_id: Uuid, website_access: bool) -> Self {
|
||||
BucketAlias {
|
||||
name,
|
||||
state: crdt::Lww::new(crdt::Deletable::present(AliasParams {
|
||||
bucket_id,
|
||||
website_access,
|
||||
})),
|
||||
}
|
||||
}
|
||||
pub fn is_deleted(&self) -> bool {
|
||||
self.state.get().is_deleted()
|
||||
}
|
||||
}
|
||||
|
||||
impl Crdt for BucketAlias {
|
||||
fn merge(&mut self, o: &Self) {
|
||||
self.state.merge(&o.state);
|
||||
}
|
||||
}
|
||||
|
||||
impl Entry<EmptyKey, String> for BucketAlias {
|
||||
fn partition_key(&self) -> &EmptyKey {
|
||||
&EmptyKey
|
||||
}
|
||||
fn sort_key(&self) -> &String {
|
||||
&self.name
|
||||
}
|
||||
}
|
||||
|
||||
pub struct BucketAliasTable;
|
||||
|
||||
impl TableSchema for BucketAliasTable {
|
||||
const TABLE_NAME: &'static str = "bucket_alias";
|
||||
|
||||
type P = EmptyKey;
|
||||
type S = String;
|
||||
type E = BucketAlias;
|
||||
type Filter = DeletedFilter;
|
||||
|
||||
fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool {
|
||||
filter.apply(entry.is_deleted())
|
||||
}
|
||||
}
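For orientation, this is roughly how the new table is meant to be queried (the `BucketHelper` added later in this PR wraps exactly this lookup): all aliases live in a single fully replicated partition (`EmptyKey`), so resolving a name to a bucket id is one point read followed by unwrapping the `Lww<Deletable<_>>`. A hedged sketch, assuming the imports of this file plus `crate::garage::Garage` and `garage_util::error::Error`:

```rust
async fn resolve_alias(garage: &Garage, name: &str) -> Result<Option<Uuid>, Error> {
	Ok(garage
		.bucket_alias_table
		.get(&EmptyKey, &name.to_string()) // partition key = EmptyKey, sort key = alias name
		.await?
		.and_then(|alias| alias.state.get().as_option().map(|p| p.bucket_id)))
}
```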
|
41
src/model/bucket_helper.rs
Normal file
|
@ -0,0 +1,41 @@
|
|||
use garage_util::data::*;
|
||||
use garage_util::error::*;
|
||||
|
||||
use garage_table::util::EmptyKey;
|
||||
|
||||
use crate::bucket_table::Bucket;
|
||||
use crate::garage::Garage;
|
||||
|
||||
pub struct BucketHelper<'a>(pub(crate) &'a Garage);
|
||||
|
||||
#[allow(clippy::ptr_arg)]
|
||||
lx marked this conversation as resolved
Outdated
trinity-1686a commented:
I think this lint could be one line lower, to only cover the "offending" function.
|
||||
impl<'a> BucketHelper<'a> {
|
||||
pub async fn resolve_global_bucket_name(
|
||||
&self,
|
||||
bucket_name: &String,
|
||||
) -> Result<Option<Uuid>, Error> {
|
||||
Ok(self
|
||||
.0
|
||||
.bucket_alias_table
|
||||
.get(&EmptyKey, bucket_name)
|
||||
.await?
|
||||
lx marked this conversation as resolved
Outdated
trinity-1686a commented:
Can't this pose a problem if someone creates a bucket whose name is hex of the right size?
lx commented:
I added a check of the validity of bucket names. AWS bucket names are at most 63 characters long, so there is no risk of a name being a hex UUID.
trinity-1686a commented:
Could you add a comment saying that?
|
||||
.map(|x| x.state.get().as_option().map(|x| x.bucket_id))
|
||||
.flatten())
|
||||
}
|
||||
|
||||
#[allow(clippy::ptr_arg)]
|
||||
pub async fn get_existing_bucket(&self, bucket_id: Uuid) -> Result<Bucket, Error> {
|
||||
self.0
|
||||
.bucket_table
|
||||
.get(&bucket_id, &EmptyKey)
|
||||
.await?
|
||||
.filter(|b| !b.is_deleted())
|
||||
.map(Ok)
|
||||
.unwrap_or_else(|| {
|
||||
Err(Error::BadRpc(format!(
|
||||
"Bucket {:?} does not exist",
|
||||
bucket_id
|
||||
)))
|
||||
})
|
||||
}
|
||||
lx marked this conversation as resolved
Outdated
trinity-1686a commented:
I don't think this lint is required.
|
||||
}
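Regarding the hex-name question in the thread above: a Garage bucket id is 32 bytes, i.e. 64 hex characters, while a valid (AWS-style) bucket name is at most 63 characters, so a name can never be mistaken for the hex form of an id. A hedged sketch of what such a validity check could look like (the exact rules enforced by the final code may differ):

```rust
fn is_valid_bucket_name(name: &str) -> bool {
	let len_ok = (3..=63).contains(&name.len());
	let chars_ok = name
		.chars()
		.all(|c| c.is_ascii_lowercase() || c.is_ascii_digit() || c == '.' || c == '-');
	let ends_ok = name
		.chars()
		.next()
		.zip(name.chars().last())
		.map(|(first, last)| first.is_ascii_alphanumeric() && last.is_ascii_alphanumeric())
		.unwrap_or(false);
	len_ok && chars_ok && ends_ok
}
```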
|
|
@ -2,8 +2,10 @@ use serde::{Deserialize, Serialize};
|
|||
|
||||
use garage_table::crdt::Crdt;
|
||||
use garage_table::*;
|
||||
use garage_util::data::*;
|
||||
use garage_util::time::*;
|
||||
|
||||
use crate::key_table::PermissionSet;
|
||||
use crate::permission::BucketKeyPerm;
|
||||
|
||||
/// A bucket is a collection of objects
|
||||
///
|
||||
lx marked this conversation as resolved
Outdated
trinity-1686a commented:
This is the equivalent of Garage's default config in 0.5.0:
```xml
<?xml version="1.0" encoding="UTF-8"?>
<WebsiteConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
	<IndexDocument>
		<Suffix>index.html</Suffix>
	</IndexDocument>
</WebsiteConfiguration>
```
I'm still not convinced this should be XML, not for performance reasons, but because I don't think we should store raw S3 payloads in general (and I generally don't like XML).
|
||||
|
@ -12,49 +14,38 @@ use crate::key_table::PermissionSet;
|
|||
/// - A bucket has 2 states, Present or Deleted and parameters make sense only if present.
|
||||
#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
|
||||
pub struct Bucket {
|
||||
/// Name of the bucket
|
||||
pub name: String,
|
||||
/// ID of the bucket
|
||||
pub id: Uuid,
|
||||
/// State, and configuration if not deleted, of the bucket
|
||||
pub state: crdt::Lww<BucketState>,
|
||||
}
|
||||
|
||||
/// State of a bucket
|
||||
#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
|
||||
pub enum BucketState {
|
||||
/// The bucket is deleted
|
||||
Deleted,
|
||||
/// The bucket exists
|
||||
Present(BucketParams),
|
||||
}
|
||||
|
||||
impl Crdt for BucketState {
|
||||
fn merge(&mut self, o: &Self) {
|
||||
match o {
|
||||
BucketState::Deleted => *self = BucketState::Deleted,
|
||||
BucketState::Present(other_params) => {
|
||||
if let BucketState::Present(params) = self {
|
||||
params.merge(other_params);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
pub state: crdt::Deletable<BucketParams>,
|
||||
}
|
||||
|
||||
/// Configuration for a bucket
|
||||
#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
|
||||
pub struct BucketParams {
|
||||
/// Bucket's creation date
|
||||
pub creation_date: u64,
|
||||
/// Map of key with access to the bucket, and what kind of access they give
|
||||
pub authorized_keys: crdt::LwwMap<String, PermissionSet>,
|
||||
/// Is the bucket served as http
|
||||
pub website: crdt::Lww<bool>,
|
||||
pub authorized_keys: crdt::Map<String, BucketKeyPerm>,
|
||||
/// Map of aliases that are or have been given to this bucket
|
||||
/// in the global namespace
|
||||
/// (not authoritative: this is just used as an indication to
|
||||
/// map back to aliases when doing ListBuckets)
|
||||
pub aliases: crdt::LwwMap<String, bool>,
|
||||
/// Map of aliases that are or have been given to this bucket
|
||||
lx marked this conversation as resolved
Outdated
trinity-1686a commented:
I'd argue only `website_config` should exist, with a `Some(_)` generated automatically when migrating from 0.5 with website enabled.
I also think this should probably contain a `WebsiteConfiguration` (or some flattened form of it), to not require parsing XML on each web request; however, doing so does have downsides if we add things to this struct in the future.
lx commented:
Concerning the first point (removing `website_access` and storing only the config as an option), I think yes, that's probably better because it's closer to how Garage works currently. I was hesitant because in AWS the permissions and the website config seem to be handled separately, but Garage has its own logic and doesn't implement AWS's ACLs, so yes, we can simplify here for now and come back to it later when/if we work on ACLs.
Concerning storing a WebsiteConfiguration instead of a ByteBuf, there are upsides and downsides.
Upsides:
- we don't parse XML at every web request
- the MessagePack representation is probably smaller
Downsides:
- we have to move the definition of WebsiteConfiguration into the model crate (for now we have kept very separate the structs that manage Garage's internal state and the structs that represent official items of the S3 spec); if we don't want to do this, the alternative is to have our own struct that keeps only the relevant aspects of a WebsiteConfiguration, but it adds more complexity
- we are now parsing MessagePack for the website configuration on all accesses to this entry, and not just when serving a website call
Alternatives:
- if we store XML, we can cache the parsed XML in the `web/` module for a nominal duration of a few seconds to avoid parsing at every request
- if handling the website config ends up taking too much CPU, we can move it to a separate table
- we could also implement optimisations in the `table/` module to keep a cache of deserialized versions of what is stored in the table
In other words, storing a struct here 1/ has disadvantages in terms of keeping a clean architecture, and 2/ looks to me a bit like a case of premature optimization (we can have a separate reflection later about how we approach performance on Garage tables, which is a whole topic on its own with multiple aspects such as caching, minimizing representation sizes, splitting things into separate tables, etc.).
|
||||
/// in namespaces local to keys
|
||||
/// key = (access key id, alias name)
|
||||
pub local_aliases: crdt::LwwMap<(String, String), bool>,
|
||||
}
|
||||
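For reference, one possible shape of the alternative trinity-1686a suggests in the thread above (storing a flattened website configuration instead of a raw S3 payload). This is not what the PR stores; the type and field names are illustrative only:

```rust
// Illustrative only: a flattened form of the S3 WebsiteConfiguration, so the
// web endpoint never parses XML. Not part of this PR.
#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
pub struct WebsiteConfigSketch {
	pub index_document: String,         // e.g. "index.html"
	pub error_document: Option<String>, // optional error page
}

// In BucketParams this would appear as:
//     pub website_config: crdt::Lww<Option<WebsiteConfigSketch>>,
// with Some(..) generated automatically when migrating a 0.5 bucket that had
// website access enabled, as suggested above.
```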
|
||||
impl BucketParams {
|
||||
/// Create an empty BucketParams with no authorized keys and no website access
|
||||
pub fn new() -> Self {
|
||||
BucketParams {
|
||||
authorized_keys: crdt::LwwMap::new(),
|
||||
website: crdt::Lww::new(false),
|
||||
creation_date: now_msec(),
|
||||
authorized_keys: crdt::Map::new(),
|
||||
aliases: crdt::LwwMap::new(),
|
||||
local_aliases: crdt::LwwMap::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -62,7 +53,14 @@ impl BucketParams {
|
|||
impl Crdt for BucketParams {
|
||||
fn merge(&mut self, o: &Self) {
|
||||
self.authorized_keys.merge(&o.authorized_keys);
|
||||
self.website.merge(&o.website);
|
||||
self.aliases.merge(&o.aliases);
|
||||
self.local_aliases.merge(&o.local_aliases);
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for Bucket {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -74,34 +72,34 @@ impl Default for BucketParams {
|
|||
|
||||
impl Bucket {
|
||||
/// Initializes a new instance of the Bucket struct
|
||||
pub fn new(name: String) -> Self {
|
||||
pub fn new() -> Self {
|
||||
Bucket {
|
||||
name,
|
||||
state: crdt::Lww::new(BucketState::Present(BucketParams::new())),
|
||||
id: gen_uuid(),
|
||||
state: crdt::Deletable::present(BucketParams::new()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns true if this represents a deleted bucket
|
||||
pub fn is_deleted(&self) -> bool {
|
||||
*self.state.get() == BucketState::Deleted
|
||||
self.state.is_deleted()
|
||||
}
|
||||
|
||||
/// Return the list of authorized keys, when each was updated, and the permission associated to
|
||||
/// the key
|
||||
pub fn authorized_keys(&self) -> &[(String, u64, PermissionSet)] {
|
||||
match self.state.get() {
|
||||
BucketState::Deleted => &[],
|
||||
BucketState::Present(state) => state.authorized_keys.items(),
|
||||
pub fn authorized_keys(&self) -> &[(String, BucketKeyPerm)] {
|
||||
match &self.state {
|
||||
crdt::Deletable::Deleted => &[],
|
||||
crdt::Deletable::Present(state) => state.authorized_keys.items(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Entry<EmptyKey, String> for Bucket {
|
||||
fn partition_key(&self) -> &EmptyKey {
|
||||
&EmptyKey
|
||||
impl Entry<Uuid, EmptyKey> for Bucket {
|
||||
fn partition_key(&self) -> &Uuid {
|
||||
&self.id
|
||||
}
|
||||
fn sort_key(&self) -> &String {
|
||||
&self.name
|
||||
fn sort_key(&self) -> &EmptyKey {
|
||||
&EmptyKey
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -114,10 +112,10 @@ impl Crdt for Bucket {
|
|||
pub struct BucketTable;
|
||||
|
||||
impl TableSchema for BucketTable {
|
||||
const TABLE_NAME: &'static str = "bucket";
|
||||
const TABLE_NAME: &'static str = "bucket_v2";
|
||||
|
||||
type P = EmptyKey;
|
||||
type S = String;
|
||||
type P = Uuid;
|
||||
type S = EmptyKey;
|
||||
type E = Bucket;
|
||||
type Filter = DeletedFilter;
|
||||
|
||||
|
|
|
@ -14,6 +14,8 @@ use garage_table::*;
|
|||
|
||||
use crate::block::*;
|
||||
use crate::block_ref_table::*;
|
||||
use crate::bucket_alias_table::*;
|
||||
use crate::bucket_helper::*;
|
||||
use crate::bucket_table::*;
|
||||
use crate::key_table::*;
|
||||
use crate::object_table::*;
|
||||
|
@ -35,6 +37,8 @@ pub struct Garage {
|
|||
|
||||
/// Table containing informations about buckets
|
||||
pub bucket_table: Arc<Table<BucketTable, TableFullReplication>>,
|
||||
/// Table containing informations about bucket aliases
|
||||
pub bucket_alias_table: Arc<Table<BucketAliasTable, TableFullReplication>>,
|
||||
/// Table containing informations about api keys
|
||||
pub key_table: Arc<Table<KeyTable, TableFullReplication>>,
|
||||
|
||||
|
@ -120,6 +124,14 @@ impl Garage {
|
|||
info!("Initialize bucket_table...");
|
||||
let bucket_table = Table::new(BucketTable, control_rep_param.clone(), system.clone(), &db);
|
||||
|
||||
info!("Initialize bucket_alias_table...");
|
||||
let bucket_alias_table = Table::new(
|
||||
BucketAliasTable,
|
||||
control_rep_param.clone(),
|
||||
system.clone(),
|
||||
&db,
|
||||
);
|
||||
|
||||
info!("Initialize key_table_table...");
|
||||
let key_table = Table::new(KeyTable, control_rep_param, system.clone(), &db);
|
||||
|
||||
|
@ -131,6 +143,7 @@ impl Garage {
|
|||
system,
|
||||
block_manager,
|
||||
bucket_table,
|
||||
bucket_alias_table,
|
||||
key_table,
|
||||
object_table,
|
||||
version_table,
|
||||
|
@ -148,4 +161,8 @@ impl Garage {
|
|||
pub fn break_reference_cycles(&self) {
|
||||
self.block_manager.garage.swap(None);
|
||||
}
|
||||
|
||||
pub fn bucket_helper(&self) -> BucketHelper {
|
||||
BucketHelper(self)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -2,6 +2,9 @@ use serde::{Deserialize, Serialize};
|
|||
|
||||
use garage_table::crdt::*;
|
||||
use garage_table::*;
|
||||
use garage_util::data::*;
|
||||
|
||||
use crate::permission::BucketKeyPerm;
|
||||
|
||||
/// An api key
|
||||
#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
|
||||
|
@ -15,12 +18,39 @@ pub struct Key {
|
|||
/// Name for the key
|
||||
pub name: crdt::Lww<String>,
|
||||
|
||||
/// Is the key deleted
|
||||
pub deleted: crdt::Bool,
|
||||
/// If the key is present: it gives some permissions,
|
||||
/// a map of bucket IDs (uuids) to permissions.
|
||||
/// Otherwise no permissions are granted to key
|
||||
pub state: crdt::Deletable<KeyParams>,
|
||||
}
|
||||
|
||||
/// Buckets in which the key is authorized. Empty if `Key` is deleted
|
||||
// CRDT interaction: deleted implies authorized_buckets is empty
|
||||
pub authorized_buckets: crdt::LwwMap<String, PermissionSet>,
|
||||
/// Configuration for a key
|
||||
#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
|
||||
pub struct KeyParams {
|
||||
pub authorized_buckets: crdt::Map<Uuid, BucketKeyPerm>,
|
||||
pub local_aliases: crdt::LwwMap<String, crdt::Deletable<Uuid>>,
|
||||
}
|
||||
|
||||
impl KeyParams {
|
||||
pub fn new() -> Self {
|
||||
KeyParams {
|
||||
authorized_buckets: crdt::Map::new(),
|
||||
local_aliases: crdt::LwwMap::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for KeyParams {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl Crdt for KeyParams {
|
||||
fn merge(&mut self, o: &Self) {
|
||||
self.authorized_buckets.merge(&o.authorized_buckets);
|
||||
self.local_aliases.merge(&o.local_aliases);
|
||||
}
|
||||
}
|
||||
|
||||
impl Key {
|
||||
|
@ -32,8 +62,7 @@ impl Key {
|
|||
key_id,
|
||||
secret_key,
|
||||
name: crdt::Lww::new(name),
|
||||
deleted: crdt::Bool::new(false),
|
||||
authorized_buckets: crdt::LwwMap::new(),
|
||||
state: crdt::Deletable::present(KeyParams::new()),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -43,8 +72,7 @@ impl Key {
|
|||
key_id: key_id.to_string(),
|
||||
secret_key: secret_key.to_string(),
|
||||
name: crdt::Lww::new(name.to_string()),
|
||||
deleted: crdt::Bool::new(false),
|
||||
authorized_buckets: crdt::LwwMap::new(),
|
||||
state: crdt::Deletable::present(KeyParams::new()),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -54,41 +82,37 @@ impl Key {
|
|||
key_id,
|
||||
secret_key: "".into(),
|
||||
name: crdt::Lww::new("".to_string()),
|
||||
deleted: crdt::Bool::new(true),
|
||||
authorized_buckets: crdt::LwwMap::new(),
|
||||
state: crdt::Deletable::Deleted,
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if `Key` is allowed to read in bucket
|
||||
pub fn allow_read(&self, bucket: &str) -> bool {
|
||||
self.authorized_buckets
|
||||
.get(&bucket.to_string())
|
||||
.map(|x| x.allow_read)
|
||||
.unwrap_or(false)
|
||||
pub fn allow_read(&self, bucket: &Uuid) -> bool {
|
||||
if let crdt::Deletable::Present(params) = &self.state {
|
||||
params
|
||||
.authorized_buckets
|
||||
.get(bucket)
|
||||
.map(|x| x.allow_read)
|
||||
.unwrap_or(false)
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if `Key` is allowed to write in bucket
|
||||
pub fn allow_write(&self, bucket: &str) -> bool {
|
||||
self.authorized_buckets
|
||||
.get(&bucket.to_string())
|
||||
.map(|x| x.allow_write)
|
||||
.unwrap_or(false)
|
||||
pub fn allow_write(&self, bucket: &Uuid) -> bool {
|
||||
if let crdt::Deletable::Present(params) = &self.state {
|
||||
params
|
||||
.authorized_buckets
|
||||
.get(bucket)
|
||||
.map(|x| x.allow_write)
|
||||
.unwrap_or(false)
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
}
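With these signatures, the API layer checks permissions against a bucket id obtained from alias resolution rather than against the bucket name itself. A hedged usage sketch (the function and its error message are illustrative, not code from this PR; `Error::Message` is the variant used elsewhere in the diff):

```rust
fn check_write_access(api_key: &Key, bucket_id: Uuid) -> Result<(), Error> {
	if api_key.allow_write(&bucket_id) {
		Ok(())
	} else {
		Err(Error::Message(format!(
			"Key {} has no write access to bucket {:?}",
			api_key.key_id, bucket_id
		)))
	}
}
```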
|
||||
|
||||
/// Permission given to a key in a bucket
|
||||
#[derive(PartialOrd, Ord, PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
||||
pub struct PermissionSet {
|
||||
/// The key can be used to read the bucket
|
||||
pub allow_read: bool,
|
||||
/// The key can be used to write in the bucket
|
||||
pub allow_write: bool,
|
||||
}
|
||||
|
||||
impl AutoCrdt for PermissionSet {
|
||||
const WARN_IF_DIFFERENT: bool = true;
|
||||
}
|
||||
|
||||
impl Entry<EmptyKey, String> for Key {
|
||||
fn partition_key(&self) -> &EmptyKey {
|
||||
&EmptyKey
|
||||
|
@ -101,13 +125,7 @@ impl Entry<EmptyKey, String> for Key {
|
|||
impl Crdt for Key {
|
||||
fn merge(&mut self, other: &Self) {
|
||||
self.name.merge(&other.name);
|
||||
self.deleted.merge(&other.deleted);
|
||||
|
||||
if self.deleted.get() {
|
||||
self.authorized_buckets.clear();
|
||||
} else {
|
||||
self.authorized_buckets.merge(&other.authorized_buckets);
|
||||
}
|
||||
self.state.merge(&other.state);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -129,7 +147,7 @@ impl TableSchema for KeyTable {
|
|||
|
||||
fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool {
|
||||
match filter {
|
||||
KeyFilter::Deleted(df) => df.apply(entry.deleted.get()),
|
||||
KeyFilter::Deleted(df) => df.apply(entry.state.is_deleted()),
|
||||
KeyFilter::Matches(pat) => {
|
||||
let pat = pat.to_lowercase();
|
||||
entry.key_id.to_lowercase().starts_with(&pat)
|
||||
|
|
|
@ -3,8 +3,11 @@ extern crate log;
|
|||
|
||||
pub mod block;
|
||||
pub mod block_ref_table;
|
||||
pub mod bucket_alias_table;
|
||||
pub mod bucket_helper;
|
||||
pub mod bucket_table;
|
||||
pub mod garage;
|
||||
pub mod key_table;
|
||||
pub mod object_table;
|
||||
pub mod permission;
|
||||
pub mod version_table;
|
||||
|
|
|
@ -15,7 +15,7 @@ use crate::version_table::*;
|
|||
#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
|
||||
pub struct Object {
|
||||
/// The bucket in which the object is stored, used as partition key
|
||||
pub bucket: String,
|
||||
pub bucket_id: Uuid,
|
||||
|
||||
/// The key at which the object is stored in its bucket, used as sorting key
|
||||
pub key: String,
|
||||
|
@ -26,9 +26,9 @@ pub struct Object {
|
|||
|
||||
impl Object {
|
||||
/// Initialize an Object struct from parts
|
||||
pub fn new(bucket: String, key: String, versions: Vec<ObjectVersion>) -> Self {
|
||||
pub fn new(bucket_id: Uuid, key: String, versions: Vec<ObjectVersion>) -> Self {
|
||||
let mut ret = Self {
|
||||
bucket,
|
||||
bucket_id,
|
||||
key,
|
||||
versions: vec![],
|
||||
};
|
||||
|
@ -164,9 +164,9 @@ impl ObjectVersion {
|
|||
}
|
||||
}
|
||||
|
||||
impl Entry<String, String> for Object {
|
||||
fn partition_key(&self) -> &String {
|
||||
&self.bucket
|
||||
impl Entry<Uuid, String> for Object {
|
||||
fn partition_key(&self) -> &Uuid {
|
||||
&self.bucket_id
|
||||
}
|
||||
fn sort_key(&self) -> &String {
|
||||
&self.key
|
||||
|
@ -219,7 +219,7 @@ pub struct ObjectTable {
|
|||
impl TableSchema for ObjectTable {
|
||||
const TABLE_NAME: &'static str = "object";
|
||||
|
||||
type P = String;
|
||||
type P = Uuid;
|
||||
type S = String;
|
||||
type E = Object;
|
||||
type Filter = DeletedFilter;
|
||||
|
@ -242,7 +242,7 @@ impl TableSchema for ObjectTable {
|
|||
};
|
||||
if newly_deleted {
|
||||
let deleted_version =
|
||||
Version::new(v.uuid, old_v.bucket.clone(), old_v.key.clone(), true);
|
||||
Version::new(v.uuid, old_v.bucket_id, old_v.key.clone(), true);
|
||||
version_table.insert(&deleted_version).await?;
|
||||
}
|
||||
}
|
||||
|
|
37
src/model/permission.rs
Normal file
|
@ -0,0 +1,37 @@
|
|||
use std::cmp::Ordering;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use garage_util::crdt::*;
|
||||
|
||||
/// Permission given to a key in a bucket
|
||||
#[derive(PartialOrd, Ord, PartialEq, Eq, Clone, Copy, Debug, Serialize, Deserialize)]
|
||||
pub struct BucketKeyPerm {
|
||||
/// Timestamp at which the permission was given
|
||||
pub timestamp: u64,
|
||||
|
||||
/// The key can be used to read the bucket
|
||||
pub allow_read: bool,
|
||||
/// The key can be used to write in the bucket
|
||||
pub allow_write: bool,
|
||||
}
|
||||
|
||||
impl Crdt for BucketKeyPerm {
|
||||
fn merge(&mut self, other: &Self) {
|
||||
match other.timestamp.cmp(&self.timestamp) {
|
||||
Ordering::Greater => {
|
||||
*self = *other;
|
||||
}
|
||||
Ordering::Equal if other != self => {
|
||||
warn!("Different permission sets with same timestamp: {:?} and {:?}, merging to most restricted permission set.", self, other);
|
||||
if !other.allow_read {
|
||||
self.allow_read = false;
|
||||
}
|
||||
if !other.allow_write {
|
||||
self.allow_write = false;
|
||||
}
|
||||
}
|
||||
_ => (),
|
||||
}
|
||||
}
|
||||
lx marked this conversation as resolved
Outdated
trinity-1686a commented:
allow_owner is not merged. Is it semantically read-only and hence doesn't need to be merged (if so, please document it), or was it just forgotten?
lx commented:
Just forgotten, thx.
|
||||
}
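Following up on the thread above, a hedged sketch of what the fixed merge could look like once an `allow_owner: bool` flag exists (the flag is mentioned in the thread but not shown in this hunk; the warning on conflicting sets is elided). On a timestamp tie, each permission bit independently falls back to the most restrictive value:

```rust
impl BucketKeyPerm {
	// Sketch only: not the code in this PR.
	fn merge_sketch(&mut self, other: &Self) {
		match other.timestamp.cmp(&self.timestamp) {
			Ordering::Greater => *self = *other,
			Ordering::Equal if other != self => {
				self.allow_read &= other.allow_read;
				self.allow_write &= other.allow_write;
				// self.allow_owner &= other.allow_owner; // once the field exists
			}
			_ => (),
		}
	}
}
```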
|
|
@ -29,19 +29,19 @@ pub struct Version {
|
|||
// Back link to bucket+key so that we can figure if
|
||||
// this was deleted later on
|
||||
/// Bucket in which the related object is stored
|
||||
pub bucket: String,
|
||||
pub bucket_id: Uuid,
|
||||
/// Key in which the related object is stored
|
||||
pub key: String,
|
||||
}
|
||||
|
||||
impl Version {
|
||||
pub fn new(uuid: Uuid, bucket: String, key: String, deleted: bool) -> Self {
|
||||
pub fn new(uuid: Uuid, bucket_id: Uuid, key: String, deleted: bool) -> Self {
|
||||
Self {
|
||||
uuid,
|
||||
deleted: deleted.into(),
|
||||
blocks: crdt::Map::new(),
|
||||
parts_etags: crdt::Map::new(),
|
||||
bucket,
|
||||
bucket_id,
|
||||
key,
|
||||
}
|
||||
}
|
||||
|
@ -82,8 +82,8 @@ impl AutoCrdt for VersionBlock {
|
|||
const WARN_IF_DIFFERENT: bool = true;
|
||||
}
|
||||
|
||||
impl Entry<Hash, EmptyKey> for Version {
|
||||
fn partition_key(&self) -> &Hash {
|
||||
impl Entry<Uuid, EmptyKey> for Version {
|
||||
fn partition_key(&self) -> &Uuid {
|
||||
&self.uuid
|
||||
}
|
||||
fn sort_key(&self) -> &EmptyKey {
|
||||
|
@ -116,7 +116,7 @@ pub struct VersionTable {
|
|||
impl TableSchema for VersionTable {
|
||||
const TABLE_NAME: &'static str = "version";
|
||||
|
||||
type P = Hash;
|
||||
type P = Uuid;
|
||||
type S = EmptyKey;
|
||||
type E = Version;
|
||||
type Filter = DeletedFilter;
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
[package]
|
||||
name = "garage_rpc"
|
||||
version = "0.5.0"
|
||||
version = "0.6.0"
|
||||
authors = ["Alex Auvolat <alex@adnab.me>"]
|
||||
edition = "2018"
|
||||
license = "AGPL-3.0"
|
||||
|
@ -14,7 +14,7 @@ path = "lib.rs"
|
|||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
garage_util = { version = "0.5.0", path = "../util" }
|
||||
garage_util = { version = "0.6.0", path = "../util" }
|
||||
|
||||
arc-swap = "1.0"
|
||||
bytes = "1.0"
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
[package]
|
||||
name = "garage_table"
|
||||
version = "0.5.0"
|
||||
version = "0.6.0"
|
||||
authors = ["Alex Auvolat <alex@adnab.me>"]
|
||||
edition = "2018"
|
||||
license = "AGPL-3.0"
|
||||
|
@ -14,8 +14,8 @@ path = "lib.rs"
|
|||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
garage_rpc = { version = "0.5.0", path = "../rpc" }
|
||||
garage_util = { version = "0.5.0", path = "../util" }
|
||||
garage_rpc = { version = "0.6.0", path = "../rpc" }
|
||||
garage_util = { version = "0.6.0", path = "../util" }
|
||||
|
||||
async-trait = "0.1.7"
|
||||
bytes = "1.0"
|
||||
|
|
|
@ -16,7 +16,7 @@ impl PartitionKey for String {
|
|||
}
|
||||
}
|
||||
|
||||
impl PartitionKey for Hash {
|
||||
impl PartitionKey for FixedBytes32 {
|
||||
fn hash(&self) -> Hash {
|
||||
*self
|
||||
}
|
||||
|
@ -34,7 +34,7 @@ impl SortKey for String {
|
|||
}
|
||||
}
|
||||
|
||||
impl SortKey for Hash {
|
||||
impl SortKey for FixedBytes32 {
|
||||
fn sort_key(&self) -> &[u8] {
|
||||
self.as_slice()
|
||||
}
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
[package]
|
||||
name = "garage_util"
|
||||
version = "0.5.0"
|
||||
version = "0.6.0"
|
||||
authors = ["Alex Auvolat <alex@adnab.me>"]
|
||||
edition = "2018"
|
||||
license = "AGPL-3.0"
|
||||
|
|
72
src/util/crdt/deletable.rs
Normal file
|
@ -0,0 +1,72 @@
|
|||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::crdt::crdt::*;
|
||||
|
||||
/// Deletable object (once deleted, cannot go back)
|
||||
#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq)]
|
||||
pub enum Deletable<T> {
|
||||
Present(T),
|
||||
Deleted,
|
||||
}
|
||||
|
||||
impl<T: Crdt> Deletable<T> {
|
||||
/// Create a new deletable object that isn't deleted
|
||||
pub fn present(v: T) -> Self {
|
||||
Self::Present(v)
|
||||
}
|
||||
/// Create a new deletable object that is deleted
|
||||
pub fn delete() -> Self {
|
||||
Self::Deleted
|
||||
}
|
||||
/// As option
|
||||
pub fn as_option(&self) -> Option<&T> {
|
||||
match self {
|
||||
Self::Present(v) => Some(v),
|
||||
Self::Deleted => None,
|
||||
}
|
||||
}
|
||||
/// As option, mutable
|
||||
pub fn as_option_mut(&mut self) -> Option<&mut T> {
|
||||
match self {
|
||||
Self::Present(v) => Some(v),
|
||||
Self::Deleted => None,
|
||||
}
|
||||
}
|
||||
/// Into option
|
||||
pub fn into_option(self) -> Option<T> {
|
||||
match self {
|
||||
Self::Present(v) => Some(v),
|
||||
Self::Deleted => None,
|
||||
}
|
||||
}
|
||||
/// Is object deleted?
|
||||
pub fn is_deleted(&self) -> bool {
|
||||
matches!(self, Self::Deleted)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> From<Option<T>> for Deletable<T> {
|
||||
fn from(v: Option<T>) -> Self {
|
||||
v.map(Self::Present).unwrap_or(Self::Deleted)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> From<Deletable<T>> for Option<T> {
|
||||
fn from(v: Deletable<T>) -> Option<T> {
|
||||
match v {
|
||||
Deletable::Present(v) => Some(v),
|
||||
Deletable::Deleted => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Crdt> Crdt for Deletable<T> {
	fn merge(&mut self, other: &Self) {
		if let Deletable::Present(v) = self {
			match other {
				Deletable::Deleted => *self = Deletable::Deleted,
				Deletable::Present(v2) => v.merge(v2),
			}
		}
	}
}
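A small usage example of the merge semantics above: deletion is absorbing, so once a replica has merged `Deleted` it can never go back to `Present`. `Unit` is a trivial stand-in payload; any `T: Crdt` behaves the same way:

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
struct Unit;
impl Crdt for Unit {
	fn merge(&mut self, _other: &Self) {}
}

fn deletable_semantics() {
	let mut a = Deletable::present(Unit);
	a.merge(&Deletable::delete());
	assert!(a.is_deleted()); // merge(Present, Deleted) = Deleted

	let mut b: Deletable<Unit> = Deletable::delete();
	b.merge(&Deletable::present(Unit));
	assert!(b.is_deleted()); // merge(Deleted, Present) = Deleted: no undelete
}
```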
|
|
@ -82,6 +82,11 @@ where
|
|||
&self.v
|
||||
}
|
||||
|
||||
/// Take the value inside the CRDT (discards the timestamp)
|
||||
pub fn take(self) -> T {
|
||||
self.v
|
||||
}
|
||||
|
||||
/// Get a mutable reference to the CRDT's value
|
||||
///
|
||||
/// This is useful to mutate the inside value without changing the LWW timestamp.
|
||||
|
|
|
@ -30,8 +30,8 @@ pub struct LwwMap<K, V> {
|
|||
|
||||
impl<K, V> LwwMap<K, V>
|
||||
where
|
||||
K: Ord,
|
||||
V: Crdt,
|
||||
K: Clone + Ord,
|
||||
V: Clone + Crdt,
|
||||
{
|
||||
/// Create a new empty map CRDT
|
||||
pub fn new() -> Self {
|
||||
|
@ -73,6 +73,10 @@ where
|
|||
};
|
||||
Self { vals: new_vals }
|
||||
}
|
||||
|
||||
	pub fn update_in_place(&mut self, k: K, new_v: V) {
		self.merge(&self.update_mutator(k, new_v));
	}
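Usage sketch for the helper added above: it is equivalent to building the single-entry delta with `update_mutator` and merging it back, in one call. The same pattern backs `BucketParams::aliases` (an `LwwMap<String, bool>`), where retiring an alias is just a newer write of `false`:

```rust
fn aliases_example() {
	let mut aliases: LwwMap<String, bool> = LwwMap::new();
	aliases.update_in_place("my-bucket".to_string(), true); // alias active
	aliases.update_in_place("my-bucket".to_string(), false); // later: alias retired
	assert_eq!(aliases.get(&"my-bucket".to_string()), Some(&false));
}
```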
|
||||
/// Takes all of the values of the map and returns them. The current map is reset to the
|
||||
/// empty map. This is very useful to produce in-place a new map that contains only a delta
|
||||
/// that modifies a certain value:
|
||||
|
@ -158,8 +162,8 @@ where
|
|||
|
||||
impl<K, V> Default for LwwMap<K, V>
|
||||
where
|
||||
K: Ord,
|
||||
V: Crdt,
|
||||
K: Clone + Ord,
|
||||
V: Clone + Crdt,
|
||||
{
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
|
|
|
@ -12,12 +12,14 @@
|
|||
mod bool;
|
||||
#[allow(clippy::module_inception)]
|
||||
mod crdt;
|
||||
mod deletable;
|
||||
mod lww;
|
||||
mod lww_map;
|
||||
mod map;
|
||||
|
||||
pub use self::bool::*;
|
||||
pub use crdt::*;
|
||||
pub use deletable::*;
|
||||
pub use lww::*;
|
||||
pub use lww_map::*;
|
||||
pub use map::*;
|
||||
|
|
|
@ -119,6 +119,35 @@ where
|
|||
}
|
||||
}
|
||||
|
||||
/// Trait to map error to the Bad Request error code
|
||||
pub trait OkOrMessage {
|
||||
type S2;
|
||||
fn ok_or_message<M: Into<String>>(self, message: M) -> Self::S2;
|
||||
}
|
||||
|
||||
impl<T, E> OkOrMessage for Result<T, E>
|
||||
where
|
||||
E: std::fmt::Display,
|
||||
{
|
||||
type S2 = Result<T, Error>;
|
||||
fn ok_or_message<M: Into<String>>(self, message: M) -> Result<T, Error> {
|
||||
match self {
|
||||
Ok(x) => Ok(x),
|
||||
Err(e) => Err(Error::Message(format!("{}: {}", message.into(), e))),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> OkOrMessage for Option<T> {
	type S2 = Result<T, Error>;
	fn ok_or_message<M: Into<String>>(self, message: M) -> Result<T, Error> {
		match self {
			Some(x) => Ok(x),
			None => Err(Error::Message(message.into())),
		}
	}
}
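Usage sketch for the trait above: it attaches context while converting an `Option` (or any error whose type implements `Display`) into `Error::Message`. The `HashMap` here is a stand-in, not a Garage table:

```rust
use std::collections::HashMap;

fn lookup_example(sizes: &HashMap<String, u64>, bucket: &str) -> Result<u64, Error> {
	sizes
		.get(bucket)
		.copied()
		.ok_or_message(format!("no entry for bucket {}", bucket))
}
```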
|
||||
|
||||
// Custom serialization for our error type, for use in RPC.
|
||||
// Errors are serialized as a string of their Display representation.
|
||||
// Upon deserialization, they all become a RemoteError with the
|
||||
|
|
|
@ -10,6 +10,11 @@ pub fn now_msec() -> u64 {
|
|||
.as_millis() as u64
|
||||
}
|
||||
|
||||
/// Increment logical clock
pub fn increment_logical_clock(prev: u64) -> u64 {
	std::cmp::max(prev + 1, now_msec())
}
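This is what admin.rs uses above when bumping a `BucketKeyPerm` timestamp: successive calls always return strictly increasing values, even if the wall clock has not advanced (or went backwards), while still catching up to real time whenever `now_msec()` is ahead. A small property sketch:

```rust
fn logical_clock_example(start: u64) {
	let t1 = increment_logical_clock(start);
	let t2 = increment_logical_clock(t1);
	// Strictly increasing regardless of what the wall clock does:
	assert!(start < t1 && t1 < t2);
}
```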
|
||||
|
||||
/// Convert a timestamp represented as milliseconds since UNIX Epoch to
|
||||
/// its RFC3339 representation, such as "2021-01-01T12:30:00Z"
|
||||
pub fn msec_to_rfc3339(msecs: u64) -> String {
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
[package]
|
||||
name = "garage_web"
|
||||
version = "0.5.0"
|
||||
version = "0.6.0"
|
||||
authors = ["Alex Auvolat <alex@adnab.me>", "Quentin Dufour <quentin@dufour.io>"]
|
||||
edition = "2018"
|
||||
license = "AGPL-3.0"
|
||||
|
@ -14,10 +14,10 @@ path = "lib.rs"
|
|||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
garage_api = { version = "0.5.0", path = "../api" }
|
||||
garage_model = { version = "0.5.0", path = "../model" }
|
||||
garage_util = { version = "0.5.0", path = "../util" }
|
||||
garage_table = { version = "0.5.0", path = "../table" }
|
||||
garage_api = { version = "0.6.0", path = "../api" }
|
||||
garage_model = { version = "0.6.0", path = "../model" }
|
||||
garage_util = { version = "0.6.0", path = "../util" }
|
||||
garage_table = { version = "0.6.0", path = "../table" }
|
||||
|
||||
err-derive = "0.3"
|
||||
log = "0.4"
|
||||
|
|
|
@ -12,7 +12,6 @@ use hyper::{
|
|||
use crate::error::*;
|
||||
use garage_api::helpers::{authority_to_host, host_to_bucket};
|
||||
use garage_api::s3_get::{handle_get, handle_head};
|
||||
use garage_model::bucket_table::*;
|
||||
use garage_model::garage::Garage;
|
||||
use garage_table::*;
|
||||
use garage_util::error::Error as GarageError;
|
||||
|
@ -77,31 +76,39 @@ async fn serve_file(garage: Arc<Garage>, req: Request<Body>) -> Result<Response<
|
|||
// Get bucket
|
||||
let host = authority_to_host(authority)?;
|
||||
let root = &garage.config.s3_web.root_domain;
|
||||
let bucket = host_to_bucket(&host, root).unwrap_or(&host);
|
||||
|
||||
// Check bucket is exposed as a website
|
||||
let bucket_desc = garage
|
||||
let bucket_name = host_to_bucket(&host, root).unwrap_or(&host);
|
||||
let bucket_id = garage
|
||||
.bucket_alias_table
|
||||
.get(&EmptyKey, &bucket_name.to_string())
|
||||
.await?
|
||||
.map(|x| x.state.take().into_option())
|
||||
.flatten()
|
||||
.filter(|param| param.website_access)
|
||||
.map(|param| param.bucket_id)
|
||||
.ok_or(Error::NotFound)?;
|
||||
|
||||
// Sanity check: check bucket isn't deleted
|
||||
garage
|
||||
.bucket_table
|
||||
.get(&EmptyKey, &bucket.to_string())
|
||||
.get(&bucket_id, &EmptyKey)
|
||||
.await?
|
||||
.filter(|b| !b.is_deleted())
|
||||
.ok_or(Error::NotFound)?;
|
||||
|
||||
match bucket_desc.state.get() {
|
||||
BucketState::Present(params) if *params.website.get() => Ok(()),
|
||||
_ => Err(Error::NotFound),
|
||||
}?;
|
||||
|
||||
// Get path
|
||||
let path = req.uri().path().to_string();
|
||||
let index = &garage.config.s3_web.index;
|
||||
let key = path_to_key(&path, index)?;
|
||||
|
||||
info!("Selected bucket: \"{}\", selected key: \"{}\"", bucket, key);
|
||||
info!(
|
||||
"Selected bucket: \"{}\" {:?}, selected key: \"{}\"",
|
||||
bucket_name, bucket_id, key
|
||||
);
|
||||
|
||||
let res = match *req.method() {
|
||||
Method::HEAD => handle_head(garage, &req, bucket, &key).await?,
|
||||
Method::GET => handle_get(garage, &req, bucket, &key).await?,
|
||||
Method::HEAD => handle_head(garage, &req, bucket_id, &key).await?,
|
||||
Method::GET => handle_get(garage, &req, bucket_id, &key).await?,
|
||||
_ => return Err(Error::BadRequest("HTTP method not supported".to_string())),
|
||||
};
|
||||
|
||||
|
|
There is a `.get_bucket()` on `Endpoint` which can be used to do the same thing more concisely.