1
0
Fork 0
forked from Deuxfleurs/garage

New model for buckets

This commit is contained in:
Alex 2021-12-14 13:55:11 +01:00
parent 8f6026de5e
commit 5b1117e582
No known key found for this signature in database
GPG key ID: EDABF9711E244EB1
38 changed files with 1173 additions and 496 deletions

138
Cargo.lock generated
View file

@ -382,17 +382,17 @@ dependencies = [
[[package]] [[package]]
name = "garage" name = "garage"
version = "0.5.0" version = "0.6.0"
dependencies = [ dependencies = [
"async-trait", "async-trait",
"bytes 1.1.0", "bytes 1.1.0",
"futures", "futures",
"futures-util", "futures-util",
"garage_api", "garage_api",
"garage_model", "garage_model 0.6.0",
"garage_rpc", "garage_rpc 0.6.0",
"garage_table", "garage_table 0.6.0",
"garage_util", "garage_util 0.6.0",
"garage_web", "garage_web",
"git-version", "git-version",
"hex", "hex",
@ -411,7 +411,7 @@ dependencies = [
[[package]] [[package]]
name = "garage_api" name = "garage_api"
version = "0.5.0" version = "0.6.0"
dependencies = [ dependencies = [
"base64", "base64",
"bytes 1.1.0", "bytes 1.1.0",
@ -420,9 +420,9 @@ dependencies = [
"err-derive 0.3.0", "err-derive 0.3.0",
"futures", "futures",
"futures-util", "futures-util",
"garage_model", "garage_model 0.6.0",
"garage_table", "garage_table 0.6.0",
"garage_util", "garage_util 0.6.0",
"hex", "hex",
"hmac", "hmac",
"http", "http",
@ -444,14 +444,39 @@ dependencies = [
[[package]] [[package]]
name = "garage_model" name = "garage_model"
version = "0.5.0" version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c56150ee02bc26c77996b19fee0851f7d53cf42ae80370a8cf3a5dd5bb0bba76"
dependencies = [ dependencies = [
"arc-swap", "arc-swap",
"async-trait", "async-trait",
"futures", "futures",
"futures-util", "futures-util",
"garage_rpc", "garage_rpc 0.5.0",
"garage_table", "garage_table 0.5.0",
"garage_util", "garage_util 0.5.0",
"hex",
"log",
"netapp",
"rand",
"rmp-serde 0.15.5",
"serde",
"serde_bytes",
"sled",
"tokio",
]
[[package]]
name = "garage_model"
version = "0.6.0"
dependencies = [
"arc-swap",
"async-trait",
"futures",
"futures-util",
"garage_model 0.5.0",
"garage_rpc 0.6.0",
"garage_table 0.6.0",
"garage_util 0.6.0",
"hex", "hex",
"log", "log",
"netapp", "netapp",
@ -467,13 +492,40 @@ dependencies = [
[[package]] [[package]]
name = "garage_rpc" name = "garage_rpc"
version = "0.5.0" version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3c5743c49f616b260f548454ff52b81d10372593d4c4bc01d516ee3c3c4e515a"
dependencies = [ dependencies = [
"arc-swap", "arc-swap",
"async-trait", "async-trait",
"bytes 1.1.0", "bytes 1.1.0",
"futures", "futures",
"futures-util", "futures-util",
"garage_util", "garage_util 0.5.0",
"gethostname",
"hex",
"hyper",
"kuska-sodiumoxide",
"log",
"netapp",
"rand",
"rmp-serde 0.15.5",
"serde",
"serde_bytes",
"serde_json",
"tokio",
"tokio-stream",
]
[[package]]
name = "garage_rpc"
version = "0.6.0"
dependencies = [
"arc-swap",
"async-trait",
"bytes 1.1.0",
"futures",
"futures-util",
"garage_util 0.6.0",
"gethostname", "gethostname",
"hex", "hex",
"hyper", "hyper",
@ -492,13 +544,35 @@ dependencies = [
[[package]] [[package]]
name = "garage_table" name = "garage_table"
version = "0.5.0" version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "378ffd69e8fd084e0817dc64a23a1692b58ffc86509ac2cadc64aa2d83c3e1e0"
dependencies = [ dependencies = [
"async-trait", "async-trait",
"bytes 1.1.0", "bytes 1.1.0",
"futures", "futures",
"futures-util", "futures-util",
"garage_rpc", "garage_rpc 0.5.0",
"garage_util", "garage_util 0.5.0",
"hexdump",
"log",
"rand",
"rmp-serde 0.15.5",
"serde",
"serde_bytes",
"sled",
"tokio",
]
[[package]]
name = "garage_table"
version = "0.6.0"
dependencies = [
"async-trait",
"bytes 1.1.0",
"futures",
"futures-util",
"garage_rpc 0.6.0",
"garage_util 0.6.0",
"hexdump", "hexdump",
"log", "log",
"rand", "rand",
@ -512,6 +586,32 @@ dependencies = [
[[package]] [[package]]
name = "garage_util" name = "garage_util"
version = "0.5.0" version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5282e613b4da5ecca5bfec8c48ce9f25226cc1f35fbc439ed5fc698cce1aa549"
dependencies = [
"blake2",
"chrono",
"err-derive 0.3.0",
"futures",
"hex",
"http",
"hyper",
"log",
"netapp",
"rand",
"rmp-serde 0.15.5",
"serde",
"serde_json",
"sha2",
"sled",
"tokio",
"toml",
"xxhash-rust",
]
[[package]]
name = "garage_util"
version = "0.6.0"
dependencies = [ dependencies = [
"blake2", "blake2",
"chrono", "chrono",
@ -535,14 +635,14 @@ dependencies = [
[[package]] [[package]]
name = "garage_web" name = "garage_web"
version = "0.5.0" version = "0.6.0"
dependencies = [ dependencies = [
"err-derive 0.3.0", "err-derive 0.3.0",
"futures", "futures",
"garage_api", "garage_api",
"garage_model", "garage_model 0.6.0",
"garage_table", "garage_table 0.6.0",
"garage_util", "garage_util 0.6.0",
"http", "http",
"hyper", "hyper",
"log", "log",

View file

@ -1,6 +1,6 @@
[package] [package]
name = "garage_api" name = "garage_api"
version = "0.5.0" version = "0.6.0"
authors = ["Alex Auvolat <alex@adnab.me>"] authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018" edition = "2018"
license = "AGPL-3.0" license = "AGPL-3.0"
@ -14,9 +14,9 @@ path = "lib.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies] [dependencies]
garage_model = { version = "0.5.0", path = "../model" } garage_model = { version = "0.6.0", path = "../model" }
garage_table = { version = "0.5.0", path = "../table" } garage_table = { version = "0.6.0", path = "../table" }
garage_util = { version = "0.5.0", path = "../util" } garage_util = { version = "0.6.0", path = "../util" }
base64 = "0.13" base64 = "0.13"
bytes = "1.0" bytes = "1.0"

View file

@ -7,9 +7,12 @@ use hyper::server::conn::AddrStream;
use hyper::service::{make_service_fn, service_fn}; use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Request, Response, Server}; use hyper::{Body, Request, Response, Server};
use garage_util::crdt;
use garage_util::data::*;
use garage_util::error::Error as GarageError; use garage_util::error::Error as GarageError;
use garage_model::garage::Garage; use garage_model::garage::Garage;
use garage_model::key_table::Key;
use crate::error::*; use crate::error::*;
use crate::signature::check_signature; use crate::signature::check_signature;
@ -105,10 +108,20 @@ async fn handler_inner(garage: Arc<Garage>, req: Request<Body>) -> Result<Respon
.and_then(|root_domain| host_to_bucket(&host, root_domain)); .and_then(|root_domain| host_to_bucket(&host, root_domain));
let endpoint = Endpoint::from_request(&req, bucket.map(ToOwned::to_owned))?; let endpoint = Endpoint::from_request(&req, bucket.map(ToOwned::to_owned))?;
let bucket_name = match endpoint.authorization_type() {
Authorization::None => {
return handle_request_without_bucket(garage, req, api_key, endpoint).await
}
Authorization::Read(bucket) | Authorization::Write(bucket) => bucket.to_string(),
};
let bucket_id = resolve_bucket(&garage, &bucket_name, &api_key).await?;
let allowed = match endpoint.authorization_type() { let allowed = match endpoint.authorization_type() {
Authorization::None => true, Authorization::Read(_) => api_key.allow_read(&bucket_id),
Authorization::Read(bucket) => api_key.allow_read(bucket), Authorization::Write(_) => api_key.allow_write(&bucket_id),
Authorization::Write(bucket) => api_key.allow_write(bucket), _ => unreachable!(),
}; };
if !allowed { if !allowed {
@ -118,19 +131,18 @@ async fn handler_inner(garage: Arc<Garage>, req: Request<Body>) -> Result<Respon
} }
match endpoint { match endpoint {
Endpoint::ListBuckets => handle_list_buckets(&api_key), Endpoint::HeadObject { key, .. } => handle_head(garage, &req, bucket_id, &key).await,
Endpoint::HeadObject { bucket, key, .. } => handle_head(garage, &req, &bucket, &key).await, Endpoint::GetObject { key, .. } => handle_get(garage, &req, bucket_id, &key).await,
Endpoint::GetObject { bucket, key, .. } => handle_get(garage, &req, &bucket, &key).await,
Endpoint::UploadPart { Endpoint::UploadPart {
bucket,
key, key,
part_number, part_number,
upload_id, upload_id,
..
} => { } => {
handle_put_part( handle_put_part(
garage, garage,
req, req,
&bucket, bucket_id,
&key, &key,
part_number, part_number,
&upload_id, &upload_id,
@ -138,37 +150,45 @@ async fn handler_inner(garage: Arc<Garage>, req: Request<Body>) -> Result<Respon
) )
.await .await
} }
Endpoint::CopyObject { bucket, key } => { Endpoint::CopyObject { key, .. } => {
let copy_source = req.headers().get("x-amz-copy-source").unwrap().to_str()?; let copy_source = req.headers().get("x-amz-copy-source").unwrap().to_str()?;
let copy_source = percent_encoding::percent_decode_str(copy_source).decode_utf8()?; let copy_source = percent_encoding::percent_decode_str(copy_source).decode_utf8()?;
let (source_bucket, source_key) = parse_bucket_key(&copy_source, None)?; let (source_bucket, source_key) = parse_bucket_key(&copy_source, None)?;
if !api_key.allow_read(source_bucket) { let source_bucket_id =
resolve_bucket(&garage, &source_bucket.to_string(), &api_key).await?;
if !api_key.allow_read(&source_bucket_id) {
return Err(Error::Forbidden(format!( return Err(Error::Forbidden(format!(
"Reading from bucket {} not allowed for this key", "Reading from bucket {} not allowed for this key",
source_bucket source_bucket
))); )));
} }
let source_key = source_key.ok_or_bad_request("No source key specified")?; let source_key = source_key.ok_or_bad_request("No source key specified")?;
handle_copy(garage, &req, &bucket, &key, source_bucket, source_key).await handle_copy(garage, &req, bucket_id, &key, source_bucket_id, source_key).await
} }
Endpoint::PutObject { bucket, key } => { Endpoint::PutObject { key, .. } => {
handle_put(garage, req, &bucket, &key, content_sha256).await handle_put(garage, req, bucket_id, &key, content_sha256).await
} }
Endpoint::AbortMultipartUpload { Endpoint::AbortMultipartUpload { key, upload_id, .. } => {
bucket, handle_abort_multipart_upload(garage, bucket_id, &key, &upload_id).await
key, }
upload_id, Endpoint::DeleteObject { key, .. } => handle_delete(garage, bucket_id, &key).await,
} => handle_abort_multipart_upload(garage, &bucket, &key, &upload_id).await,
Endpoint::DeleteObject { bucket, key, .. } => handle_delete(garage, &bucket, &key).await,
Endpoint::CreateMultipartUpload { bucket, key } => { Endpoint::CreateMultipartUpload { bucket, key } => {
handle_create_multipart_upload(garage, &req, &bucket, &key).await handle_create_multipart_upload(garage, &req, &bucket, bucket_id, &key).await
} }
Endpoint::CompleteMultipartUpload { Endpoint::CompleteMultipartUpload {
bucket, bucket,
key, key,
upload_id, upload_id,
} => { } => {
handle_complete_multipart_upload(garage, req, &bucket, &key, &upload_id, content_sha256) handle_complete_multipart_upload(
garage,
req,
&bucket,
bucket_id,
&key,
&upload_id,
content_sha256,
)
.await .await
} }
Endpoint::CreateBucket { bucket } => { Endpoint::CreateBucket { bucket } => {
@ -206,7 +226,8 @@ async fn handler_inner(garage: Arc<Garage>, req: Request<Body>) -> Result<Respon
garage, garage,
&ListObjectsQuery { &ListObjectsQuery {
is_v2: false, is_v2: false,
bucket, bucket_name: bucket,
bucket_id,
delimiter: delimiter.map(|d| d.to_string()), delimiter: delimiter.map(|d| d.to_string()),
max_keys: max_keys.unwrap_or(1000), max_keys: max_keys.unwrap_or(1000),
prefix: prefix.unwrap_or_default(), prefix: prefix.unwrap_or_default(),
@ -234,7 +255,8 @@ async fn handler_inner(garage: Arc<Garage>, req: Request<Body>) -> Result<Respon
garage, garage,
&ListObjectsQuery { &ListObjectsQuery {
is_v2: true, is_v2: true,
bucket, bucket_name: bucket,
bucket_id,
delimiter: delimiter.map(|d| d.to_string()), delimiter: delimiter.map(|d| d.to_string()),
max_keys: max_keys.unwrap_or(1000), max_keys: max_keys.unwrap_or(1000),
prefix: prefix.unwrap_or_default(), prefix: prefix.unwrap_or_default(),
@ -252,8 +274,8 @@ async fn handler_inner(garage: Arc<Garage>, req: Request<Body>) -> Result<Respon
))) )))
} }
} }
Endpoint::DeleteObjects { bucket } => { Endpoint::DeleteObjects { .. } => {
handle_delete_objects(garage, &bucket, req, content_sha256).await handle_delete_objects(garage, bucket_id, req, content_sha256).await
} }
Endpoint::PutBucketWebsite { bucket } => { Endpoint::PutBucketWebsite { bucket } => {
handle_put_website(garage, bucket, req, content_sha256).await handle_put_website(garage, bucket, req, content_sha256).await
@ -263,6 +285,41 @@ async fn handler_inner(garage: Arc<Garage>, req: Request<Body>) -> Result<Respon
} }
} }
async fn handle_request_without_bucket(
garage: Arc<Garage>,
_req: Request<Body>,
api_key: Key,
endpoint: Endpoint,
) -> Result<Response<Body>, Error> {
match endpoint {
Endpoint::ListBuckets => handle_list_buckets(&garage, &api_key).await,
endpoint => Err(Error::NotImplemented(endpoint.name().to_owned())),
}
}
#[allow(clippy::ptr_arg)]
async fn resolve_bucket(
garage: &Garage,
bucket_name: &String,
api_key: &Key,
) -> Result<Uuid, Error> {
let api_key_params = api_key
.state
.as_option()
.ok_or_else(|| Error::Forbidden("Operation is not allowed for this key.".to_string()))?;
if let Some(crdt::Deletable::Present(bucket_id)) = api_key_params.local_aliases.get(bucket_name)
{
Ok(*bucket_id)
} else {
Ok(garage
.bucket_helper()
.resolve_global_bucket_name(bucket_name)
.await?
.ok_or(Error::NotFound)?)
}
}
/// Extract the bucket name and the key name from an HTTP path and possibly a bucket provided in /// Extract the bucket name and the key name from an HTTP path and possibly a bucket provided in
/// the host header of the request /// the host header of the request
/// ///

View file

@ -1,9 +1,12 @@
use std::collections::HashMap;
use std::sync::Arc; use std::sync::Arc;
use hyper::{Body, Response}; use hyper::{Body, Response};
use garage_model::garage::Garage; use garage_model::garage::Garage;
use garage_model::key_table::Key; use garage_model::key_table::Key;
use garage_table::util::EmptyKey;
use garage_util::crdt::*;
use garage_util::time::*; use garage_util::time::*;
use crate::error::*; use crate::error::*;
@ -34,20 +37,65 @@ pub fn handle_get_bucket_versioning() -> Result<Response<Body>, Error> {
.body(Body::from(xml.into_bytes()))?) .body(Body::from(xml.into_bytes()))?)
} }
pub fn handle_list_buckets(api_key: &Key) -> Result<Response<Body>, Error> { pub async fn handle_list_buckets(garage: &Garage, api_key: &Key) -> Result<Response<Body>, Error> {
let key_state = api_key.state.as_option().ok_or_internal_error(
"Key should not be in deleted state at this point (internal error)",
)?;
// Collect buckets user has access to
let ids = api_key
.state
.as_option()
.unwrap()
.authorized_buckets
.items()
.iter()
.filter(|(_, perms)| perms.allow_read || perms.allow_write)
.map(|(id, _)| *id)
.collect::<Vec<_>>();
let mut buckets_by_id = HashMap::new();
let mut aliases = HashMap::new();
for bucket_id in ids.iter() {
let bucket = garage.bucket_table.get(bucket_id, &EmptyKey).await?;
if let Some(bucket) = bucket {
if let Deletable::Present(param) = bucket.state {
for (alias, _, active) in param.aliases.items() {
if *active {
let alias_ent = garage.bucket_alias_table.get(&EmptyKey, alias).await?;
if let Some(alias_ent) = alias_ent {
if let Some(alias_p) = alias_ent.state.get().as_option() {
if alias_p.bucket_id == *bucket_id {
aliases.insert(alias_ent.name.clone(), *bucket_id);
}
}
}
}
}
buckets_by_id.insert(bucket_id, param);
}
}
}
for (alias, _, id) in key_state.local_aliases.items() {
if let Some(id) = id.as_option() {
aliases.insert(alias.clone(), *id);
}
}
// Generate response
let list_buckets = s3_xml::ListAllMyBucketsResult { let list_buckets = s3_xml::ListAllMyBucketsResult {
owner: s3_xml::Owner { owner: s3_xml::Owner {
display_name: s3_xml::Value(api_key.name.get().to_string()), display_name: s3_xml::Value(api_key.name.get().to_string()),
id: s3_xml::Value(api_key.key_id.to_string()), id: s3_xml::Value(api_key.key_id.to_string()),
}, },
buckets: s3_xml::BucketList { buckets: s3_xml::BucketList {
entries: api_key entries: aliases
.authorized_buckets
.items()
.iter() .iter()
.filter(|(_, _, perms)| perms.allow_read || perms.allow_write) .filter_map(|(name, id)| buckets_by_id.get(id).map(|p| (name, id, p)))
.map(|(name, ts, _)| s3_xml::Bucket { .map(|(name, _id, param)| s3_xml::Bucket {
creation_date: s3_xml::Value(msec_to_rfc3339(*ts)), creation_date: s3_xml::Value(msec_to_rfc3339(param.creation_date)),
name: s3_xml::Value(name.to_string()), name: s3_xml::Value(name.to_string()),
}) })
.collect(), .collect(),

View file

@ -18,14 +18,14 @@ use crate::s3_xml;
pub async fn handle_copy( pub async fn handle_copy(
garage: Arc<Garage>, garage: Arc<Garage>,
req: &Request<Body>, req: &Request<Body>,
dest_bucket: &str, dest_bucket_id: Uuid,
dest_key: &str, dest_key: &str,
source_bucket: &str, source_bucket_id: Uuid,
source_key: &str, source_key: &str,
) -> Result<Response<Body>, Error> { ) -> Result<Response<Body>, Error> {
let source_object = garage let source_object = garage
.object_table .object_table
.get(&source_bucket.to_string(), &source_key.to_string()) .get(&source_bucket_id, &source_key.to_string())
.await? .await?
.ok_or(Error::NotFound)?; .ok_or(Error::NotFound)?;
@ -76,7 +76,7 @@ pub async fn handle_copy(
)), )),
}; };
let dest_object = Object::new( let dest_object = Object::new(
dest_bucket.to_string(), dest_bucket_id,
dest_key.to_string(), dest_key.to_string(),
vec![dest_object_version], vec![dest_object_version],
); );
@ -99,7 +99,7 @@ pub async fn handle_copy(
state: ObjectVersionState::Uploading(new_meta.headers.clone()), state: ObjectVersionState::Uploading(new_meta.headers.clone()),
}; };
let tmp_dest_object = Object::new( let tmp_dest_object = Object::new(
dest_bucket.to_string(), dest_bucket_id,
dest_key.to_string(), dest_key.to_string(),
vec![tmp_dest_object_version], vec![tmp_dest_object_version],
); );
@ -109,12 +109,8 @@ pub async fn handle_copy(
// this means that the BlockRef entries linked to this version cannot be // this means that the BlockRef entries linked to this version cannot be
// marked as deleted (they are marked as deleted only if the Version // marked as deleted (they are marked as deleted only if the Version
// doesn't exist or is marked as deleted). // doesn't exist or is marked as deleted).
let mut dest_version = Version::new( let mut dest_version =
new_uuid, Version::new(new_uuid, dest_bucket_id, dest_key.to_string(), false);
dest_bucket.to_string(),
dest_key.to_string(),
false,
);
garage.version_table.insert(&dest_version).await?; garage.version_table.insert(&dest_version).await?;
// Fill in block list for version and insert block refs // Fill in block list for version and insert block refs
@ -151,7 +147,7 @@ pub async fn handle_copy(
)), )),
}; };
let dest_object = Object::new( let dest_object = Object::new(
dest_bucket.to_string(), dest_bucket_id,
dest_key.to_string(), dest_key.to_string(),
vec![dest_object_version], vec![dest_object_version],
); );

View file

@ -14,12 +14,12 @@ use crate::signature::verify_signed_content;
async fn handle_delete_internal( async fn handle_delete_internal(
garage: &Garage, garage: &Garage,
bucket: &str, bucket_id: Uuid,
key: &str, key: &str,
) -> Result<(Uuid, Uuid), Error> { ) -> Result<(Uuid, Uuid), Error> {
let object = garage let object = garage
.object_table .object_table
.get(&bucket.to_string(), &key.to_string()) .get(&bucket_id, &key.to_string())
.await? .await?
.ok_or(Error::NotFound)?; // No need to delete .ok_or(Error::NotFound)?; // No need to delete
@ -45,7 +45,7 @@ async fn handle_delete_internal(
let version_uuid = gen_uuid(); let version_uuid = gen_uuid();
let object = Object::new( let object = Object::new(
bucket.into(), bucket_id,
key.into(), key.into(),
vec![ObjectVersion { vec![ObjectVersion {
uuid: version_uuid, uuid: version_uuid,
@ -61,11 +61,11 @@ async fn handle_delete_internal(
pub async fn handle_delete( pub async fn handle_delete(
garage: Arc<Garage>, garage: Arc<Garage>,
bucket: &str, bucket_id: Uuid,
key: &str, key: &str,
) -> Result<Response<Body>, Error> { ) -> Result<Response<Body>, Error> {
let (_deleted_version, delete_marker_version) = let (_deleted_version, delete_marker_version) =
handle_delete_internal(&garage, bucket, key).await?; handle_delete_internal(&garage, bucket_id, key).await?;
Ok(Response::builder() Ok(Response::builder()
.header("x-amz-version-id", hex::encode(delete_marker_version)) .header("x-amz-version-id", hex::encode(delete_marker_version))
@ -76,7 +76,7 @@ pub async fn handle_delete(
pub async fn handle_delete_objects( pub async fn handle_delete_objects(
garage: Arc<Garage>, garage: Arc<Garage>,
bucket: &str, bucket_id: Uuid,
req: Request<Body>, req: Request<Body>,
content_sha256: Option<Hash>, content_sha256: Option<Hash>,
) -> Result<Response<Body>, Error> { ) -> Result<Response<Body>, Error> {
@ -90,7 +90,7 @@ pub async fn handle_delete_objects(
let mut ret_errors = Vec::new(); let mut ret_errors = Vec::new();
for obj in cmd.objects.iter() { for obj in cmd.objects.iter() {
match handle_delete_internal(&garage, bucket, &obj.key).await { match handle_delete_internal(&garage, bucket_id, &obj.key).await {
Ok((deleted_version, delete_marker_version)) => { Ok((deleted_version, delete_marker_version)) => {
if cmd.quiet { if cmd.quiet {
continue; continue;

View file

@ -7,6 +7,7 @@ use hyper::body::Bytes;
use hyper::{Body, Request, Response, StatusCode}; use hyper::{Body, Request, Response, StatusCode};
use garage_table::EmptyKey; use garage_table::EmptyKey;
use garage_util::data::*;
use garage_model::garage::Garage; use garage_model::garage::Garage;
use garage_model::object_table::*; use garage_model::object_table::*;
@ -84,12 +85,12 @@ fn try_answer_cached(
pub async fn handle_head( pub async fn handle_head(
garage: Arc<Garage>, garage: Arc<Garage>,
req: &Request<Body>, req: &Request<Body>,
bucket: &str, bucket_id: Uuid,
key: &str, key: &str,
) -> Result<Response<Body>, Error> { ) -> Result<Response<Body>, Error> {
let object = garage let object = garage
.object_table .object_table
.get(&bucket.to_string(), &key.to_string()) .get(&bucket_id, &key.to_string())
.await? .await?
.ok_or(Error::NotFound)?; .ok_or(Error::NotFound)?;
@ -123,12 +124,12 @@ pub async fn handle_head(
pub async fn handle_get( pub async fn handle_get(
garage: Arc<Garage>, garage: Arc<Garage>,
req: &Request<Body>, req: &Request<Body>,
bucket: &str, bucket_id: Uuid,
key: &str, key: &str,
) -> Result<Response<Body>, Error> { ) -> Result<Response<Body>, Error> {
let object = garage let object = garage
.object_table .object_table
.get(&bucket.to_string(), &key.to_string()) .get(&bucket_id, &key.to_string())
.await? .await?
.ok_or(Error::NotFound)?; .ok_or(Error::NotFound)?;

View file

@ -3,6 +3,7 @@ use std::sync::Arc;
use hyper::{Body, Response}; use hyper::{Body, Response};
use garage_util::data::*;
use garage_util::error::Error as GarageError; use garage_util::error::Error as GarageError;
use garage_util::time::*; use garage_util::time::*;
@ -18,7 +19,8 @@ use crate::s3_xml;
#[derive(Debug)] #[derive(Debug)]
pub struct ListObjectsQuery { pub struct ListObjectsQuery {
pub is_v2: bool, pub is_v2: bool,
pub bucket: String, pub bucket_name: String,
pub bucket_id: Uuid,
pub delimiter: Option<String>, pub delimiter: Option<String>,
pub max_keys: usize, pub max_keys: usize,
pub prefix: String, pub prefix: String,
@ -102,7 +104,7 @@ pub async fn handle_list(
let objects = garage let objects = garage
.object_table .object_table
.get_range( .get_range(
&query.bucket, &query.bucket_id,
Some(next_chunk_start.clone()), Some(next_chunk_start.clone()),
Some(DeletedFilter::NotDeleted), Some(DeletedFilter::NotDeleted),
query.max_keys + 1, query.max_keys + 1,
@ -232,7 +234,7 @@ pub async fn handle_list(
let mut result = s3_xml::ListBucketResult { let mut result = s3_xml::ListBucketResult {
xmlns: (), xmlns: (),
name: s3_xml::Value(query.bucket.to_string()), name: s3_xml::Value(query.bucket_name.to_string()),
prefix: uriencode_maybe(&query.prefix, query.urlencode_resp), prefix: uriencode_maybe(&query.prefix, query.urlencode_resp),
marker: None, marker: None,
next_marker: None, next_marker: None,

View file

@ -24,7 +24,7 @@ use crate::signature::verify_signed_content;
pub async fn handle_put( pub async fn handle_put(
garage: Arc<Garage>, garage: Arc<Garage>,
req: Request<Body>, req: Request<Body>,
bucket: &str, bucket_id: Uuid,
key: &str, key: &str,
content_sha256: Option<Hash>, content_sha256: Option<Hash>,
) -> Result<Response<Body>, Error> { ) -> Result<Response<Body>, Error> {
@ -77,7 +77,7 @@ pub async fn handle_put(
)), )),
}; };
let object = Object::new(bucket.into(), key.into(), vec![object_version]); let object = Object::new(bucket_id, key.into(), vec![object_version]);
garage.object_table.insert(&object).await?; garage.object_table.insert(&object).await?;
return Ok(put_response(version_uuid, data_md5sum_hex)); return Ok(put_response(version_uuid, data_md5sum_hex));
@ -90,14 +90,14 @@ pub async fn handle_put(
timestamp: version_timestamp, timestamp: version_timestamp,
state: ObjectVersionState::Uploading(headers.clone()), state: ObjectVersionState::Uploading(headers.clone()),
}; };
let object = Object::new(bucket.into(), key.into(), vec![object_version.clone()]); let object = Object::new(bucket_id, key.into(), vec![object_version.clone()]);
garage.object_table.insert(&object).await?; garage.object_table.insert(&object).await?;
// Initialize corresponding entry in version table // Initialize corresponding entry in version table
// Write this entry now, even with empty block list, // Write this entry now, even with empty block list,
// to prevent block_ref entries from being deleted (they can be deleted // to prevent block_ref entries from being deleted (they can be deleted
// if the reference a version that isn't found in the version table) // if the reference a version that isn't found in the version table)
let version = Version::new(version_uuid, bucket.into(), key.into(), false); let version = Version::new(version_uuid, bucket_id, key.into(), false);
garage.version_table.insert(&version).await?; garage.version_table.insert(&version).await?;
// Transfer data and verify checksum // Transfer data and verify checksum
@ -127,7 +127,7 @@ pub async fn handle_put(
Err(e) => { Err(e) => {
// Mark object as aborted, this will free the blocks further down // Mark object as aborted, this will free the blocks further down
object_version.state = ObjectVersionState::Aborted; object_version.state = ObjectVersionState::Aborted;
let object = Object::new(bucket.into(), key.into(), vec![object_version.clone()]); let object = Object::new(bucket_id, key.into(), vec![object_version.clone()]);
garage.object_table.insert(&object).await?; garage.object_table.insert(&object).await?;
return Err(e); return Err(e);
} }
@ -143,7 +143,7 @@ pub async fn handle_put(
}, },
first_block_hash, first_block_hash,
)); ));
let object = Object::new(bucket.into(), key.into(), vec![object_version]); let object = Object::new(bucket_id, key.into(), vec![object_version]);
garage.object_table.insert(&object).await?; garage.object_table.insert(&object).await?;
Ok(put_response(version_uuid, md5sum_hex)) Ok(put_response(version_uuid, md5sum_hex))
@ -315,7 +315,8 @@ pub fn put_response(version_uuid: Uuid, md5sum_hex: String) -> Response<Body> {
pub async fn handle_create_multipart_upload( pub async fn handle_create_multipart_upload(
garage: Arc<Garage>, garage: Arc<Garage>,
req: &Request<Body>, req: &Request<Body>,
bucket: &str, bucket_name: &str,
bucket_id: Uuid,
key: &str, key: &str,
) -> Result<Response<Body>, Error> { ) -> Result<Response<Body>, Error> {
let version_uuid = gen_uuid(); let version_uuid = gen_uuid();
@ -327,20 +328,20 @@ pub async fn handle_create_multipart_upload(
timestamp: now_msec(), timestamp: now_msec(),
state: ObjectVersionState::Uploading(headers), state: ObjectVersionState::Uploading(headers),
}; };
let object = Object::new(bucket.to_string(), key.to_string(), vec![object_version]); let object = Object::new(bucket_id, key.to_string(), vec![object_version]);
garage.object_table.insert(&object).await?; garage.object_table.insert(&object).await?;
// Insert empty version so that block_ref entries refer to something // Insert empty version so that block_ref entries refer to something
// (they are inserted concurrently with blocks in the version table, so // (they are inserted concurrently with blocks in the version table, so
// there is the possibility that they are inserted before the version table // there is the possibility that they are inserted before the version table
// is created, in which case it is allowed to delete them, e.g. in repair_*) // is created, in which case it is allowed to delete them, e.g. in repair_*)
let version = Version::new(version_uuid, bucket.into(), key.into(), false); let version = Version::new(version_uuid, bucket_id, key.into(), false);
garage.version_table.insert(&version).await?; garage.version_table.insert(&version).await?;
// Send success response // Send success response
let result = s3_xml::InitiateMultipartUploadResult { let result = s3_xml::InitiateMultipartUploadResult {
xmlns: (), xmlns: (),
bucket: s3_xml::Value(bucket.to_string()), bucket: s3_xml::Value(bucket_name.to_string()),
key: s3_xml::Value(key.to_string()), key: s3_xml::Value(key.to_string()),
upload_id: s3_xml::Value(hex::encode(version_uuid)), upload_id: s3_xml::Value(hex::encode(version_uuid)),
}; };
@ -352,7 +353,7 @@ pub async fn handle_create_multipart_upload(
pub async fn handle_put_part( pub async fn handle_put_part(
garage: Arc<Garage>, garage: Arc<Garage>,
req: Request<Body>, req: Request<Body>,
bucket: &str, bucket_id: Uuid,
key: &str, key: &str,
part_number: u64, part_number: u64,
upload_id: &str, upload_id: &str,
@ -366,12 +367,11 @@ pub async fn handle_put_part(
}; };
// Read first chuck, and at the same time try to get object to see if it exists // Read first chuck, and at the same time try to get object to see if it exists
let bucket = bucket.to_string();
let key = key.to_string(); let key = key.to_string();
let mut chunker = BodyChunker::new(req.into_body(), garage.config.block_size); let mut chunker = BodyChunker::new(req.into_body(), garage.config.block_size);
let (object, first_block) = let (object, first_block) =
futures::try_join!(garage.object_table.get(&bucket, &key), chunker.next(),)?; futures::try_join!(garage.object_table.get(&bucket_id, &key), chunker.next(),)?;
// Check object is valid and multipart block can be accepted // Check object is valid and multipart block can be accepted
let first_block = first_block.ok_or_else(|| Error::BadRequest("Empty body".to_string()))?; let first_block = first_block.ok_or_else(|| Error::BadRequest("Empty body".to_string()))?;
@ -386,7 +386,7 @@ pub async fn handle_put_part(
} }
// Copy block to store // Copy block to store
let version = Version::new(version_uuid, bucket, key, false); let version = Version::new(version_uuid, bucket_id, key, false);
let first_block_hash = blake2sum(&first_block[..]); let first_block_hash = blake2sum(&first_block[..]);
let (_, data_md5sum, data_sha256sum) = read_and_put_blocks( let (_, data_md5sum, data_sha256sum) = read_and_put_blocks(
&garage, &garage,
@ -424,7 +424,8 @@ pub async fn handle_put_part(
pub async fn handle_complete_multipart_upload( pub async fn handle_complete_multipart_upload(
garage: Arc<Garage>, garage: Arc<Garage>,
req: Request<Body>, req: Request<Body>,
bucket: &str, bucket_name: &str,
bucket_id: Uuid,
key: &str, key: &str,
upload_id: &str, upload_id: &str,
content_sha256: Option<Hash>, content_sha256: Option<Hash>,
@ -442,10 +443,9 @@ pub async fn handle_complete_multipart_upload(
let version_uuid = decode_upload_id(upload_id)?; let version_uuid = decode_upload_id(upload_id)?;
let bucket = bucket.to_string();
let key = key.to_string(); let key = key.to_string();
let (object, version) = futures::try_join!( let (object, version) = futures::try_join!(
garage.object_table.get(&bucket, &key), garage.object_table.get(&bucket_id, &key),
garage.version_table.get(&version_uuid, &EmptyKey), garage.version_table.get(&version_uuid, &EmptyKey),
)?; )?;
@ -510,14 +510,14 @@ pub async fn handle_complete_multipart_upload(
version.blocks.items()[0].1.hash, version.blocks.items()[0].1.hash,
)); ));
let final_object = Object::new(bucket.clone(), key.clone(), vec![object_version]); let final_object = Object::new(bucket_id, key.clone(), vec![object_version]);
garage.object_table.insert(&final_object).await?; garage.object_table.insert(&final_object).await?;
// Send response saying ok we're done // Send response saying ok we're done
let result = s3_xml::CompleteMultipartUploadResult { let result = s3_xml::CompleteMultipartUploadResult {
xmlns: (), xmlns: (),
location: None, location: None,
bucket: s3_xml::Value(bucket), bucket: s3_xml::Value(bucket_name.to_string()),
key: s3_xml::Value(key), key: s3_xml::Value(key),
etag: s3_xml::Value(etag), etag: s3_xml::Value(etag),
}; };
@ -528,7 +528,7 @@ pub async fn handle_complete_multipart_upload(
pub async fn handle_abort_multipart_upload( pub async fn handle_abort_multipart_upload(
garage: Arc<Garage>, garage: Arc<Garage>,
bucket: &str, bucket_id: Uuid,
key: &str, key: &str,
upload_id: &str, upload_id: &str,
) -> Result<Response<Body>, Error> { ) -> Result<Response<Body>, Error> {
@ -536,7 +536,7 @@ pub async fn handle_abort_multipart_upload(
let object = garage let object = garage
.object_table .object_table
.get(&bucket.to_string(), &key.to_string()) .get(&bucket_id, &key.to_string())
.await?; .await?;
let object = object.ok_or_else(|| Error::BadRequest("Object not found".to_string()))?; let object = object.ok_or_else(|| Error::BadRequest("Object not found".to_string()))?;
@ -550,7 +550,7 @@ pub async fn handle_abort_multipart_upload(
}; };
object_version.state = ObjectVersionState::Aborted; object_version.state = ObjectVersionState::Aborted;
let final_object = Object::new(bucket.to_string(), key.to_string(), vec![object_version]); let final_object = Object::new(bucket_id, key.to_string(), vec![object_version]);
garage.object_table.insert(&final_object).await?; garage.object_table.insert(&final_object).await?;
Ok(Response::new(Body::from(vec![]))) Ok(Response::new(Body::from(vec![])))

View file

@ -7,9 +7,10 @@ use serde::{Deserialize, Serialize};
use crate::error::*; use crate::error::*;
use crate::s3_xml::{xmlns_tag, IntValue, Value}; use crate::s3_xml::{xmlns_tag, IntValue, Value};
use crate::signature::verify_signed_content; use crate::signature::verify_signed_content;
use garage_model::bucket_table::BucketState;
use garage_model::garage::Garage; use garage_model::garage::Garage;
use garage_table::*; use garage_table::*;
use garage_util::crdt;
use garage_util::data::Hash; use garage_util::data::Hash;
pub async fn handle_delete_website( pub async fn handle_delete_website(
@ -17,14 +18,18 @@ pub async fn handle_delete_website(
bucket: String, bucket: String,
) -> Result<Response<Body>, Error> { ) -> Result<Response<Body>, Error> {
let mut bucket = garage let mut bucket = garage
.bucket_table .bucket_alias_table
.get(&EmptyKey, &bucket) .get(&EmptyKey, &bucket)
.await? .await?
.ok_or(Error::NotFound)?; .ok_or(Error::NotFound)?;
if let BucketState::Present(state) = bucket.state.get_mut() { if let crdt::Deletable::Present(state) = bucket.state.get_mut() {
state.website.update(false); let mut new_param = state.clone();
garage.bucket_table.insert(&bucket).await?; new_param.website_access = false;
bucket.state.update(crdt::Deletable::present(new_param));
garage.bucket_alias_table.insert(&bucket).await?;
} else {
unreachable!();
} }
Ok(Response::builder() Ok(Response::builder()
@ -43,7 +48,7 @@ pub async fn handle_put_website(
verify_signed_content(content_sha256, &body[..])?; verify_signed_content(content_sha256, &body[..])?;
let mut bucket = garage let mut bucket = garage
.bucket_table .bucket_alias_table
.get(&EmptyKey, &bucket) .get(&EmptyKey, &bucket)
.await? .await?
.ok_or(Error::NotFound)?; .ok_or(Error::NotFound)?;
@ -51,9 +56,13 @@ pub async fn handle_put_website(
let conf: WebsiteConfiguration = from_reader(&body as &[u8])?; let conf: WebsiteConfiguration = from_reader(&body as &[u8])?;
conf.validate()?; conf.validate()?;
if let BucketState::Present(state) = bucket.state.get_mut() { if let crdt::Deletable::Present(state) = bucket.state.get() {
state.website.update(true); let mut new_param = state.clone();
garage.bucket_table.insert(&bucket).await?; new_param.website_access = true;
bucket.state.update(crdt::Deletable::present(new_param));
garage.bucket_alias_table.insert(&bucket).await?;
} else {
unreachable!();
} }
Ok(Response::builder() Ok(Response::builder()

View file

@ -64,7 +64,7 @@ pub async fn check_signature(
.key_table .key_table
.get(&EmptyKey, &authorization.key_id) .get(&EmptyKey, &authorization.key_id)
.await? .await?
.filter(|k| !k.deleted.get()) .filter(|k| !k.state.is_deleted())
.ok_or_else(|| Error::Forbidden(format!("No such key: {}", authorization.key_id)))?; .ok_or_else(|| Error::Forbidden(format!("No such key: {}", authorization.key_id)))?;
let canonical_request = canonical_request( let canonical_request = canonical_request(

View file

@ -1,6 +1,6 @@
[package] [package]
name = "garage" name = "garage"
version = "0.5.0" version = "0.6.0"
authors = ["Alex Auvolat <alex@adnab.me>"] authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018" edition = "2018"
license = "AGPL-3.0" license = "AGPL-3.0"
@ -15,12 +15,12 @@ path = "main.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies] [dependencies]
garage_api = { version = "0.5.0", path = "../api" } garage_api = { version = "0.6.0", path = "../api" }
garage_model = { version = "0.5.0", path = "../model" } garage_model = { version = "0.6.0", path = "../model" }
garage_rpc = { version = "0.5.0", path = "../rpc" } garage_rpc = { version = "0.6.0", path = "../rpc" }
garage_table = { version = "0.5.0", path = "../table" } garage_table = { version = "0.6.0", path = "../table" }
garage_util = { version = "0.5.0", path = "../util" } garage_util = { version = "0.6.0", path = "../util" }
garage_web = { version = "0.5.0", path = "../web" } garage_web = { version = "0.6.0", path = "../web" }
bytes = "1.0" bytes = "1.0"
git-version = "0.3.4" git-version = "0.3.4"

View file

@ -5,17 +5,21 @@ use std::sync::Arc;
use async_trait::async_trait; use async_trait::async_trait;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use garage_util::error::Error; use garage_util::crdt::*;
use garage_util::data::*;
use garage_util::error::*;
use garage_util::time::*;
use garage_table::crdt::Crdt;
use garage_table::replication::*; use garage_table::replication::*;
use garage_table::*; use garage_table::*;
use garage_rpc::*; use garage_rpc::*;
use garage_model::bucket_alias_table::*;
use garage_model::bucket_table::*; use garage_model::bucket_table::*;
use garage_model::garage::Garage; use garage_model::garage::Garage;
use garage_model::key_table::*; use garage_model::key_table::*;
use garage_model::permission::*;
use crate::cli::*; use crate::cli::*;
use crate::repair::Repair; use crate::repair::Repair;
@ -31,7 +35,7 @@ pub enum AdminRpc {
// Replies // Replies
Ok(String), Ok(String),
BucketList(Vec<String>), BucketList(Vec<BucketAlias>),
BucketInfo(Bucket), BucketInfo(Bucket),
KeyList(Vec<(String, String)>), KeyList(Vec<(String, String)>),
KeyInfo(Key), KeyInfo(Key),
@ -56,100 +60,207 @@ impl AdminRpcHandler {
async fn handle_bucket_cmd(&self, cmd: &BucketOperation) -> Result<AdminRpc, Error> { async fn handle_bucket_cmd(&self, cmd: &BucketOperation) -> Result<AdminRpc, Error> {
match cmd { match cmd {
BucketOperation::List => { BucketOperation::List => self.handle_list_buckets().await,
let bucket_names = self
.garage
.bucket_table
.get_range(&EmptyKey, None, Some(DeletedFilter::NotDeleted), 10000)
.await?
.iter()
.map(|b| b.name.to_string())
.collect::<Vec<_>>();
Ok(AdminRpc::BucketList(bucket_names))
}
BucketOperation::Info(query) => { BucketOperation::Info(query) => {
let bucket = self.get_existing_bucket(&query.name).await?; let bucket_id = self
.garage
.bucket_helper()
.resolve_global_bucket_name(&query.name)
.await?
.ok_or_message("Bucket not found")?;
let bucket = self
.garage
.bucket_helper()
.get_existing_bucket(bucket_id)
.await?;
Ok(AdminRpc::BucketInfo(bucket)) Ok(AdminRpc::BucketInfo(bucket))
} }
BucketOperation::Create(query) => { BucketOperation::Create(query) => self.handle_create_bucket(&query.name).await,
let bucket = match self.garage.bucket_table.get(&EmptyKey, &query.name).await? { BucketOperation::Delete(query) => self.handle_delete_bucket(query).await,
Some(mut bucket) => { BucketOperation::Allow(query) => self.handle_bucket_allow(query).await,
if !bucket.is_deleted() { BucketOperation::Deny(query) => self.handle_bucket_deny(query).await,
return Err(Error::BadRpc(format!( BucketOperation::Website(query) => self.handle_bucket_website(query).await,
"Bucket {} already exists",
query.name
)));
} }
}
async fn handle_list_buckets(&self) -> Result<AdminRpc, Error> {
let bucket_aliases = self
.garage
.bucket_alias_table
.get_range(&EmptyKey, None, Some(DeletedFilter::NotDeleted), 10000)
.await?;
Ok(AdminRpc::BucketList(bucket_aliases))
}
#[allow(clippy::ptr_arg)]
async fn handle_create_bucket(&self, name: &String) -> Result<AdminRpc, Error> {
let mut bucket = Bucket::new();
let alias = match self.garage.bucket_alias_table.get(&EmptyKey, name).await? {
Some(mut alias) => {
if !alias.state.get().is_deleted() {
return Err(Error::BadRpc(format!("Bucket {} already exists", name)));
}
alias.state.update(Deletable::Present(AliasParams {
bucket_id: bucket.id,
website_access: false,
}));
alias
}
None => BucketAlias::new(name.clone(), bucket.id, false),
};
bucket bucket
.state .state
.update(BucketState::Present(BucketParams::new())); .as_option_mut()
bucket .unwrap()
} .aliases
None => Bucket::new(query.name.clone()), .update_in_place(name.clone(), true);
};
self.garage.bucket_table.insert(&bucket).await?; self.garage.bucket_table.insert(&bucket).await?;
Ok(AdminRpc::Ok(format!("Bucket {} was created.", query.name))) self.garage.bucket_alias_table.insert(&alias).await?;
Ok(AdminRpc::Ok(format!("Bucket {} was created.", name)))
} }
BucketOperation::Delete(query) => {
let mut bucket = self.get_existing_bucket(&query.name).await?; async fn handle_delete_bucket(&self, query: &DeleteBucketOpt) -> Result<AdminRpc, Error> {
let mut bucket_alias = self
.garage
.bucket_alias_table
.get(&EmptyKey, &query.name)
.await?
.filter(|a| !a.is_deleted())
.ok_or_message(format!("Bucket {} does not exist", query.name))?;
let bucket_id = bucket_alias.state.get().as_option().unwrap().bucket_id;
// Check bucket doesn't have other aliases
let mut bucket = self
.garage
.bucket_helper()
.get_existing_bucket(bucket_id)
.await?;
let bucket_state = bucket.state.as_option().unwrap();
if bucket_state
.aliases
.items()
.iter()
.filter(|(_, _, active)| *active)
.any(|(name, _, _)| name != &query.name)
{
return Err(Error::Message(format!("Bucket {} still has other global aliases. Use `bucket unalias` to delete them one by one.", query.name)));
}
if bucket_state
.local_aliases
.items()
.iter()
.any(|(_, _, active)| *active)
{
return Err(Error::Message(format!("Bucket {} still has other local aliases. Use `bucket unalias` to delete them one by one.", query.name)));
}
// Check bucket is empty
let objects = self let objects = self
.garage .garage
.object_table .object_table
.get_range(&query.name, None, Some(DeletedFilter::NotDeleted), 10) .get_range(&bucket_id, None, Some(DeletedFilter::NotDeleted), 10)
.await?; .await?;
if !objects.is_empty() { if !objects.is_empty() {
return Err(Error::BadRpc(format!("Bucket {} is not empty", query.name))); return Err(Error::BadRpc(format!("Bucket {} is not empty", query.name)));
} }
if !query.yes { if !query.yes {
return Err(Error::BadRpc( return Err(Error::BadRpc(
"Add --yes flag to really perform this operation".to_string(), "Add --yes flag to really perform this operation".to_string(),
)); ));
} }
// --- done checking, now commit --- // --- done checking, now commit ---
for (key_id, _, _) in bucket.authorized_keys() { // 1. delete authorization from keys that had access
for (key_id, _) in bucket.authorized_keys() {
if let Some(key) = self.garage.key_table.get(&EmptyKey, key_id).await? { if let Some(key) = self.garage.key_table.get(&EmptyKey, key_id).await? {
if !key.deleted.get() { if !key.state.is_deleted() {
self.update_key_bucket(&key, &bucket.name, false, false) self.update_key_bucket(&key, bucket.id, false, false)
.await?; .await?;
} }
} else { } else {
return Err(Error::Message(format!("Key not found: {}", key_id))); return Err(Error::Message(format!("Key not found: {}", key_id)));
} }
} }
bucket.state.update(BucketState::Deleted); // 2. delete bucket alias
bucket_alias.state.update(Deletable::Deleted);
self.garage.bucket_alias_table.insert(&bucket_alias).await?;
// 3. delete bucket alias
bucket.state = Deletable::delete();
self.garage.bucket_table.insert(&bucket).await?; self.garage.bucket_table.insert(&bucket).await?;
Ok(AdminRpc::Ok(format!("Bucket {} was deleted.", query.name))) Ok(AdminRpc::Ok(format!("Bucket {} was deleted.", query.name)))
} }
BucketOperation::Allow(query) => {
async fn handle_bucket_allow(&self, query: &PermBucketOpt) -> Result<AdminRpc, Error> {
let bucket_id = self
.garage
.bucket_helper()
.resolve_global_bucket_name(&query.bucket)
.await?
.ok_or_message("Bucket not found")?;
let bucket = self
.garage
.bucket_helper()
.get_existing_bucket(bucket_id)
.await?;
let key = self.get_existing_key(&query.key_pattern).await?; let key = self.get_existing_key(&query.key_pattern).await?;
let bucket = self.get_existing_bucket(&query.bucket).await?;
let allow_read = query.read || key.allow_read(&query.bucket); let allow_read = query.read || key.allow_read(&bucket_id);
let allow_write = query.write || key.allow_write(&query.bucket); let allow_write = query.write || key.allow_write(&bucket_id);
self.update_key_bucket(&key, &query.bucket, allow_read, allow_write)
let new_perm = self
.update_key_bucket(&key, bucket_id, allow_read, allow_write)
.await?; .await?;
self.update_bucket_key(bucket, &key.key_id, allow_read, allow_write) self.update_bucket_key(bucket, &key.key_id, new_perm)
.await?; .await?;
Ok(AdminRpc::Ok(format!( Ok(AdminRpc::Ok(format!(
"New permissions for {} on {}: read {}, write {}.", "New permissions for {} on {}: read {}, write {}.",
&key.key_id, &query.bucket, allow_read, allow_write &key.key_id, &query.bucket, allow_read, allow_write
))) )))
} }
BucketOperation::Deny(query) => {
async fn handle_bucket_deny(&self, query: &PermBucketOpt) -> Result<AdminRpc, Error> {
let bucket_id = self
.garage
.bucket_helper()
.resolve_global_bucket_name(&query.bucket)
.await?
.ok_or_message("Bucket not found")?;
let bucket = self
.garage
.bucket_helper()
.get_existing_bucket(bucket_id)
.await?;
let key = self.get_existing_key(&query.key_pattern).await?; let key = self.get_existing_key(&query.key_pattern).await?;
let bucket = self.get_existing_bucket(&query.bucket).await?;
let allow_read = !query.read && key.allow_read(&query.bucket); let allow_read = !query.read && key.allow_read(&bucket_id);
let allow_write = !query.write && key.allow_write(&query.bucket); let allow_write = !query.write && key.allow_write(&bucket_id);
self.update_key_bucket(&key, &query.bucket, allow_read, allow_write)
let new_perm = self
.update_key_bucket(&key, bucket_id, allow_read, allow_write)
.await?; .await?;
self.update_bucket_key(bucket, &key.key_id, allow_read, allow_write) self.update_bucket_key(bucket, &key.key_id, new_perm)
.await?; .await?;
Ok(AdminRpc::Ok(format!( Ok(AdminRpc::Ok(format!(
"New permissions for {} on {}: read {}, write {}.", "New permissions for {} on {}: read {}, write {}.",
&key.key_id, &query.bucket, allow_read, allow_write &key.key_id, &query.bucket, allow_read, allow_write
))) )))
} }
BucketOperation::Website(query) => {
let mut bucket = self.get_existing_bucket(&query.bucket).await?; async fn handle_bucket_website(&self, query: &WebsiteOpt) -> Result<AdminRpc, Error> {
let mut bucket_alias = self
.garage
.bucket_alias_table
.get(&EmptyKey, &query.bucket)
.await?
.filter(|a| !a.is_deleted())
.ok_or_message(format!("Bucket {} does not exist", query.bucket))?;
let mut state = bucket_alias.state.get().as_option().unwrap().clone();
if !(query.allow ^ query.deny) { if !(query.allow ^ query.deny) {
return Err(Error::Message( return Err(Error::Message(
@ -157,9 +268,10 @@ impl AdminRpcHandler {
)); ));
} }
if let BucketState::Present(state) = bucket.state.get_mut() { state.website_access = query.allow;
state.website.update(query.allow); bucket_alias.state.update(Deletable::present(state));
self.garage.bucket_table.insert(&bucket).await?; self.garage.bucket_alias_table.insert(&bucket_alias).await?;
let msg = if query.allow { let msg = if query.allow {
format!("Website access allowed for {}", &query.bucket) format!("Website access allowed for {}", &query.bucket)
} else { } else {
@ -167,16 +279,23 @@ impl AdminRpcHandler {
}; };
Ok(AdminRpc::Ok(msg)) Ok(AdminRpc::Ok(msg))
} else {
unreachable!();
}
}
}
} }
async fn handle_key_cmd(&self, cmd: &KeyOperation) -> Result<AdminRpc, Error> { async fn handle_key_cmd(&self, cmd: &KeyOperation) -> Result<AdminRpc, Error> {
match cmd { match cmd {
KeyOperation::List => { KeyOperation::List => self.handle_list_keys().await,
KeyOperation::Info(query) => {
let key = self.get_existing_key(&query.key_pattern).await?;
Ok(AdminRpc::KeyInfo(key))
}
KeyOperation::New(query) => self.handle_create_key(query).await,
KeyOperation::Rename(query) => self.handle_rename_key(query).await,
KeyOperation::Delete(query) => self.handle_delete_key(query).await,
KeyOperation::Import(query) => self.handle_import_key(query).await,
}
}
async fn handle_list_keys(&self) -> Result<AdminRpc, Error> {
let key_ids = self let key_ids = self
.garage .garage
.key_table .key_table
@ -192,47 +311,73 @@ impl AdminRpcHandler {
.collect::<Vec<_>>(); .collect::<Vec<_>>();
Ok(AdminRpc::KeyList(key_ids)) Ok(AdminRpc::KeyList(key_ids))
} }
KeyOperation::Info(query) => {
let key = self.get_existing_key(&query.key_pattern).await?; async fn handle_create_key(&self, query: &KeyNewOpt) -> Result<AdminRpc, Error> {
Ok(AdminRpc::KeyInfo(key))
}
KeyOperation::New(query) => {
let key = Key::new(query.name.clone()); let key = Key::new(query.name.clone());
self.garage.key_table.insert(&key).await?; self.garage.key_table.insert(&key).await?;
Ok(AdminRpc::KeyInfo(key)) Ok(AdminRpc::KeyInfo(key))
} }
KeyOperation::Rename(query) => {
async fn handle_rename_key(&self, query: &KeyRenameOpt) -> Result<AdminRpc, Error> {
let mut key = self.get_existing_key(&query.key_pattern).await?; let mut key = self.get_existing_key(&query.key_pattern).await?;
key.name.update(query.new_name.clone()); key.name.update(query.new_name.clone());
self.garage.key_table.insert(&key).await?; self.garage.key_table.insert(&key).await?;
Ok(AdminRpc::KeyInfo(key)) Ok(AdminRpc::KeyInfo(key))
} }
KeyOperation::Delete(query) => {
let key = self.get_existing_key(&query.key_pattern).await?; async fn handle_delete_key(&self, query: &KeyDeleteOpt) -> Result<AdminRpc, Error> {
let mut key = self.get_existing_key(&query.key_pattern).await?;
if !query.yes { if !query.yes {
return Err(Error::BadRpc( return Err(Error::BadRpc(
"Add --yes flag to really perform this operation".to_string(), "Add --yes flag to really perform this operation".to_string(),
)); ));
} }
let state = key.state.as_option_mut().unwrap();
// --- done checking, now commit --- // --- done checking, now commit ---
for (ab_name, _, _) in key.authorized_buckets.items().iter() { // 1. Delete local aliases
if let Some(bucket) = self.garage.bucket_table.get(&EmptyKey, ab_name).await? { for (alias, _, to) in state.local_aliases.items().iter() {
if let Deletable::Present(bucket_id) = to {
if let Some(mut bucket) = self.garage.bucket_table.get(bucket_id, &EmptyKey).await?
{
if let Deletable::Present(bucket_state) = &mut bucket.state {
bucket_state.local_aliases = bucket_state
.local_aliases
.update_mutator((key.key_id.to_string(), alias.to_string()), false);
self.garage.bucket_table.insert(&bucket).await?;
}
} else {
// ignore
}
}
}
// 2. Delete authorized buckets
for (ab_id, auth) in state.authorized_buckets.items().iter() {
if let Some(bucket) = self.garage.bucket_table.get(ab_id, &EmptyKey).await? {
let new_perm = BucketKeyPerm {
timestamp: increment_logical_clock(auth.timestamp),
allow_read: false,
allow_write: false,
};
if !bucket.is_deleted() { if !bucket.is_deleted() {
self.update_bucket_key(bucket, &key.key_id, false, false) self.update_bucket_key(bucket, &key.key_id, new_perm)
.await?; .await?;
} }
} else { } else {
return Err(Error::Message(format!("Bucket not found: {}", ab_name))); // ignore
} }
} }
let del_key = Key::delete(key.key_id.to_string()); // 3. Actually delete key
self.garage.key_table.insert(&del_key).await?; key.state = Deletable::delete();
self.garage.key_table.insert(&key).await?;
Ok(AdminRpc::Ok(format!( Ok(AdminRpc::Ok(format!(
"Key {} was deleted successfully.", "Key {} was deleted successfully.",
key.key_id key.key_id
))) )))
} }
KeyOperation::Import(query) => {
async fn handle_import_key(&self, query: &KeyImportOpt) -> Result<AdminRpc, Error> {
let prev_key = self.garage.key_table.get(&EmptyKey, &query.key_id).await?; let prev_key = self.garage.key_table.get(&EmptyKey, &query.key_id).await?;
if prev_key.is_some() { if prev_key.is_some() {
return Err(Error::Message(format!("Key {} already exists in data store. Even if it is deleted, we can't let you create a new key with the same ID. Sorry.", query.key_id))); return Err(Error::Message(format!("Key {} already exists in data store. Even if it is deleted, we can't let you create a new key with the same ID. Sorry.", query.key_id)));
@ -241,19 +386,6 @@ impl AdminRpcHandler {
self.garage.key_table.insert(&imported_key).await?; self.garage.key_table.insert(&imported_key).await?;
Ok(AdminRpc::KeyInfo(imported_key)) Ok(AdminRpc::KeyInfo(imported_key))
} }
}
}
#[allow(clippy::ptr_arg)]
async fn get_existing_bucket(&self, bucket: &String) -> Result<Bucket, Error> {
self.garage
.bucket_table
.get(&EmptyKey, bucket)
.await?
.filter(|b| !b.is_deleted())
.map(Ok)
.unwrap_or_else(|| Err(Error::BadRpc(format!("Bucket {} does not exist", bucket))))
}
async fn get_existing_key(&self, pattern: &str) -> Result<Key, Error> { async fn get_existing_key(&self, pattern: &str) -> Result<Key, Error> {
let candidates = self let candidates = self
@ -267,7 +399,7 @@ impl AdminRpcHandler {
) )
.await? .await?
.into_iter() .into_iter()
.filter(|k| !k.deleted.get()) .filter(|k| !k.state.is_deleted())
.collect::<Vec<_>>(); .collect::<Vec<_>>();
if candidates.len() != 1 { if candidates.len() != 1 {
Err(Error::Message(format!( Err(Error::Message(format!(
@ -279,54 +411,51 @@ impl AdminRpcHandler {
} }
} }
/// Update **key table** to inform of the new linked bucket
async fn update_key_bucket(
&self,
key: &Key,
bucket_id: Uuid,
allow_read: bool,
allow_write: bool,
) -> Result<BucketKeyPerm, Error> {
let mut key = key.clone();
let mut key_state = key.state.as_option_mut().unwrap();
let perm = key_state
.authorized_buckets
.get(&bucket_id)
.cloned()
.map(|old_perm| BucketKeyPerm {
timestamp: increment_logical_clock(old_perm.timestamp),
allow_read,
allow_write,
})
.unwrap_or(BucketKeyPerm {
timestamp: now_msec(),
allow_read,
allow_write,
});
key_state.authorized_buckets = Map::put_mutator(bucket_id, perm);
self.garage.key_table.insert(&key).await?;
Ok(perm)
}
/// Update **bucket table** to inform of the new linked key /// Update **bucket table** to inform of the new linked key
async fn update_bucket_key( async fn update_bucket_key(
&self, &self,
mut bucket: Bucket, mut bucket: Bucket,
key_id: &str, key_id: &str,
allow_read: bool, new_perm: BucketKeyPerm,
allow_write: bool,
) -> Result<(), Error> { ) -> Result<(), Error> {
if let BucketState::Present(params) = bucket.state.get_mut() { bucket.state.as_option_mut().unwrap().authorized_keys =
let ak = &mut params.authorized_keys; Map::put_mutator(key_id.to_string(), new_perm);
let old_ak = ak.take_and_clear();
ak.merge(&old_ak.update_mutator(
key_id.to_string(),
PermissionSet {
allow_read,
allow_write,
},
));
} else {
return Err(Error::Message(
"Bucket is deleted in update_bucket_key".to_string(),
));
}
self.garage.bucket_table.insert(&bucket).await?; self.garage.bucket_table.insert(&bucket).await?;
Ok(()) Ok(())
} }
/// Update **key table** to inform of the new linked bucket
async fn update_key_bucket(
&self,
key: &Key,
bucket: &str,
allow_read: bool,
allow_write: bool,
) -> Result<(), Error> {
let mut key = key.clone();
let old_map = key.authorized_buckets.take_and_clear();
key.authorized_buckets.merge(&old_map.update_mutator(
bucket.to_string(),
PermissionSet {
allow_read,
allow_write,
},
));
self.garage.key_table.insert(&key).await?;
Ok(())
}
async fn handle_launch_repair(self: &Arc<Self>, opt: RepairOpt) -> Result<AdminRpc, Error> { async fn handle_launch_repair(self: &Arc<Self>, opt: RepairOpt) -> Result<AdminRpc, Error> {
if !opt.yes { if !opt.yes {
return Err(Error::BadRpc( return Err(Error::BadRpc(

View file

@ -161,8 +161,11 @@ pub async fn cmd_admin(
} }
AdminRpc::BucketList(bl) => { AdminRpc::BucketList(bl) => {
println!("List of buckets:"); println!("List of buckets:");
for bucket in bl { for alias in bl {
println!("{}", bucket); if let Some(p) = alias.state.get().as_option() {
let wflag = if p.website_access { "W" } else { " " };
println!("- {} {} {:?}", wflag, alias.name, p.bucket_id);
}
} }
} }
AdminRpc::BucketInfo(bucket) => { AdminRpc::BucketInfo(bucket) => {

View file

@ -1,3 +1,4 @@
use garage_util::crdt::*;
use garage_util::data::Uuid; use garage_util::data::Uuid;
use garage_util::error::*; use garage_util::error::*;
@ -8,26 +9,50 @@ pub fn print_key_info(key: &Key) {
println!("Key name: {}", key.name.get()); println!("Key name: {}", key.name.get());
println!("Key ID: {}", key.key_id); println!("Key ID: {}", key.key_id);
println!("Secret key: {}", key.secret_key); println!("Secret key: {}", key.secret_key);
if key.deleted.get() { match &key.state {
println!("Key is deleted."); Deletable::Present(p) => {
} else { println!("\nKey-specific bucket aliases:");
println!("Authorized buckets:"); for (alias_name, _, alias) in p.local_aliases.items().iter() {
for (b, _, perm) in key.authorized_buckets.items().iter() { if let Some(bucket_id) = alias.as_option() {
println!("- {} R:{} W:{}", b, perm.allow_read, perm.allow_write); println!("- {} {:?}", alias_name, bucket_id);
}
}
println!("\nAuthorized buckets:");
for (b, perm) in p.authorized_buckets.items().iter() {
let rflag = if perm.allow_read { "R" } else { " " };
let wflag = if perm.allow_write { "W" } else { " " };
println!("- {}{} {:?}", rflag, wflag, b);
}
}
Deletable::Deleted => {
println!("\nKey is deleted.");
} }
} }
} }
pub fn print_bucket_info(bucket: &Bucket) { pub fn print_bucket_info(bucket: &Bucket) {
println!("Bucket name: {}", bucket.name); println!("Bucket: {}", hex::encode(bucket.id));
match bucket.state.get() { match &bucket.state {
BucketState::Deleted => println!("Bucket is deleted."), Deletable::Deleted => println!("Bucket is deleted."),
BucketState::Present(p) => { Deletable::Present(p) => {
println!("Authorized keys:"); println!("\nGlobal aliases:");
for (k, _, perm) in p.authorized_keys.items().iter() { for (alias, _, active) in p.aliases.items().iter() {
println!("- {} R:{} W:{}", k, perm.allow_read, perm.allow_write); if *active {
println!("- {}", alias);
}
}
println!("\nKey-specific aliases:");
for ((key_id, alias), _, active) in p.local_aliases.items().iter() {
if *active {
println!("- {} {}", key_id, alias);
}
}
println!("\nAuthorized keys:");
for (k, perm) in p.authorized_keys.items().iter() {
let rflag = if perm.allow_read { "R" } else { " " };
let wflag = if perm.allow_write { "W" } else { " " };
println!("- {}{} {}", rflag, wflag, k);
} }
println!("Website access: {}", p.website.get());
} }
}; };
} }

View file

@ -77,7 +77,7 @@ impl Repair {
let object = self let object = self
.garage .garage
.object_table .object_table
.get(&version.bucket, &version.key) .get(&version.bucket_id, &version.key)
.await?; .await?;
let version_exists = match object { let version_exists = match object {
Some(o) => o Some(o) => o
@ -92,7 +92,7 @@ impl Repair {
.version_table .version_table
.insert(&Version::new( .insert(&Version::new(
version.uuid, version.uuid,
version.bucket, version.bucket_id,
version.key, version.key,
true, true,
)) ))

View file

@ -1,6 +1,6 @@
[package] [package]
name = "garage_model" name = "garage_model"
version = "0.5.0" version = "0.6.0"
authors = ["Alex Auvolat <alex@adnab.me>"] authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018" edition = "2018"
license = "AGPL-3.0" license = "AGPL-3.0"
@ -14,9 +14,10 @@ path = "lib.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies] [dependencies]
garage_rpc = { version = "0.5.0", path = "../rpc" } garage_rpc = { version = "0.6.0", path = "../rpc" }
garage_table = { version = "0.5.0", path = "../table" } garage_table = { version = "0.6.0", path = "../table" }
garage_util = { version = "0.5.0", path = "../util" } garage_util = { version = "0.6.0", path = "../util" }
garage_model_050 = { package = "garage_model", version = "0.5.0" }
async-trait = "0.1.7" async-trait = "0.1.7"
arc-swap = "1.0" arc-swap = "1.0"

View file

@ -0,0 +1,68 @@
use serde::{Deserialize, Serialize};
use garage_table::crdt::*;
use garage_table::*;
use garage_util::data::*;
/// The bucket alias table holds the names given to buckets
/// in the global namespace.
#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
pub struct BucketAlias {
pub name: String,
pub state: crdt::Lww<crdt::Deletable<AliasParams>>,
}
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)]
pub struct AliasParams {
pub bucket_id: Uuid,
pub website_access: bool,
}
impl AutoCrdt for AliasParams {
const WARN_IF_DIFFERENT: bool = true;
}
impl BucketAlias {
pub fn new(name: String, bucket_id: Uuid, website_access: bool) -> Self {
BucketAlias {
name,
state: crdt::Lww::new(crdt::Deletable::present(AliasParams {
bucket_id,
website_access,
})),
}
}
pub fn is_deleted(&self) -> bool {
self.state.get().is_deleted()
}
}
impl Crdt for BucketAlias {
fn merge(&mut self, o: &Self) {
self.state.merge(&o.state);
}
}
impl Entry<EmptyKey, String> for BucketAlias {
fn partition_key(&self) -> &EmptyKey {
&EmptyKey
}
fn sort_key(&self) -> &String {
&self.name
}
}
pub struct BucketAliasTable;
impl TableSchema for BucketAliasTable {
const TABLE_NAME: &'static str = "bucket_alias";
type P = EmptyKey;
type S = String;
type E = BucketAlias;
type Filter = DeletedFilter;
fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool {
filter.apply(entry.is_deleted())
}
}

View file

@ -0,0 +1,41 @@
use garage_util::data::*;
use garage_util::error::*;
use garage_table::util::EmptyKey;
use crate::bucket_table::Bucket;
use crate::garage::Garage;
pub struct BucketHelper<'a>(pub(crate) &'a Garage);
#[allow(clippy::ptr_arg)]
impl<'a> BucketHelper<'a> {
pub async fn resolve_global_bucket_name(
&self,
bucket_name: &String,
) -> Result<Option<Uuid>, Error> {
Ok(self
.0
.bucket_alias_table
.get(&EmptyKey, bucket_name)
.await?
.map(|x| x.state.get().as_option().map(|x| x.bucket_id))
.flatten())
}
#[allow(clippy::ptr_arg)]
pub async fn get_existing_bucket(&self, bucket_id: Uuid) -> Result<Bucket, Error> {
self.0
.bucket_table
.get(&bucket_id, &EmptyKey)
.await?
.filter(|b| !b.is_deleted())
.map(Ok)
.unwrap_or_else(|| {
Err(Error::BadRpc(format!(
"Bucket {:?} does not exist",
bucket_id
)))
})
}
}

View file

@ -2,8 +2,10 @@ use serde::{Deserialize, Serialize};
use garage_table::crdt::Crdt; use garage_table::crdt::Crdt;
use garage_table::*; use garage_table::*;
use garage_util::data::*;
use garage_util::time::*;
use crate::key_table::PermissionSet; use crate::permission::BucketKeyPerm;
/// A bucket is a collection of objects /// A bucket is a collection of objects
/// ///
@ -12,49 +14,38 @@ use crate::key_table::PermissionSet;
/// - A bucket has 2 states, Present or Deleted and parameters make sense only if present. /// - A bucket has 2 states, Present or Deleted and parameters make sense only if present.
#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)] #[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
pub struct Bucket { pub struct Bucket {
/// Name of the bucket /// ID of the bucket
pub name: String, pub id: Uuid,
/// State, and configuration if not deleted, of the bucket /// State, and configuration if not deleted, of the bucket
pub state: crdt::Lww<BucketState>, pub state: crdt::Deletable<BucketParams>,
}
/// State of a bucket
#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
pub enum BucketState {
/// The bucket is deleted
Deleted,
/// The bucket exists
Present(BucketParams),
}
impl Crdt for BucketState {
fn merge(&mut self, o: &Self) {
match o {
BucketState::Deleted => *self = BucketState::Deleted,
BucketState::Present(other_params) => {
if let BucketState::Present(params) = self {
params.merge(other_params);
}
}
}
}
} }
/// Configuration for a bucket /// Configuration for a bucket
#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)] #[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
pub struct BucketParams { pub struct BucketParams {
/// Bucket's creation date
pub creation_date: u64,
/// Map of key with access to the bucket, and what kind of access they give /// Map of key with access to the bucket, and what kind of access they give
pub authorized_keys: crdt::LwwMap<String, PermissionSet>, pub authorized_keys: crdt::Map<String, BucketKeyPerm>,
/// Is the bucket served as http /// Map of aliases that are or have been given to this bucket
pub website: crdt::Lww<bool>, /// in the global namespace
/// (not authoritative: this is just used as an indication to
/// map back to aliases when doing ListBuckets)
pub aliases: crdt::LwwMap<String, bool>,
/// Map of aliases that are or have been given to this bucket
/// in namespaces local to keys
/// key = (access key id, alias name)
pub local_aliases: crdt::LwwMap<(String, String), bool>,
} }
impl BucketParams { impl BucketParams {
/// Create an empty BucketParams with no authorized keys and no website accesss /// Create an empty BucketParams with no authorized keys and no website accesss
pub fn new() -> Self { pub fn new() -> Self {
BucketParams { BucketParams {
authorized_keys: crdt::LwwMap::new(), creation_date: now_msec(),
website: crdt::Lww::new(false), authorized_keys: crdt::Map::new(),
aliases: crdt::LwwMap::new(),
local_aliases: crdt::LwwMap::new(),
} }
} }
} }
@ -62,7 +53,14 @@ impl BucketParams {
impl Crdt for BucketParams { impl Crdt for BucketParams {
fn merge(&mut self, o: &Self) { fn merge(&mut self, o: &Self) {
self.authorized_keys.merge(&o.authorized_keys); self.authorized_keys.merge(&o.authorized_keys);
self.website.merge(&o.website); self.aliases.merge(&o.aliases);
self.local_aliases.merge(&o.local_aliases);
}
}
impl Default for Bucket {
fn default() -> Self {
Self::new()
} }
} }
@ -74,34 +72,34 @@ impl Default for BucketParams {
impl Bucket { impl Bucket {
/// Initializes a new instance of the Bucket struct /// Initializes a new instance of the Bucket struct
pub fn new(name: String) -> Self { pub fn new() -> Self {
Bucket { Bucket {
name, id: gen_uuid(),
state: crdt::Lww::new(BucketState::Present(BucketParams::new())), state: crdt::Deletable::present(BucketParams::new()),
} }
} }
/// Returns true if this represents a deleted bucket /// Returns true if this represents a deleted bucket
pub fn is_deleted(&self) -> bool { pub fn is_deleted(&self) -> bool {
*self.state.get() == BucketState::Deleted self.state.is_deleted()
} }
/// Return the list of authorized keys, when each was updated, and the permission associated to /// Return the list of authorized keys, when each was updated, and the permission associated to
/// the key /// the key
pub fn authorized_keys(&self) -> &[(String, u64, PermissionSet)] { pub fn authorized_keys(&self) -> &[(String, BucketKeyPerm)] {
match self.state.get() { match &self.state {
BucketState::Deleted => &[], crdt::Deletable::Deleted => &[],
BucketState::Present(state) => state.authorized_keys.items(), crdt::Deletable::Present(state) => state.authorized_keys.items(),
} }
} }
} }
impl Entry<EmptyKey, String> for Bucket { impl Entry<Uuid, EmptyKey> for Bucket {
fn partition_key(&self) -> &EmptyKey { fn partition_key(&self) -> &Uuid {
&EmptyKey &self.id
} }
fn sort_key(&self) -> &String { fn sort_key(&self) -> &EmptyKey {
&self.name &EmptyKey
} }
} }
@ -114,10 +112,10 @@ impl Crdt for Bucket {
pub struct BucketTable; pub struct BucketTable;
impl TableSchema for BucketTable { impl TableSchema for BucketTable {
const TABLE_NAME: &'static str = "bucket"; const TABLE_NAME: &'static str = "bucket_v2";
type P = EmptyKey; type P = Uuid;
type S = String; type S = EmptyKey;
type E = Bucket; type E = Bucket;
type Filter = DeletedFilter; type Filter = DeletedFilter;

View file

@ -14,6 +14,8 @@ use garage_table::*;
use crate::block::*; use crate::block::*;
use crate::block_ref_table::*; use crate::block_ref_table::*;
use crate::bucket_alias_table::*;
use crate::bucket_helper::*;
use crate::bucket_table::*; use crate::bucket_table::*;
use crate::key_table::*; use crate::key_table::*;
use crate::object_table::*; use crate::object_table::*;
@ -35,6 +37,8 @@ pub struct Garage {
/// Table containing informations about buckets /// Table containing informations about buckets
pub bucket_table: Arc<Table<BucketTable, TableFullReplication>>, pub bucket_table: Arc<Table<BucketTable, TableFullReplication>>,
/// Table containing informations about bucket aliases
pub bucket_alias_table: Arc<Table<BucketAliasTable, TableFullReplication>>,
/// Table containing informations about api keys /// Table containing informations about api keys
pub key_table: Arc<Table<KeyTable, TableFullReplication>>, pub key_table: Arc<Table<KeyTable, TableFullReplication>>,
@ -120,6 +124,14 @@ impl Garage {
info!("Initialize bucket_table..."); info!("Initialize bucket_table...");
let bucket_table = Table::new(BucketTable, control_rep_param.clone(), system.clone(), &db); let bucket_table = Table::new(BucketTable, control_rep_param.clone(), system.clone(), &db);
info!("Initialize bucket_alias_table...");
let bucket_alias_table = Table::new(
BucketAliasTable,
control_rep_param.clone(),
system.clone(),
&db,
);
info!("Initialize key_table_table..."); info!("Initialize key_table_table...");
let key_table = Table::new(KeyTable, control_rep_param, system.clone(), &db); let key_table = Table::new(KeyTable, control_rep_param, system.clone(), &db);
@ -131,6 +143,7 @@ impl Garage {
system, system,
block_manager, block_manager,
bucket_table, bucket_table,
bucket_alias_table,
key_table, key_table,
object_table, object_table,
version_table, version_table,
@ -148,4 +161,8 @@ impl Garage {
pub fn break_reference_cycles(&self) { pub fn break_reference_cycles(&self) {
self.block_manager.garage.swap(None); self.block_manager.garage.swap(None);
} }
pub fn bucket_helper(&self) -> BucketHelper {
BucketHelper(self)
}
} }

View file

@ -2,6 +2,9 @@ use serde::{Deserialize, Serialize};
use garage_table::crdt::*; use garage_table::crdt::*;
use garage_table::*; use garage_table::*;
use garage_util::data::*;
use crate::permission::BucketKeyPerm;
/// An api key /// An api key
#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)] #[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
@ -15,12 +18,39 @@ pub struct Key {
/// Name for the key /// Name for the key
pub name: crdt::Lww<String>, pub name: crdt::Lww<String>,
/// Is the key deleted /// If the key is present: it gives some permissions,
pub deleted: crdt::Bool, /// a map of bucket IDs (uuids) to permissions.
/// Otherwise no permissions are granted to key
pub state: crdt::Deletable<KeyParams>,
}
/// Buckets in which the key is authorized. Empty if `Key` is deleted /// Configuration for a key
// CRDT interaction: deleted implies authorized_buckets is empty #[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
pub authorized_buckets: crdt::LwwMap<String, PermissionSet>, pub struct KeyParams {
pub authorized_buckets: crdt::Map<Uuid, BucketKeyPerm>,
pub local_aliases: crdt::LwwMap<String, crdt::Deletable<Uuid>>,
}
impl KeyParams {
pub fn new() -> Self {
KeyParams {
authorized_buckets: crdt::Map::new(),
local_aliases: crdt::LwwMap::new(),
}
}
}
impl Default for KeyParams {
fn default() -> Self {
Self::new()
}
}
impl Crdt for KeyParams {
fn merge(&mut self, o: &Self) {
self.authorized_buckets.merge(&o.authorized_buckets);
self.local_aliases.merge(&o.local_aliases);
}
} }
impl Key { impl Key {
@ -32,8 +62,7 @@ impl Key {
key_id, key_id,
secret_key, secret_key,
name: crdt::Lww::new(name), name: crdt::Lww::new(name),
deleted: crdt::Bool::new(false), state: crdt::Deletable::present(KeyParams::new()),
authorized_buckets: crdt::LwwMap::new(),
} }
} }
@ -43,8 +72,7 @@ impl Key {
key_id: key_id.to_string(), key_id: key_id.to_string(),
secret_key: secret_key.to_string(), secret_key: secret_key.to_string(),
name: crdt::Lww::new(name.to_string()), name: crdt::Lww::new(name.to_string()),
deleted: crdt::Bool::new(false), state: crdt::Deletable::present(KeyParams::new()),
authorized_buckets: crdt::LwwMap::new(),
} }
} }
@ -54,39 +82,35 @@ impl Key {
key_id, key_id,
secret_key: "".into(), secret_key: "".into(),
name: crdt::Lww::new("".to_string()), name: crdt::Lww::new("".to_string()),
deleted: crdt::Bool::new(true), state: crdt::Deletable::Deleted,
authorized_buckets: crdt::LwwMap::new(),
} }
} }
/// Check if `Key` is allowed to read in bucket /// Check if `Key` is allowed to read in bucket
pub fn allow_read(&self, bucket: &str) -> bool { pub fn allow_read(&self, bucket: &Uuid) -> bool {
self.authorized_buckets if let crdt::Deletable::Present(params) = &self.state {
.get(&bucket.to_string()) params
.authorized_buckets
.get(bucket)
.map(|x| x.allow_read) .map(|x| x.allow_read)
.unwrap_or(false) .unwrap_or(false)
} else {
false
}
} }
/// Check if `Key` is allowed to write in bucket /// Check if `Key` is allowed to write in bucket
pub fn allow_write(&self, bucket: &str) -> bool { pub fn allow_write(&self, bucket: &Uuid) -> bool {
self.authorized_buckets if let crdt::Deletable::Present(params) = &self.state {
.get(&bucket.to_string()) params
.authorized_buckets
.get(bucket)
.map(|x| x.allow_write) .map(|x| x.allow_write)
.unwrap_or(false) .unwrap_or(false)
} else {
false
}
} }
}
/// Permission given to a key in a bucket
#[derive(PartialOrd, Ord, PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct PermissionSet {
/// The key can be used to read the bucket
pub allow_read: bool,
/// The key can be used to write in the bucket
pub allow_write: bool,
}
impl AutoCrdt for PermissionSet {
const WARN_IF_DIFFERENT: bool = true;
} }
impl Entry<EmptyKey, String> for Key { impl Entry<EmptyKey, String> for Key {
@ -101,13 +125,7 @@ impl Entry<EmptyKey, String> for Key {
impl Crdt for Key { impl Crdt for Key {
fn merge(&mut self, other: &Self) { fn merge(&mut self, other: &Self) {
self.name.merge(&other.name); self.name.merge(&other.name);
self.deleted.merge(&other.deleted); self.state.merge(&other.state);
if self.deleted.get() {
self.authorized_buckets.clear();
} else {
self.authorized_buckets.merge(&other.authorized_buckets);
}
} }
} }
@ -129,7 +147,7 @@ impl TableSchema for KeyTable {
fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool { fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool {
match filter { match filter {
KeyFilter::Deleted(df) => df.apply(entry.deleted.get()), KeyFilter::Deleted(df) => df.apply(entry.state.is_deleted()),
KeyFilter::Matches(pat) => { KeyFilter::Matches(pat) => {
let pat = pat.to_lowercase(); let pat = pat.to_lowercase();
entry.key_id.to_lowercase().starts_with(&pat) entry.key_id.to_lowercase().starts_with(&pat)

View file

@ -3,8 +3,11 @@ extern crate log;
pub mod block; pub mod block;
pub mod block_ref_table; pub mod block_ref_table;
pub mod bucket_alias_table;
pub mod bucket_helper;
pub mod bucket_table; pub mod bucket_table;
pub mod garage; pub mod garage;
pub mod key_table; pub mod key_table;
pub mod object_table; pub mod object_table;
pub mod permission;
pub mod version_table; pub mod version_table;

View file

@ -15,7 +15,7 @@ use crate::version_table::*;
#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)] #[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
pub struct Object { pub struct Object {
/// The bucket in which the object is stored, used as partition key /// The bucket in which the object is stored, used as partition key
pub bucket: String, pub bucket_id: Uuid,
/// The key at which the object is stored in its bucket, used as sorting key /// The key at which the object is stored in its bucket, used as sorting key
pub key: String, pub key: String,
@ -26,9 +26,9 @@ pub struct Object {
impl Object { impl Object {
/// Initialize an Object struct from parts /// Initialize an Object struct from parts
pub fn new(bucket: String, key: String, versions: Vec<ObjectVersion>) -> Self { pub fn new(bucket_id: Uuid, key: String, versions: Vec<ObjectVersion>) -> Self {
let mut ret = Self { let mut ret = Self {
bucket, bucket_id,
key, key,
versions: vec![], versions: vec![],
}; };
@ -164,9 +164,9 @@ impl ObjectVersion {
} }
} }
impl Entry<String, String> for Object { impl Entry<Uuid, String> for Object {
fn partition_key(&self) -> &String { fn partition_key(&self) -> &Uuid {
&self.bucket &self.bucket_id
} }
fn sort_key(&self) -> &String { fn sort_key(&self) -> &String {
&self.key &self.key
@ -219,7 +219,7 @@ pub struct ObjectTable {
impl TableSchema for ObjectTable { impl TableSchema for ObjectTable {
const TABLE_NAME: &'static str = "object"; const TABLE_NAME: &'static str = "object";
type P = String; type P = Uuid;
type S = String; type S = String;
type E = Object; type E = Object;
type Filter = DeletedFilter; type Filter = DeletedFilter;
@ -242,7 +242,7 @@ impl TableSchema for ObjectTable {
}; };
if newly_deleted { if newly_deleted {
let deleted_version = let deleted_version =
Version::new(v.uuid, old_v.bucket.clone(), old_v.key.clone(), true); Version::new(v.uuid, old_v.bucket_id, old_v.key.clone(), true);
version_table.insert(&deleted_version).await?; version_table.insert(&deleted_version).await?;
} }
} }

37
src/model/permission.rs Normal file
View file

@ -0,0 +1,37 @@
use std::cmp::Ordering;
use serde::{Deserialize, Serialize};
use garage_util::crdt::*;
/// Permission given to a key in a bucket
#[derive(PartialOrd, Ord, PartialEq, Eq, Clone, Copy, Debug, Serialize, Deserialize)]
pub struct BucketKeyPerm {
    /// Timestamp at which the permission was given
    /// (drives last-writer-wins conflict resolution in `Crdt::merge`)
    pub timestamp: u64,
    /// The key can be used to read the bucket
    pub allow_read: bool,
    /// The key can be used to write in the bucket
    pub allow_write: bool,
}
impl Crdt for BucketKeyPerm {
    /// Last-writer-wins merge on `timestamp`; on a timestamp tie with
    /// conflicting contents, keep only the permissions granted by both sides.
    fn merge(&mut self, other: &Self) {
        match self.timestamp.cmp(&other.timestamp) {
            // The other side is strictly newer: it wins outright
            Ordering::Less => *self = *other,
            Ordering::Equal => {
                if other != self {
                    warn!("Different permission sets with same timestamp: {:?} and {:?}, merging to most restricted permission set.", self, other);
                    // Intersection of the two permission sets
                    self.allow_read &= other.allow_read;
                    self.allow_write &= other.allow_write;
                }
            }
            // We are newer: keep our value unchanged
            Ordering::Greater => (),
        }
    }
}

View file

@ -29,19 +29,19 @@ pub struct Version {
// Back link to bucket+key so that we can figure if // Back link to bucket+key so that we can figure if
// this was deleted later on // this was deleted later on
/// Bucket in which the related object is stored /// Bucket in which the related object is stored
pub bucket: String, pub bucket_id: Uuid,
/// Key in which the related object is stored /// Key in which the related object is stored
pub key: String, pub key: String,
} }
impl Version { impl Version {
pub fn new(uuid: Uuid, bucket: String, key: String, deleted: bool) -> Self { pub fn new(uuid: Uuid, bucket_id: Uuid, key: String, deleted: bool) -> Self {
Self { Self {
uuid, uuid,
deleted: deleted.into(), deleted: deleted.into(),
blocks: crdt::Map::new(), blocks: crdt::Map::new(),
parts_etags: crdt::Map::new(), parts_etags: crdt::Map::new(),
bucket, bucket_id,
key, key,
} }
} }
@ -82,8 +82,8 @@ impl AutoCrdt for VersionBlock {
const WARN_IF_DIFFERENT: bool = true; const WARN_IF_DIFFERENT: bool = true;
} }
impl Entry<Hash, EmptyKey> for Version { impl Entry<Uuid, EmptyKey> for Version {
fn partition_key(&self) -> &Hash { fn partition_key(&self) -> &Uuid {
&self.uuid &self.uuid
} }
fn sort_key(&self) -> &EmptyKey { fn sort_key(&self) -> &EmptyKey {
@ -116,7 +116,7 @@ pub struct VersionTable {
impl TableSchema for VersionTable { impl TableSchema for VersionTable {
const TABLE_NAME: &'static str = "version"; const TABLE_NAME: &'static str = "version";
type P = Hash; type P = Uuid;
type S = EmptyKey; type S = EmptyKey;
type E = Version; type E = Version;
type Filter = DeletedFilter; type Filter = DeletedFilter;

View file

@ -1,6 +1,6 @@
[package] [package]
name = "garage_rpc" name = "garage_rpc"
version = "0.5.0" version = "0.6.0"
authors = ["Alex Auvolat <alex@adnab.me>"] authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018" edition = "2018"
license = "AGPL-3.0" license = "AGPL-3.0"
@ -14,7 +14,7 @@ path = "lib.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies] [dependencies]
garage_util = { version = "0.5.0", path = "../util" } garage_util = { version = "0.6.0", path = "../util" }
arc-swap = "1.0" arc-swap = "1.0"
bytes = "1.0" bytes = "1.0"

View file

@ -1,6 +1,6 @@
[package] [package]
name = "garage_table" name = "garage_table"
version = "0.5.0" version = "0.6.0"
authors = ["Alex Auvolat <alex@adnab.me>"] authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018" edition = "2018"
license = "AGPL-3.0" license = "AGPL-3.0"
@ -14,8 +14,8 @@ path = "lib.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies] [dependencies]
garage_rpc = { version = "0.5.0", path = "../rpc" } garage_rpc = { version = "0.6.0", path = "../rpc" }
garage_util = { version = "0.5.0", path = "../util" } garage_util = { version = "0.6.0", path = "../util" }
async-trait = "0.1.7" async-trait = "0.1.7"
bytes = "1.0" bytes = "1.0"

View file

@ -16,7 +16,7 @@ impl PartitionKey for String {
} }
} }
impl PartitionKey for Hash { impl PartitionKey for FixedBytes32 {
fn hash(&self) -> Hash { fn hash(&self) -> Hash {
*self *self
} }
@ -34,7 +34,7 @@ impl SortKey for String {
} }
} }
impl SortKey for Hash { impl SortKey for FixedBytes32 {
fn sort_key(&self) -> &[u8] { fn sort_key(&self) -> &[u8] {
self.as_slice() self.as_slice()
} }

View file

@ -1,6 +1,6 @@
[package] [package]
name = "garage_util" name = "garage_util"
version = "0.5.0" version = "0.6.0"
authors = ["Alex Auvolat <alex@adnab.me>"] authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018" edition = "2018"
license = "AGPL-3.0" license = "AGPL-3.0"

View file

@ -0,0 +1,72 @@
use serde::{Deserialize, Serialize};
use crate::crdt::crdt::*;
/// Deletable object (once deleted, cannot go back)
///
/// As a CRDT (see the `Crdt` impl below): `Deleted` is a permanent
/// tombstone that wins over any `Present` value, and two `Present`
/// values merge their inner CRDTs.
#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq)]
pub enum Deletable<T> {
    /// The object exists and holds a value
    Present(T),
    /// The object has been deleted
    Deleted,
}
impl<T: Crdt> Deletable<T> {
/// Create a new deletable object that isn't deleted
pub fn present(v: T) -> Self {
Self::Present(v)
}
/// Create a new deletable object that is deleted
pub fn delete() -> Self {
Self::Deleted
}
/// As option
pub fn as_option(&self) -> Option<&T> {
match self {
Self::Present(v) => Some(v),
Self::Deleted => None,
}
}
/// As option, mutable
pub fn as_option_mut(&mut self) -> Option<&mut T> {
match self {
Self::Present(v) => Some(v),
Self::Deleted => None,
}
}
/// Into option
pub fn into_option(self) -> Option<T> {
match self {
Self::Present(v) => Some(v),
Self::Deleted => None,
}
}
/// Is object deleted?
pub fn is_deleted(&self) -> bool {
matches!(self, Self::Deleted)
}
}
impl<T> From<Option<T>> for Deletable<T> {
    /// `Some(v)` becomes `Present(v)`; `None` becomes `Deleted`.
    fn from(v: Option<T>) -> Self {
        match v {
            Some(x) => Self::Present(x),
            None => Self::Deleted,
        }
    }
}
impl<T> From<Deletable<T>> for Option<T> {
    /// `Present(v)` becomes `Some(v)`; `Deleted` becomes `None`.
    fn from(v: Deletable<T>) -> Option<T> {
        if let Deletable::Present(x) = v {
            Some(x)
        } else {
            None
        }
    }
}
impl<T: Crdt> Crdt for Deletable<T> {
    // Deletion is permanent: once self is Deleted, nothing can bring it
    // back (the outer `if let` never matches). While present, a Deleted
    // on the other side wins; two Present values merge their inner CRDTs.
    fn merge(&mut self, other: &Self) {
        if let Deletable::Present(v) = self {
            match other {
                Deletable::Deleted => *self = Deletable::Deleted,
                Deletable::Present(v2) => v.merge(v2),
            }
        }
    }
}

View file

@ -82,6 +82,11 @@ where
&self.v &self.v
} }
/// Take the value inside the CRDT (discards the timesamp)
pub fn take(self) -> T {
self.v
}
/// Get a mutable reference to the CRDT's value /// Get a mutable reference to the CRDT's value
/// ///
/// This is usefull to mutate the inside value without changing the LWW timestamp. /// This is usefull to mutate the inside value without changing the LWW timestamp.

View file

@ -30,8 +30,8 @@ pub struct LwwMap<K, V> {
impl<K, V> LwwMap<K, V> impl<K, V> LwwMap<K, V>
where where
K: Ord, K: Clone + Ord,
V: Crdt, V: Clone + Crdt,
{ {
/// Create a new empty map CRDT /// Create a new empty map CRDT
pub fn new() -> Self { pub fn new() -> Self {
@ -73,6 +73,10 @@ where
}; };
Self { vals: new_vals } Self { vals: new_vals }
} }
pub fn update_in_place(&mut self, k: K, new_v: V) {
self.merge(&self.update_mutator(k, new_v));
}
/// Takes all of the values of the map and returns them. The current map is reset to the /// Takes all of the values of the map and returns them. The current map is reset to the
/// empty map. This is very usefull to produce in-place a new map that contains only a delta /// empty map. This is very usefull to produce in-place a new map that contains only a delta
/// that modifies a certain value: /// that modifies a certain value:
@ -158,8 +162,8 @@ where
impl<K, V> Default for LwwMap<K, V> impl<K, V> Default for LwwMap<K, V>
where where
K: Ord, K: Clone + Ord,
V: Crdt, V: Clone + Crdt,
{ {
fn default() -> Self { fn default() -> Self {
Self::new() Self::new()

View file

@ -12,12 +12,14 @@
mod bool; mod bool;
#[allow(clippy::module_inception)] #[allow(clippy::module_inception)]
mod crdt; mod crdt;
mod deletable;
mod lww; mod lww;
mod lww_map; mod lww_map;
mod map; mod map;
pub use self::bool::*; pub use self::bool::*;
pub use crdt::*; pub use crdt::*;
pub use deletable::*;
pub use lww::*; pub use lww::*;
pub use lww_map::*; pub use lww_map::*;
pub use map::*; pub use map::*;

View file

@ -119,6 +119,35 @@ where
} }
} }
/// Extension trait to convert a `Result<T, E>` or `Option<T>` into a
/// `Result<T, Error>`, attaching a human-readable message
/// (as an `Error::Message`) on the failure path.
// NOTE(review): previous doc said "Bad Request error code", but the
// impls below produce Error::Message, not a bad-request error.
pub trait OkOrMessage {
    /// Resulting type of the conversion (`Result<T, Error>`)
    type S2;
    /// Return the success value, or an `Error::Message` built from `message`
    fn ok_or_message<M: Into<String>>(self, message: M) -> Self::S2;
}
impl<T, E> OkOrMessage for Result<T, E>
where
E: std::fmt::Display,
{
type S2 = Result<T, Error>;
fn ok_or_message<M: Into<String>>(self, message: M) -> Result<T, Error> {
match self {
Ok(x) => Ok(x),
Err(e) => Err(Error::Message(format!("{}: {}", message.into(), e))),
}
}
}
impl<T> OkOrMessage for Option<T> {
type S2 = Result<T, Error>;
fn ok_or_message<M: Into<String>>(self, message: M) -> Result<T, Error> {
match self {
Some(x) => Ok(x),
None => Err(Error::Message(message.into())),
}
}
}
// Custom serialization for our error type, for use in RPC. // Custom serialization for our error type, for use in RPC.
// Errors are serialized as a string of their Display representation. // Errors are serialized as a string of their Display representation.
// Upon deserialization, they all become a RemoteError with the // Upon deserialization, they all become a RemoteError with the

View file

@ -10,6 +10,11 @@ pub fn now_msec() -> u64 {
.as_millis() as u64 .as_millis() as u64
} }
/// Increment logical clock: return a timestamp strictly greater than
/// `prev`, jumping forward to the current wall-clock time (`now_msec`)
/// when it is already ahead.
pub fn increment_logical_clock(prev: u64) -> u64 {
    now_msec().max(prev + 1)
}
/// Convert a timestamp represented as milliseconds since UNIX Epoch to /// Convert a timestamp represented as milliseconds since UNIX Epoch to
/// its RFC3339 representation, such as "2021-01-01T12:30:00Z" /// its RFC3339 representation, such as "2021-01-01T12:30:00Z"
pub fn msec_to_rfc3339(msecs: u64) -> String { pub fn msec_to_rfc3339(msecs: u64) -> String {

View file

@ -1,6 +1,6 @@
[package] [package]
name = "garage_web" name = "garage_web"
version = "0.5.0" version = "0.6.0"
authors = ["Alex Auvolat <alex@adnab.me>", "Quentin Dufour <quentin@dufour.io>"] authors = ["Alex Auvolat <alex@adnab.me>", "Quentin Dufour <quentin@dufour.io>"]
edition = "2018" edition = "2018"
license = "AGPL-3.0" license = "AGPL-3.0"
@ -14,10 +14,10 @@ path = "lib.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies] [dependencies]
garage_api = { version = "0.5.0", path = "../api" } garage_api = { version = "0.6.0", path = "../api" }
garage_model = { version = "0.5.0", path = "../model" } garage_model = { version = "0.6.0", path = "../model" }
garage_util = { version = "0.5.0", path = "../util" } garage_util = { version = "0.6.0", path = "../util" }
garage_table = { version = "0.5.0", path = "../table" } garage_table = { version = "0.6.0", path = "../table" }
err-derive = "0.3" err-derive = "0.3"
log = "0.4" log = "0.4"

View file

@ -12,7 +12,6 @@ use hyper::{
use crate::error::*; use crate::error::*;
use garage_api::helpers::{authority_to_host, host_to_bucket}; use garage_api::helpers::{authority_to_host, host_to_bucket};
use garage_api::s3_get::{handle_get, handle_head}; use garage_api::s3_get::{handle_get, handle_head};
use garage_model::bucket_table::*;
use garage_model::garage::Garage; use garage_model::garage::Garage;
use garage_table::*; use garage_table::*;
use garage_util::error::Error as GarageError; use garage_util::error::Error as GarageError;
@ -77,31 +76,39 @@ async fn serve_file(garage: Arc<Garage>, req: Request<Body>) -> Result<Response<
// Get bucket // Get bucket
let host = authority_to_host(authority)?; let host = authority_to_host(authority)?;
let root = &garage.config.s3_web.root_domain; let root = &garage.config.s3_web.root_domain;
let bucket = host_to_bucket(&host, root).unwrap_or(&host);
// Check bucket is exposed as a website let bucket_name = host_to_bucket(&host, root).unwrap_or(&host);
let bucket_desc = garage let bucket_id = garage
.bucket_alias_table
.get(&EmptyKey, &bucket_name.to_string())
.await?
.map(|x| x.state.take().into_option())
.flatten()
.filter(|param| param.website_access)
.map(|param| param.bucket_id)
.ok_or(Error::NotFound)?;
// Sanity check: check bucket isn't deleted
garage
.bucket_table .bucket_table
.get(&EmptyKey, &bucket.to_string()) .get(&bucket_id, &EmptyKey)
.await? .await?
.filter(|b| !b.is_deleted()) .filter(|b| !b.is_deleted())
.ok_or(Error::NotFound)?; .ok_or(Error::NotFound)?;
match bucket_desc.state.get() {
BucketState::Present(params) if *params.website.get() => Ok(()),
_ => Err(Error::NotFound),
}?;
// Get path // Get path
let path = req.uri().path().to_string(); let path = req.uri().path().to_string();
let index = &garage.config.s3_web.index; let index = &garage.config.s3_web.index;
let key = path_to_key(&path, index)?; let key = path_to_key(&path, index)?;
info!("Selected bucket: \"{}\", selected key: \"{}\"", bucket, key); info!(
"Selected bucket: \"{}\" {:?}, selected key: \"{}\"",
bucket_name, bucket_id, key
);
let res = match *req.method() { let res = match *req.method() {
Method::HEAD => handle_head(garage, &req, bucket, &key).await?, Method::HEAD => handle_head(garage, &req, bucket_id, &key).await?,
Method::GET => handle_get(garage, &req, bucket, &key).await?, Method::GET => handle_get(garage, &req, bucket_id, &key).await?,
_ => return Err(Error::BadRequest("HTTP method not supported".to_string())), _ => return Err(Error::BadRequest("HTTP method not supported".to_string())),
}; };