From 8f6026de5ecd44cbe0fc0bcd47638a1ece860439 Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Tue, 14 Dec 2021 12:34:01 +0100 Subject: [PATCH 01/19] Make table name a const in trait --- src/garage/admin.rs | 2 +- src/model/block_ref_table.rs | 2 ++ src/model/bucket_table.rs | 2 ++ src/model/garage.rs | 19 ++-------------- src/model/key_table.rs | 2 ++ src/model/object_table.rs | 2 ++ src/model/version_table.rs | 2 ++ src/table/data.rs | 20 ++++++----------- src/table/gc.rs | 13 +++++------ src/table/merkle.rs | 15 +++++++------ src/table/schema.rs | 7 ++++++ src/table/sync.rs | 42 +++++++++++++++++++++--------------- src/table/table.rs | 12 +++-------- 13 files changed, 70 insertions(+), 70 deletions(-) diff --git a/src/garage/admin.rs b/src/garage/admin.rs index f0444988..c7472670 100644 --- a/src/garage/admin.rs +++ b/src/garage/admin.rs @@ -466,7 +466,7 @@ impl AdminRpcHandler { F: TableSchema + 'static, R: TableReplication + 'static, { - writeln!(to, "\nTable stats for {}", t.data.name).unwrap(); + writeln!(to, "\nTable stats for {}", F::TABLE_NAME).unwrap(); if opt.detailed { writeln!(to, " number of items: {}", t.data.store.len()).unwrap(); writeln!( diff --git a/src/model/block_ref_table.rs b/src/model/block_ref_table.rs index f8f529c4..7dc973d5 100644 --- a/src/model/block_ref_table.rs +++ b/src/model/block_ref_table.rs @@ -44,6 +44,8 @@ pub struct BlockRefTable { } impl TableSchema for BlockRefTable { + const TABLE_NAME: &'static str = "block_ref"; + type P = Hash; type S = Uuid; type E = BlockRef; diff --git a/src/model/bucket_table.rs b/src/model/bucket_table.rs index 168ed713..2cb206ce 100644 --- a/src/model/bucket_table.rs +++ b/src/model/bucket_table.rs @@ -114,6 +114,8 @@ impl Crdt for Bucket { pub struct BucketTable; impl TableSchema for BucketTable { + const TABLE_NAME: &'static str = "bucket"; + type P = EmptyKey; type S = String; type E = Bucket; diff --git a/src/model/garage.rs b/src/model/garage.rs index d12c781f..a874cca8 100644 --- 
a/src/model/garage.rs +++ b/src/model/garage.rs @@ -93,7 +93,6 @@ impl Garage { meta_rep_param.clone(), system.clone(), &db, - "block_ref".to_string(), ); info!("Initialize version_table..."); @@ -105,7 +104,6 @@ impl Garage { meta_rep_param.clone(), system.clone(), &db, - "version".to_string(), ); info!("Initialize object_table..."); @@ -117,26 +115,13 @@ impl Garage { meta_rep_param, system.clone(), &db, - "object".to_string(), ); info!("Initialize bucket_table..."); - let bucket_table = Table::new( - BucketTable, - control_rep_param.clone(), - system.clone(), - &db, - "bucket".to_string(), - ); + let bucket_table = Table::new(BucketTable, control_rep_param.clone(), system.clone(), &db); info!("Initialize key_table_table..."); - let key_table = Table::new( - KeyTable, - control_rep_param, - system.clone(), - &db, - "key".to_string(), - ); + let key_table = Table::new(KeyTable, control_rep_param, system.clone(), &db); info!("Initialize Garage..."); let garage = Arc::new(Self { diff --git a/src/model/key_table.rs b/src/model/key_table.rs index a6186aa9..225f51c7 100644 --- a/src/model/key_table.rs +++ b/src/model/key_table.rs @@ -120,6 +120,8 @@ pub enum KeyFilter { } impl TableSchema for KeyTable { + const TABLE_NAME: &'static str = "key"; + type P = EmptyKey; type S = String; type E = Key; diff --git a/src/model/object_table.rs b/src/model/object_table.rs index d743a2b6..9eec47ff 100644 --- a/src/model/object_table.rs +++ b/src/model/object_table.rs @@ -217,6 +217,8 @@ pub struct ObjectTable { } impl TableSchema for ObjectTable { + const TABLE_NAME: &'static str = "object"; + type P = String; type S = String; type E = Object; diff --git a/src/model/version_table.rs b/src/model/version_table.rs index bff7d4bb..18ec8e1d 100644 --- a/src/model/version_table.rs +++ b/src/model/version_table.rs @@ -114,6 +114,8 @@ pub struct VersionTable { } impl TableSchema for VersionTable { + const TABLE_NAME: &'static str = "version"; + type P = Hash; type S = EmptyKey; type E = 
Version; diff --git a/src/table/data.rs b/src/table/data.rs index fb0b6d02..7af5f552 100644 --- a/src/table/data.rs +++ b/src/table/data.rs @@ -19,7 +19,6 @@ use crate::schema::*; pub struct TableData { system: Arc, - pub name: String, pub(crate) instance: F, pub(crate) replication: R, @@ -36,31 +35,24 @@ where F: TableSchema, R: TableReplication, { - pub fn new( - system: Arc, - name: String, - instance: F, - replication: R, - db: &sled::Db, - ) -> Arc { + pub fn new(system: Arc, instance: F, replication: R, db: &sled::Db) -> Arc { let store = db - .open_tree(&format!("{}:table", name)) + .open_tree(&format!("{}:table", F::TABLE_NAME)) .expect("Unable to open DB tree"); let merkle_tree = db - .open_tree(&format!("{}:merkle_tree", name)) + .open_tree(&format!("{}:merkle_tree", F::TABLE_NAME)) .expect("Unable to open DB Merkle tree tree"); let merkle_todo = db - .open_tree(&format!("{}:merkle_todo", name)) + .open_tree(&format!("{}:merkle_todo", F::TABLE_NAME)) .expect("Unable to open DB Merkle TODO tree"); let gc_todo = db - .open_tree(&format!("{}:gc_todo_v2", name)) + .open_tree(&format!("{}:gc_todo_v2", F::TABLE_NAME)) .expect("Unable to open DB tree"); Arc::new(Self { system, - name, instance, replication, store, @@ -245,7 +237,7 @@ where Err(e) => match F::try_migrate(bytes) { Some(x) => Ok(x), None => { - warn!("Unable to decode entry of {}: {}", self.name, e); + warn!("Unable to decode entry of {}: {}", F::TABLE_NAME, e); for line in hexdump::hexdump_iter(bytes) { debug!("{}", line); } diff --git a/src/table/gc.rs b/src/table/gc.rs index 98d7c95d..5cb8cb9b 100644 --- a/src/table/gc.rs +++ b/src/table/gc.rs @@ -57,11 +57,11 @@ where pub(crate) fn launch(system: Arc, data: Arc>) -> Arc { let endpoint = system .netapp - .endpoint(format!("garage_table/gc.rs/Rpc:{}", data.name)); + .endpoint(format!("garage_table/gc.rs/Rpc:{}", F::TABLE_NAME)); let gc = Arc::new(Self { system: system.clone(), - data: data.clone(), + data, endpoint, }); @@ -69,7 +69,7 @@ where 
let gc1 = gc.clone(); system.background.spawn_worker( - format!("GC loop for {}", data.name), + format!("GC loop for {}", F::TABLE_NAME), move |must_exit: watch::Receiver| gc1.gc_loop(must_exit), ); @@ -90,7 +90,7 @@ where } } Err(e) => { - warn!("({}) Error doing GC: {}", self.data.name, e); + warn!("({}) Error doing GC: {}", F::TABLE_NAME, e); } } } @@ -160,7 +160,7 @@ where return Ok(Some(Duration::from_secs(60))); } - debug!("({}) GC: doing {} items", self.data.name, entries.len()); + debug!("({}) GC: doing {} items", F::TABLE_NAME, entries.len()); // Split entries to GC by the set of nodes on which they are stored. // Here we call them partitions but they are not exactly @@ -262,7 +262,8 @@ where info!( "({}) GC: {} items successfully pushed, will try to delete.", - self.data.name, n_items + F::TABLE_NAME, + n_items ); // Step 2: delete tombstones everywhere. diff --git a/src/table/merkle.rs b/src/table/merkle.rs index 56f307d3..5ec6ab61 100644 --- a/src/table/merkle.rs +++ b/src/table/merkle.rs @@ -82,7 +82,7 @@ where let ret2 = ret.clone(); background.spawn_worker( - format!("Merkle tree updater for {}", ret.data.name), + format!("Merkle tree updater for {}", F::TABLE_NAME), |must_exit: watch::Receiver| ret2.updater_loop(must_exit), ); @@ -97,14 +97,16 @@ where if let Err(e) = self.update_item(&key[..], &valhash[..]) { warn!( "({}) Error while updating Merkle tree item: {}", - self.data.name, e + F::TABLE_NAME, + e ); } } Err(e) => { warn!( "({}) Error while iterating on Merkle todo tree: {}", - self.data.name, e + F::TABLE_NAME, + e ); tokio::time::sleep(Duration::from_secs(10)).await; } @@ -147,7 +149,8 @@ where if !deleted { debug!( "({}) Item not deleted from Merkle todo because it changed: {:?}", - self.data.name, k + F::TABLE_NAME, + k ); } Ok(()) @@ -183,7 +186,7 @@ where // should not happen warn!( "({}) Replacing intermediate node with empty node, should not happen.", - self.data.name + F::TABLE_NAME ); Some(MerkleNode::Empty) } else if 
children.len() == 1 { @@ -195,7 +198,7 @@ where MerkleNode::Empty => { warn!( "({}) Single subnode in tree is empty Merkle node", - self.data.name + F::TABLE_NAME ); Some(MerkleNode::Empty) } diff --git a/src/table/schema.rs b/src/table/schema.rs index 4d6050e8..fa51fa84 100644 --- a/src/table/schema.rs +++ b/src/table/schema.rs @@ -57,12 +57,19 @@ pub trait Entry: /// Trait for the schema used in a table pub trait TableSchema: Send + Sync { + /// The name of the table in the database + const TABLE_NAME: &'static str; + /// The partition key used in that table type P: PartitionKey + Clone + PartialEq + Serialize + for<'de> Deserialize<'de> + Send + Sync; /// The sort key used int that table type S: SortKey + Clone + Serialize + for<'de> Deserialize<'de> + Send + Sync; + /// They type for an entry in that table type E: Entry; + + /// The type for a filter that can be applied to select entries + /// (e.g. filter out deleted entries) type Filter: Clone + Serialize + for<'de> Deserialize<'de> + Send + Sync; // Action to take if not able to decode current version: diff --git a/src/table/sync.rs b/src/table/sync.rs index c5795f65..df9fb4d0 100644 --- a/src/table/sync.rs +++ b/src/table/sync.rs @@ -77,13 +77,13 @@ where ) -> Arc { let endpoint = system .netapp - .endpoint(format!("garage_table/sync.rs/Rpc:{}", data.name)); + .endpoint(format!("garage_table/sync.rs/Rpc:{}", F::TABLE_NAME)); let todo = SyncTodo { todo: vec![] }; let syncer = Arc::new(Self { system: system.clone(), - data: data.clone(), + data, merkle, todo: Mutex::new(todo), endpoint, @@ -95,13 +95,13 @@ where let s1 = syncer.clone(); system.background.spawn_worker( - format!("table sync watcher for {}", data.name), + format!("table sync watcher for {}", F::TABLE_NAME), move |must_exit: watch::Receiver| s1.watcher_task(must_exit, busy_rx), ); let s2 = syncer.clone(); system.background.spawn_worker( - format!("table syncer for {}", data.name), + format!("table syncer for {}", F::TABLE_NAME), move |must_exit: 
watch::Receiver| s2.syncer_task(must_exit, busy_tx), ); @@ -128,7 +128,7 @@ where _ = ring_recv.changed().fuse() => { let new_ring = ring_recv.borrow(); if !Arc::ptr_eq(&new_ring, &prev_ring) { - debug!("({}) Ring changed, adding full sync to syncer todo list", self.data.name); + debug!("({}) Ring changed, adding full sync to syncer todo list", F::TABLE_NAME); self.add_full_sync(); prev_ring = new_ring.clone(); } @@ -146,7 +146,7 @@ where _ = tokio::time::sleep(Duration::from_secs(1)).fuse() => { if nothing_to_do_since.map(|t| Instant::now() - t >= ANTI_ENTROPY_INTERVAL).unwrap_or(false) { nothing_to_do_since = None; - debug!("({}) Interval passed, adding full sync to syncer todo list", self.data.name); + debug!("({}) Interval passed, adding full sync to syncer todo list", F::TABLE_NAME); self.add_full_sync(); } } @@ -177,7 +177,9 @@ where if let Err(e) = res { warn!( "({}) Error while syncing {:?}: {}", - self.data.name, partition, e + F::TABLE_NAME, + partition, + e ); } } else { @@ -205,7 +207,9 @@ where debug!( "({}) Syncing {:?} with {:?}...", - self.data.name, partition, nodes + F::TABLE_NAME, + partition, + nodes ); let mut sync_futures = nodes .iter() @@ -219,7 +223,7 @@ where while let Some(r) = sync_futures.next().await { if let Err(e) = r { n_errors += 1; - warn!("({}) Sync error: {}", self.data.name, e); + warn!("({}) Sync error: {}", F::TABLE_NAME, e); } } if n_errors > self.data.replication.max_write_errors() { @@ -272,7 +276,7 @@ where if nodes.contains(&self.system.id) { warn!( "({}) Interrupting offload as partitions seem to have changed", - self.data.name + F::TABLE_NAME ); break; } @@ -286,7 +290,7 @@ where counter += 1; info!( "({}) Offloading {} items from {:?}..{:?} ({})", - self.data.name, + F::TABLE_NAME, items.len(), begin, end, @@ -329,7 +333,7 @@ where } if not_removed > 0 { - debug!("({}) {} items not removed during offload because they changed in between (trying again...)", self.data.name, not_removed); + debug!("({}) {} items not 
removed during offload because they changed in between (trying again...)", F::TABLE_NAME, not_removed); } Ok(()) @@ -360,7 +364,9 @@ where if root_ck.is_empty() { debug!( "({}) Sync {:?} with {:?}: partition is empty.", - self.data.name, partition, who + F::TABLE_NAME, + partition, + who ); return Ok(()); } @@ -384,7 +390,9 @@ where SyncRpc::RootCkDifferent(false) => { debug!( "({}) Sync {:?} with {:?}: no difference", - self.data.name, partition, who + F::TABLE_NAME, + partition, + who ); return Ok(()); } @@ -413,11 +421,11 @@ where // Just send that item directly if let Some(val) = self.data.store.get(&ik[..])? { if blake2sum(&val[..]) != ivhash { - warn!("({}) Hashes differ between stored value and Merkle tree, key: {:?} (if your server is very busy, don't worry, this happens when the Merkle tree can't be updated fast enough)", self.data.name, ik); + warn!("({}) Hashes differ between stored value and Merkle tree, key: {:?} (if your server is very busy, don't worry, this happens when the Merkle tree can't be updated fast enough)", F::TABLE_NAME, ik); } todo_items.push(val.to_vec()); } else { - warn!("({}) Item from Merkle tree not found in store: {:?} (if your server is very busy, don't worry, this happens when the Merkle tree can't be updated fast enough)", self.data.name, ik); + warn!("({}) Item from Merkle tree not found in store: {:?} (if your server is very busy, don't worry, this happens when the Merkle tree can't be updated fast enough)", F::TABLE_NAME, ik); } } MerkleNode::Intermediate(l) => { @@ -482,7 +490,7 @@ where async fn send_items(&self, who: Uuid, item_value_list: Vec>) -> Result<(), Error> { info!( "({}) Sending {} items to {:?}", - self.data.name, + F::TABLE_NAME, item_value_list.len(), who ); diff --git a/src/table/table.rs b/src/table/table.rs index e1357471..396888c1 100644 --- a/src/table/table.rs +++ b/src/table/table.rs @@ -55,18 +55,12 @@ where { // =============== PUBLIC INTERFACE FUNCTIONS (new, insert, get, etc) =============== - pub 
fn new( - instance: F, - replication: R, - system: Arc, - db: &sled::Db, - name: String, - ) -> Arc { + pub fn new(instance: F, replication: R, system: Arc, db: &sled::Db) -> Arc { let endpoint = system .netapp - .endpoint(format!("garage_table/table.rs/Rpc:{}", name)); + .endpoint(format!("garage_table/table.rs/Rpc:{}", F::TABLE_NAME)); - let data = TableData::new(system.clone(), name, instance, replication, db); + let data = TableData::new(system.clone(), instance, replication, db); let merkle_updater = MerkleUpdater::launch(&system.background, data.clone()); -- 2.43.4 From 5b1117e582db16cc5aa50840a685875cbd5501f4 Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Tue, 14 Dec 2021 13:55:11 +0100 Subject: [PATCH 02/19] New model for buckets --- Cargo.lock | 138 ++++++-- src/api/Cargo.toml | 8 +- src/api/api_server.rs | 109 ++++-- src/api/s3_bucket.rs | 62 +++- src/api/s3_copy.rs | 20 +- src/api/s3_delete.rs | 14 +- src/api/s3_get.rs | 9 +- src/api/s3_list.rs | 8 +- src/api/s3_put.rs | 44 +-- src/api/s3_website.rs | 27 +- src/api/signature.rs | 2 +- src/garage/Cargo.toml | 14 +- src/garage/admin.rs | 567 ++++++++++++++++++++------------ src/garage/cli/cmd.rs | 7 +- src/garage/cli/util.rs | 53 ++- src/garage/repair.rs | 4 +- src/model/Cargo.toml | 9 +- src/model/bucket_alias_table.rs | 68 ++++ src/model/bucket_helper.rs | 41 +++ src/model/bucket_table.rs | 94 +++--- src/model/garage.rs | 17 + src/model/key_table.rs | 102 +++--- src/model/lib.rs | 3 + src/model/object_table.rs | 16 +- src/model/permission.rs | 37 +++ src/model/version_table.rs | 12 +- src/rpc/Cargo.toml | 4 +- src/table/Cargo.toml | 6 +- src/table/schema.rs | 4 +- src/util/Cargo.toml | 2 +- src/util/crdt/deletable.rs | 72 ++++ src/util/crdt/lww.rs | 5 + src/util/crdt/lww_map.rs | 12 +- src/util/crdt/mod.rs | 2 + src/util/error.rs | 29 ++ src/util/time.rs | 5 + src/web/Cargo.toml | 10 +- src/web/web_server.rs | 33 +- 38 files changed, 1173 insertions(+), 496 deletions(-) create mode 100644 
src/model/bucket_alias_table.rs create mode 100644 src/model/bucket_helper.rs create mode 100644 src/model/permission.rs create mode 100644 src/util/crdt/deletable.rs diff --git a/Cargo.lock b/Cargo.lock index c07acac2..dc1a1154 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -382,17 +382,17 @@ dependencies = [ [[package]] name = "garage" -version = "0.5.0" +version = "0.6.0" dependencies = [ "async-trait", "bytes 1.1.0", "futures", "futures-util", "garage_api", - "garage_model", - "garage_rpc", - "garage_table", - "garage_util", + "garage_model 0.6.0", + "garage_rpc 0.6.0", + "garage_table 0.6.0", + "garage_util 0.6.0", "garage_web", "git-version", "hex", @@ -411,7 +411,7 @@ dependencies = [ [[package]] name = "garage_api" -version = "0.5.0" +version = "0.6.0" dependencies = [ "base64", "bytes 1.1.0", @@ -420,9 +420,9 @@ dependencies = [ "err-derive 0.3.0", "futures", "futures-util", - "garage_model", - "garage_table", - "garage_util", + "garage_model 0.6.0", + "garage_table 0.6.0", + "garage_util 0.6.0", "hex", "hmac", "http", @@ -444,14 +444,39 @@ dependencies = [ [[package]] name = "garage_model" version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c56150ee02bc26c77996b19fee0851f7d53cf42ae80370a8cf3a5dd5bb0bba76" dependencies = [ "arc-swap", "async-trait", "futures", "futures-util", - "garage_rpc", - "garage_table", - "garage_util", + "garage_rpc 0.5.0", + "garage_table 0.5.0", + "garage_util 0.5.0", + "hex", + "log", + "netapp", + "rand", + "rmp-serde 0.15.5", + "serde", + "serde_bytes", + "sled", + "tokio", +] + +[[package]] +name = "garage_model" +version = "0.6.0" +dependencies = [ + "arc-swap", + "async-trait", + "futures", + "futures-util", + "garage_model 0.5.0", + "garage_rpc 0.6.0", + "garage_table 0.6.0", + "garage_util 0.6.0", "hex", "log", "netapp", @@ -467,13 +492,40 @@ dependencies = [ [[package]] name = "garage_rpc" version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "3c5743c49f616b260f548454ff52b81d10372593d4c4bc01d516ee3c3c4e515a" dependencies = [ "arc-swap", "async-trait", "bytes 1.1.0", "futures", "futures-util", - "garage_util", + "garage_util 0.5.0", + "gethostname", + "hex", + "hyper", + "kuska-sodiumoxide", + "log", + "netapp", + "rand", + "rmp-serde 0.15.5", + "serde", + "serde_bytes", + "serde_json", + "tokio", + "tokio-stream", +] + +[[package]] +name = "garage_rpc" +version = "0.6.0" +dependencies = [ + "arc-swap", + "async-trait", + "bytes 1.1.0", + "futures", + "futures-util", + "garage_util 0.6.0", "gethostname", "hex", "hyper", @@ -492,13 +544,35 @@ dependencies = [ [[package]] name = "garage_table" version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "378ffd69e8fd084e0817dc64a23a1692b58ffc86509ac2cadc64aa2d83c3e1e0" dependencies = [ "async-trait", "bytes 1.1.0", "futures", "futures-util", - "garage_rpc", - "garage_util", + "garage_rpc 0.5.0", + "garage_util 0.5.0", + "hexdump", + "log", + "rand", + "rmp-serde 0.15.5", + "serde", + "serde_bytes", + "sled", + "tokio", +] + +[[package]] +name = "garage_table" +version = "0.6.0" +dependencies = [ + "async-trait", + "bytes 1.1.0", + "futures", + "futures-util", + "garage_rpc 0.6.0", + "garage_util 0.6.0", "hexdump", "log", "rand", @@ -512,6 +586,32 @@ dependencies = [ [[package]] name = "garage_util" version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5282e613b4da5ecca5bfec8c48ce9f25226cc1f35fbc439ed5fc698cce1aa549" +dependencies = [ + "blake2", + "chrono", + "err-derive 0.3.0", + "futures", + "hex", + "http", + "hyper", + "log", + "netapp", + "rand", + "rmp-serde 0.15.5", + "serde", + "serde_json", + "sha2", + "sled", + "tokio", + "toml", + "xxhash-rust", +] + +[[package]] +name = "garage_util" +version = "0.6.0" dependencies = [ "blake2", "chrono", @@ -535,14 +635,14 @@ dependencies = [ [[package]] name = "garage_web" -version = "0.5.0" +version = "0.6.0" 
dependencies = [ "err-derive 0.3.0", "futures", "garage_api", - "garage_model", - "garage_table", - "garage_util", + "garage_model 0.6.0", + "garage_table 0.6.0", + "garage_util 0.6.0", "http", "hyper", "log", diff --git a/src/api/Cargo.toml b/src/api/Cargo.toml index 3ca46764..de58f78b 100644 --- a/src/api/Cargo.toml +++ b/src/api/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "garage_api" -version = "0.5.0" +version = "0.6.0" authors = ["Alex Auvolat "] edition = "2018" license = "AGPL-3.0" @@ -14,9 +14,9 @@ path = "lib.rs" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -garage_model = { version = "0.5.0", path = "../model" } -garage_table = { version = "0.5.0", path = "../table" } -garage_util = { version = "0.5.0", path = "../util" } +garage_model = { version = "0.6.0", path = "../model" } +garage_table = { version = "0.6.0", path = "../table" } +garage_util = { version = "0.6.0", path = "../util" } base64 = "0.13" bytes = "1.0" diff --git a/src/api/api_server.rs b/src/api/api_server.rs index 2de86233..cc9b9c38 100644 --- a/src/api/api_server.rs +++ b/src/api/api_server.rs @@ -7,9 +7,12 @@ use hyper::server::conn::AddrStream; use hyper::service::{make_service_fn, service_fn}; use hyper::{Body, Request, Response, Server}; +use garage_util::crdt; +use garage_util::data::*; use garage_util::error::Error as GarageError; use garage_model::garage::Garage; +use garage_model::key_table::Key; use crate::error::*; use crate::signature::check_signature; @@ -105,10 +108,20 @@ async fn handler_inner(garage: Arc, req: Request) -> Result { + return handle_request_without_bucket(garage, req, api_key, endpoint).await + } + Authorization::Read(bucket) | Authorization::Write(bucket) => bucket.to_string(), + }; + + let bucket_id = resolve_bucket(&garage, &bucket_name, &api_key).await?; + let allowed = match endpoint.authorization_type() { - Authorization::None => true, - Authorization::Read(bucket) => 
api_key.allow_read(bucket), - Authorization::Write(bucket) => api_key.allow_write(bucket), + Authorization::Read(_) => api_key.allow_read(&bucket_id), + Authorization::Write(_) => api_key.allow_write(&bucket_id), + _ => unreachable!(), }; if !allowed { @@ -118,19 +131,18 @@ async fn handler_inner(garage: Arc, req: Request) -> Result handle_list_buckets(&api_key), - Endpoint::HeadObject { bucket, key, .. } => handle_head(garage, &req, &bucket, &key).await, - Endpoint::GetObject { bucket, key, .. } => handle_get(garage, &req, &bucket, &key).await, + Endpoint::HeadObject { key, .. } => handle_head(garage, &req, bucket_id, &key).await, + Endpoint::GetObject { key, .. } => handle_get(garage, &req, bucket_id, &key).await, Endpoint::UploadPart { - bucket, key, part_number, upload_id, + .. } => { handle_put_part( garage, req, - &bucket, + bucket_id, &key, part_number, &upload_id, @@ -138,38 +150,46 @@ async fn handler_inner(garage: Arc, req: Request) -> Result { + Endpoint::CopyObject { key, .. } => { let copy_source = req.headers().get("x-amz-copy-source").unwrap().to_str()?; let copy_source = percent_encoding::percent_decode_str(copy_source).decode_utf8()?; let (source_bucket, source_key) = parse_bucket_key(©_source, None)?; - if !api_key.allow_read(source_bucket) { + let source_bucket_id = + resolve_bucket(&garage, &source_bucket.to_string(), &api_key).await?; + if !api_key.allow_read(&source_bucket_id) { return Err(Error::Forbidden(format!( "Reading from bucket {} not allowed for this key", source_bucket ))); } let source_key = source_key.ok_or_bad_request("No source key specified")?; - handle_copy(garage, &req, &bucket, &key, source_bucket, source_key).await + handle_copy(garage, &req, bucket_id, &key, source_bucket_id, source_key).await } - Endpoint::PutObject { bucket, key } => { - handle_put(garage, req, &bucket, &key, content_sha256).await + Endpoint::PutObject { key, .. 
} => { + handle_put(garage, req, bucket_id, &key, content_sha256).await } - Endpoint::AbortMultipartUpload { - bucket, - key, - upload_id, - } => handle_abort_multipart_upload(garage, &bucket, &key, &upload_id).await, - Endpoint::DeleteObject { bucket, key, .. } => handle_delete(garage, &bucket, &key).await, + Endpoint::AbortMultipartUpload { key, upload_id, .. } => { + handle_abort_multipart_upload(garage, bucket_id, &key, &upload_id).await + } + Endpoint::DeleteObject { key, .. } => handle_delete(garage, bucket_id, &key).await, Endpoint::CreateMultipartUpload { bucket, key } => { - handle_create_multipart_upload(garage, &req, &bucket, &key).await + handle_create_multipart_upload(garage, &req, &bucket, bucket_id, &key).await } Endpoint::CompleteMultipartUpload { bucket, key, upload_id, } => { - handle_complete_multipart_upload(garage, req, &bucket, &key, &upload_id, content_sha256) - .await + handle_complete_multipart_upload( + garage, + req, + &bucket, + bucket_id, + &key, + &upload_id, + content_sha256, + ) + .await } Endpoint::CreateBucket { bucket } => { debug!( @@ -206,7 +226,8 @@ async fn handler_inner(garage: Arc, req: Request) -> Result, req: Request) -> Result, req: Request) -> Result { - handle_delete_objects(garage, &bucket, req, content_sha256).await + Endpoint::DeleteObjects { .. 
} => { + handle_delete_objects(garage, bucket_id, req, content_sha256).await } Endpoint::PutBucketWebsite { bucket } => { handle_put_website(garage, bucket, req, content_sha256).await @@ -263,6 +285,41 @@ async fn handler_inner(garage: Arc, req: Request) -> Result, + _req: Request, + api_key: Key, + endpoint: Endpoint, +) -> Result, Error> { + match endpoint { + Endpoint::ListBuckets => handle_list_buckets(&garage, &api_key).await, + endpoint => Err(Error::NotImplemented(endpoint.name().to_owned())), + } +} + +#[allow(clippy::ptr_arg)] +async fn resolve_bucket( + garage: &Garage, + bucket_name: &String, + api_key: &Key, +) -> Result { + let api_key_params = api_key + .state + .as_option() + .ok_or_else(|| Error::Forbidden("Operation is not allowed for this key.".to_string()))?; + + if let Some(crdt::Deletable::Present(bucket_id)) = api_key_params.local_aliases.get(bucket_name) + { + Ok(*bucket_id) + } else { + Ok(garage + .bucket_helper() + .resolve_global_bucket_name(bucket_name) + .await? + .ok_or(Error::NotFound)?) + } +} + /// Extract the bucket name and the key name from an HTTP path and possibly a bucket provided in /// the host header of the request /// diff --git a/src/api/s3_bucket.rs b/src/api/s3_bucket.rs index 2be0a818..dc131a31 100644 --- a/src/api/s3_bucket.rs +++ b/src/api/s3_bucket.rs @@ -1,9 +1,12 @@ +use std::collections::HashMap; use std::sync::Arc; use hyper::{Body, Response}; use garage_model::garage::Garage; use garage_model::key_table::Key; +use garage_table::util::EmptyKey; +use garage_util::crdt::*; use garage_util::time::*; use crate::error::*; @@ -34,20 +37,65 @@ pub fn handle_get_bucket_versioning() -> Result, Error> { .body(Body::from(xml.into_bytes()))?) 
} -pub fn handle_list_buckets(api_key: &Key) -> Result, Error> { +pub async fn handle_list_buckets(garage: &Garage, api_key: &Key) -> Result, Error> { + let key_state = api_key.state.as_option().ok_or_internal_error( + "Key should not be in deleted state at this point (internal error)", + )?; + + // Collect buckets user has access to + let ids = api_key + .state + .as_option() + .unwrap() + .authorized_buckets + .items() + .iter() + .filter(|(_, perms)| perms.allow_read || perms.allow_write) + .map(|(id, _)| *id) + .collect::>(); + + let mut buckets_by_id = HashMap::new(); + let mut aliases = HashMap::new(); + + for bucket_id in ids.iter() { + let bucket = garage.bucket_table.get(bucket_id, &EmptyKey).await?; + if let Some(bucket) = bucket { + if let Deletable::Present(param) = bucket.state { + for (alias, _, active) in param.aliases.items() { + if *active { + let alias_ent = garage.bucket_alias_table.get(&EmptyKey, alias).await?; + if let Some(alias_ent) = alias_ent { + if let Some(alias_p) = alias_ent.state.get().as_option() { + if alias_p.bucket_id == *bucket_id { + aliases.insert(alias_ent.name.clone(), *bucket_id); + } + } + } + } + } + buckets_by_id.insert(bucket_id, param); + } + } + } + + for (alias, _, id) in key_state.local_aliases.items() { + if let Some(id) = id.as_option() { + aliases.insert(alias.clone(), *id); + } + } + + // Generate response let list_buckets = s3_xml::ListAllMyBucketsResult { owner: s3_xml::Owner { display_name: s3_xml::Value(api_key.name.get().to_string()), id: s3_xml::Value(api_key.key_id.to_string()), }, buckets: s3_xml::BucketList { - entries: api_key - .authorized_buckets - .items() + entries: aliases .iter() - .filter(|(_, _, perms)| perms.allow_read || perms.allow_write) - .map(|(name, ts, _)| s3_xml::Bucket { - creation_date: s3_xml::Value(msec_to_rfc3339(*ts)), + .filter_map(|(name, id)| buckets_by_id.get(id).map(|p| (name, id, p))) + .map(|(name, _id, param)| s3_xml::Bucket { + creation_date: 
s3_xml::Value(msec_to_rfc3339(param.creation_date)), name: s3_xml::Value(name.to_string()), }) .collect(), diff --git a/src/api/s3_copy.rs b/src/api/s3_copy.rs index 9ade6985..4ede8230 100644 --- a/src/api/s3_copy.rs +++ b/src/api/s3_copy.rs @@ -18,14 +18,14 @@ use crate::s3_xml; pub async fn handle_copy( garage: Arc, req: &Request, - dest_bucket: &str, + dest_bucket_id: Uuid, dest_key: &str, - source_bucket: &str, + source_bucket_id: Uuid, source_key: &str, ) -> Result, Error> { let source_object = garage .object_table - .get(&source_bucket.to_string(), &source_key.to_string()) + .get(&source_bucket_id, &source_key.to_string()) .await? .ok_or(Error::NotFound)?; @@ -76,7 +76,7 @@ pub async fn handle_copy( )), }; let dest_object = Object::new( - dest_bucket.to_string(), + dest_bucket_id, dest_key.to_string(), vec![dest_object_version], ); @@ -99,7 +99,7 @@ pub async fn handle_copy( state: ObjectVersionState::Uploading(new_meta.headers.clone()), }; let tmp_dest_object = Object::new( - dest_bucket.to_string(), + dest_bucket_id, dest_key.to_string(), vec![tmp_dest_object_version], ); @@ -109,12 +109,8 @@ pub async fn handle_copy( // this means that the BlockRef entries linked to this version cannot be // marked as deleted (they are marked as deleted only if the Version // doesn't exist or is marked as deleted). 
- let mut dest_version = Version::new( - new_uuid, - dest_bucket.to_string(), - dest_key.to_string(), - false, - ); + let mut dest_version = + Version::new(new_uuid, dest_bucket_id, dest_key.to_string(), false); garage.version_table.insert(&dest_version).await?; // Fill in block list for version and insert block refs @@ -151,7 +147,7 @@ pub async fn handle_copy( )), }; let dest_object = Object::new( - dest_bucket.to_string(), + dest_bucket_id, dest_key.to_string(), vec![dest_object_version], ); diff --git a/src/api/s3_delete.rs b/src/api/s3_delete.rs index 425f86d7..1976139b 100644 --- a/src/api/s3_delete.rs +++ b/src/api/s3_delete.rs @@ -14,12 +14,12 @@ use crate::signature::verify_signed_content; async fn handle_delete_internal( garage: &Garage, - bucket: &str, + bucket_id: Uuid, key: &str, ) -> Result<(Uuid, Uuid), Error> { let object = garage .object_table - .get(&bucket.to_string(), &key.to_string()) + .get(&bucket_id, &key.to_string()) .await? .ok_or(Error::NotFound)?; // No need to delete @@ -45,7 +45,7 @@ async fn handle_delete_internal( let version_uuid = gen_uuid(); let object = Object::new( - bucket.into(), + bucket_id, key.into(), vec![ObjectVersion { uuid: version_uuid, @@ -61,11 +61,11 @@ async fn handle_delete_internal( pub async fn handle_delete( garage: Arc, - bucket: &str, + bucket_id: Uuid, key: &str, ) -> Result, Error> { let (_deleted_version, delete_marker_version) = - handle_delete_internal(&garage, bucket, key).await?; + handle_delete_internal(&garage, bucket_id, key).await?; Ok(Response::builder() .header("x-amz-version-id", hex::encode(delete_marker_version)) @@ -76,7 +76,7 @@ pub async fn handle_delete( pub async fn handle_delete_objects( garage: Arc, - bucket: &str, + bucket_id: Uuid, req: Request, content_sha256: Option, ) -> Result, Error> { @@ -90,7 +90,7 @@ pub async fn handle_delete_objects( let mut ret_errors = Vec::new(); for obj in cmd.objects.iter() { - match handle_delete_internal(&garage, bucket, &obj.key).await { + match 
handle_delete_internal(&garage, bucket_id, &obj.key).await { Ok((deleted_version, delete_marker_version)) => { if cmd.quiet { continue; diff --git a/src/api/s3_get.rs b/src/api/s3_get.rs index 428bbf34..269a3fa8 100644 --- a/src/api/s3_get.rs +++ b/src/api/s3_get.rs @@ -7,6 +7,7 @@ use hyper::body::Bytes; use hyper::{Body, Request, Response, StatusCode}; use garage_table::EmptyKey; +use garage_util::data::*; use garage_model::garage::Garage; use garage_model::object_table::*; @@ -84,12 +85,12 @@ fn try_answer_cached( pub async fn handle_head( garage: Arc, req: &Request, - bucket: &str, + bucket_id: Uuid, key: &str, ) -> Result, Error> { let object = garage .object_table - .get(&bucket.to_string(), &key.to_string()) + .get(&bucket_id, &key.to_string()) .await? .ok_or(Error::NotFound)?; @@ -123,12 +124,12 @@ pub async fn handle_head( pub async fn handle_get( garage: Arc, req: &Request, - bucket: &str, + bucket_id: Uuid, key: &str, ) -> Result, Error> { let object = garage .object_table - .get(&bucket.to_string(), &key.to_string()) + .get(&bucket_id, &key.to_string()) .await? 
.ok_or(Error::NotFound)?; diff --git a/src/api/s3_list.rs b/src/api/s3_list.rs index df9c3e6b..07efb02d 100644 --- a/src/api/s3_list.rs +++ b/src/api/s3_list.rs @@ -3,6 +3,7 @@ use std::sync::Arc; use hyper::{Body, Response}; +use garage_util::data::*; use garage_util::error::Error as GarageError; use garage_util::time::*; @@ -18,7 +19,8 @@ use crate::s3_xml; #[derive(Debug)] pub struct ListObjectsQuery { pub is_v2: bool, - pub bucket: String, + pub bucket_name: String, + pub bucket_id: Uuid, pub delimiter: Option, pub max_keys: usize, pub prefix: String, @@ -102,7 +104,7 @@ pub async fn handle_list( let objects = garage .object_table .get_range( - &query.bucket, + &query.bucket_id, Some(next_chunk_start.clone()), Some(DeletedFilter::NotDeleted), query.max_keys + 1, @@ -232,7 +234,7 @@ pub async fn handle_list( let mut result = s3_xml::ListBucketResult { xmlns: (), - name: s3_xml::Value(query.bucket.to_string()), + name: s3_xml::Value(query.bucket_name.to_string()), prefix: uriencode_maybe(&query.prefix, query.urlencode_resp), marker: None, next_marker: None, diff --git a/src/api/s3_put.rs b/src/api/s3_put.rs index f63e8307..152e59b4 100644 --- a/src/api/s3_put.rs +++ b/src/api/s3_put.rs @@ -24,7 +24,7 @@ use crate::signature::verify_signed_content; pub async fn handle_put( garage: Arc, req: Request, - bucket: &str, + bucket_id: Uuid, key: &str, content_sha256: Option, ) -> Result, Error> { @@ -77,7 +77,7 @@ pub async fn handle_put( )), }; - let object = Object::new(bucket.into(), key.into(), vec![object_version]); + let object = Object::new(bucket_id, key.into(), vec![object_version]); garage.object_table.insert(&object).await?; return Ok(put_response(version_uuid, data_md5sum_hex)); @@ -90,14 +90,14 @@ pub async fn handle_put( timestamp: version_timestamp, state: ObjectVersionState::Uploading(headers.clone()), }; - let object = Object::new(bucket.into(), key.into(), vec![object_version.clone()]); + let object = Object::new(bucket_id, key.into(), 
vec![object_version.clone()]); garage.object_table.insert(&object).await?; // Initialize corresponding entry in version table // Write this entry now, even with empty block list, // to prevent block_ref entries from being deleted (they can be deleted // if the reference a version that isn't found in the version table) - let version = Version::new(version_uuid, bucket.into(), key.into(), false); + let version = Version::new(version_uuid, bucket_id, key.into(), false); garage.version_table.insert(&version).await?; // Transfer data and verify checksum @@ -127,7 +127,7 @@ pub async fn handle_put( Err(e) => { // Mark object as aborted, this will free the blocks further down object_version.state = ObjectVersionState::Aborted; - let object = Object::new(bucket.into(), key.into(), vec![object_version.clone()]); + let object = Object::new(bucket_id, key.into(), vec![object_version.clone()]); garage.object_table.insert(&object).await?; return Err(e); } @@ -143,7 +143,7 @@ pub async fn handle_put( }, first_block_hash, )); - let object = Object::new(bucket.into(), key.into(), vec![object_version]); + let object = Object::new(bucket_id, key.into(), vec![object_version]); garage.object_table.insert(&object).await?; Ok(put_response(version_uuid, md5sum_hex)) @@ -315,7 +315,8 @@ pub fn put_response(version_uuid: Uuid, md5sum_hex: String) -> Response { pub async fn handle_create_multipart_upload( garage: Arc, req: &Request, - bucket: &str, + bucket_name: &str, + bucket_id: Uuid, key: &str, ) -> Result, Error> { let version_uuid = gen_uuid(); @@ -327,20 +328,20 @@ pub async fn handle_create_multipart_upload( timestamp: now_msec(), state: ObjectVersionState::Uploading(headers), }; - let object = Object::new(bucket.to_string(), key.to_string(), vec![object_version]); + let object = Object::new(bucket_id, key.to_string(), vec![object_version]); garage.object_table.insert(&object).await?; // Insert empty version so that block_ref entries refer to something // (they are inserted 
concurrently with blocks in the version table, so // there is the possibility that they are inserted before the version table // is created, in which case it is allowed to delete them, e.g. in repair_*) - let version = Version::new(version_uuid, bucket.into(), key.into(), false); + let version = Version::new(version_uuid, bucket_id, key.into(), false); garage.version_table.insert(&version).await?; // Send success response let result = s3_xml::InitiateMultipartUploadResult { xmlns: (), - bucket: s3_xml::Value(bucket.to_string()), + bucket: s3_xml::Value(bucket_name.to_string()), key: s3_xml::Value(key.to_string()), upload_id: s3_xml::Value(hex::encode(version_uuid)), }; @@ -352,7 +353,7 @@ pub async fn handle_create_multipart_upload( pub async fn handle_put_part( garage: Arc, req: Request, - bucket: &str, + bucket_id: Uuid, key: &str, part_number: u64, upload_id: &str, @@ -366,12 +367,11 @@ pub async fn handle_put_part( }; // Read first chuck, and at the same time try to get object to see if it exists - let bucket = bucket.to_string(); let key = key.to_string(); let mut chunker = BodyChunker::new(req.into_body(), garage.config.block_size); let (object, first_block) = - futures::try_join!(garage.object_table.get(&bucket, &key), chunker.next(),)?; + futures::try_join!(garage.object_table.get(&bucket_id, &key), chunker.next(),)?; // Check object is valid and multipart block can be accepted let first_block = first_block.ok_or_else(|| Error::BadRequest("Empty body".to_string()))?; @@ -386,7 +386,7 @@ pub async fn handle_put_part( } // Copy block to store - let version = Version::new(version_uuid, bucket, key, false); + let version = Version::new(version_uuid, bucket_id, key, false); let first_block_hash = blake2sum(&first_block[..]); let (_, data_md5sum, data_sha256sum) = read_and_put_blocks( &garage, @@ -424,7 +424,8 @@ pub async fn handle_put_part( pub async fn handle_complete_multipart_upload( garage: Arc, req: Request, - bucket: &str, + bucket_name: &str, + 
bucket_id: Uuid, key: &str, upload_id: &str, content_sha256: Option, @@ -442,10 +443,9 @@ pub async fn handle_complete_multipart_upload( let version_uuid = decode_upload_id(upload_id)?; - let bucket = bucket.to_string(); let key = key.to_string(); let (object, version) = futures::try_join!( - garage.object_table.get(&bucket, &key), + garage.object_table.get(&bucket_id, &key), garage.version_table.get(&version_uuid, &EmptyKey), )?; @@ -510,14 +510,14 @@ pub async fn handle_complete_multipart_upload( version.blocks.items()[0].1.hash, )); - let final_object = Object::new(bucket.clone(), key.clone(), vec![object_version]); + let final_object = Object::new(bucket_id, key.clone(), vec![object_version]); garage.object_table.insert(&final_object).await?; // Send response saying ok we're done let result = s3_xml::CompleteMultipartUploadResult { xmlns: (), location: None, - bucket: s3_xml::Value(bucket), + bucket: s3_xml::Value(bucket_name.to_string()), key: s3_xml::Value(key), etag: s3_xml::Value(etag), }; @@ -528,7 +528,7 @@ pub async fn handle_complete_multipart_upload( pub async fn handle_abort_multipart_upload( garage: Arc, - bucket: &str, + bucket_id: Uuid, key: &str, upload_id: &str, ) -> Result, Error> { @@ -536,7 +536,7 @@ pub async fn handle_abort_multipart_upload( let object = garage .object_table - .get(&bucket.to_string(), &key.to_string()) + .get(&bucket_id, &key.to_string()) .await?; let object = object.ok_or_else(|| Error::BadRequest("Object not found".to_string()))?; @@ -550,7 +550,7 @@ pub async fn handle_abort_multipart_upload( }; object_version.state = ObjectVersionState::Aborted; - let final_object = Object::new(bucket.to_string(), key.to_string(), vec![object_version]); + let final_object = Object::new(bucket_id, key.to_string(), vec![object_version]); garage.object_table.insert(&final_object).await?; Ok(Response::new(Body::from(vec![]))) diff --git a/src/api/s3_website.rs b/src/api/s3_website.rs index 37c8b86c..da67c4cd 100644 --- 
a/src/api/s3_website.rs +++ b/src/api/s3_website.rs @@ -7,9 +7,10 @@ use serde::{Deserialize, Serialize}; use crate::error::*; use crate::s3_xml::{xmlns_tag, IntValue, Value}; use crate::signature::verify_signed_content; -use garage_model::bucket_table::BucketState; + use garage_model::garage::Garage; use garage_table::*; +use garage_util::crdt; use garage_util::data::Hash; pub async fn handle_delete_website( @@ -17,14 +18,18 @@ pub async fn handle_delete_website( bucket: String, ) -> Result, Error> { let mut bucket = garage - .bucket_table + .bucket_alias_table .get(&EmptyKey, &bucket) .await? .ok_or(Error::NotFound)?; - if let BucketState::Present(state) = bucket.state.get_mut() { - state.website.update(false); - garage.bucket_table.insert(&bucket).await?; + if let crdt::Deletable::Present(state) = bucket.state.get_mut() { + let mut new_param = state.clone(); + new_param.website_access = false; + bucket.state.update(crdt::Deletable::present(new_param)); + garage.bucket_alias_table.insert(&bucket).await?; + } else { + unreachable!(); } Ok(Response::builder() @@ -43,7 +48,7 @@ pub async fn handle_put_website( verify_signed_content(content_sha256, &body[..])?; let mut bucket = garage - .bucket_table + .bucket_alias_table .get(&EmptyKey, &bucket) .await? 
.ok_or(Error::NotFound)?; @@ -51,9 +56,13 @@ pub async fn handle_put_website( let conf: WebsiteConfiguration = from_reader(&body as &[u8])?; conf.validate()?; - if let BucketState::Present(state) = bucket.state.get_mut() { - state.website.update(true); - garage.bucket_table.insert(&bucket).await?; + if let crdt::Deletable::Present(state) = bucket.state.get() { + let mut new_param = state.clone(); + new_param.website_access = true; + bucket.state.update(crdt::Deletable::present(new_param)); + garage.bucket_alias_table.insert(&bucket).await?; + } else { + unreachable!(); } Ok(Response::builder() diff --git a/src/api/signature.rs b/src/api/signature.rs index 53ca2ce5..b5da7b62 100644 --- a/src/api/signature.rs +++ b/src/api/signature.rs @@ -64,7 +64,7 @@ pub async fn check_signature( .key_table .get(&EmptyKey, &authorization.key_id) .await? - .filter(|k| !k.deleted.get()) + .filter(|k| !k.state.is_deleted()) .ok_or_else(|| Error::Forbidden(format!("No such key: {}", authorization.key_id)))?; let canonical_request = canonical_request( diff --git a/src/garage/Cargo.toml b/src/garage/Cargo.toml index 74a6ab0e..44cacde3 100644 --- a/src/garage/Cargo.toml +++ b/src/garage/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "garage" -version = "0.5.0" +version = "0.6.0" authors = ["Alex Auvolat "] edition = "2018" license = "AGPL-3.0" @@ -15,12 +15,12 @@ path = "main.rs" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -garage_api = { version = "0.5.0", path = "../api" } -garage_model = { version = "0.5.0", path = "../model" } -garage_rpc = { version = "0.5.0", path = "../rpc" } -garage_table = { version = "0.5.0", path = "../table" } -garage_util = { version = "0.5.0", path = "../util" } -garage_web = { version = "0.5.0", path = "../web" } +garage_api = { version = "0.6.0", path = "../api" } +garage_model = { version = "0.6.0", path = "../model" } +garage_rpc = { version = "0.6.0", path = "../rpc" } +garage_table = { 
version = "0.6.0", path = "../table" } +garage_util = { version = "0.6.0", path = "../util" } +garage_web = { version = "0.6.0", path = "../web" } bytes = "1.0" git-version = "0.3.4" diff --git a/src/garage/admin.rs b/src/garage/admin.rs index c7472670..6db8bfbe 100644 --- a/src/garage/admin.rs +++ b/src/garage/admin.rs @@ -5,17 +5,21 @@ use std::sync::Arc; use async_trait::async_trait; use serde::{Deserialize, Serialize}; -use garage_util::error::Error; +use garage_util::crdt::*; +use garage_util::data::*; +use garage_util::error::*; +use garage_util::time::*; -use garage_table::crdt::Crdt; use garage_table::replication::*; use garage_table::*; use garage_rpc::*; +use garage_model::bucket_alias_table::*; use garage_model::bucket_table::*; use garage_model::garage::Garage; use garage_model::key_table::*; +use garage_model::permission::*; use crate::cli::*; use crate::repair::Repair; @@ -31,7 +35,7 @@ pub enum AdminRpc { // Replies Ok(String), - BucketList(Vec), + BucketList(Vec), BucketInfo(Bucket), KeyList(Vec<(String, String)>), KeyInfo(Key), @@ -56,203 +60,331 @@ impl AdminRpcHandler { async fn handle_bucket_cmd(&self, cmd: &BucketOperation) -> Result { match cmd { - BucketOperation::List => { - let bucket_names = self - .garage - .bucket_table - .get_range(&EmptyKey, None, Some(DeletedFilter::NotDeleted), 10000) - .await? - .iter() - .map(|b| b.name.to_string()) - .collect::>(); - Ok(AdminRpc::BucketList(bucket_names)) - } + BucketOperation::List => self.handle_list_buckets().await, BucketOperation::Info(query) => { - let bucket = self.get_existing_bucket(&query.name).await?; + let bucket_id = self + .garage + .bucket_helper() + .resolve_global_bucket_name(&query.name) + .await? + .ok_or_message("Bucket not found")?; + let bucket = self + .garage + .bucket_helper() + .get_existing_bucket(bucket_id) + .await?; Ok(AdminRpc::BucketInfo(bucket)) } - BucketOperation::Create(query) => { - let bucket = match self.garage.bucket_table.get(&EmptyKey, &query.name).await? 
{ - Some(mut bucket) => { - if !bucket.is_deleted() { - return Err(Error::BadRpc(format!( - "Bucket {} already exists", - query.name - ))); - } - bucket - .state - .update(BucketState::Present(BucketParams::new())); - bucket - } - None => Bucket::new(query.name.clone()), - }; - self.garage.bucket_table.insert(&bucket).await?; - Ok(AdminRpc::Ok(format!("Bucket {} was created.", query.name))) - } - BucketOperation::Delete(query) => { - let mut bucket = self.get_existing_bucket(&query.name).await?; - let objects = self - .garage - .object_table - .get_range(&query.name, None, Some(DeletedFilter::NotDeleted), 10) - .await?; - if !objects.is_empty() { - return Err(Error::BadRpc(format!("Bucket {} is not empty", query.name))); - } - if !query.yes { - return Err(Error::BadRpc( - "Add --yes flag to really perform this operation".to_string(), - )); - } - // --- done checking, now commit --- - for (key_id, _, _) in bucket.authorized_keys() { - if let Some(key) = self.garage.key_table.get(&EmptyKey, key_id).await? 
{ - if !key.deleted.get() { - self.update_key_bucket(&key, &bucket.name, false, false) - .await?; - } - } else { - return Err(Error::Message(format!("Key not found: {}", key_id))); - } - } - bucket.state.update(BucketState::Deleted); - self.garage.bucket_table.insert(&bucket).await?; - Ok(AdminRpc::Ok(format!("Bucket {} was deleted.", query.name))) - } - BucketOperation::Allow(query) => { - let key = self.get_existing_key(&query.key_pattern).await?; - let bucket = self.get_existing_bucket(&query.bucket).await?; - let allow_read = query.read || key.allow_read(&query.bucket); - let allow_write = query.write || key.allow_write(&query.bucket); - self.update_key_bucket(&key, &query.bucket, allow_read, allow_write) - .await?; - self.update_bucket_key(bucket, &key.key_id, allow_read, allow_write) - .await?; - Ok(AdminRpc::Ok(format!( - "New permissions for {} on {}: read {}, write {}.", - &key.key_id, &query.bucket, allow_read, allow_write - ))) - } - BucketOperation::Deny(query) => { - let key = self.get_existing_key(&query.key_pattern).await?; - let bucket = self.get_existing_bucket(&query.bucket).await?; - let allow_read = !query.read && key.allow_read(&query.bucket); - let allow_write = !query.write && key.allow_write(&query.bucket); - self.update_key_bucket(&key, &query.bucket, allow_read, allow_write) - .await?; - self.update_bucket_key(bucket, &key.key_id, allow_read, allow_write) - .await?; - Ok(AdminRpc::Ok(format!( - "New permissions for {} on {}: read {}, write {}.", - &key.key_id, &query.bucket, allow_read, allow_write - ))) - } - BucketOperation::Website(query) => { - let mut bucket = self.get_existing_bucket(&query.bucket).await?; + BucketOperation::Create(query) => self.handle_create_bucket(&query.name).await, + BucketOperation::Delete(query) => self.handle_delete_bucket(query).await, + BucketOperation::Allow(query) => self.handle_bucket_allow(query).await, + BucketOperation::Deny(query) => self.handle_bucket_deny(query).await, + 
BucketOperation::Website(query) => self.handle_bucket_website(query).await, + } + } - if !(query.allow ^ query.deny) { - return Err(Error::Message( - "You must specify exactly one flag, either --allow or --deny".to_string(), - )); - } + async fn handle_list_buckets(&self) -> Result { + let bucket_aliases = self + .garage + .bucket_alias_table + .get_range(&EmptyKey, None, Some(DeletedFilter::NotDeleted), 10000) + .await?; + Ok(AdminRpc::BucketList(bucket_aliases)) + } - if let BucketState::Present(state) = bucket.state.get_mut() { - state.website.update(query.allow); - self.garage.bucket_table.insert(&bucket).await?; - let msg = if query.allow { - format!("Website access allowed for {}", &query.bucket) - } else { - format!("Website access denied for {}", &query.bucket) - }; - - Ok(AdminRpc::Ok(msg)) - } else { - unreachable!(); + #[allow(clippy::ptr_arg)] + async fn handle_create_bucket(&self, name: &String) -> Result { + let mut bucket = Bucket::new(); + let alias = match self.garage.bucket_alias_table.get(&EmptyKey, name).await? { + Some(mut alias) => { + if !alias.state.get().is_deleted() { + return Err(Error::BadRpc(format!("Bucket {} already exists", name))); } + alias.state.update(Deletable::Present(AliasParams { + bucket_id: bucket.id, + website_access: false, + })); + alias + } + None => BucketAlias::new(name.clone(), bucket.id, false), + }; + bucket + .state + .as_option_mut() + .unwrap() + .aliases + .update_in_place(name.clone(), true); + self.garage.bucket_table.insert(&bucket).await?; + self.garage.bucket_alias_table.insert(&alias).await?; + Ok(AdminRpc::Ok(format!("Bucket {} was created.", name))) + } + + async fn handle_delete_bucket(&self, query: &DeleteBucketOpt) -> Result { + let mut bucket_alias = self + .garage + .bucket_alias_table + .get(&EmptyKey, &query.name) + .await? 
+ .filter(|a| !a.is_deleted()) + .ok_or_message(format!("Bucket {} does not exist", query.name))?; + + let bucket_id = bucket_alias.state.get().as_option().unwrap().bucket_id; + + // Check bucket doesn't have other aliases + let mut bucket = self + .garage + .bucket_helper() + .get_existing_bucket(bucket_id) + .await?; + let bucket_state = bucket.state.as_option().unwrap(); + if bucket_state + .aliases + .items() + .iter() + .filter(|(_, _, active)| *active) + .any(|(name, _, _)| name != &query.name) + { + return Err(Error::Message(format!("Bucket {} still has other global aliases. Use `bucket unalias` to delete them one by one.", query.name))); + } + if bucket_state + .local_aliases + .items() + .iter() + .any(|(_, _, active)| *active) + { + return Err(Error::Message(format!("Bucket {} still has other local aliases. Use `bucket unalias` to delete them one by one.", query.name))); + } + + // Check bucket is empty + let objects = self + .garage + .object_table + .get_range(&bucket_id, None, Some(DeletedFilter::NotDeleted), 10) + .await?; + if !objects.is_empty() { + return Err(Error::BadRpc(format!("Bucket {} is not empty", query.name))); + } + + if !query.yes { + return Err(Error::BadRpc( + "Add --yes flag to really perform this operation".to_string(), + )); + } + + // --- done checking, now commit --- + // 1. delete authorization from keys that had access + for (key_id, _) in bucket.authorized_keys() { + if let Some(key) = self.garage.key_table.get(&EmptyKey, key_id).await? { + if !key.state.is_deleted() { + self.update_key_bucket(&key, bucket.id, false, false) + .await?; + } + } else { + return Err(Error::Message(format!("Key not found: {}", key_id))); } } + // 2. delete bucket alias + bucket_alias.state.update(Deletable::Deleted); + self.garage.bucket_alias_table.insert(&bucket_alias).await?; + // 3. 
delete bucket alias + bucket.state = Deletable::delete(); + self.garage.bucket_table.insert(&bucket).await?; + + Ok(AdminRpc::Ok(format!("Bucket {} was deleted.", query.name))) + } + + async fn handle_bucket_allow(&self, query: &PermBucketOpt) -> Result { + let bucket_id = self + .garage + .bucket_helper() + .resolve_global_bucket_name(&query.bucket) + .await? + .ok_or_message("Bucket not found")?; + let bucket = self + .garage + .bucket_helper() + .get_existing_bucket(bucket_id) + .await?; + let key = self.get_existing_key(&query.key_pattern).await?; + + let allow_read = query.read || key.allow_read(&bucket_id); + let allow_write = query.write || key.allow_write(&bucket_id); + + let new_perm = self + .update_key_bucket(&key, bucket_id, allow_read, allow_write) + .await?; + self.update_bucket_key(bucket, &key.key_id, new_perm) + .await?; + + Ok(AdminRpc::Ok(format!( + "New permissions for {} on {}: read {}, write {}.", + &key.key_id, &query.bucket, allow_read, allow_write + ))) + } + + async fn handle_bucket_deny(&self, query: &PermBucketOpt) -> Result { + let bucket_id = self + .garage + .bucket_helper() + .resolve_global_bucket_name(&query.bucket) + .await? + .ok_or_message("Bucket not found")?; + let bucket = self + .garage + .bucket_helper() + .get_existing_bucket(bucket_id) + .await?; + let key = self.get_existing_key(&query.key_pattern).await?; + + let allow_read = !query.read && key.allow_read(&bucket_id); + let allow_write = !query.write && key.allow_write(&bucket_id); + + let new_perm = self + .update_key_bucket(&key, bucket_id, allow_read, allow_write) + .await?; + self.update_bucket_key(bucket, &key.key_id, new_perm) + .await?; + + Ok(AdminRpc::Ok(format!( + "New permissions for {} on {}: read {}, write {}.", + &key.key_id, &query.bucket, allow_read, allow_write + ))) + } + + async fn handle_bucket_website(&self, query: &WebsiteOpt) -> Result { + let mut bucket_alias = self + .garage + .bucket_alias_table + .get(&EmptyKey, &query.bucket) + .await? 
+ .filter(|a| !a.is_deleted()) + .ok_or_message(format!("Bucket {} does not exist", query.bucket))?; + + let mut state = bucket_alias.state.get().as_option().unwrap().clone(); + + if !(query.allow ^ query.deny) { + return Err(Error::Message( + "You must specify exactly one flag, either --allow or --deny".to_string(), + )); + } + + state.website_access = query.allow; + bucket_alias.state.update(Deletable::present(state)); + self.garage.bucket_alias_table.insert(&bucket_alias).await?; + + let msg = if query.allow { + format!("Website access allowed for {}", &query.bucket) + } else { + format!("Website access denied for {}", &query.bucket) + }; + + Ok(AdminRpc::Ok(msg)) } async fn handle_key_cmd(&self, cmd: &KeyOperation) -> Result { match cmd { - KeyOperation::List => { - let key_ids = self - .garage - .key_table - .get_range( - &EmptyKey, - None, - Some(KeyFilter::Deleted(DeletedFilter::NotDeleted)), - 10000, - ) - .await? - .iter() - .map(|k| (k.key_id.to_string(), k.name.get().clone())) - .collect::>(); - Ok(AdminRpc::KeyList(key_ids)) - } + KeyOperation::List => self.handle_list_keys().await, KeyOperation::Info(query) => { let key = self.get_existing_key(&query.key_pattern).await?; Ok(AdminRpc::KeyInfo(key)) } - KeyOperation::New(query) => { - let key = Key::new(query.name.clone()); - self.garage.key_table.insert(&key).await?; - Ok(AdminRpc::KeyInfo(key)) - } - KeyOperation::Rename(query) => { - let mut key = self.get_existing_key(&query.key_pattern).await?; - key.name.update(query.new_name.clone()); - self.garage.key_table.insert(&key).await?; - Ok(AdminRpc::KeyInfo(key)) - } - KeyOperation::Delete(query) => { - let key = self.get_existing_key(&query.key_pattern).await?; - if !query.yes { - return Err(Error::BadRpc( - "Add --yes flag to really perform this operation".to_string(), - )); - } - // --- done checking, now commit --- - for (ab_name, _, _) in key.authorized_buckets.items().iter() { - if let Some(bucket) = self.garage.bucket_table.get(&EmptyKey, 
ab_name).await? { - if !bucket.is_deleted() { - self.update_bucket_key(bucket, &key.key_id, false, false) - .await?; - } - } else { - return Err(Error::Message(format!("Bucket not found: {}", ab_name))); - } - } - let del_key = Key::delete(key.key_id.to_string()); - self.garage.key_table.insert(&del_key).await?; - Ok(AdminRpc::Ok(format!( - "Key {} was deleted successfully.", - key.key_id - ))) - } - KeyOperation::Import(query) => { - let prev_key = self.garage.key_table.get(&EmptyKey, &query.key_id).await?; - if prev_key.is_some() { - return Err(Error::Message(format!("Key {} already exists in data store. Even if it is deleted, we can't let you create a new key with the same ID. Sorry.", query.key_id))); - } - let imported_key = Key::import(&query.key_id, &query.secret_key, &query.name); - self.garage.key_table.insert(&imported_key).await?; - Ok(AdminRpc::KeyInfo(imported_key)) - } + KeyOperation::New(query) => self.handle_create_key(query).await, + KeyOperation::Rename(query) => self.handle_rename_key(query).await, + KeyOperation::Delete(query) => self.handle_delete_key(query).await, + KeyOperation::Import(query) => self.handle_import_key(query).await, } } - #[allow(clippy::ptr_arg)] - async fn get_existing_bucket(&self, bucket: &String) -> Result { - self.garage - .bucket_table - .get(&EmptyKey, bucket) + async fn handle_list_keys(&self) -> Result { + let key_ids = self + .garage + .key_table + .get_range( + &EmptyKey, + None, + Some(KeyFilter::Deleted(DeletedFilter::NotDeleted)), + 10000, + ) .await? 
- .filter(|b| !b.is_deleted()) - .map(Ok) - .unwrap_or_else(|| Err(Error::BadRpc(format!("Bucket {} does not exist", bucket)))) + .iter() + .map(|k| (k.key_id.to_string(), k.name.get().clone())) + .collect::>(); + Ok(AdminRpc::KeyList(key_ids)) + } + + async fn handle_create_key(&self, query: &KeyNewOpt) -> Result { + let key = Key::new(query.name.clone()); + self.garage.key_table.insert(&key).await?; + Ok(AdminRpc::KeyInfo(key)) + } + + async fn handle_rename_key(&self, query: &KeyRenameOpt) -> Result { + let mut key = self.get_existing_key(&query.key_pattern).await?; + key.name.update(query.new_name.clone()); + self.garage.key_table.insert(&key).await?; + Ok(AdminRpc::KeyInfo(key)) + } + + async fn handle_delete_key(&self, query: &KeyDeleteOpt) -> Result { + let mut key = self.get_existing_key(&query.key_pattern).await?; + if !query.yes { + return Err(Error::BadRpc( + "Add --yes flag to really perform this operation".to_string(), + )); + } + let state = key.state.as_option_mut().unwrap(); + + // --- done checking, now commit --- + // 1. Delete local aliases + for (alias, _, to) in state.local_aliases.items().iter() { + if let Deletable::Present(bucket_id) = to { + if let Some(mut bucket) = self.garage.bucket_table.get(bucket_id, &EmptyKey).await? + { + if let Deletable::Present(bucket_state) = &mut bucket.state { + bucket_state.local_aliases = bucket_state + .local_aliases + .update_mutator((key.key_id.to_string(), alias.to_string()), false); + self.garage.bucket_table.insert(&bucket).await?; + } + } else { + // ignore + } + } + } + // 2. Delete authorized buckets + for (ab_id, auth) in state.authorized_buckets.items().iter() { + if let Some(bucket) = self.garage.bucket_table.get(ab_id, &EmptyKey).await? 
{ + let new_perm = BucketKeyPerm { + timestamp: increment_logical_clock(auth.timestamp), + allow_read: false, + allow_write: false, + }; + if !bucket.is_deleted() { + self.update_bucket_key(bucket, &key.key_id, new_perm) + .await?; + } + } else { + // ignore + } + } + // 3. Actually delete key + key.state = Deletable::delete(); + self.garage.key_table.insert(&key).await?; + + Ok(AdminRpc::Ok(format!( + "Key {} was deleted successfully.", + key.key_id + ))) + } + + async fn handle_import_key(&self, query: &KeyImportOpt) -> Result { + let prev_key = self.garage.key_table.get(&EmptyKey, &query.key_id).await?; + if prev_key.is_some() { + return Err(Error::Message(format!("Key {} already exists in data store. Even if it is deleted, we can't let you create a new key with the same ID. Sorry.", query.key_id))); + } + let imported_key = Key::import(&query.key_id, &query.secret_key, &query.name); + self.garage.key_table.insert(&imported_key).await?; + Ok(AdminRpc::KeyInfo(imported_key)) } async fn get_existing_key(&self, pattern: &str) -> Result { @@ -267,7 +399,7 @@ impl AdminRpcHandler { ) .await? 
.into_iter() - .filter(|k| !k.deleted.get()) + .filter(|k| !k.state.is_deleted()) .collect::>(); if candidates.len() != 1 { Err(Error::Message(format!( @@ -279,54 +411,51 @@ impl AdminRpcHandler { } } + /// Update **key table** to inform of the new linked bucket + async fn update_key_bucket( + &self, + key: &Key, + bucket_id: Uuid, + allow_read: bool, + allow_write: bool, + ) -> Result { + let mut key = key.clone(); + let mut key_state = key.state.as_option_mut().unwrap(); + + let perm = key_state + .authorized_buckets + .get(&bucket_id) + .cloned() + .map(|old_perm| BucketKeyPerm { + timestamp: increment_logical_clock(old_perm.timestamp), + allow_read, + allow_write, + }) + .unwrap_or(BucketKeyPerm { + timestamp: now_msec(), + allow_read, + allow_write, + }); + + key_state.authorized_buckets = Map::put_mutator(bucket_id, perm); + + self.garage.key_table.insert(&key).await?; + Ok(perm) + } + /// Update **bucket table** to inform of the new linked key async fn update_bucket_key( &self, mut bucket: Bucket, key_id: &str, - allow_read: bool, - allow_write: bool, + new_perm: BucketKeyPerm, ) -> Result<(), Error> { - if let BucketState::Present(params) = bucket.state.get_mut() { - let ak = &mut params.authorized_keys; - let old_ak = ak.take_and_clear(); - ak.merge(&old_ak.update_mutator( - key_id.to_string(), - PermissionSet { - allow_read, - allow_write, - }, - )); - } else { - return Err(Error::Message( - "Bucket is deleted in update_bucket_key".to_string(), - )); - } + bucket.state.as_option_mut().unwrap().authorized_keys = + Map::put_mutator(key_id.to_string(), new_perm); self.garage.bucket_table.insert(&bucket).await?; Ok(()) } - /// Update **key table** to inform of the new linked bucket - async fn update_key_bucket( - &self, - key: &Key, - bucket: &str, - allow_read: bool, - allow_write: bool, - ) -> Result<(), Error> { - let mut key = key.clone(); - let old_map = key.authorized_buckets.take_and_clear(); - key.authorized_buckets.merge(&old_map.update_mutator( - 
bucket.to_string(), - PermissionSet { - allow_read, - allow_write, - }, - )); - self.garage.key_table.insert(&key).await?; - Ok(()) - } - async fn handle_launch_repair(self: &Arc, opt: RepairOpt) -> Result { if !opt.yes { return Err(Error::BadRpc( diff --git a/src/garage/cli/cmd.rs b/src/garage/cli/cmd.rs index a916974e..3cdf4d26 100644 --- a/src/garage/cli/cmd.rs +++ b/src/garage/cli/cmd.rs @@ -161,8 +161,11 @@ pub async fn cmd_admin( } AdminRpc::BucketList(bl) => { println!("List of buckets:"); - for bucket in bl { - println!("{}", bucket); + for alias in bl { + if let Some(p) = alias.state.get().as_option() { + let wflag = if p.website_access { "W" } else { " " }; + println!("- {} {} {:?}", wflag, alias.name, p.bucket_id); + } } } AdminRpc::BucketInfo(bucket) => { diff --git a/src/garage/cli/util.rs b/src/garage/cli/util.rs index 647a2449..be34183e 100644 --- a/src/garage/cli/util.rs +++ b/src/garage/cli/util.rs @@ -1,3 +1,4 @@ +use garage_util::crdt::*; use garage_util::data::Uuid; use garage_util::error::*; @@ -8,26 +9,50 @@ pub fn print_key_info(key: &Key) { println!("Key name: {}", key.name.get()); println!("Key ID: {}", key.key_id); println!("Secret key: {}", key.secret_key); - if key.deleted.get() { - println!("Key is deleted."); - } else { - println!("Authorized buckets:"); - for (b, _, perm) in key.authorized_buckets.items().iter() { - println!("- {} R:{} W:{}", b, perm.allow_read, perm.allow_write); + match &key.state { + Deletable::Present(p) => { + println!("\nKey-specific bucket aliases:"); + for (alias_name, _, alias) in p.local_aliases.items().iter() { + if let Some(bucket_id) = alias.as_option() { + println!("- {} {:?}", alias_name, bucket_id); + } + } + println!("\nAuthorized buckets:"); + for (b, perm) in p.authorized_buckets.items().iter() { + let rflag = if perm.allow_read { "R" } else { " " }; + let wflag = if perm.allow_write { "W" } else { " " }; + println!("- {}{} {:?}", rflag, wflag, b); + } + } + Deletable::Deleted => { + println!("\nKey 
is deleted."); } } } pub fn print_bucket_info(bucket: &Bucket) { - println!("Bucket name: {}", bucket.name); - match bucket.state.get() { - BucketState::Deleted => println!("Bucket is deleted."), - BucketState::Present(p) => { - println!("Authorized keys:"); - for (k, _, perm) in p.authorized_keys.items().iter() { - println!("- {} R:{} W:{}", k, perm.allow_read, perm.allow_write); + println!("Bucket: {}", hex::encode(bucket.id)); + match &bucket.state { + Deletable::Deleted => println!("Bucket is deleted."), + Deletable::Present(p) => { + println!("\nGlobal aliases:"); + for (alias, _, active) in p.aliases.items().iter() { + if *active { + println!("- {}", alias); + } + } + println!("\nKey-specific aliases:"); + for ((key_id, alias), _, active) in p.local_aliases.items().iter() { + if *active { + println!("- {} {}", key_id, alias); + } + } + println!("\nAuthorized keys:"); + for (k, perm) in p.authorized_keys.items().iter() { + let rflag = if perm.allow_read { "R" } else { " " }; + let wflag = if perm.allow_write { "W" } else { " " }; + println!("- {}{} {}", rflag, wflag, k); } - println!("Website access: {}", p.website.get()); } }; } diff --git a/src/garage/repair.rs b/src/garage/repair.rs index a786f1f1..3666ca8f 100644 --- a/src/garage/repair.rs +++ b/src/garage/repair.rs @@ -77,7 +77,7 @@ impl Repair { let object = self .garage .object_table - .get(&version.bucket, &version.key) + .get(&version.bucket_id, &version.key) .await?; let version_exists = match object { Some(o) => o @@ -92,7 +92,7 @@ impl Repair { .version_table .insert(&Version::new( version.uuid, - version.bucket, + version.bucket_id, version.key, true, )) diff --git a/src/model/Cargo.toml b/src/model/Cargo.toml index 1d695192..12c08719 100644 --- a/src/model/Cargo.toml +++ b/src/model/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "garage_model" -version = "0.5.0" +version = "0.6.0" authors = ["Alex Auvolat "] edition = "2018" license = "AGPL-3.0" @@ -14,9 +14,10 @@ path = "lib.rs" # See more keys and 
their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -garage_rpc = { version = "0.5.0", path = "../rpc" } -garage_table = { version = "0.5.0", path = "../table" } -garage_util = { version = "0.5.0", path = "../util" } +garage_rpc = { version = "0.6.0", path = "../rpc" } +garage_table = { version = "0.6.0", path = "../table" } +garage_util = { version = "0.6.0", path = "../util" } +garage_model_050 = { package = "garage_model", version = "0.5.0" } async-trait = "0.1.7" arc-swap = "1.0" diff --git a/src/model/bucket_alias_table.rs b/src/model/bucket_alias_table.rs new file mode 100644 index 00000000..4d300d05 --- /dev/null +++ b/src/model/bucket_alias_table.rs @@ -0,0 +1,68 @@ +use serde::{Deserialize, Serialize}; + +use garage_table::crdt::*; +use garage_table::*; +use garage_util::data::*; + +/// The bucket alias table holds the names given to buckets +/// in the global namespace. +#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)] +pub struct BucketAlias { + pub name: String, + pub state: crdt::Lww>, +} + +#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)] +pub struct AliasParams { + pub bucket_id: Uuid, + pub website_access: bool, +} + +impl AutoCrdt for AliasParams { + const WARN_IF_DIFFERENT: bool = true; +} + +impl BucketAlias { + pub fn new(name: String, bucket_id: Uuid, website_access: bool) -> Self { + BucketAlias { + name, + state: crdt::Lww::new(crdt::Deletable::present(AliasParams { + bucket_id, + website_access, + })), + } + } + pub fn is_deleted(&self) -> bool { + self.state.get().is_deleted() + } +} + +impl Crdt for BucketAlias { + fn merge(&mut self, o: &Self) { + self.state.merge(&o.state); + } +} + +impl Entry for BucketAlias { + fn partition_key(&self) -> &EmptyKey { + &EmptyKey + } + fn sort_key(&self) -> &String { + &self.name + } +} + +pub struct BucketAliasTable; + +impl TableSchema for BucketAliasTable { + const TABLE_NAME: &'static str = "bucket_alias"; + + type P = 
EmptyKey; + type S = String; + type E = BucketAlias; + type Filter = DeletedFilter; + + fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool { + filter.apply(entry.is_deleted()) + } +} diff --git a/src/model/bucket_helper.rs b/src/model/bucket_helper.rs new file mode 100644 index 00000000..e0720b4e --- /dev/null +++ b/src/model/bucket_helper.rs @@ -0,0 +1,41 @@ +use garage_util::data::*; +use garage_util::error::*; + +use garage_table::util::EmptyKey; + +use crate::bucket_table::Bucket; +use crate::garage::Garage; + +pub struct BucketHelper<'a>(pub(crate) &'a Garage); + +#[allow(clippy::ptr_arg)] +impl<'a> BucketHelper<'a> { + pub async fn resolve_global_bucket_name( + &self, + bucket_name: &String, + ) -> Result, Error> { + Ok(self + .0 + .bucket_alias_table + .get(&EmptyKey, bucket_name) + .await? + .map(|x| x.state.get().as_option().map(|x| x.bucket_id)) + .flatten()) + } + + #[allow(clippy::ptr_arg)] + pub async fn get_existing_bucket(&self, bucket_id: Uuid) -> Result { + self.0 + .bucket_table + .get(&bucket_id, &EmptyKey) + .await? + .filter(|b| !b.is_deleted()) + .map(Ok) + .unwrap_or_else(|| { + Err(Error::BadRpc(format!( + "Bucket {:?} does not exist", + bucket_id + ))) + }) + } +} diff --git a/src/model/bucket_table.rs b/src/model/bucket_table.rs index 2cb206ce..ac40407e 100644 --- a/src/model/bucket_table.rs +++ b/src/model/bucket_table.rs @@ -2,8 +2,10 @@ use serde::{Deserialize, Serialize}; use garage_table::crdt::Crdt; use garage_table::*; +use garage_util::data::*; +use garage_util::time::*; -use crate::key_table::PermissionSet; +use crate::permission::BucketKeyPerm; /// A bucket is a collection of objects /// @@ -12,49 +14,38 @@ use crate::key_table::PermissionSet; /// - A bucket has 2 states, Present or Deleted and parameters make sense only if present. 
#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)] pub struct Bucket { - /// Name of the bucket - pub name: String, + /// ID of the bucket + pub id: Uuid, /// State, and configuration if not deleted, of the bucket - pub state: crdt::Lww, -} - -/// State of a bucket -#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)] -pub enum BucketState { - /// The bucket is deleted - Deleted, - /// The bucket exists - Present(BucketParams), -} - -impl Crdt for BucketState { - fn merge(&mut self, o: &Self) { - match o { - BucketState::Deleted => *self = BucketState::Deleted, - BucketState::Present(other_params) => { - if let BucketState::Present(params) = self { - params.merge(other_params); - } - } - } - } + pub state: crdt::Deletable, } /// Configuration for a bucket #[derive(PartialEq, Clone, Debug, Serialize, Deserialize)] pub struct BucketParams { + /// Bucket's creation date + pub creation_date: u64, /// Map of key with access to the bucket, and what kind of access they give - pub authorized_keys: crdt::LwwMap, - /// Is the bucket served as http - pub website: crdt::Lww, + pub authorized_keys: crdt::Map, + /// Map of aliases that are or have been given to this bucket + /// in the global namespace + /// (not authoritative: this is just used as an indication to + /// map back to aliases when doing ListBuckets) + pub aliases: crdt::LwwMap, + /// Map of aliases that are or have been given to this bucket + /// in namespaces local to keys + /// key = (access key id, alias name) + pub local_aliases: crdt::LwwMap<(String, String), bool>, } impl BucketParams { /// Create an empty BucketParams with no authorized keys and no website accesss pub fn new() -> Self { BucketParams { - authorized_keys: crdt::LwwMap::new(), - website: crdt::Lww::new(false), + creation_date: now_msec(), + authorized_keys: crdt::Map::new(), + aliases: crdt::LwwMap::new(), + local_aliases: crdt::LwwMap::new(), } } } @@ -62,7 +53,14 @@ impl BucketParams { impl Crdt for BucketParams { fn merge(&mut 
self, o: &Self) { self.authorized_keys.merge(&o.authorized_keys); - self.website.merge(&o.website); + self.aliases.merge(&o.aliases); + self.local_aliases.merge(&o.local_aliases); + } +} + +impl Default for Bucket { + fn default() -> Self { + Self::new() } } @@ -74,34 +72,34 @@ impl Default for BucketParams { impl Bucket { /// Initializes a new instance of the Bucket struct - pub fn new(name: String) -> Self { + pub fn new() -> Self { Bucket { - name, - state: crdt::Lww::new(BucketState::Present(BucketParams::new())), + id: gen_uuid(), + state: crdt::Deletable::present(BucketParams::new()), } } /// Returns true if this represents a deleted bucket pub fn is_deleted(&self) -> bool { - *self.state.get() == BucketState::Deleted + self.state.is_deleted() } /// Return the list of authorized keys, when each was updated, and the permission associated to /// the key - pub fn authorized_keys(&self) -> &[(String, u64, PermissionSet)] { - match self.state.get() { - BucketState::Deleted => &[], - BucketState::Present(state) => state.authorized_keys.items(), + pub fn authorized_keys(&self) -> &[(String, BucketKeyPerm)] { + match &self.state { + crdt::Deletable::Deleted => &[], + crdt::Deletable::Present(state) => state.authorized_keys.items(), } } } -impl Entry for Bucket { - fn partition_key(&self) -> &EmptyKey { - &EmptyKey +impl Entry for Bucket { + fn partition_key(&self) -> &Uuid { + &self.id } - fn sort_key(&self) -> &String { - &self.name + fn sort_key(&self) -> &EmptyKey { + &EmptyKey } } @@ -114,10 +112,10 @@ impl Crdt for Bucket { pub struct BucketTable; impl TableSchema for BucketTable { - const TABLE_NAME: &'static str = "bucket"; + const TABLE_NAME: &'static str = "bucket_v2"; - type P = EmptyKey; - type S = String; + type P = Uuid; + type S = EmptyKey; type E = Bucket; type Filter = DeletedFilter; diff --git a/src/model/garage.rs b/src/model/garage.rs index a874cca8..9db1843c 100644 --- a/src/model/garage.rs +++ b/src/model/garage.rs @@ -14,6 +14,8 @@ use 
garage_table::*; use crate::block::*; use crate::block_ref_table::*; +use crate::bucket_alias_table::*; +use crate::bucket_helper::*; use crate::bucket_table::*; use crate::key_table::*; use crate::object_table::*; @@ -35,6 +37,8 @@ pub struct Garage { /// Table containing informations about buckets pub bucket_table: Arc>, + /// Table containing informations about bucket aliases + pub bucket_alias_table: Arc>, /// Table containing informations about api keys pub key_table: Arc>, @@ -120,6 +124,14 @@ impl Garage { info!("Initialize bucket_table..."); let bucket_table = Table::new(BucketTable, control_rep_param.clone(), system.clone(), &db); + info!("Initialize bucket_alias_table..."); + let bucket_alias_table = Table::new( + BucketAliasTable, + control_rep_param.clone(), + system.clone(), + &db, + ); + info!("Initialize key_table_table..."); let key_table = Table::new(KeyTable, control_rep_param, system.clone(), &db); @@ -131,6 +143,7 @@ impl Garage { system, block_manager, bucket_table, + bucket_alias_table, key_table, object_table, version_table, @@ -148,4 +161,8 @@ impl Garage { pub fn break_reference_cycles(&self) { self.block_manager.garage.swap(None); } + + pub fn bucket_helper(&self) -> BucketHelper { + BucketHelper(self) + } } diff --git a/src/model/key_table.rs b/src/model/key_table.rs index 225f51c7..e87f5949 100644 --- a/src/model/key_table.rs +++ b/src/model/key_table.rs @@ -2,6 +2,9 @@ use serde::{Deserialize, Serialize}; use garage_table::crdt::*; use garage_table::*; +use garage_util::data::*; + +use crate::permission::BucketKeyPerm; /// An api key #[derive(PartialEq, Clone, Debug, Serialize, Deserialize)] @@ -15,12 +18,39 @@ pub struct Key { /// Name for the key pub name: crdt::Lww, - /// Is the key deleted - pub deleted: crdt::Bool, + /// If the key is present: it gives some permissions, + /// a map of bucket IDs (uuids) to permissions. 
+ /// Otherwise no permissions are granted to key + pub state: crdt::Deletable, +} - /// Buckets in which the key is authorized. Empty if `Key` is deleted - // CRDT interaction: deleted implies authorized_buckets is empty - pub authorized_buckets: crdt::LwwMap, +/// Configuration for a key +#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)] +pub struct KeyParams { + pub authorized_buckets: crdt::Map, + pub local_aliases: crdt::LwwMap>, +} + +impl KeyParams { + pub fn new() -> Self { + KeyParams { + authorized_buckets: crdt::Map::new(), + local_aliases: crdt::LwwMap::new(), + } + } +} + +impl Default for KeyParams { + fn default() -> Self { + Self::new() + } +} + +impl Crdt for KeyParams { + fn merge(&mut self, o: &Self) { + self.authorized_buckets.merge(&o.authorized_buckets); + self.local_aliases.merge(&o.local_aliases); + } } impl Key { @@ -32,8 +62,7 @@ impl Key { key_id, secret_key, name: crdt::Lww::new(name), - deleted: crdt::Bool::new(false), - authorized_buckets: crdt::LwwMap::new(), + state: crdt::Deletable::present(KeyParams::new()), } } @@ -43,8 +72,7 @@ impl Key { key_id: key_id.to_string(), secret_key: secret_key.to_string(), name: crdt::Lww::new(name.to_string()), - deleted: crdt::Bool::new(false), - authorized_buckets: crdt::LwwMap::new(), + state: crdt::Deletable::present(KeyParams::new()), } } @@ -54,41 +82,37 @@ impl Key { key_id, secret_key: "".into(), name: crdt::Lww::new("".to_string()), - deleted: crdt::Bool::new(true), - authorized_buckets: crdt::LwwMap::new(), + state: crdt::Deletable::Deleted, } } /// Check if `Key` is allowed to read in bucket - pub fn allow_read(&self, bucket: &str) -> bool { - self.authorized_buckets - .get(&bucket.to_string()) - .map(|x| x.allow_read) - .unwrap_or(false) + pub fn allow_read(&self, bucket: &Uuid) -> bool { + if let crdt::Deletable::Present(params) = &self.state { + params + .authorized_buckets + .get(bucket) + .map(|x| x.allow_read) + .unwrap_or(false) + } else { + false + } } /// Check if `Key` 
is allowed to write in bucket - pub fn allow_write(&self, bucket: &str) -> bool { - self.authorized_buckets - .get(&bucket.to_string()) - .map(|x| x.allow_write) - .unwrap_or(false) + pub fn allow_write(&self, bucket: &Uuid) -> bool { + if let crdt::Deletable::Present(params) = &self.state { + params + .authorized_buckets + .get(bucket) + .map(|x| x.allow_write) + .unwrap_or(false) + } else { + false + } } } -/// Permission given to a key in a bucket -#[derive(PartialOrd, Ord, PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] -pub struct PermissionSet { - /// The key can be used to read the bucket - pub allow_read: bool, - /// The key can be used to write in the bucket - pub allow_write: bool, -} - -impl AutoCrdt for PermissionSet { - const WARN_IF_DIFFERENT: bool = true; -} - impl Entry for Key { fn partition_key(&self) -> &EmptyKey { &EmptyKey @@ -101,13 +125,7 @@ impl Entry for Key { impl Crdt for Key { fn merge(&mut self, other: &Self) { self.name.merge(&other.name); - self.deleted.merge(&other.deleted); - - if self.deleted.get() { - self.authorized_buckets.clear(); - } else { - self.authorized_buckets.merge(&other.authorized_buckets); - } + self.state.merge(&other.state); } } @@ -129,7 +147,7 @@ impl TableSchema for KeyTable { fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool { match filter { - KeyFilter::Deleted(df) => df.apply(entry.deleted.get()), + KeyFilter::Deleted(df) => df.apply(entry.state.is_deleted()), KeyFilter::Matches(pat) => { let pat = pat.to_lowercase(); entry.key_id.to_lowercase().starts_with(&pat) diff --git a/src/model/lib.rs b/src/model/lib.rs index b4a8ddb7..fe8cfdad 100644 --- a/src/model/lib.rs +++ b/src/model/lib.rs @@ -3,8 +3,11 @@ extern crate log; pub mod block; pub mod block_ref_table; +pub mod bucket_alias_table; +pub mod bucket_helper; pub mod bucket_table; pub mod garage; pub mod key_table; pub mod object_table; +pub mod permission; pub mod version_table; diff --git a/src/model/object_table.rs 
b/src/model/object_table.rs index 9eec47ff..285cb5a7 100644 --- a/src/model/object_table.rs +++ b/src/model/object_table.rs @@ -15,7 +15,7 @@ use crate::version_table::*; #[derive(PartialEq, Clone, Debug, Serialize, Deserialize)] pub struct Object { /// The bucket in which the object is stored, used as partition key - pub bucket: String, + pub bucket_id: Uuid, /// The key at which the object is stored in its bucket, used as sorting key pub key: String, @@ -26,9 +26,9 @@ pub struct Object { impl Object { /// Initialize an Object struct from parts - pub fn new(bucket: String, key: String, versions: Vec) -> Self { + pub fn new(bucket_id: Uuid, key: String, versions: Vec) -> Self { let mut ret = Self { - bucket, + bucket_id, key, versions: vec![], }; @@ -164,9 +164,9 @@ impl ObjectVersion { } } -impl Entry for Object { - fn partition_key(&self) -> &String { - &self.bucket +impl Entry for Object { + fn partition_key(&self) -> &Uuid { + &self.bucket_id } fn sort_key(&self) -> &String { &self.key @@ -219,7 +219,7 @@ pub struct ObjectTable { impl TableSchema for ObjectTable { const TABLE_NAME: &'static str = "object"; - type P = String; + type P = Uuid; type S = String; type E = Object; type Filter = DeletedFilter; @@ -242,7 +242,7 @@ impl TableSchema for ObjectTable { }; if newly_deleted { let deleted_version = - Version::new(v.uuid, old_v.bucket.clone(), old_v.key.clone(), true); + Version::new(v.uuid, old_v.bucket_id, old_v.key.clone(), true); version_table.insert(&deleted_version).await?; } } diff --git a/src/model/permission.rs b/src/model/permission.rs new file mode 100644 index 00000000..b61c92ce --- /dev/null +++ b/src/model/permission.rs @@ -0,0 +1,37 @@ +use std::cmp::Ordering; + +use serde::{Deserialize, Serialize}; + +use garage_util::crdt::*; + +/// Permission given to a key in a bucket +#[derive(PartialOrd, Ord, PartialEq, Eq, Clone, Copy, Debug, Serialize, Deserialize)] +pub struct BucketKeyPerm { + /// Timestamp at which the permission was given + pub 
timestamp: u64, + + /// The key can be used to read the bucket + pub allow_read: bool, + /// The key can be used to write in the bucket + pub allow_write: bool, +} + +impl Crdt for BucketKeyPerm { + fn merge(&mut self, other: &Self) { + match other.timestamp.cmp(&self.timestamp) { + Ordering::Greater => { + *self = *other; + } + Ordering::Equal if other != self => { + warn!("Different permission sets with same timestamp: {:?} and {:?}, merging to most restricted permission set.", self, other); + if !other.allow_read { + self.allow_read = false; + } + if !other.allow_write { + self.allow_write = false; + } + } + _ => (), + } + } +} diff --git a/src/model/version_table.rs b/src/model/version_table.rs index 18ec8e1d..4edea0b7 100644 --- a/src/model/version_table.rs +++ b/src/model/version_table.rs @@ -29,19 +29,19 @@ pub struct Version { // Back link to bucket+key so that we can figure if // this was deleted later on /// Bucket in which the related object is stored - pub bucket: String, + pub bucket_id: Uuid, /// Key in which the related object is stored pub key: String, } impl Version { - pub fn new(uuid: Uuid, bucket: String, key: String, deleted: bool) -> Self { + pub fn new(uuid: Uuid, bucket_id: Uuid, key: String, deleted: bool) -> Self { Self { uuid, deleted: deleted.into(), blocks: crdt::Map::new(), parts_etags: crdt::Map::new(), - bucket, + bucket_id, key, } } @@ -82,8 +82,8 @@ impl AutoCrdt for VersionBlock { const WARN_IF_DIFFERENT: bool = true; } -impl Entry for Version { - fn partition_key(&self) -> &Hash { +impl Entry for Version { + fn partition_key(&self) -> &Uuid { &self.uuid } fn sort_key(&self) -> &EmptyKey { @@ -116,7 +116,7 @@ pub struct VersionTable { impl TableSchema for VersionTable { const TABLE_NAME: &'static str = "version"; - type P = Hash; + type P = Uuid; type S = EmptyKey; type E = Version; type Filter = DeletedFilter; diff --git a/src/rpc/Cargo.toml b/src/rpc/Cargo.toml index d8ebb71e..b49a126a 100644 --- a/src/rpc/Cargo.toml +++ 
b/src/rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "garage_rpc" -version = "0.5.0" +version = "0.6.0" authors = ["Alex Auvolat "] edition = "2018" license = "AGPL-3.0" @@ -14,7 +14,7 @@ path = "lib.rs" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -garage_util = { version = "0.5.0", path = "../util" } +garage_util = { version = "0.6.0", path = "../util" } arc-swap = "1.0" bytes = "1.0" diff --git a/src/table/Cargo.toml b/src/table/Cargo.toml index dc37f12c..91d71ddd 100644 --- a/src/table/Cargo.toml +++ b/src/table/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "garage_table" -version = "0.5.0" +version = "0.6.0" authors = ["Alex Auvolat "] edition = "2018" license = "AGPL-3.0" @@ -14,8 +14,8 @@ path = "lib.rs" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -garage_rpc = { version = "0.5.0", path = "../rpc" } -garage_util = { version = "0.5.0", path = "../util" } +garage_rpc = { version = "0.6.0", path = "../rpc" } +garage_util = { version = "0.6.0", path = "../util" } async-trait = "0.1.7" bytes = "1.0" diff --git a/src/table/schema.rs b/src/table/schema.rs index fa51fa84..cfe86fba 100644 --- a/src/table/schema.rs +++ b/src/table/schema.rs @@ -16,7 +16,7 @@ impl PartitionKey for String { } } -impl PartitionKey for Hash { +impl PartitionKey for FixedBytes32 { fn hash(&self) -> Hash { *self } @@ -34,7 +34,7 @@ impl SortKey for String { } } -impl SortKey for Hash { +impl SortKey for FixedBytes32 { fn sort_key(&self) -> &[u8] { self.as_slice() } diff --git a/src/util/Cargo.toml b/src/util/Cargo.toml index e33f8a66..d5200f98 100644 --- a/src/util/Cargo.toml +++ b/src/util/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "garage_util" -version = "0.5.0" +version = "0.6.0" authors = ["Alex Auvolat "] edition = "2018" license = "AGPL-3.0" diff --git a/src/util/crdt/deletable.rs b/src/util/crdt/deletable.rs new file mode 100644 index 
00000000..c76f5cbb --- /dev/null +++ b/src/util/crdt/deletable.rs @@ -0,0 +1,72 @@ +use serde::{Deserialize, Serialize}; + +use crate::crdt::crdt::*; + +/// Deletable object (once deleted, cannot go back) +#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq)] +pub enum Deletable { + Present(T), + Deleted, +} + +impl Deletable { + /// Create a new deletable object that isn't deleted + pub fn present(v: T) -> Self { + Self::Present(v) + } + /// Create a new deletable object that is deleted + pub fn delete() -> Self { + Self::Deleted + } + /// As option + pub fn as_option(&self) -> Option<&T> { + match self { + Self::Present(v) => Some(v), + Self::Deleted => None, + } + } + /// As option, mutable + pub fn as_option_mut(&mut self) -> Option<&mut T> { + match self { + Self::Present(v) => Some(v), + Self::Deleted => None, + } + } + /// Into option + pub fn into_option(self) -> Option { + match self { + Self::Present(v) => Some(v), + Self::Deleted => None, + } + } + /// Is object deleted? + pub fn is_deleted(&self) -> bool { + matches!(self, Self::Deleted) + } +} + +impl From> for Deletable { + fn from(v: Option) -> Self { + v.map(Self::Present).unwrap_or(Self::Deleted) + } +} + +impl From> for Option { + fn from(v: Deletable) -> Option { + match v { + Deletable::Present(v) => Some(v), + Deletable::Deleted => None, + } + } +} + +impl Crdt for Deletable { + fn merge(&mut self, other: &Self) { + if let Deletable::Present(v) = self { + match other { + Deletable::Deleted => *self = Deletable::Deleted, + Deletable::Present(v2) => v.merge(v2), + } + } + } +} diff --git a/src/util/crdt/lww.rs b/src/util/crdt/lww.rs index 43d13f27..bc686e05 100644 --- a/src/util/crdt/lww.rs +++ b/src/util/crdt/lww.rs @@ -82,6 +82,11 @@ where &self.v } + /// Take the value inside the CRDT (discards the timesamp) + pub fn take(self) -> T { + self.v + } + /// Get a mutable reference to the CRDT's value /// /// This is usefull to mutate the inside value without changing the LWW timestamp. 
diff --git a/src/util/crdt/lww_map.rs b/src/util/crdt/lww_map.rs index 3e9aba79..21cb6e12 100644 --- a/src/util/crdt/lww_map.rs +++ b/src/util/crdt/lww_map.rs @@ -30,8 +30,8 @@ pub struct LwwMap { impl LwwMap where - K: Ord, - V: Crdt, + K: Clone + Ord, + V: Clone + Crdt, { /// Create a new empty map CRDT pub fn new() -> Self { @@ -73,6 +73,10 @@ where }; Self { vals: new_vals } } + + pub fn update_in_place(&mut self, k: K, new_v: V) { + self.merge(&self.update_mutator(k, new_v)); + } /// Takes all of the values of the map and returns them. The current map is reset to the /// empty map. This is very usefull to produce in-place a new map that contains only a delta /// that modifies a certain value: @@ -158,8 +162,8 @@ where impl Default for LwwMap where - K: Ord, - V: Crdt, + K: Clone + Ord, + V: Clone + Crdt, { fn default() -> Self { Self::new() diff --git a/src/util/crdt/mod.rs b/src/util/crdt/mod.rs index 9663a5a5..6ba575ed 100644 --- a/src/util/crdt/mod.rs +++ b/src/util/crdt/mod.rs @@ -12,12 +12,14 @@ mod bool; #[allow(clippy::module_inception)] mod crdt; +mod deletable; mod lww; mod lww_map; mod map; pub use self::bool::*; pub use crdt::*; +pub use deletable::*; pub use lww::*; pub use lww_map::*; pub use map::*; diff --git a/src/util/error.rs b/src/util/error.rs index ff03d05b..08cf1302 100644 --- a/src/util/error.rs +++ b/src/util/error.rs @@ -119,6 +119,35 @@ where } } +/// Trait to map error to the Bad Request error code +pub trait OkOrMessage { + type S2; + fn ok_or_message>(self, message: M) -> Self::S2; +} + +impl OkOrMessage for Result +where + E: std::fmt::Display, +{ + type S2 = Result; + fn ok_or_message>(self, message: M) -> Result { + match self { + Ok(x) => Ok(x), + Err(e) => Err(Error::Message(format!("{}: {}", message.into(), e))), + } + } +} + +impl OkOrMessage for Option { + type S2 = Result; + fn ok_or_message>(self, message: M) -> Result { + match self { + Some(x) => Ok(x), + None => Err(Error::Message(message.into())), + } + } +} + // 
Custom serialization for our error type, for use in RPC. // Errors are serialized as a string of their Display representation. // Upon deserialization, they all become a RemoteError with the diff --git a/src/util/time.rs b/src/util/time.rs index 238db2c3..d9192443 100644 --- a/src/util/time.rs +++ b/src/util/time.rs @@ -10,6 +10,11 @@ pub fn now_msec() -> u64 { .as_millis() as u64 } +/// Increment logical clock +pub fn increment_logical_clock(prev: u64) -> u64 { + std::cmp::max(prev + 1, now_msec()) +} + /// Convert a timestamp represented as milliseconds since UNIX Epoch to /// its RFC3339 representation, such as "2021-01-01T12:30:00Z" pub fn msec_to_rfc3339(msecs: u64) -> String { diff --git a/src/web/Cargo.toml b/src/web/Cargo.toml index 72701c90..54211f5d 100644 --- a/src/web/Cargo.toml +++ b/src/web/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "garage_web" -version = "0.5.0" +version = "0.6.0" authors = ["Alex Auvolat ", "Quentin Dufour "] edition = "2018" license = "AGPL-3.0" @@ -14,10 +14,10 @@ path = "lib.rs" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -garage_api = { version = "0.5.0", path = "../api" } -garage_model = { version = "0.5.0", path = "../model" } -garage_util = { version = "0.5.0", path = "../util" } -garage_table = { version = "0.5.0", path = "../table" } +garage_api = { version = "0.6.0", path = "../api" } +garage_model = { version = "0.6.0", path = "../model" } +garage_util = { version = "0.6.0", path = "../util" } +garage_table = { version = "0.6.0", path = "../table" } err-derive = "0.3" log = "0.4" diff --git a/src/web/web_server.rs b/src/web/web_server.rs index 4a603c05..5eb25e93 100644 --- a/src/web/web_server.rs +++ b/src/web/web_server.rs @@ -12,7 +12,6 @@ use hyper::{ use crate::error::*; use garage_api::helpers::{authority_to_host, host_to_bucket}; use garage_api::s3_get::{handle_get, handle_head}; -use garage_model::bucket_table::*; use 
garage_model::garage::Garage; use garage_table::*; use garage_util::error::Error as GarageError; @@ -77,31 +76,39 @@ async fn serve_file(garage: Arc, req: Request) -> Result Ok(()), - _ => Err(Error::NotFound), - }?; - // Get path let path = req.uri().path().to_string(); let index = &garage.config.s3_web.index; let key = path_to_key(&path, index)?; - info!("Selected bucket: \"{}\", selected key: \"{}\"", bucket, key); + info!( + "Selected bucket: \"{}\" {:?}, selected key: \"{}\"", + bucket_name, bucket_id, key + ); let res = match *req.method() { - Method::HEAD => handle_head(garage, &req, bucket, &key).await?, - Method::GET => handle_get(garage, &req, bucket, &key).await?, + Method::HEAD => handle_head(garage, &req, bucket_id, &key).await?, + Method::GET => handle_get(garage, &req, bucket_id, &key).await?, _ => return Err(Error::BadRequest("HTTP method not supported".to_string())), }; -- 2.43.4 From 53f71b3a57b3c1828292e26b7865d31e9bec44d6 Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Wed, 15 Dec 2021 18:36:15 +0100 Subject: [PATCH 03/19] Implement bucket alias and bucket unalias --- src/garage/admin.rs | 187 +++++++++++++++++++++++++++++++++++++ src/garage/cli/cmd.rs | 5 +- src/garage/cli/structs.rs | 31 ++++++ src/garage/cli/util.rs | 11 ++- src/model/bucket_helper.rs | 28 ++++-- 5 files changed, 252 insertions(+), 10 deletions(-) diff --git a/src/garage/admin.rs b/src/garage/admin.rs index 6db8bfbe..756f6007 100644 --- a/src/garage/admin.rs +++ b/src/garage/admin.rs @@ -77,6 +77,8 @@ impl AdminRpcHandler { } BucketOperation::Create(query) => self.handle_create_bucket(&query.name).await, BucketOperation::Delete(query) => self.handle_delete_bucket(query).await, + BucketOperation::Alias(query) => self.handle_alias_bucket(query).await, + BucketOperation::Unalias(query) => self.handle_unalias_bucket(query).await, BucketOperation::Allow(query) => self.handle_bucket_allow(query).await, BucketOperation::Deny(query) => self.handle_bucket_deny(query).await, 
BucketOperation::Website(query) => self.handle_bucket_website(query).await, @@ -193,6 +195,191 @@ impl AdminRpcHandler { Ok(AdminRpc::Ok(format!("Bucket {} was deleted.", query.name))) } + async fn handle_alias_bucket(&self, query: &AliasBucketOpt) -> Result { + let bucket_id = self + .garage + .bucket_helper() + .resolve_global_bucket_name(&query.existing_bucket) + .await? + .ok_or_message("Bucket not found")?; + let mut bucket = self + .garage + .bucket_helper() + .get_existing_bucket(bucket_id) + .await?; + + if let Some(key_local) = &query.local { + let mut key = self.get_existing_key(key_local).await?; + let mut key_param = key.state.as_option_mut().unwrap(); + + if let Some(Deletable::Present(existing_alias)) = + key_param.local_aliases.get(&query.new_name) + { + if *existing_alias == bucket_id { + return Ok(AdminRpc::Ok(format!( + "Alias {} already points to bucket {:?} in namespace of key {}", + query.new_name, bucket_id, key.key_id + ))); + } else { + return Err(Error::Message(format!("Alias {} already exists and points to different bucket: {:?} in namespace of key {}", query.new_name, existing_alias, key.key_id))); + } + } + + key_param.local_aliases = key_param + .local_aliases + .update_mutator(query.new_name.clone(), Deletable::present(bucket_id)); + self.garage.key_table.insert(&key).await?; + + let mut bucket_p = bucket.state.as_option_mut().unwrap(); + bucket_p.local_aliases = bucket_p + .local_aliases + .update_mutator((key.key_id.clone(), query.new_name.clone()), true); + self.garage.bucket_table.insert(&bucket).await?; + + Ok(AdminRpc::Ok(format!( + "Alias {} created to bucket {:?} in namespace of key {}", + query.new_name, bucket_id, key.key_id + ))) + } else { + let mut alias = self + .garage + .bucket_alias_table + .get(&EmptyKey, &query.new_name) + .await? 
+ .unwrap_or(BucketAlias { + name: query.new_name.clone(), + state: Lww::new(Deletable::delete()), + }); + + if let Some(existing_alias) = alias.state.get().as_option() { + if existing_alias.bucket_id == bucket_id { + return Ok(AdminRpc::Ok(format!( + "Alias {} already points to bucket {:?}", + query.new_name, bucket_id + ))); + } else { + return Err(Error::Message(format!( + "Alias {} already exists and points to different bucket: {:?}", + query.new_name, existing_alias.bucket_id + ))); + } + } + + // Checks ok, add alias + alias.state.update(Deletable::present(AliasParams { + bucket_id, + website_access: false, + })); + self.garage.bucket_alias_table.insert(&alias).await?; + + let mut bucket_p = bucket.state.as_option_mut().unwrap(); + bucket_p.aliases = bucket_p + .aliases + .update_mutator(query.new_name.clone(), true); + self.garage.bucket_table.insert(&bucket).await?; + + Ok(AdminRpc::Ok(format!( + "Alias {} created to bucket {:?}", + query.new_name, bucket_id + ))) + } + } + + async fn handle_unalias_bucket(&self, query: &UnaliasBucketOpt) -> Result { + if let Some(key_local) = &query.local { + let mut key = self.get_existing_key(key_local).await?; + + let bucket_id = key + .state + .as_option() + .unwrap() + .local_aliases + .get(&query.name) + .map(|a| a.into_option()) + .flatten() + .ok_or_message("Bucket not found")?; + let mut bucket = self + .garage + .bucket_helper() + .get_existing_bucket(bucket_id) + .await?; + let mut bucket_state = bucket.state.as_option_mut().unwrap(); + + let has_other_aliases = bucket_state + .aliases + .items() + .iter() + .any(|(_, _, active)| *active) + || bucket_state + .local_aliases + .items() + .iter() + .any(|((k, n), _, active)| *k == key.key_id && *n == query.name && *active); + if !has_other_aliases { + return Err(Error::Message(format!("Bucket {} doesn't have other aliases, please delete it instead of just unaliasing.", query.name))); + } + + let mut key_param = key.state.as_option_mut().unwrap(); + 
key_param.local_aliases = key_param + .local_aliases + .update_mutator(query.name.clone(), Deletable::delete()); + self.garage.key_table.insert(&key).await?; + + bucket_state.local_aliases = bucket_state + .local_aliases + .update_mutator((key.key_id.clone(), query.name.clone()), false); + self.garage.bucket_table.insert(&bucket).await?; + + Ok(AdminRpc::Ok(format!( + "Bucket alias {} deleted from namespace of key {}", + query.name, key.key_id + ))) + } else { + let bucket_id = self + .garage + .bucket_helper() + .resolve_global_bucket_name(&query.name) + .await? + .ok_or_message("Bucket not found")?; + let mut bucket = self + .garage + .bucket_helper() + .get_existing_bucket(bucket_id) + .await?; + let mut bucket_state = bucket.state.as_option_mut().unwrap(); + + let has_other_aliases = bucket_state + .aliases + .items() + .iter() + .any(|(name, _, active)| *name != query.name && *active) + || bucket_state + .local_aliases + .items() + .iter() + .any(|(_, _, active)| *active); + if !has_other_aliases { + return Err(Error::Message(format!("Bucket {} doesn't have other aliases, please delete it instead of just unaliasing.", query.name))); + } + + let mut alias = self + .garage + .bucket_alias_table + .get(&EmptyKey, &query.name) + .await? 
+ .ok_or_message("Internal error: alias not found")?; + alias.state.update(Deletable::delete()); + self.garage.bucket_alias_table.insert(&alias).await?; + + bucket_state.aliases = bucket_state + .aliases + .update_mutator(query.name.clone(), false); + self.garage.bucket_table.insert(&bucket).await?; + + Ok(AdminRpc::Ok(format!("Bucket alias {} deleted", query.name))) + } + } + async fn handle_bucket_allow(&self, query: &PermBucketOpt) -> Result { let bucket_id = self .garage diff --git a/src/garage/cli/cmd.rs b/src/garage/cli/cmd.rs index 3cdf4d26..015eeec9 100644 --- a/src/garage/cli/cmd.rs +++ b/src/garage/cli/cmd.rs @@ -161,12 +161,15 @@ pub async fn cmd_admin( } AdminRpc::BucketList(bl) => { println!("List of buckets:"); + let mut table = vec![]; for alias in bl { if let Some(p) = alias.state.get().as_option() { let wflag = if p.website_access { "W" } else { " " }; - println!("- {} {} {:?}", wflag, alias.name, p.bucket_id); + table.push(format!("{}\t{}\t{:?}", wflag, alias.name, p.bucket_id)); } } + format_table(table); + println!("Buckets that don't have a global alias (i.e. 
that only exist in the namespace of an access key) are not shown."); } AdminRpc::BucketInfo(bucket) => { print_bucket_info(&bucket); diff --git a/src/garage/cli/structs.rs b/src/garage/cli/structs.rs index b2b5375d..590be1c0 100644 --- a/src/garage/cli/structs.rs +++ b/src/garage/cli/structs.rs @@ -150,6 +150,14 @@ pub enum BucketOperation { #[structopt(name = "delete")] Delete(DeleteBucketOpt), + /// Alias bucket under new name + #[structopt(name = "alias")] + Alias(AliasBucketOpt), + + /// Remove bucket alias + #[structopt(name = "unalias")] + Unalias(UnaliasBucketOpt), + /// Allow key to read or write to bucket #[structopt(name = "allow")] Allow(PermBucketOpt), @@ -193,6 +201,29 @@ pub struct DeleteBucketOpt { pub yes: bool, } +#[derive(Serialize, Deserialize, StructOpt, Debug)] +pub struct AliasBucketOpt { + /// Existing bucket name (its alias in global namespace or its full hex uuid) + pub existing_bucket: String, + + /// New bucket name + pub new_name: String, + + /// Make this alias local to the specified access key + #[structopt(long = "local")] + pub local: Option, +} + +#[derive(Serialize, Deserialize, StructOpt, Debug)] +pub struct UnaliasBucketOpt { + /// Bucket name + pub name: String, + + /// Unalias in bucket namespace local to this access key + #[structopt(long = "local")] + pub local: Option, +} + #[derive(Serialize, Deserialize, StructOpt, Debug)] pub struct PermBucketOpt { /// Access key name or ID diff --git a/src/garage/cli/util.rs b/src/garage/cli/util.rs index be34183e..ba88502d 100644 --- a/src/garage/cli/util.rs +++ b/src/garage/cli/util.rs @@ -12,17 +12,22 @@ pub fn print_key_info(key: &Key) { match &key.state { Deletable::Present(p) => { println!("\nKey-specific bucket aliases:"); + let mut table = vec![]; for (alias_name, _, alias) in p.local_aliases.items().iter() { if let Some(bucket_id) = alias.as_option() { - println!("- {} {:?}", alias_name, bucket_id); + table.push(format!("\t{}\t{}", alias_name, hex::encode(bucket_id))); } } + 
format_table(table); + println!("\nAuthorized buckets:"); + let mut table = vec![]; for (b, perm) in p.authorized_buckets.items().iter() { let rflag = if perm.allow_read { "R" } else { " " }; let wflag = if perm.allow_write { "W" } else { " " }; - println!("- {}{} {:?}", rflag, wflag, b); + table.push(format!("\t{}{}\t{:?}", rflag, wflag, b)); } + format_table(table); } Deletable::Deleted => { println!("\nKey is deleted."); @@ -41,12 +46,14 @@ pub fn print_bucket_info(bucket: &Bucket) { println!("- {}", alias); } } + println!("\nKey-specific aliases:"); for ((key_id, alias), _, active) in p.local_aliases.items().iter() { if *active { println!("- {} {}", key_id, alias); } } + println!("\nAuthorized keys:"); for (k, perm) in p.authorized_keys.items().iter() { let rflag = if perm.allow_read { "R" } else { " " }; diff --git a/src/model/bucket_helper.rs b/src/model/bucket_helper.rs index e0720b4e..c1280afa 100644 --- a/src/model/bucket_helper.rs +++ b/src/model/bucket_helper.rs @@ -14,13 +14,27 @@ impl<'a> BucketHelper<'a> { &self, bucket_name: &String, ) -> Result, Error> { - Ok(self - .0 - .bucket_alias_table - .get(&EmptyKey, bucket_name) - .await? - .map(|x| x.state.get().as_option().map(|x| x.bucket_id)) - .flatten()) + let hexbucket = hex::decode(bucket_name.as_str()) + .ok() + .map(|by| Uuid::try_from(&by)) + .flatten(); + if let Some(bucket_id) = hexbucket { + Ok(self + .0 + .bucket_table + .get(&bucket_id, &EmptyKey) + .await? + .filter(|x| !x.state.is_deleted()) + .map(|_| bucket_id)) + } else { + Ok(self + .0 + .bucket_alias_table + .get(&EmptyKey, bucket_name) + .await? 
+ .map(|x| x.state.get().as_option().map(|x| x.bucket_id)) + .flatten()) + } } #[allow(clippy::ptr_arg)] -- 2.43.4 From 0bbb6673e7ce703e470a3c2aad620ee5f009bc84 Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Thu, 16 Dec 2021 11:47:58 +0100 Subject: [PATCH 04/19] Model changes --- Cargo.lock | 1 + src/api/Cargo.toml | 1 + src/api/api_server.rs | 6 ++-- src/api/s3_website.rs | 35 +++++++++++----------- src/garage/admin.rs | 51 +++++++++++++++++++-------------- src/garage/cli/cmd.rs | 3 +- src/garage/cli/structs.rs | 5 ++++ src/garage/cli/util.rs | 7 +++-- src/model/bucket_alias_table.rs | 8 ++---- src/model/bucket_table.rs | 10 +++++++ src/model/key_table.rs | 16 +++++++++++ src/model/permission.rs | 6 +++- src/util/crdt/crdt.rs | 11 +++++++ src/web/web_server.rs | 16 ++++++++--- 14 files changed, 119 insertions(+), 57 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index dc1a1154..fecfaea6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -436,6 +436,7 @@ dependencies = [ "quick-xml", "roxmltree", "serde", + "serde_bytes", "sha2", "tokio", "url", diff --git a/src/api/Cargo.toml b/src/api/Cargo.toml index de58f78b..ca4950a1 100644 --- a/src/api/Cargo.toml +++ b/src/api/Cargo.toml @@ -41,5 +41,6 @@ hyper = { version = "0.14", features = ["server", "http1", "runtime", "tcp", "st percent-encoding = "2.1.0" roxmltree = "0.14" serde = { version = "1.0", features = ["derive"] } +serde_bytes = "0.11" quick-xml = { version = "0.21", features = [ "serialize" ] } url = "2.1" diff --git a/src/api/api_server.rs b/src/api/api_server.rs index cc9b9c38..cd866c9f 100644 --- a/src/api/api_server.rs +++ b/src/api/api_server.rs @@ -277,10 +277,10 @@ async fn handler_inner(garage: Arc, req: Request) -> Result { handle_delete_objects(garage, bucket_id, req, content_sha256).await } - Endpoint::PutBucketWebsite { bucket } => { - handle_put_website(garage, bucket, req, content_sha256).await + Endpoint::PutBucketWebsite { .. 
} => { + handle_put_website(garage, bucket_id, req, content_sha256).await } - Endpoint::DeleteBucketWebsite { bucket } => handle_delete_website(garage, bucket).await, + Endpoint::DeleteBucketWebsite { .. } => handle_delete_website(garage, bucket_id).await, endpoint => Err(Error::NotImplemented(endpoint.name().to_owned())), } } diff --git a/src/api/s3_website.rs b/src/api/s3_website.rs index da67c4cd..8a93e8c5 100644 --- a/src/api/s3_website.rs +++ b/src/api/s3_website.rs @@ -3,6 +3,7 @@ use std::sync::Arc; use hyper::{Body, Request, Response, StatusCode}; use serde::{Deserialize, Serialize}; +use serde_bytes::ByteBuf; use crate::error::*; use crate::s3_xml::{xmlns_tag, IntValue, Value}; @@ -11,23 +12,22 @@ use crate::signature::verify_signed_content; use garage_model::garage::Garage; use garage_table::*; use garage_util::crdt; -use garage_util::data::Hash; +use garage_util::data::*; pub async fn handle_delete_website( garage: Arc, - bucket: String, + bucket_id: Uuid, ) -> Result, Error> { let mut bucket = garage - .bucket_alias_table - .get(&EmptyKey, &bucket) + .bucket_table + .get(&bucket_id, &EmptyKey) .await? 
.ok_or(Error::NotFound)?; - if let crdt::Deletable::Present(state) = bucket.state.get_mut() { - let mut new_param = state.clone(); - new_param.website_access = false; - bucket.state.update(crdt::Deletable::present(new_param)); - garage.bucket_alias_table.insert(&bucket).await?; + if let crdt::Deletable::Present(param) = &mut bucket.state { + param.website_access.update(false); + param.website_config.update(None); + garage.bucket_table.insert(&bucket).await?; } else { unreachable!(); } @@ -40,7 +40,7 @@ pub async fn handle_delete_website( pub async fn handle_put_website( garage: Arc, - bucket: String, + bucket_id: Uuid, req: Request, content_sha256: Option, ) -> Result, Error> { @@ -48,19 +48,20 @@ pub async fn handle_put_website( verify_signed_content(content_sha256, &body[..])?; let mut bucket = garage - .bucket_alias_table - .get(&EmptyKey, &bucket) + .bucket_table + .get(&bucket_id, &EmptyKey) .await? .ok_or(Error::NotFound)?; let conf: WebsiteConfiguration = from_reader(&body as &[u8])?; conf.validate()?; - if let crdt::Deletable::Present(state) = bucket.state.get() { - let mut new_param = state.clone(); - new_param.website_access = true; - bucket.state.update(crdt::Deletable::present(new_param)); - garage.bucket_alias_table.insert(&bucket).await?; + if let crdt::Deletable::Present(param) = &mut bucket.state { + param.website_access.update(true); + param + .website_config + .update(Some(ByteBuf::from(body.to_vec()))); + garage.bucket_table.insert(&bucket).await?; } else { unreachable!(); } diff --git a/src/garage/admin.rs b/src/garage/admin.rs index 756f6007..5599c53f 100644 --- a/src/garage/admin.rs +++ b/src/garage/admin.rs @@ -104,11 +104,10 @@ impl AdminRpcHandler { } alias.state.update(Deletable::Present(AliasParams { bucket_id: bucket.id, - website_access: false, })); alias } - None => BucketAlias::new(name.clone(), bucket.id, false), + None => BucketAlias::new(name.clone(), bucket.id), }; bucket .state @@ -178,7 +177,7 @@ impl AdminRpcHandler { for 
(key_id, _) in bucket.authorized_keys() { if let Some(key) = self.garage.key_table.get(&EmptyKey, key_id).await? { if !key.state.is_deleted() { - self.update_key_bucket(&key, bucket.id, false, false) + self.update_key_bucket(&key, bucket.id, false, false, false) .await?; } } else { @@ -266,10 +265,9 @@ impl AdminRpcHandler { } // Checks ok, add alias - alias.state.update(Deletable::present(AliasParams { - bucket_id, - website_access: false, - })); + alias + .state + .update(Deletable::present(AliasParams { bucket_id })); self.garage.bucket_alias_table.insert(&alias).await?; let mut bucket_p = bucket.state.as_option_mut().unwrap(); @@ -396,16 +394,17 @@ impl AdminRpcHandler { let allow_read = query.read || key.allow_read(&bucket_id); let allow_write = query.write || key.allow_write(&bucket_id); + let allow_owner = query.owner || key.allow_owner(&bucket_id); let new_perm = self - .update_key_bucket(&key, bucket_id, allow_read, allow_write) + .update_key_bucket(&key, bucket_id, allow_read, allow_write, allow_owner) .await?; self.update_bucket_key(bucket, &key.key_id, new_perm) .await?; Ok(AdminRpc::Ok(format!( - "New permissions for {} on {}: read {}, write {}.", - &key.key_id, &query.bucket, allow_read, allow_write + "New permissions for {} on {}: read {}, write {}, owner {}.", + &key.key_id, &query.bucket, allow_read, allow_write, allow_owner ))) } @@ -425,29 +424,34 @@ impl AdminRpcHandler { let allow_read = !query.read && key.allow_read(&bucket_id); let allow_write = !query.write && key.allow_write(&bucket_id); + let allow_owner = !query.owner && key.allow_owner(&bucket_id); let new_perm = self - .update_key_bucket(&key, bucket_id, allow_read, allow_write) + .update_key_bucket(&key, bucket_id, allow_read, allow_write, allow_owner) .await?; self.update_bucket_key(bucket, &key.key_id, new_perm) .await?; Ok(AdminRpc::Ok(format!( - "New permissions for {} on {}: read {}, write {}.", - &key.key_id, &query.bucket, allow_read, allow_write + "New permissions for {} on {}: 
read {}, write {}, owner {}.", + &key.key_id, &query.bucket, allow_read, allow_write, allow_owner ))) } async fn handle_bucket_website(&self, query: &WebsiteOpt) -> Result { - let mut bucket_alias = self + let bucket_id = self .garage - .bucket_alias_table - .get(&EmptyKey, &query.bucket) + .bucket_helper() + .resolve_global_bucket_name(&query.bucket) .await? - .filter(|a| !a.is_deleted()) - .ok_or_message(format!("Bucket {} does not exist", query.bucket))?; + .ok_or_message("Bucket not found")?; - let mut state = bucket_alias.state.get().as_option().unwrap().clone(); + let mut bucket = self + .garage + .bucket_helper() + .get_existing_bucket(bucket_id) + .await?; + let bucket_state = bucket.state.as_option_mut().unwrap(); if !(query.allow ^ query.deny) { return Err(Error::Message( @@ -455,9 +459,8 @@ impl AdminRpcHandler { )); } - state.website_access = query.allow; - bucket_alias.state.update(Deletable::present(state)); - self.garage.bucket_alias_table.insert(&bucket_alias).await?; + bucket_state.website_access.update(query.allow); + self.garage.bucket_table.insert(&bucket).await?; let msg = if query.allow { format!("Website access allowed for {}", &query.bucket) @@ -545,6 +548,7 @@ impl AdminRpcHandler { timestamp: increment_logical_clock(auth.timestamp), allow_read: false, allow_write: false, + allow_owner: false, }; if !bucket.is_deleted() { self.update_bucket_key(bucket, &key.key_id, new_perm) @@ -605,6 +609,7 @@ impl AdminRpcHandler { bucket_id: Uuid, allow_read: bool, allow_write: bool, + allow_owner: bool, ) -> Result { let mut key = key.clone(); let mut key_state = key.state.as_option_mut().unwrap(); @@ -617,11 +622,13 @@ impl AdminRpcHandler { timestamp: increment_logical_clock(old_perm.timestamp), allow_read, allow_write, + allow_owner, }) .unwrap_or(BucketKeyPerm { timestamp: now_msec(), allow_read, allow_write, + allow_owner, }); key_state.authorized_buckets = Map::put_mutator(bucket_id, perm); diff --git a/src/garage/cli/cmd.rs 
b/src/garage/cli/cmd.rs index 015eeec9..b7508e45 100644 --- a/src/garage/cli/cmd.rs +++ b/src/garage/cli/cmd.rs @@ -164,8 +164,7 @@ pub async fn cmd_admin( let mut table = vec![]; for alias in bl { if let Some(p) = alias.state.get().as_option() { - let wflag = if p.website_access { "W" } else { " " }; - table.push(format!("{}\t{}\t{:?}", wflag, alias.name, p.bucket_id)); + table.push(format!("\t{}\t{:?}", alias.name, p.bucket_id)); } } format_table(table); diff --git a/src/garage/cli/structs.rs b/src/garage/cli/structs.rs index 590be1c0..1905069e 100644 --- a/src/garage/cli/structs.rs +++ b/src/garage/cli/structs.rs @@ -238,6 +238,11 @@ pub struct PermBucketOpt { #[structopt(long = "write")] pub write: bool, + /// Allow/deny administrative operations + /// (such as deleting bucket or changing bucket website configuration) + #[structopt(long = "owner")] + pub owner: bool, + /// Bucket name pub bucket: String, } diff --git a/src/garage/cli/util.rs b/src/garage/cli/util.rs index ba88502d..f586d55b 100644 --- a/src/garage/cli/util.rs +++ b/src/garage/cli/util.rs @@ -11,6 +11,7 @@ pub fn print_key_info(key: &Key) { println!("Secret key: {}", key.secret_key); match &key.state { Deletable::Present(p) => { + println!("Can create buckets: {}", p.allow_create_bucket.get()); println!("\nKey-specific bucket aliases:"); let mut table = vec![]; for (alias_name, _, alias) in p.local_aliases.items().iter() { @@ -25,7 +26,8 @@ for (b, perm) in p.authorized_buckets.items().iter() { let rflag = if perm.allow_read { "R" } else { " " }; let wflag = if perm.allow_write { "W" } else { " " }; - table.push(format!("\t{}{}\t{:?}", rflag, wflag, b)); + let oflag = if perm.allow_owner { "O" } else { " " }; + table.push(format!("\t{}{}{}\t{:?}", rflag, wflag, oflag, b)); } format_table(table); } @@ -58,7 +60,8 @@ pub fn print_bucket_info(bucket: &Bucket) { for (k, perm) in p.authorized_keys.items().iter() { let rflag = if perm.allow_read { "R" }
else { " " }; let wflag = if perm.allow_write { "W" } else { " " }; - println!("- {}{} {}", rflag, wflag, k); + let oflag = if perm.allow_owner { "O" } else { " " }; + println!("- {}{}{} {}", rflag, wflag, oflag, k); } } }; diff --git a/src/model/bucket_alias_table.rs b/src/model/bucket_alias_table.rs index 4d300d05..52484c5b 100644 --- a/src/model/bucket_alias_table.rs +++ b/src/model/bucket_alias_table.rs @@ -15,7 +15,6 @@ pub struct BucketAlias { #[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)] pub struct AliasParams { pub bucket_id: Uuid, - pub website_access: bool, } impl AutoCrdt for AliasParams { @@ -23,13 +22,10 @@ impl AutoCrdt for AliasParams { } impl BucketAlias { - pub fn new(name: String, bucket_id: Uuid, website_access: bool) -> Self { + pub fn new(name: String, bucket_id: Uuid) -> Self { BucketAlias { name, - state: crdt::Lww::new(crdt::Deletable::present(AliasParams { - bucket_id, - website_access, - })), + state: crdt::Lww::new(crdt::Deletable::present(AliasParams { bucket_id })), } } pub fn is_deleted(&self) -> bool { diff --git a/src/model/bucket_table.rs b/src/model/bucket_table.rs index ac40407e..6ae719ae 100644 --- a/src/model/bucket_table.rs +++ b/src/model/bucket_table.rs @@ -1,4 +1,5 @@ use serde::{Deserialize, Serialize}; +use serde_bytes::ByteBuf; use garage_table::crdt::Crdt; use garage_table::*; @@ -27,6 +28,11 @@ pub struct BucketParams { pub creation_date: u64, /// Map of key with access to the bucket, and what kind of access they give pub authorized_keys: crdt::Map, + /// Whether this bucket is allowed for website access + /// (under all of its global alias names) + pub website_access: crdt::Lww, + /// The website configuration XML document + pub website_config: crdt::Lww>, /// Map of aliases that are or have been given to this bucket /// in the global namespace /// (not authoritative: this is just used as an indication to @@ -44,6 +50,8 @@ impl BucketParams { BucketParams { creation_date: now_msec(), 
authorized_keys: crdt::Map::new(), + website_access: crdt::Lww::new(false), + website_config: crdt::Lww::new(None), aliases: crdt::LwwMap::new(), local_aliases: crdt::LwwMap::new(), } @@ -53,6 +61,8 @@ impl BucketParams { impl Crdt for BucketParams { fn merge(&mut self, o: &Self) { self.authorized_keys.merge(&o.authorized_keys); + self.website_access.merge(&o.website_access); + self.website_config.merge(&o.website_config); self.aliases.merge(&o.aliases); self.local_aliases.merge(&o.local_aliases); } diff --git a/src/model/key_table.rs b/src/model/key_table.rs index e87f5949..469dbd49 100644 --- a/src/model/key_table.rs +++ b/src/model/key_table.rs @@ -27,6 +27,7 @@ pub struct Key { /// Configuration for a key #[derive(PartialEq, Clone, Debug, Serialize, Deserialize)] pub struct KeyParams { + pub allow_create_bucket: crdt::Lww, pub authorized_buckets: crdt::Map, pub local_aliases: crdt::LwwMap>, } @@ -34,6 +35,7 @@ pub struct KeyParams { impl KeyParams { pub fn new() -> Self { KeyParams { + allow_create_bucket: crdt::Lww::new(false), authorized_buckets: crdt::Map::new(), local_aliases: crdt::LwwMap::new(), } @@ -48,6 +50,7 @@ impl Default for KeyParams { impl Crdt for KeyParams { fn merge(&mut self, o: &Self) { + self.allow_create_bucket.merge(&o.allow_create_bucket); self.authorized_buckets.merge(&o.authorized_buckets); self.local_aliases.merge(&o.local_aliases); } @@ -111,6 +114,19 @@ impl Key { false } } + + /// Check if `Key` is owner of bucket + pub fn allow_owner(&self, bucket: &Uuid) -> bool { + if let crdt::Deletable::Present(params) = &self.state { + params + .authorized_buckets + .get(bucket) + .map(|x| x.allow_owner) + .unwrap_or(false) + } else { + false + } + } } impl Entry for Key { diff --git a/src/model/permission.rs b/src/model/permission.rs index b61c92ce..04bb2bc5 100644 --- a/src/model/permission.rs +++ b/src/model/permission.rs @@ -12,8 +12,12 @@ pub struct BucketKeyPerm { /// The key can be used to read the bucket pub allow_read: bool, - /// 
The key can be used to write in the bucket + /// The key can be used to write objects to the bucket pub allow_write: bool, + /// The key can be used to control other aspects of the bucket: + /// - enable / disable website access + /// - delete bucket + pub allow_owner: bool, } impl Crdt for BucketKeyPerm { diff --git a/src/util/crdt/crdt.rs b/src/util/crdt/crdt.rs index 9b5f230d..2508d03b 100644 --- a/src/util/crdt/crdt.rs +++ b/src/util/crdt/crdt.rs @@ -28,6 +28,17 @@ pub trait Crdt { fn merge(&mut self, other: &Self); } +impl Crdt for Option +where + T: Eq, +{ + fn merge(&mut self, other: &Self) { + if self != other { + *self = None; + } + } +} + /// All types that implement `Ord` (a total order) can also implement a trivial CRDT /// defined by the merge rule: `a ⊔ b = max(a, b)`. Implement this trait for your type /// to enable this behavior. diff --git a/src/web/web_server.rs b/src/web/web_server.rs index 5eb25e93..6152f282 100644 --- a/src/web/web_server.rs +++ b/src/web/web_server.rs @@ -10,9 +10,13 @@ use hyper::{ }; use crate::error::*; + use garage_api::helpers::{authority_to_host, host_to_bucket}; use garage_api::s3_get::{handle_get, handle_head}; + +use garage_model::bucket_table::Bucket; use garage_model::garage::Garage; + use garage_table::*; use garage_util::error::Error as GarageError; @@ -84,16 +88,20 @@ async fn serve_file(garage: Arc, req: Request) -> Result Date: Thu, 16 Dec 2021 13:17:09 +0100 Subject: [PATCH 05/19] New buckets for 0.6.0: migration code and build files --- Cargo.lock | 31 +++--- Cargo.nix | 200 ++++++++++++++++++++++++++++--------- src/garage/admin.rs | 19 ++++ src/garage/cli/cmd.rs | 3 + src/garage/cli/structs.rs | 22 ++++ src/model/Cargo.toml | 2 +- src/model/key_table.rs | 27 +++++ src/model/lib.rs | 1 + src/model/migrate.rs | 93 +++++++++++++++++ src/model/object_table.rs | 71 +++++++++++++ src/model/version_table.rs | 35 +++++++ 11 files changed, 441 insertions(+), 63 deletions(-) create mode 100644 src/model/migrate.rs 
diff --git a/Cargo.lock b/Cargo.lock index fecfaea6..8630855f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -444,17 +444,17 @@ dependencies = [ [[package]] name = "garage_model" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c56150ee02bc26c77996b19fee0851f7d53cf42ae80370a8cf3a5dd5bb0bba76" +checksum = "584619e8999713d73761775591ad6f01ff8c9d724f3b20984f5932f1fc7f9988" dependencies = [ "arc-swap", "async-trait", "futures", "futures-util", - "garage_rpc 0.5.0", - "garage_table 0.5.0", - "garage_util 0.5.0", + "garage_rpc 0.5.1", + "garage_table 0.5.1", + "garage_util 0.5.1", "hex", "log", "netapp", @@ -464,6 +464,7 @@ dependencies = [ "serde_bytes", "sled", "tokio", + "zstd", ] [[package]] @@ -474,7 +475,7 @@ dependencies = [ "async-trait", "futures", "futures-util", - "garage_model 0.5.0", + "garage_model 0.5.1", "garage_rpc 0.6.0", "garage_table 0.6.0", "garage_util 0.6.0", @@ -492,16 +493,16 @@ dependencies = [ [[package]] name = "garage_rpc" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c5743c49f616b260f548454ff52b81d10372593d4c4bc01d516ee3c3c4e515a" +checksum = "81e693aa4582cfe7a7ce70c07880e3662544b5d0cd68bc4b59c53febfbb8d1ec" dependencies = [ "arc-swap", "async-trait", "bytes 1.1.0", "futures", "futures-util", - "garage_util 0.5.0", + "garage_util 0.5.1", "gethostname", "hex", "hyper", @@ -544,16 +545,16 @@ dependencies = [ [[package]] name = "garage_table" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "378ffd69e8fd084e0817dc64a23a1692b58ffc86509ac2cadc64aa2d83c3e1e0" +checksum = "5c3557f3757e2acd29eaee86804d4e6c38d2abda81b4b349d8a0d2277044265c" dependencies = [ "async-trait", "bytes 1.1.0", "futures", "futures-util", - "garage_rpc 0.5.0", - "garage_util 0.5.0", + "garage_rpc 0.5.1", + "garage_util 0.5.1", "hexdump", "log", "rand", @@ -586,9 +587,9 @@ 
dependencies = [ [[package]] name = "garage_util" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5282e613b4da5ecca5bfec8c48ce9f25226cc1f35fbc439ed5fc698cce1aa549" +checksum = "1e096994382447431e2f3c70e3685eb8b24c00eceff8667bb22a2a27ff17832f" dependencies = [ "blake2", "chrono", diff --git a/Cargo.nix b/Cargo.nix index 5b38c55e..78f2b5c0 100644 --- a/Cargo.nix +++ b/Cargo.nix @@ -40,13 +40,13 @@ in { cargo2nixVersion = "0.9.0"; workspace = { - garage_util = rustPackages.unknown.garage_util."0.5.0"; - garage_rpc = rustPackages.unknown.garage_rpc."0.5.0"; - garage_table = rustPackages.unknown.garage_table."0.5.0"; - garage_model = rustPackages.unknown.garage_model."0.5.0"; - garage_api = rustPackages.unknown.garage_api."0.5.0"; - garage_web = rustPackages.unknown.garage_web."0.5.0"; - garage = rustPackages.unknown.garage."0.5.0"; + garage_util = rustPackages.unknown.garage_util."0.6.0"; + garage_rpc = rustPackages.unknown.garage_rpc."0.6.0"; + garage_table = rustPackages.unknown.garage_table."0.6.0"; + garage_model = rustPackages.unknown.garage_model."0.6.0"; + garage_api = rustPackages.unknown.garage_api."0.6.0"; + garage_web = rustPackages.unknown.garage_web."0.6.0"; + garage = rustPackages.unknown.garage."0.6.0"; }; "registry+https://github.com/rust-lang/crates.io-index".aho-corasick."0.7.18" = overridableMkRustCrate (profileName: rec { name = "aho-corasick"; @@ -253,7 +253,7 @@ in registry = "registry+https://github.com/rust-lang/crates.io-index"; src = fetchCratesIo { inherit name version; sha256 = "95059428f66df56b63431fdb4e1947ed2190586af5c5a8a8b71122bdf5a7f469"; }; dependencies = { - ${ if hostPlatform.parsed.cpu.name == "aarch64" && hostPlatform.parsed.kernel.name == "linux" || hostPlatform.config == "aarch64-apple-darwin" then "libc" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".libc."0.2.103" { inherit profileName; }; + ${ if hostPlatform.config == 
"aarch64-apple-darwin" || hostPlatform.parsed.cpu.name == "aarch64" && hostPlatform.parsed.kernel.name == "linux" then "libc" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".libc."0.2.103" { inherit profileName; }; }; }); @@ -613,9 +613,9 @@ in }; }); - "unknown".garage."0.5.0" = overridableMkRustCrate (profileName: rec { + "unknown".garage."0.6.0" = overridableMkRustCrate (profileName: rec { name = "garage"; - version = "0.5.0"; + version = "0.6.0"; registry = "unknown"; src = fetchCrateLocal (workspaceSrc + "/src/garage"); dependencies = { @@ -623,12 +623,12 @@ in bytes = rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.1.0" { inherit profileName; }; futures = rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.17" { inherit profileName; }; futures_util = rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.17" { inherit profileName; }; - garage_api = rustPackages."unknown".garage_api."0.5.0" { inherit profileName; }; - garage_model = rustPackages."unknown".garage_model."0.5.0" { inherit profileName; }; - garage_rpc = rustPackages."unknown".garage_rpc."0.5.0" { inherit profileName; }; - garage_table = rustPackages."unknown".garage_table."0.5.0" { inherit profileName; }; - garage_util = rustPackages."unknown".garage_util."0.5.0" { inherit profileName; }; - garage_web = rustPackages."unknown".garage_web."0.5.0" { inherit profileName; }; + garage_api = rustPackages."unknown".garage_api."0.6.0" { inherit profileName; }; + garage_model = rustPackages."unknown".garage_model."0.6.0" { inherit profileName; }; + garage_rpc = rustPackages."unknown".garage_rpc."0.6.0" { inherit profileName; }; + garage_table = rustPackages."unknown".garage_table."0.6.0" { inherit profileName; }; + garage_util = rustPackages."unknown".garage_util."0.6.0" { inherit profileName; }; + garage_web = rustPackages."unknown".garage_web."0.6.0" { inherit profileName; }; 
git_version = rustPackages."registry+https://github.com/rust-lang/crates.io-index".git-version."0.3.5" { inherit profileName; }; hex = rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }; sodiumoxide = rustPackages."registry+https://github.com/rust-lang/crates.io-index".kuska-sodiumoxide."0.2.5-0" { inherit profileName; }; @@ -645,9 +645,9 @@ in }; }); - "unknown".garage_api."0.5.0" = overridableMkRustCrate (profileName: rec { + "unknown".garage_api."0.6.0" = overridableMkRustCrate (profileName: rec { name = "garage_api"; - version = "0.5.0"; + version = "0.6.0"; registry = "unknown"; src = fetchCrateLocal (workspaceSrc + "/src/api"); dependencies = { @@ -658,9 +658,9 @@ in err_derive = buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.0" { profileName = "__noProfile"; }; futures = rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.17" { inherit profileName; }; futures_util = rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.17" { inherit profileName; }; - garage_model = rustPackages."unknown".garage_model."0.5.0" { inherit profileName; }; - garage_table = rustPackages."unknown".garage_table."0.5.0" { inherit profileName; }; - garage_util = rustPackages."unknown".garage_util."0.5.0" { inherit profileName; }; + garage_model = rustPackages."unknown".garage_model."0.6.0" { inherit profileName; }; + garage_table = rustPackages."unknown".garage_table."0.6.0" { inherit profileName; }; + garage_util = rustPackages."unknown".garage_util."0.6.0" { inherit profileName; }; hex = rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }; hmac = rustPackages."registry+https://github.com/rust-lang/crates.io-index".hmac."0.10.1" { inherit profileName; }; http = rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."0.2.5" { inherit profileName; }; @@ 
-674,25 +674,26 @@ in quick_xml = rustPackages."registry+https://github.com/rust-lang/crates.io-index".quick-xml."0.21.0" { inherit profileName; }; roxmltree = rustPackages."registry+https://github.com/rust-lang/crates.io-index".roxmltree."0.14.1" { inherit profileName; }; serde = rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.130" { inherit profileName; }; + serde_bytes = rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.5" { inherit profileName; }; sha2 = rustPackages."registry+https://github.com/rust-lang/crates.io-index".sha2."0.9.8" { inherit profileName; }; tokio = rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.12.0" { inherit profileName; }; url = rustPackages."registry+https://github.com/rust-lang/crates.io-index".url."2.2.2" { inherit profileName; }; }; }); - "unknown".garage_model."0.5.0" = overridableMkRustCrate (profileName: rec { + "registry+https://github.com/rust-lang/crates.io-index".garage_model."0.5.1" = overridableMkRustCrate (profileName: rec { name = "garage_model"; - version = "0.5.0"; - registry = "unknown"; - src = fetchCrateLocal (workspaceSrc + "/src/model"); + version = "0.5.1"; + registry = "registry+https://github.com/rust-lang/crates.io-index"; + src = fetchCratesIo { inherit name version; sha256 = "584619e8999713d73761775591ad6f01ff8c9d724f3b20984f5932f1fc7f9988"; }; dependencies = { arc_swap = rustPackages."registry+https://github.com/rust-lang/crates.io-index".arc-swap."1.4.0" { inherit profileName; }; async_trait = buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".async-trait."0.1.51" { profileName = "__noProfile"; }; futures = rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.17" { inherit profileName; }; futures_util = rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.17" { inherit profileName; }; - garage_rpc = 
rustPackages."unknown".garage_rpc."0.5.0" { inherit profileName; }; - garage_table = rustPackages."unknown".garage_table."0.5.0" { inherit profileName; }; - garage_util = rustPackages."unknown".garage_util."0.5.0" { inherit profileName; }; + garage_rpc = rustPackages."registry+https://github.com/rust-lang/crates.io-index".garage_rpc."0.5.1" { inherit profileName; }; + garage_table = rustPackages."registry+https://github.com/rust-lang/crates.io-index".garage_table."0.5.1" { inherit profileName; }; + garage_util = rustPackages."registry+https://github.com/rust-lang/crates.io-index".garage_util."0.5.1" { inherit profileName; }; hex = rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }; log = rustPackages."registry+https://github.com/rust-lang/crates.io-index".log."0.4.14" { inherit profileName; }; netapp = rustPackages."registry+https://github.com/rust-lang/crates.io-index".netapp."0.3.0" { inherit profileName; }; @@ -706,18 +707,45 @@ in }; }); - "unknown".garage_rpc."0.5.0" = overridableMkRustCrate (profileName: rec { - name = "garage_rpc"; - version = "0.5.0"; + "unknown".garage_model."0.6.0" = overridableMkRustCrate (profileName: rec { + name = "garage_model"; + version = "0.6.0"; registry = "unknown"; - src = fetchCrateLocal (workspaceSrc + "/src/rpc"); + src = fetchCrateLocal (workspaceSrc + "/src/model"); + dependencies = { + arc_swap = rustPackages."registry+https://github.com/rust-lang/crates.io-index".arc-swap."1.4.0" { inherit profileName; }; + async_trait = buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".async-trait."0.1.51" { profileName = "__noProfile"; }; + futures = rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.17" { inherit profileName; }; + futures_util = rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.17" { inherit profileName; }; + garage_model_050 = 
rustPackages."registry+https://github.com/rust-lang/crates.io-index".garage_model."0.5.1" { inherit profileName; }; + garage_rpc = rustPackages."unknown".garage_rpc."0.6.0" { inherit profileName; }; + garage_table = rustPackages."unknown".garage_table."0.6.0" { inherit profileName; }; + garage_util = rustPackages."unknown".garage_util."0.6.0" { inherit profileName; }; + hex = rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }; + log = rustPackages."registry+https://github.com/rust-lang/crates.io-index".log."0.4.14" { inherit profileName; }; + netapp = rustPackages."registry+https://github.com/rust-lang/crates.io-index".netapp."0.3.0" { inherit profileName; }; + rand = rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.4" { inherit profileName; }; + rmp_serde = rustPackages."registry+https://github.com/rust-lang/crates.io-index".rmp-serde."0.15.5" { inherit profileName; }; + serde = rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.130" { inherit profileName; }; + serde_bytes = rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.5" { inherit profileName; }; + sled = rustPackages."registry+https://github.com/rust-lang/crates.io-index".sled."0.34.7" { inherit profileName; }; + tokio = rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.12.0" { inherit profileName; }; + zstd = rustPackages."registry+https://github.com/rust-lang/crates.io-index".zstd."0.9.0+zstd.1.5.0" { inherit profileName; }; + }; + }); + + "registry+https://github.com/rust-lang/crates.io-index".garage_rpc."0.5.1" = overridableMkRustCrate (profileName: rec { + name = "garage_rpc"; + version = "0.5.1"; + registry = "registry+https://github.com/rust-lang/crates.io-index"; + src = fetchCratesIo { inherit name version; sha256 = "81e693aa4582cfe7a7ce70c07880e3662544b5d0cd68bc4b59c53febfbb8d1ec"; }; dependencies = { arc_swap = 
rustPackages."registry+https://github.com/rust-lang/crates.io-index".arc-swap."1.4.0" { inherit profileName; }; async_trait = buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".async-trait."0.1.51" { profileName = "__noProfile"; }; bytes = rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.1.0" { inherit profileName; }; futures = rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.17" { inherit profileName; }; futures_util = rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.17" { inherit profileName; }; - garage_util = rustPackages."unknown".garage_util."0.5.0" { inherit profileName; }; + garage_util = rustPackages."registry+https://github.com/rust-lang/crates.io-index".garage_util."0.5.1" { inherit profileName; }; gethostname = rustPackages."registry+https://github.com/rust-lang/crates.io-index".gethostname."0.2.1" { inherit profileName; }; hex = rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }; hyper = rustPackages."registry+https://github.com/rust-lang/crates.io-index".hyper."0.14.13" { inherit profileName; }; @@ -734,18 +762,46 @@ in }; }); - "unknown".garage_table."0.5.0" = overridableMkRustCrate (profileName: rec { - name = "garage_table"; - version = "0.5.0"; + "unknown".garage_rpc."0.6.0" = overridableMkRustCrate (profileName: rec { + name = "garage_rpc"; + version = "0.6.0"; registry = "unknown"; - src = fetchCrateLocal (workspaceSrc + "/src/table"); + src = fetchCrateLocal (workspaceSrc + "/src/rpc"); + dependencies = { + arc_swap = rustPackages."registry+https://github.com/rust-lang/crates.io-index".arc-swap."1.4.0" { inherit profileName; }; + async_trait = buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".async-trait."0.1.51" { profileName = "__noProfile"; }; + bytes = rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.1.0" { 
inherit profileName; }; + futures = rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.17" { inherit profileName; }; + futures_util = rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.17" { inherit profileName; }; + garage_util = rustPackages."unknown".garage_util."0.6.0" { inherit profileName; }; + gethostname = rustPackages."registry+https://github.com/rust-lang/crates.io-index".gethostname."0.2.1" { inherit profileName; }; + hex = rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }; + hyper = rustPackages."registry+https://github.com/rust-lang/crates.io-index".hyper."0.14.13" { inherit profileName; }; + sodiumoxide = rustPackages."registry+https://github.com/rust-lang/crates.io-index".kuska-sodiumoxide."0.2.5-0" { inherit profileName; }; + log = rustPackages."registry+https://github.com/rust-lang/crates.io-index".log."0.4.14" { inherit profileName; }; + netapp = rustPackages."registry+https://github.com/rust-lang/crates.io-index".netapp."0.3.0" { inherit profileName; }; + rand = rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.4" { inherit profileName; }; + rmp_serde = rustPackages."registry+https://github.com/rust-lang/crates.io-index".rmp-serde."0.15.5" { inherit profileName; }; + serde = rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.130" { inherit profileName; }; + serde_bytes = rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.5" { inherit profileName; }; + serde_json = rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_json."1.0.68" { inherit profileName; }; + tokio = rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.12.0" { inherit profileName; }; + tokio_stream = rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio-stream."0.1.7" { inherit profileName; }; + }; + 
}); + + "registry+https://github.com/rust-lang/crates.io-index".garage_table."0.5.1" = overridableMkRustCrate (profileName: rec { + name = "garage_table"; + version = "0.5.1"; + registry = "registry+https://github.com/rust-lang/crates.io-index"; + src = fetchCratesIo { inherit name version; sha256 = "5c3557f3757e2acd29eaee86804d4e6c38d2abda81b4b349d8a0d2277044265c"; }; dependencies = { async_trait = buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".async-trait."0.1.51" { profileName = "__noProfile"; }; bytes = rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.1.0" { inherit profileName; }; futures = rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.17" { inherit profileName; }; futures_util = rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.17" { inherit profileName; }; - garage_rpc = rustPackages."unknown".garage_rpc."0.5.0" { inherit profileName; }; - garage_util = rustPackages."unknown".garage_util."0.5.0" { inherit profileName; }; + garage_rpc = rustPackages."registry+https://github.com/rust-lang/crates.io-index".garage_rpc."0.5.1" { inherit profileName; }; + garage_util = rustPackages."registry+https://github.com/rust-lang/crates.io-index".garage_util."0.5.1" { inherit profileName; }; hexdump = rustPackages."registry+https://github.com/rust-lang/crates.io-index".hexdump."0.1.1" { inherit profileName; }; log = rustPackages."registry+https://github.com/rust-lang/crates.io-index".log."0.4.14" { inherit profileName; }; rand = rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.4" { inherit profileName; }; @@ -757,9 +813,59 @@ in }; }); - "unknown".garage_util."0.5.0" = overridableMkRustCrate (profileName: rec { + "unknown".garage_table."0.6.0" = overridableMkRustCrate (profileName: rec { + name = "garage_table"; + version = "0.6.0"; + registry = "unknown"; + src = fetchCrateLocal (workspaceSrc + "/src/table"); + 
dependencies = { + async_trait = buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".async-trait."0.1.51" { profileName = "__noProfile"; }; + bytes = rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.1.0" { inherit profileName; }; + futures = rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.17" { inherit profileName; }; + futures_util = rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.17" { inherit profileName; }; + garage_rpc = rustPackages."unknown".garage_rpc."0.6.0" { inherit profileName; }; + garage_util = rustPackages."unknown".garage_util."0.6.0" { inherit profileName; }; + hexdump = rustPackages."registry+https://github.com/rust-lang/crates.io-index".hexdump."0.1.1" { inherit profileName; }; + log = rustPackages."registry+https://github.com/rust-lang/crates.io-index".log."0.4.14" { inherit profileName; }; + rand = rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.4" { inherit profileName; }; + rmp_serde = rustPackages."registry+https://github.com/rust-lang/crates.io-index".rmp-serde."0.15.5" { inherit profileName; }; + serde = rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.130" { inherit profileName; }; + serde_bytes = rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.5" { inherit profileName; }; + sled = rustPackages."registry+https://github.com/rust-lang/crates.io-index".sled."0.34.7" { inherit profileName; }; + tokio = rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.12.0" { inherit profileName; }; + }; + }); + + "registry+https://github.com/rust-lang/crates.io-index".garage_util."0.5.1" = overridableMkRustCrate (profileName: rec { name = "garage_util"; - version = "0.5.0"; + version = "0.5.1"; + registry = "registry+https://github.com/rust-lang/crates.io-index"; + src = fetchCratesIo { inherit name version; 
sha256 = "1e096994382447431e2f3c70e3685eb8b24c00eceff8667bb22a2a27ff17832f"; }; + dependencies = { + blake2 = rustPackages."registry+https://github.com/rust-lang/crates.io-index".blake2."0.9.2" { inherit profileName; }; + chrono = rustPackages."registry+https://github.com/rust-lang/crates.io-index".chrono."0.4.19" { inherit profileName; }; + err_derive = buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.0" { profileName = "__noProfile"; }; + futures = rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.17" { inherit profileName; }; + hex = rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }; + http = rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."0.2.5" { inherit profileName; }; + hyper = rustPackages."registry+https://github.com/rust-lang/crates.io-index".hyper."0.14.13" { inherit profileName; }; + log = rustPackages."registry+https://github.com/rust-lang/crates.io-index".log."0.4.14" { inherit profileName; }; + netapp = rustPackages."registry+https://github.com/rust-lang/crates.io-index".netapp."0.3.0" { inherit profileName; }; + rand = rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.4" { inherit profileName; }; + rmp_serde = rustPackages."registry+https://github.com/rust-lang/crates.io-index".rmp-serde."0.15.5" { inherit profileName; }; + serde = rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.130" { inherit profileName; }; + serde_json = rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_json."1.0.68" { inherit profileName; }; + sha2 = rustPackages."registry+https://github.com/rust-lang/crates.io-index".sha2."0.9.8" { inherit profileName; }; + sled = rustPackages."registry+https://github.com/rust-lang/crates.io-index".sled."0.34.7" { inherit profileName; }; + tokio = 
rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.12.0" { inherit profileName; }; + toml = rustPackages."registry+https://github.com/rust-lang/crates.io-index".toml."0.5.8" { inherit profileName; }; + xxhash_rust = rustPackages."registry+https://github.com/rust-lang/crates.io-index".xxhash-rust."0.8.2" { inherit profileName; }; + }; + }); + + "unknown".garage_util."0.6.0" = overridableMkRustCrate (profileName: rec { + name = "garage_util"; + version = "0.6.0"; registry = "unknown"; src = fetchCrateLocal (workspaceSrc + "/src/util"); dependencies = { @@ -784,18 +890,18 @@ in }; }); - "unknown".garage_web."0.5.0" = overridableMkRustCrate (profileName: rec { + "unknown".garage_web."0.6.0" = overridableMkRustCrate (profileName: rec { name = "garage_web"; - version = "0.5.0"; + version = "0.6.0"; registry = "unknown"; src = fetchCrateLocal (workspaceSrc + "/src/web"); dependencies = { err_derive = buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.0" { profileName = "__noProfile"; }; futures = rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.17" { inherit profileName; }; - garage_api = rustPackages."unknown".garage_api."0.5.0" { inherit profileName; }; - garage_model = rustPackages."unknown".garage_model."0.5.0" { inherit profileName; }; - garage_table = rustPackages."unknown".garage_table."0.5.0" { inherit profileName; }; - garage_util = rustPackages."unknown".garage_util."0.5.0" { inherit profileName; }; + garage_api = rustPackages."unknown".garage_api."0.6.0" { inherit profileName; }; + garage_model = rustPackages."unknown".garage_model."0.6.0" { inherit profileName; }; + garage_table = rustPackages."unknown".garage_table."0.6.0" { inherit profileName; }; + garage_util = rustPackages."unknown".garage_util."0.6.0" { inherit profileName; }; http = rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."0.2.5" { inherit profileName; }; hyper = 
rustPackages."registry+https://github.com/rust-lang/crates.io-index".hyper."0.14.13" { inherit profileName; }; log = rustPackages."registry+https://github.com/rust-lang/crates.io-index".log."0.4.14" { inherit profileName; }; diff --git a/src/garage/admin.rs b/src/garage/admin.rs index 5599c53f..74b24584 100644 --- a/src/garage/admin.rs +++ b/src/garage/admin.rs @@ -19,6 +19,7 @@ use garage_model::bucket_alias_table::*; use garage_model::bucket_table::*; use garage_model::garage::Garage; use garage_model::key_table::*; +use garage_model::migrate::Migrate; use garage_model::permission::*; use crate::cli::*; @@ -31,6 +32,7 @@ pub enum AdminRpc { BucketOperation(BucketOperation), KeyOperation(KeyOperation), LaunchRepair(RepairOpt), + Migrate(MigrateOpt), Stats(StatsOpt), // Replies @@ -650,6 +652,22 @@ impl AdminRpcHandler { Ok(()) } + async fn handle_migrate(self: &Arc, opt: MigrateOpt) -> Result { + if !opt.yes { + return Err(Error::BadRpc( + "Please provide the --yes flag to initiate migration operation.".to_string(), + )); + } + + let m = Migrate { + garage: self.garage.clone(), + }; + match opt.what { + MigrateWhat::Buckets050 => m.migrate_buckets050().await, + }?; + Ok(AdminRpc::Ok("Migration successfull.".into())) + } + async fn handle_launch_repair(self: &Arc, opt: RepairOpt) -> Result { if !opt.yes { return Err(Error::BadRpc( @@ -819,6 +837,7 @@ impl EndpointHandler for AdminRpcHandler { match message { AdminRpc::BucketOperation(bo) => self.handle_bucket_cmd(bo).await, AdminRpc::KeyOperation(ko) => self.handle_key_cmd(ko).await, + AdminRpc::Migrate(opt) => self.handle_migrate(opt.clone()).await, AdminRpc::LaunchRepair(opt) => self.handle_launch_repair(opt.clone()).await, AdminRpc::Stats(opt) => self.handle_stats(opt.clone()).await, _ => Err(Error::BadRpc("Invalid RPC".to_string())), diff --git a/src/garage/cli/cmd.rs b/src/garage/cli/cmd.rs index b7508e45..b65fea02 100644 --- a/src/garage/cli/cmd.rs +++ b/src/garage/cli/cmd.rs @@ -29,6 +29,9 @@ pub async fn 
cli_command_dispatch( Command::Key(ko) => { cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::KeyOperation(ko)).await } + Command::Migrate(mo) => { + cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::Migrate(mo)).await + } Command::Repair(ro) => { cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::LaunchRepair(ro)).await } diff --git a/src/garage/cli/structs.rs b/src/garage/cli/structs.rs index 1905069e..bd7abc8e 100644 --- a/src/garage/cli/structs.rs +++ b/src/garage/cli/structs.rs @@ -28,6 +28,11 @@ pub enum Command { #[structopt(name = "key")] Key(KeyOperation), + /// Run migrations from previous Garage version + /// (DO NOT USE WITHOUT READING FULL DOCUMENTATION) + #[structopt(name = "migrate")] + Migrate(MigrateOpt), + /// Start repair of node data #[structopt(name = "repair")] Repair(RepairOpt), @@ -319,6 +324,23 @@ pub struct KeyImportOpt { pub name: String, } +#[derive(Serialize, Deserialize, StructOpt, Debug, Clone)] +pub struct MigrateOpt { + /// Confirm the launch of the migrate operation + #[structopt(long = "yes")] + pub yes: bool, + + #[structopt(subcommand)] + pub what: MigrateWhat, +} + +#[derive(Serialize, Deserialize, StructOpt, Debug, Eq, PartialEq, Clone)] +pub enum MigrateWhat { + /// Migrate buckets and permissions from v0.5.0 + #[structopt(name = "buckets050")] + Buckets050, +} + #[derive(Serialize, Deserialize, StructOpt, Debug, Clone)] pub struct RepairOpt { /// Launch repair operation on all nodes diff --git a/src/model/Cargo.toml b/src/model/Cargo.toml index 12c08719..03881f5d 100644 --- a/src/model/Cargo.toml +++ b/src/model/Cargo.toml @@ -17,7 +17,7 @@ path = "lib.rs" garage_rpc = { version = "0.6.0", path = "../rpc" } garage_table = { version = "0.6.0", path = "../table" } garage_util = { version = "0.6.0", path = "../util" } -garage_model_050 = { package = "garage_model", version = "0.5.0" } +garage_model_050 = { package = "garage_model", version = "0.5.1" } async-trait = "0.1.7" arc-swap = "1.0" diff --git a/src/model/key_table.rs 
b/src/model/key_table.rs index 469dbd49..526ed496 100644 --- a/src/model/key_table.rs +++ b/src/model/key_table.rs @@ -171,4 +171,31 @@ impl TableSchema for KeyTable { } } } + + fn try_migrate(bytes: &[u8]) -> Option { + let old_k = + match rmp_serde::decode::from_read_ref::<_, garage_model_050::key_table::Key>(bytes) { + Ok(x) => x, + Err(_) => return None, + }; + let state = if old_k.deleted.get() { + crdt::Deletable::Deleted + } else { + // Authorized buckets is ignored here, + // migration is performed in specific migration code in + // garage/migrate.rs + crdt::Deletable::Present(KeyParams { + allow_create_bucket: crdt::Lww::new(false), + authorized_buckets: crdt::Map::new(), + local_aliases: crdt::LwwMap::new(), + }) + }; + let name = crdt::Lww::migrate_from_raw(old_k.name.timestamp(), old_k.name.get().clone()); + Some(Key { + key_id: old_k.key_id, + secret_key: old_k.secret_key, + name, + state, + }) + } } diff --git a/src/model/lib.rs b/src/model/lib.rs index fe8cfdad..3f6b5cd4 100644 --- a/src/model/lib.rs +++ b/src/model/lib.rs @@ -8,6 +8,7 @@ pub mod bucket_helper; pub mod bucket_table; pub mod garage; pub mod key_table; +pub mod migrate; pub mod object_table; pub mod permission; pub mod version_table; diff --git a/src/model/migrate.rs b/src/model/migrate.rs new file mode 100644 index 00000000..35ff1807 --- /dev/null +++ b/src/model/migrate.rs @@ -0,0 +1,93 @@ +use std::sync::Arc; + +use garage_table::util::EmptyKey; +use garage_util::crdt::*; +use garage_util::data::*; +use garage_util::error::*; +use garage_util::time::*; + +use garage_model_050::bucket_table as old_bucket; + +use crate::bucket_alias_table::*; +use crate::bucket_table::*; +use crate::garage::Garage; +use crate::permission::*; + +pub struct Migrate { + pub garage: Arc, +} + +impl Migrate { + pub async fn migrate_buckets050(&self) -> Result<(), Error> { + let tree = self.garage.db.open_tree("bucket:table")?; + + for res in tree.iter() { + let (_k, v) = res?; + let bucket = 
rmp_serde::decode::from_read_ref::<_, old_bucket::Bucket>(&v[..])?; + + if let old_bucket::BucketState::Present(p) = bucket.state.get() { + self.migrate_buckets050_do_bucket(&bucket, p).await?; + } + } + + Ok(()) + } + + pub async fn migrate_buckets050_do_bucket( + &self, + old_bucket: &old_bucket::Bucket, + old_bucket_p: &old_bucket::BucketParams, + ) -> Result<(), Error> { + let mut new_ak = Map::new(); + for (k, ts, perm) in old_bucket_p.authorized_keys.items().iter() { + new_ak.put( + k.to_string(), + BucketKeyPerm { + timestamp: *ts, + allow_read: perm.allow_read, + allow_write: perm.allow_write, + allow_owner: false, + }, + ); + } + + let mut aliases = LwwMap::new(); + aliases.update_in_place(old_bucket.name.clone(), true); + + let new_bucket = Bucket { + id: blake2sum(old_bucket.name.as_bytes()), + state: Deletable::Present(BucketParams { + creation_date: now_msec(), + authorized_keys: new_ak.clone(), + website_access: Lww::new(*old_bucket_p.website.get()), + website_config: Lww::new(None), + aliases, + local_aliases: LwwMap::new(), + }), + }; + self.garage.bucket_table.insert(&new_bucket).await?; + + let new_alias = BucketAlias { + name: old_bucket.name.clone(), + state: Lww::new(Deletable::Present(AliasParams { + bucket_id: new_bucket.id, + })), + }; + self.garage.bucket_alias_table.insert(&new_alias).await?; + + for (k, perm) in new_ak.items().iter() { + let mut key = self + .garage + .key_table + .get(&EmptyKey, k) + .await? 
+ .ok_or_message(format!("Missing key: {}", k))?; + if let Some(p) = key.state.as_option_mut() { + p.authorized_buckets.put(new_bucket.id, *perm); + } + self.garage.key_table.insert(&key).await?; + } + + Ok(()) + } +} diff --git a/src/model/object_table.rs b/src/model/object_table.rs index 285cb5a7..45f0daf4 100644 --- a/src/model/object_table.rs +++ b/src/model/object_table.rs @@ -11,6 +11,8 @@ use garage_table::*; use crate::version_table::*; +use garage_model_050::object_table as old; + /// An object #[derive(PartialEq, Clone, Debug, Serialize, Deserialize)] pub struct Object { @@ -255,4 +257,73 @@ impl TableSchema for ObjectTable { let deleted = !entry.versions.iter().any(|v| v.is_data()); filter.apply(deleted) } + + fn try_migrate(bytes: &[u8]) -> Option { + let old_v = match rmp_serde::decode::from_read_ref::<_, old::Object>(bytes) { + Ok(x) => x, + Err(_) => return None, + }; + Some(migrate_object(old_v)) + } +} + +// vvvvvvvv migration code, stupid stuff vvvvvvvvvvvv +// (we just want to change bucket into bucket_id by hashing it) + +fn migrate_object(o: old::Object) -> Object { + let versions = o + .versions() + .iter() + .cloned() + .map(migrate_object_version) + .collect(); + Object { + bucket_id: blake2sum(o.bucket.as_bytes()), + key: o.key, + versions, + } +} + +fn migrate_object_version(v: old::ObjectVersion) -> ObjectVersion { + ObjectVersion { + uuid: Uuid::try_from(v.uuid.as_slice()).unwrap(), + timestamp: v.timestamp, + state: match v.state { + old::ObjectVersionState::Uploading(h) => { + ObjectVersionState::Uploading(migrate_object_version_headers(h)) + } + old::ObjectVersionState::Complete(d) => { + ObjectVersionState::Complete(migrate_object_version_data(d)) + } + old::ObjectVersionState::Aborted => ObjectVersionState::Aborted, + }, + } +} + +fn migrate_object_version_headers(h: old::ObjectVersionHeaders) -> ObjectVersionHeaders { + ObjectVersionHeaders { + content_type: h.content_type, + other: h.other, + } +} + +fn 
migrate_object_version_data(d: old::ObjectVersionData) -> ObjectVersionData { + match d { + old::ObjectVersionData::DeleteMarker => ObjectVersionData::DeleteMarker, + old::ObjectVersionData::Inline(m, b) => { + ObjectVersionData::Inline(migrate_object_version_meta(m), b) + } + old::ObjectVersionData::FirstBlock(m, h) => ObjectVersionData::FirstBlock( + migrate_object_version_meta(m), + Hash::try_from(h.as_slice()).unwrap(), + ), + } +} + +fn migrate_object_version_meta(m: old::ObjectVersionMeta) -> ObjectVersionMeta { + ObjectVersionMeta { + headers: migrate_object_version_headers(m.headers), + size: m.size, + etag: m.etag, + } } diff --git a/src/model/version_table.rs b/src/model/version_table.rs index 4edea0b7..05cae831 100644 --- a/src/model/version_table.rs +++ b/src/model/version_table.rs @@ -147,4 +147,39 @@ impl TableSchema for VersionTable { fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool { filter.apply(entry.deleted.get()) } + + fn try_migrate(bytes: &[u8]) -> Option { + let old = + match rmp_serde::decode::from_read_ref::<_, garage_model_050::version_table::Version>( + bytes, + ) { + Ok(x) => x, + Err(_) => return None, + }; + let mut new_blocks = crdt::Map::new(); + for (k, v) in old.blocks.items().iter() { + new_blocks.put( + VersionBlockKey { + part_number: k.part_number, + offset: k.offset, + }, + VersionBlock { + hash: Hash::try_from(v.hash.as_slice()).unwrap(), + size: v.size, + }, + ); + } + let mut new_parts_etags = crdt::Map::new(); + for (k, v) in old.parts_etags.items().iter() { + new_parts_etags.put(*k, v.clone()); + } + Some(Version { + uuid: Hash::try_from(old.uuid.as_slice()).unwrap(), + deleted: crdt::Bool::new(old.deleted.get()), + blocks: new_blocks, + parts_etags: new_parts_etags, + bucket_id: blake2sum(old.bucket.as_bytes()), + key: old.key, + }) + } } -- 2.43.4 From 5db600e2316b80102e3fd4df9e8974c9586aec9c Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Thu, 16 Dec 2021 16:17:51 +0100 Subject: [PATCH 06/19] More 
complete output to bucket info and key info --- src/garage/admin.rs | 101 +++++++++++++++++++++++++++++++---------- src/garage/cli/cmd.rs | 8 ++-- src/garage/cli/util.rs | 73 +++++++++++++++++++++++++---- 3 files changed, 147 insertions(+), 35 deletions(-) diff --git a/src/garage/admin.rs b/src/garage/admin.rs index 74b24584..2eb0f187 100644 --- a/src/garage/admin.rs +++ b/src/garage/admin.rs @@ -38,9 +38,9 @@ pub enum AdminRpc { // Replies Ok(String), BucketList(Vec), - BucketInfo(Bucket), + BucketInfo(Bucket, HashMap), KeyList(Vec<(String, String)>), - KeyInfo(Key), + KeyInfo(Key, HashMap), } impl Rpc for AdminRpc { @@ -63,20 +63,7 @@ impl AdminRpcHandler { async fn handle_bucket_cmd(&self, cmd: &BucketOperation) -> Result { match cmd { BucketOperation::List => self.handle_list_buckets().await, - BucketOperation::Info(query) => { - let bucket_id = self - .garage - .bucket_helper() - .resolve_global_bucket_name(&query.name) - .await? - .ok_or_message("Bucket not found")?; - let bucket = self - .garage - .bucket_helper() - .get_existing_bucket(bucket_id) - .await?; - Ok(AdminRpc::BucketInfo(bucket)) - } + BucketOperation::Info(query) => self.handle_bucket_info(query).await, BucketOperation::Create(query) => self.handle_create_bucket(&query.name).await, BucketOperation::Delete(query) => self.handle_delete_bucket(query).await, BucketOperation::Alias(query) => self.handle_alias_bucket(query).await, @@ -96,6 +83,52 @@ impl AdminRpcHandler { Ok(AdminRpc::BucketList(bucket_aliases)) } + async fn handle_bucket_info(&self, query: &BucketOpt) -> Result { + let bucket_id = self + .garage + .bucket_helper() + .resolve_global_bucket_name(&query.name) + .await? 
+ .ok_or_message("Bucket not found")?; + + let bucket = self + .garage + .bucket_helper() + .get_existing_bucket(bucket_id) + .await?; + + let mut relevant_keys = HashMap::new(); + for (k, _) in bucket + .state + .as_option() + .unwrap() + .authorized_keys + .items() + .iter() + { + if let Some(key) = self.garage.key_table.get(&EmptyKey, k).await? { + relevant_keys.insert(k.clone(), key); + } + } + for ((k, _), _, _) in bucket + .state + .as_option() + .unwrap() + .local_aliases + .items() + .iter() + { + if relevant_keys.contains_key(k) { + continue; + } + if let Some(key) = self.garage.key_table.get(&EmptyKey, k).await? { + relevant_keys.insert(k.clone(), key); + } + } + + Ok(AdminRpc::BucketInfo(bucket, relevant_keys)) + } + #[allow(clippy::ptr_arg)] async fn handle_create_bucket(&self, name: &String) -> Result { let mut bucket = Bucket::new(); @@ -476,10 +509,7 @@ impl AdminRpcHandler { async fn handle_key_cmd(&self, cmd: &KeyOperation) -> Result { match cmd { KeyOperation::List => self.handle_list_keys().await, - KeyOperation::Info(query) => { - let key = self.get_existing_key(&query.key_pattern).await?; - Ok(AdminRpc::KeyInfo(key)) - } + KeyOperation::Info(query) => self.handle_key_info(query).await, KeyOperation::New(query) => self.handle_create_key(query).await, KeyOperation::Rename(query) => self.handle_rename_key(query).await, KeyOperation::Delete(query) => self.handle_delete_key(query).await, @@ -504,17 +534,22 @@ impl AdminRpcHandler { Ok(AdminRpc::KeyList(key_ids)) } + async fn handle_key_info(&self, query: &KeyOpt) -> Result { + let key = self.get_existing_key(&query.key_pattern).await?; + self.key_info_result(key).await + } + async fn handle_create_key(&self, query: &KeyNewOpt) -> Result { let key = Key::new(query.name.clone()); self.garage.key_table.insert(&key).await?; - Ok(AdminRpc::KeyInfo(key)) + self.key_info_result(key).await } async fn handle_rename_key(&self, query: &KeyRenameOpt) -> Result { let mut key = 
self.get_existing_key(&query.key_pattern).await?; key.name.update(query.new_name.clone()); self.garage.key_table.insert(&key).await?; - Ok(AdminRpc::KeyInfo(key)) + self.key_info_result(key).await } async fn handle_delete_key(&self, query: &KeyDeleteOpt) -> Result { @@ -577,7 +612,8 @@ impl AdminRpcHandler { } let imported_key = Key::import(&query.key_id, &query.secret_key, &query.name); self.garage.key_table.insert(&imported_key).await?; - Ok(AdminRpc::KeyInfo(imported_key)) + + self.key_info_result(imported_key).await } async fn get_existing_key(&self, pattern: &str) -> Result { @@ -604,6 +640,25 @@ impl AdminRpcHandler { } } + async fn key_info_result(&self, key: Key) -> Result { + let mut relevant_buckets = HashMap::new(); + + for (id, _) in key + .state + .as_option() + .unwrap() + .authorized_buckets + .items() + .iter() + { + if let Some(b) = self.garage.bucket_table.get(id, &EmptyKey).await? { + relevant_buckets.insert(*id, b); + } + } + + Ok(AdminRpc::KeyInfo(key, relevant_buckets)) + } + /// Update **key table** to inform of the new linked bucket async fn update_key_bucket( &self, diff --git a/src/garage/cli/cmd.rs b/src/garage/cli/cmd.rs index b65fea02..1c64f9b5 100644 --- a/src/garage/cli/cmd.rs +++ b/src/garage/cli/cmd.rs @@ -173,8 +173,8 @@ pub async fn cmd_admin( format_table(table); println!("Buckets that don't have a global alias (i.e. 
that only exist in the namespace of an access key) are not shown."); } - AdminRpc::BucketInfo(bucket) => { - print_bucket_info(&bucket); + AdminRpc::BucketInfo(bucket, rk) => { + print_bucket_info(&bucket, &rk); } AdminRpc::KeyList(kl) => { println!("List of keys:"); @@ -182,8 +182,8 @@ pub async fn cmd_admin( println!("{}\t{}", key.0, key.1); } } - AdminRpc::KeyInfo(key) => { - print_key_info(&key); + AdminRpc::KeyInfo(key, rb) => { + print_key_info(&key, &rb); } r => { error!("Unexpected response: {:?}", r); diff --git a/src/garage/cli/util.rs b/src/garage/cli/util.rs index f586d55b..ad48c301 100644 --- a/src/garage/cli/util.rs +++ b/src/garage/cli/util.rs @@ -1,3 +1,5 @@ +use std::collections::HashMap; + use garage_util::crdt::*; use garage_util::data::Uuid; use garage_util::error::*; @@ -5,7 +7,24 @@ use garage_util::error::*; use garage_model::bucket_table::*; use garage_model::key_table::*; -pub fn print_key_info(key: &Key) { +pub fn print_key_info(key: &Key, relevant_buckets: &HashMap) { + let bucket_global_aliases = |b: &Uuid| { + if let Some(bucket) = relevant_buckets.get(b) { + if let Some(p) = bucket.state.as_option() { + return p + .aliases + .items() + .iter() + .filter(|(_, _, active)| *active) + .map(|(a, _, _)| a.clone()) + .collect::>() + .join(", "); + } + } + + "".to_string() + }; + println!("Key name: {}", key.name.get()); println!("Key ID: {}", key.key_id); println!("Secret key: {}", key.secret_key); @@ -16,18 +35,39 @@ pub fn print_key_info(key: &Key) { let mut table = vec![]; for (alias_name, _, alias) in p.local_aliases.items().iter() { if let Some(bucket_id) = alias.as_option() { - table.push(format!("\t{}\t{}", alias_name, hex::encode(bucket_id))); + table.push(format!( + "\t{}\t{}\t{}", + alias_name, + bucket_global_aliases(bucket_id), + hex::encode(bucket_id) + )); } } format_table(table); println!("\nAuthorized buckets:"); let mut table = vec![]; - for (b, perm) in p.authorized_buckets.items().iter() { + for (bucket_id, perm) in 
p.authorized_buckets.items().iter() { let rflag = if perm.allow_read { "R" } else { " " }; let wflag = if perm.allow_write { "W" } else { " " }; let oflag = if perm.allow_owner { "O" } else { " " }; - table.push(format!("\t{}{}{}\t{:?}", rflag, wflag, oflag, b)); + let local_aliases = p + .local_aliases + .items() + .iter() + .filter(|(_, _, a)| a.as_option() == Some(bucket_id)) + .map(|(a, _, _)| a.clone()) + .collect::>() + .join(", "); + table.push(format!( + "\t{}{}{}\t{}\t{}\t{:?}", + rflag, + wflag, + oflag, + bucket_global_aliases(bucket_id), + local_aliases, + bucket_id + )); } format_table(table); } @@ -37,32 +77,49 @@ pub fn print_key_info(key: &Key) { } } -pub fn print_bucket_info(bucket: &Bucket) { +pub fn print_bucket_info(bucket: &Bucket, relevant_keys: &HashMap) { println!("Bucket: {}", hex::encode(bucket.id)); match &bucket.state { Deletable::Deleted => println!("Bucket is deleted."), Deletable::Present(p) => { + println!("Website access: {}", p.website_access.get()); + println!("\nGlobal aliases:"); for (alias, _, active) in p.aliases.items().iter() { if *active { - println!("- {}", alias); + println!(" {}", alias); } } println!("\nKey-specific aliases:"); + let mut table = vec![]; for ((key_id, alias), _, active) in p.local_aliases.items().iter() { if *active { - println!("- {} {}", key_id, alias); + let key_name = relevant_keys + .get(key_id) + .map(|k| k.name.get().as_str()) + .unwrap_or(""); + table.push(format!("\t{}\t{} ({})", alias, key_id, key_name)); } } + format_table(table); println!("\nAuthorized keys:"); + let mut table = vec![]; for (k, perm) in p.authorized_keys.items().iter() { let rflag = if perm.allow_read { "R" } else { " " }; let wflag = if perm.allow_write { "W" } else { " " }; let oflag = if perm.allow_owner { "O" } else { " " }; - println!("- {}{}{} {}", rflag, wflag, oflag, k); + let key_name = relevant_keys + .get(k) + .map(|k| k.name.get().as_str()) + .unwrap_or(""); + table.push(format!( + "\t{}{}{}\t{} ({})", + rflag, 
wflag, oflag, k, key_name + )); } + format_table(table); } }; } -- 2.43.4 From b1cfd16913e6957739958ef729b87c1bf3674a5d Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Fri, 17 Dec 2021 11:53:13 +0100 Subject: [PATCH 07/19] New buckets for 0.6.0: small fixes, including: - ensure bucket names are correct aws s3 names - when making aliases, ensure timestamps of links in both ways are the same - fix small remarks by trinity - don't have a separate website_access field --- Cargo.lock | 1 + src/api/api_server.rs | 8 +- src/api/s3_bucket.rs | 2 +- src/api/s3_website.rs | 2 - src/garage/Cargo.toml | 1 + src/garage/admin.rs | 155 +++++++++++++++++++++----------- src/garage/cli/cmd.rs | 2 +- src/garage/cli/util.rs | 2 +- src/model/bucket_alias_table.rs | 43 +++++++-- src/model/bucket_helper.rs | 12 ++- src/model/bucket_table.rs | 9 +- src/model/key_table.rs | 2 +- src/model/migrate.rs | 32 ++++--- src/model/permission.rs | 3 + src/util/crdt/lww.rs | 7 +- src/util/crdt/lww_map.rs | 62 +++++++++---- src/util/time.rs | 5 ++ src/web/web_server.rs | 2 +- 18 files changed, 243 insertions(+), 107 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8630855f..40d2a29f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -403,6 +403,7 @@ dependencies = [ "rand", "rmp-serde 0.15.5", "serde", + "serde_bytes", "sled", "structopt", "tokio", diff --git a/src/api/api_server.rs b/src/api/api_server.rs index cd866c9f..42987e78 100644 --- a/src/api/api_server.rs +++ b/src/api/api_server.rs @@ -109,11 +109,9 @@ async fn handler_inner(garage: Arc, req: Request) -> Result { - return handle_request_without_bucket(garage, req, api_key, endpoint).await - } - Authorization::Read(bucket) | Authorization::Write(bucket) => bucket.to_string(), + let bucket_name = match endpoint.get_bucket() { + None => return handle_request_without_bucket(garage, req, api_key, endpoint).await, + Some(bucket) => bucket.to_string(), }; let bucket_id = resolve_bucket(&garage, &bucket_name, &api_key).await?; diff --git 
a/src/api/s3_bucket.rs b/src/api/s3_bucket.rs index dc131a31..4a497c67 100644 --- a/src/api/s3_bucket.rs +++ b/src/api/s3_bucket.rs @@ -67,7 +67,7 @@ pub async fn handle_list_buckets(garage: &Garage, api_key: &Key) -> Result { "Invalid bucket name: {}. See AWS documentation for constraints on S3 bucket names:\nhttps://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html" }; } + #[derive(Debug, Serialize, Deserialize)] pub enum AdminRpc { BucketOperation(BucketOperation), @@ -142,14 +145,14 @@ impl AdminRpcHandler { })); alias } - None => BucketAlias::new(name.clone(), bucket.id), + None => BucketAlias::new(name.clone(), bucket.id) + .ok_or_message(format!(INVALID_BUCKET_NAME_MESSAGE!(), name))?, }; - bucket - .state - .as_option_mut() - .unwrap() - .aliases - .update_in_place(name.clone(), true); + bucket.state.as_option_mut().unwrap().aliases.merge_raw( + name, + alias.state.timestamp(), + &true, + ); self.garage.bucket_table.insert(&bucket).await?; self.garage.bucket_alias_table.insert(&alias).await?; Ok(AdminRpc::Ok(format!("Bucket {} was created.", name))) @@ -222,7 +225,7 @@ impl AdminRpcHandler { // 2. delete bucket alias bucket_alias.state.update(Deletable::Deleted); self.garage.bucket_alias_table.insert(&bucket_alias).await?; - // 3. delete bucket alias + // 3. 
delete bucket bucket.state = Deletable::delete(); self.garage.bucket_table.insert(&bucket).await?; @@ -259,15 +262,36 @@ impl AdminRpcHandler { } } - key_param.local_aliases = key_param - .local_aliases - .update_mutator(query.new_name.clone(), Deletable::present(bucket_id)); + if !is_valid_bucket_name(&query.new_name) { + return Err(Error::Message(format!( + INVALID_BUCKET_NAME_MESSAGE!(), + query.new_name + ))); + } + + // Checks ok, add alias + let mut bucket_p = bucket.state.as_option_mut().unwrap(); + let bucket_p_local_alias_key = (key.key_id.clone(), query.new_name.clone()); + + // Calculate the timestamp to assign to this aliasing in the two local_aliases maps + // (the one from key to bucket, and the reverse one stored in the bucket iself) + // so that merges on both maps in case of a concurrent operation resolve + // to the same alias being set + let alias_ts = increment_logical_clock_2( + key_param.local_aliases.get_timestamp(&query.new_name), + bucket_p + .local_aliases + .get_timestamp(&bucket_p_local_alias_key), + ); + + key_param.local_aliases = LwwMap::raw_item( + query.new_name.clone(), + alias_ts, + Deletable::present(bucket_id), + ); self.garage.key_table.insert(&key).await?; - let mut bucket_p = bucket.state.as_option_mut().unwrap(); - bucket_p.local_aliases = bucket_p - .local_aliases - .update_mutator((key.key_id.clone(), query.new_name.clone()), true); + bucket_p.local_aliases = LwwMap::raw_item(bucket_p_local_alias_key, alias_ts, true); self.garage.bucket_table.insert(&bucket).await?; Ok(AdminRpc::Ok(format!( @@ -275,40 +299,47 @@ impl AdminRpcHandler { query.new_name, bucket_id, key.key_id ))) } else { - let mut alias = self + let alias = self .garage .bucket_alias_table .get(&EmptyKey, &query.new_name) - .await? 
- .unwrap_or(BucketAlias { - name: query.new_name.clone(), - state: Lww::new(Deletable::delete()), - }); + .await?; - if let Some(existing_alias) = alias.state.get().as_option() { - if existing_alias.bucket_id == bucket_id { - return Ok(AdminRpc::Ok(format!( - "Alias {} already points to bucket {:?}", - query.new_name, bucket_id - ))); - } else { - return Err(Error::Message(format!( - "Alias {} already exists and points to different bucket: {:?}", - query.new_name, existing_alias.bucket_id - ))); + if let Some(existing_alias) = alias.as_ref() { + if let Some(p) = existing_alias.state.get().as_option() { + if p.bucket_id == bucket_id { + return Ok(AdminRpc::Ok(format!( + "Alias {} already points to bucket {:?}", + query.new_name, bucket_id + ))); + } else { + return Err(Error::Message(format!( + "Alias {} already exists and points to different bucket: {:?}", + query.new_name, p.bucket_id + ))); + } } } // Checks ok, add alias - alias - .state - .update(Deletable::present(AliasParams { bucket_id })); + let mut bucket_p = bucket.state.as_option_mut().unwrap(); + + let alias_ts = increment_logical_clock_2( + bucket_p.aliases.get_timestamp(&query.new_name), + alias.as_ref().map(|a| a.state.timestamp()).unwrap_or(0), + ); + + let alias = match alias { + None => BucketAlias::new(query.new_name.clone(), bucket_id) + .ok_or_message(format!(INVALID_BUCKET_NAME_MESSAGE!(), query.new_name))?, + Some(mut a) => { + a.state = Lww::raw(alias_ts, Deletable::present(AliasParams { bucket_id })); + a + } + }; self.garage.bucket_alias_table.insert(&alias).await?; - let mut bucket_p = bucket.state.as_option_mut().unwrap(); - bucket_p.aliases = bucket_p - .aliases - .update_mutator(query.new_name.clone(), true); + bucket_p.aliases = LwwMap::raw_item(query.new_name.clone(), alias_ts, true); self.garage.bucket_table.insert(&bucket).await?; Ok(AdminRpc::Ok(format!( @@ -336,14 +367,14 @@ impl AdminRpcHandler { .bucket_helper() .get_existing_bucket(bucket_id) .await?; - let mut bucket_state = 
bucket.state.as_option_mut().unwrap(); + let mut bucket_p = bucket.state.as_option_mut().unwrap(); - let has_other_aliases = bucket_state + let has_other_aliases = bucket_p .aliases .items() .iter() .any(|(_, _, active)| *active) - || bucket_state + || bucket_p .local_aliases .items() .iter() @@ -352,15 +383,22 @@ impl AdminRpcHandler { return Err(Error::Message(format!("Bucket {} doesn't have other aliases, please delete it instead of just unaliasing.", query.name))); } + // Checks ok, remove alias let mut key_param = key.state.as_option_mut().unwrap(); - key_param.local_aliases = key_param - .local_aliases - .update_mutator(query.name.clone(), Deletable::delete()); + let bucket_p_local_alias_key = (key.key_id.clone(), query.name.clone()); + + let alias_ts = increment_logical_clock_2( + key_param.local_aliases.get_timestamp(&query.name), + bucket_p + .local_aliases + .get_timestamp(&bucket_p_local_alias_key), + ); + + key_param.local_aliases = + LwwMap::raw_item(query.name.clone(), alias_ts, Deletable::delete()); self.garage.key_table.insert(&key).await?; - bucket_state.local_aliases = bucket_state - .local_aliases - .update_mutator((key.key_id.clone(), query.name.clone()), false); + bucket_p.local_aliases = LwwMap::raw_item(bucket_p_local_alias_key, alias_ts, false); self.garage.bucket_table.insert(&bucket).await?; Ok(AdminRpc::Ok(format!( @@ -401,12 +439,17 @@ impl AdminRpcHandler { .get(&EmptyKey, &query.name) .await? 
.ok_or_message("Internal error: alias not found")?; - alias.state.update(Deletable::delete()); + + // Checks ok, remove alias + let alias_ts = increment_logical_clock_2( + alias.state.timestamp(), + bucket_state.aliases.get_timestamp(&query.name), + ); + + alias.state = Lww::raw(alias_ts, Deletable::delete()); self.garage.bucket_alias_table.insert(&alias).await?; - bucket_state.aliases = bucket_state - .aliases - .update_mutator(query.name.clone(), false); + bucket_state.aliases = LwwMap::raw_item(query.name.clone(), alias_ts, false); self.garage.bucket_table.insert(&bucket).await?; Ok(AdminRpc::Ok(format!("Bucket alias {} deleted", query.name))) @@ -494,7 +537,13 @@ impl AdminRpcHandler { )); } - bucket_state.website_access.update(query.allow); + let website = if query.allow { + Some(ByteBuf::from(DEFAULT_WEBSITE_CONFIGURATION.to_vec())) + } else { + None + }; + + bucket_state.website_config.update(website); self.garage.bucket_table.insert(&bucket).await?; let msg = if query.allow { diff --git a/src/garage/cli/cmd.rs b/src/garage/cli/cmd.rs index 1c64f9b5..834261e4 100644 --- a/src/garage/cli/cmd.rs +++ b/src/garage/cli/cmd.rs @@ -167,7 +167,7 @@ pub async fn cmd_admin( let mut table = vec![]; for alias in bl { if let Some(p) = alias.state.get().as_option() { - table.push(format!("\t{}\t{:?}", alias.name, p.bucket_id)); + table.push(format!("\t{}\t{:?}", alias.name(), p.bucket_id)); } } format_table(table); diff --git a/src/garage/cli/util.rs b/src/garage/cli/util.rs index ad48c301..b4ea14d1 100644 --- a/src/garage/cli/util.rs +++ b/src/garage/cli/util.rs @@ -82,7 +82,7 @@ pub fn print_bucket_info(bucket: &Bucket, relevant_keys: &HashMap) match &bucket.state { Deletable::Deleted => println!("Bucket is deleted."), Deletable::Present(p) => { - println!("Website access: {}", p.website_access.get()); + println!("Website access: {}", p.website_config.get().is_some()); println!("\nGlobal aliases:"); for (alias, _, active) in p.aliases.items().iter() { diff --git 
a/src/model/bucket_alias_table.rs b/src/model/bucket_alias_table.rs index 52484c5b..904a5255 100644 --- a/src/model/bucket_alias_table.rs +++ b/src/model/bucket_alias_table.rs @@ -8,7 +8,7 @@ use garage_util::data::*; /// in the global namespace. #[derive(PartialEq, Clone, Debug, Serialize, Deserialize)] pub struct BucketAlias { - pub name: String, + name: String, pub state: crdt::Lww>, } @@ -22,15 +22,22 @@ impl AutoCrdt for AliasParams { } impl BucketAlias { - pub fn new(name: String, bucket_id: Uuid) -> Self { - BucketAlias { - name, - state: crdt::Lww::new(crdt::Deletable::present(AliasParams { bucket_id })), + pub fn new(name: String, bucket_id: Uuid) -> Option { + if !is_valid_bucket_name(&name) { + None + } else { + Some(BucketAlias { + name, + state: crdt::Lww::new(crdt::Deletable::present(AliasParams { bucket_id })), + }) } } pub fn is_deleted(&self) -> bool { self.state.get().is_deleted() } + pub fn name(&self) -> &str { + &self.name + } } impl Crdt for BucketAlias { @@ -62,3 +69,29 @@ impl TableSchema for BucketAliasTable { filter.apply(entry.is_deleted()) } } + +/// Check if a bucket name is valid. +/// +/// The requirements are listed here: +/// +/// +/// +/// In the case of Garage, bucket names must not be hex-encoded +/// 32 byte string, which is excluded thanks to the +/// maximum length of 63 bytes given in the spec. +pub fn is_valid_bucket_name(n: &str) -> bool { + // Bucket names must be between 3 and 63 characters + n.len() >= 3 && n.len() <= 63 + // Bucket names must be composed of lowercase letters, numbers, + // dashes and dots + && n.chars().all(|c| matches!(c, '.' 
| '-' | 'a'..='z' | '0'..='9')) + // Bucket names must start and end with a letter or a number + && !n.starts_with(&['-', '.'][..]) + && !n.ends_with(&['-', '.'][..]) + // Bucket names must not be formated as an IP address + && n.parse::().is_err() + // Bucket names must not start wih "xn--" + && !n.starts_with("xn--") + // Bucket names must not end with "-s3alias" + && !n.ends_with("-s3alias") +} diff --git a/src/model/bucket_helper.rs b/src/model/bucket_helper.rs index c1280afa..b55ebc4b 100644 --- a/src/model/bucket_helper.rs +++ b/src/model/bucket_helper.rs @@ -8,12 +8,21 @@ use crate::garage::Garage; pub struct BucketHelper<'a>(pub(crate) &'a Garage); -#[allow(clippy::ptr_arg)] impl<'a> BucketHelper<'a> { + #[allow(clippy::ptr_arg)] pub async fn resolve_global_bucket_name( &self, bucket_name: &String, ) -> Result, Error> { + // Bucket names in Garage are aliases, true bucket identifiers + // are 32-byte UUIDs. This function resolves bucket names into + // their full identifier by looking up in the bucket_alias_table. + // This function also allows buckets to be identified by their + // full UUID (hex-encoded). Here, if the name to be resolved is a + // hex string of the correct length, it is directly parsed as a bucket + // identifier which is returned. There is no risk of this conflicting + // with an actual bucket name: bucket names are max 63 chars long by + // the AWS spec, and hex-encoded UUIDs are 64 chars long. 
let hexbucket = hex::decode(bucket_name.as_str()) .ok() .map(|by| Uuid::try_from(&by)) @@ -37,7 +46,6 @@ impl<'a> BucketHelper<'a> { } } - #[allow(clippy::ptr_arg)] pub async fn get_existing_bucket(&self, bucket_id: Uuid) -> Result { self.0 .bucket_table diff --git a/src/model/bucket_table.rs b/src/model/bucket_table.rs index 6ae719ae..00e03899 100644 --- a/src/model/bucket_table.rs +++ b/src/model/bucket_table.rs @@ -8,6 +8,8 @@ use garage_util::time::*; use crate::permission::BucketKeyPerm; +pub const DEFAULT_WEBSITE_CONFIGURATION: &[u8] = b""; // TODO (an XML WebsiteConfiguration document per the AWS spec) + /// A bucket is a collection of objects /// /// Its parameters are not directly accessible as: @@ -29,9 +31,8 @@ pub struct BucketParams { /// Map of key with access to the bucket, and what kind of access they give pub authorized_keys: crdt::Map, /// Whether this bucket is allowed for website access - /// (under all of its global alias names) - pub website_access: crdt::Lww, - /// The website configuration XML document + /// (under all of its global alias names), + /// and if so, the website configuration XML document pub website_config: crdt::Lww>, /// Map of aliases that are or have been given to this bucket /// in the global namespace @@ -50,7 +51,6 @@ impl BucketParams { BucketParams { creation_date: now_msec(), authorized_keys: crdt::Map::new(), - website_access: crdt::Lww::new(false), website_config: crdt::Lww::new(None), aliases: crdt::LwwMap::new(), local_aliases: crdt::LwwMap::new(), @@ -61,7 +61,6 @@ impl BucketParams { impl Crdt for BucketParams { fn merge(&mut self, o: &Self) { self.authorized_keys.merge(&o.authorized_keys); - self.website_access.merge(&o.website_access); self.website_config.merge(&o.website_config); self.aliases.merge(&o.aliases); self.local_aliases.merge(&o.local_aliases); diff --git a/src/model/key_table.rs b/src/model/key_table.rs index 526ed496..3285e355 100644 --- a/src/model/key_table.rs +++ b/src/model/key_table.rs @@ 
-190,7 +190,7 @@ impl TableSchema for KeyTable { local_aliases: crdt::LwwMap::new(), }) }; - let name = crdt::Lww::migrate_from_raw(old_k.name.timestamp(), old_k.name.get().clone()); + let name = crdt::Lww::raw(old_k.name.timestamp(), old_k.name.get().clone()); Some(Key { key_id: old_k.key_id, secret_key: old_k.secret_key, diff --git a/src/model/migrate.rs b/src/model/migrate.rs index 35ff1807..e4469e64 100644 --- a/src/model/migrate.rs +++ b/src/model/migrate.rs @@ -1,5 +1,7 @@ use std::sync::Arc; +use serde_bytes::ByteBuf; + use garage_table::util::EmptyKey; use garage_util::crdt::*; use garage_util::data::*; @@ -38,6 +40,16 @@ impl Migrate { old_bucket: &old_bucket::Bucket, old_bucket_p: &old_bucket::BucketParams, ) -> Result<(), Error> { + let bucket_id = blake2sum(old_bucket.name.as_bytes()); + + let new_name = if is_valid_bucket_name(&old_bucket.name) { + old_bucket.name.clone() + } else { + // if old bucket name was not valid, replace it by + // a hex-encoded name derived from its identifier + hex::encode(&bucket_id.as_slice()[..16]) + }; + let mut new_ak = Map::new(); for (k, ts, perm) in old_bucket_p.authorized_keys.items().iter() { new_ak.put( @@ -52,27 +64,27 @@ impl Migrate { } let mut aliases = LwwMap::new(); - aliases.update_in_place(old_bucket.name.clone(), true); + aliases.update_in_place(new_name.clone(), true); + + let website = if *old_bucket_p.website.get() { + Some(ByteBuf::from(DEFAULT_WEBSITE_CONFIGURATION.to_vec())) + } else { + None + }; let new_bucket = Bucket { - id: blake2sum(old_bucket.name.as_bytes()), + id: bucket_id, state: Deletable::Present(BucketParams { creation_date: now_msec(), authorized_keys: new_ak.clone(), - website_access: Lww::new(*old_bucket_p.website.get()), - website_config: Lww::new(None), + website_config: Lww::new(website), aliases, local_aliases: LwwMap::new(), }), }; self.garage.bucket_table.insert(&new_bucket).await?; - let new_alias = BucketAlias { - name: old_bucket.name.clone(), - state: 
Lww::new(Deletable::Present(AliasParams { - bucket_id: new_bucket.id, - })), - }; + let new_alias = BucketAlias::new(new_name.clone(), new_bucket.id).unwrap(); self.garage.bucket_alias_table.insert(&new_alias).await?; for (k, perm) in new_ak.items().iter() { diff --git a/src/model/permission.rs b/src/model/permission.rs index 04bb2bc5..ebb24a32 100644 --- a/src/model/permission.rs +++ b/src/model/permission.rs @@ -34,6 +34,9 @@ impl Crdt for BucketKeyPerm { if !other.allow_write { self.allow_write = false; } + if !other.allow_owner { + self.allow_owner = false; + } } _ => (), } diff --git a/src/util/crdt/lww.rs b/src/util/crdt/lww.rs index bc686e05..99bd8e7c 100644 --- a/src/util/crdt/lww.rs +++ b/src/util/crdt/lww.rs @@ -61,7 +61,7 @@ where /// /// Compared to new, the CRDT's timestamp is not set to now /// but must be set to the previous, non-compatible, CRDT's timestamp. - pub fn migrate_from_raw(ts: u64, value: T) -> Self { + pub fn raw(ts: u64, value: T) -> Self { Self { ts, v: value } } @@ -77,6 +77,11 @@ where self.v = new_value; } + /// Get the timestamp currently associated with the value + pub fn timestamp(&self) -> u64 { + self.ts + } + /// Get the CRDT value pub fn get(&self) -> &T { &self.v diff --git a/src/util/crdt/lww_map.rs b/src/util/crdt/lww_map.rs index 21cb6e12..f3a90591 100644 --- a/src/util/crdt/lww_map.rs +++ b/src/util/crdt/lww_map.rs @@ -37,11 +37,12 @@ where pub fn new() -> Self { Self { vals: vec![] } } + /// Used to migrate from a map defined in an incompatible format. This produces /// a map that contains a single item with the specified timestamp (copied from /// the incompatible format). Do this as many times as you have items to migrate, /// and put them all together using the CRDT merge operator. 
- pub fn migrate_from_raw_item(k: K, ts: u64, v: V) -> Self { + pub fn raw_item(k: K, ts: u64, v: V) -> Self { Self { vals: vec![(k, ts, v)], } @@ -74,9 +75,37 @@ where Self { vals: new_vals } } + /// Updates a value in place in the map (this generates + /// a new timestamp) pub fn update_in_place(&mut self, k: K, new_v: V) { self.merge(&self.update_mutator(k, new_v)); } + + /// Updates a value in place in the map, from a + /// (key, timestamp, value) triple, only if the given + /// timestamp is larger than the timestamp currently + /// in the map + pub fn merge_raw(&mut self, k: &K, ts2: u64, v2: &V) { + match self.vals.binary_search_by(|(k2, _, _)| k2.cmp(k)) { + Ok(i) => { + let (_, ts1, _v1) = &self.vals[i]; + match ts2.cmp(ts1) { + Ordering::Greater => { + self.vals[i].1 = ts2; + self.vals[i].2 = v2.clone(); + } + Ordering::Equal => { + self.vals[i].2.merge(v2); + } + Ordering::Less => (), + } + } + Err(i) => { + self.vals.insert(i, (k.clone(), ts2, v2.clone())); + } + } + } + /// Takes all of the values of the map and returns them. The current map is reset to the /// empty map. This is very usefull to produce in-place a new map that contains only a delta /// that modifies a certain value: @@ -103,10 +132,12 @@ where let vals = std::mem::take(&mut self.vals); Self { vals } } + /// Removes all values from the map pub fn clear(&mut self) { self.vals.clear(); } + /// Get a reference to the value assigned to a key pub fn get(&self, k: &K) -> Option<&V> { match self.vals.binary_search_by(|(k2, _, _)| k2.cmp(k)) { @@ -114,6 +145,16 @@ where Err(_) => None, } } + + /// Get the timestamp of the value assigned to a key, or 0 if + /// no value is assigned + pub fn get_timestamp(&self, k: &K) -> u64 { + match self.vals.binary_search_by(|(k2, _, _)| k2.cmp(k)) { + Ok(i) => self.vals[i].1, + Err(_) => 0, + } + } + /// Gets a reference to all of the items, as a slice. Usefull to iterate on all map values. 
/// In most case you will want to ignore the timestamp (second item of the tuple). pub fn items(&self) -> &[(K, u64, V)] { @@ -138,24 +179,7 @@ where { fn merge(&mut self, other: &Self) { for (k, ts2, v2) in other.vals.iter() { - match self.vals.binary_search_by(|(k2, _, _)| k2.cmp(k)) { - Ok(i) => { - let (_, ts1, _v1) = &self.vals[i]; - match ts2.cmp(ts1) { - Ordering::Greater => { - self.vals[i].1 = *ts2; - self.vals[i].2 = v2.clone(); - } - Ordering::Equal => { - self.vals[i].2.merge(v2); - } - Ordering::Less => (), - } - } - Err(i) => { - self.vals.insert(i, (k.clone(), *ts2, v2.clone())); - } - } + self.merge_raw(k, *ts2, v2); } } } diff --git a/src/util/time.rs b/src/util/time.rs index d9192443..257b4d2a 100644 --- a/src/util/time.rs +++ b/src/util/time.rs @@ -15,6 +15,11 @@ pub fn increment_logical_clock(prev: u64) -> u64 { std::cmp::max(prev + 1, now_msec()) } +/// Increment two logical clocks +pub fn increment_logical_clock_2(prev: u64, prev2: u64) -> u64 { + std::cmp::max(prev2 + 1, std::cmp::max(prev + 1, now_msec())) +} + /// Convert a timestamp represented as milliseconds since UNIX Epoch to /// its RFC3339 representation, such as "2021-01-01T12:30:00Z" pub fn msec_to_rfc3339(msecs: u64) -> String { diff --git a/src/web/web_server.rs b/src/web/web_server.rs index 6152f282..cc6eed57 100644 --- a/src/web/web_server.rs +++ b/src/web/web_server.rs @@ -99,7 +99,7 @@ async fn serve_file(garage: Arc, req: Request) -> Result Date: Fri, 17 Dec 2021 15:01:35 +0100 Subject: [PATCH 08/19] New buckets for 0.6.0: documentation and build files --- Cargo.nix | 1 + src/util/crdt/crdt.rs | 11 +++++++++++ 2 files changed, 12 insertions(+) diff --git a/Cargo.nix b/Cargo.nix index 78f2b5c0..c6c5c050 100644 --- a/Cargo.nix +++ b/Cargo.nix @@ -638,6 +638,7 @@ in rand = rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.4" { inherit profileName; }; rmp_serde = rustPackages."registry+https://github.com/rust-lang/crates.io-index".rmp-serde."0.15.5" 
{ inherit profileName; }; serde = rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.130" { inherit profileName; }; + serde_bytes = rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.5" { inherit profileName; }; sled = rustPackages."registry+https://github.com/rust-lang/crates.io-index".sled."0.34.7" { inherit profileName; }; structopt = rustPackages."registry+https://github.com/rust-lang/crates.io-index".structopt."0.3.23" { inherit profileName; }; tokio = rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.12.0" { inherit profileName; }; diff --git a/src/util/crdt/crdt.rs b/src/util/crdt/crdt.rs index 2508d03b..00bb2e3b 100644 --- a/src/util/crdt/crdt.rs +++ b/src/util/crdt/crdt.rs @@ -28,6 +28,17 @@ pub trait Crdt { fn merge(&mut self, other: &Self); } +/// Option implements Crdt for any type T, even if T doesn't implement CRDT itself: when +/// different values are detected, they are always merged to None. This can be used for value +/// types which shoulnd't be merged, instead of trying to merge things when we know we don't want +/// to merge them (which is what the AutoCrdt trait is used for most of the time). This cases +/// arises very often, for example with a Lww or a LwwMap: the value type has to be a CRDT so that +/// we have a rule for what to do when timestamps aren't enough to disambiguate (in a distributed +/// system, anything can happen!), and with AutoCrdt the rule is to make an arbitrary (but +/// determinstic) choice between the two. When using an Option instead with this impl, ambiguity +/// cases are explicitely stored as None, which allows us to detect the ambiguity and handle it in +/// the way we want. 
(this can only work if we are happy with losing the value when an ambiguity +/// arises) impl Crdt for Option where T: Eq, -- 2.43.4 From b76d0580a03c38d901a2b3d19e7c6ede6b0d09a7 Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Wed, 22 Dec 2021 09:39:37 +0100 Subject: [PATCH 09/19] Fix forgotten flag --- src/api/s3_bucket.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/api/s3_bucket.rs b/src/api/s3_bucket.rs index 4a497c67..785b89dd 100644 --- a/src/api/s3_bucket.rs +++ b/src/api/s3_bucket.rs @@ -50,7 +50,7 @@ pub async fn handle_list_buckets(garage: &Garage, api_key: &Key) -> Result>(); -- 2.43.4 From c7d5c732442c5802058b46205d450d4620772b7b Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Wed, 22 Dec 2021 09:57:02 +0100 Subject: [PATCH 10/19] Add must_use to some CRDT functions --- src/util/crdt/lww_map.rs | 1 + src/util/crdt/map.rs | 1 + src/util/crdt/mod.rs | 2 +- 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/src/util/crdt/lww_map.rs b/src/util/crdt/lww_map.rs index f3a90591..1746c3cc 100644 --- a/src/util/crdt/lww_map.rs +++ b/src/util/crdt/lww_map.rs @@ -63,6 +63,7 @@ where /// /// However extracting the mutator on its own and only sending that on the network is very /// interesting as it is much smaller than the whole map. + #[must_use = "CRDT mutators are meant to be merged into a CRDT and not ignored."] pub fn update_mutator(&self, k: K, new_v: V) -> Self { let new_vals = match self.vals.binary_search_by(|(k2, _, _)| k2.cmp(&k)) { Ok(i) => { diff --git a/src/util/crdt/map.rs b/src/util/crdt/map.rs index 7553cd50..ad9a6e55 100644 --- a/src/util/crdt/map.rs +++ b/src/util/crdt/map.rs @@ -33,6 +33,7 @@ where /// This can be used to build a delta-mutator: /// when merged with another map, the value will be added or CRDT-merged if a previous /// value already exists. 
+ #[must_use = "CRDT mutators are meant to be merged into a CRDT and not ignored."] pub fn put_mutator(k: K, v: V) -> Self { Self { vals: vec![(k, v)] } } diff --git a/src/util/crdt/mod.rs b/src/util/crdt/mod.rs index 6ba575ed..64f0984e 100644 --- a/src/util/crdt/mod.rs +++ b/src/util/crdt/mod.rs @@ -7,7 +7,7 @@ //! counter. Alice does +1 on her copy, she reads 1. Bob does +3 on his copy, he reads 3. Now, //! it is easy to merge their counters, order does not count: we always get 4. //! -//! Learn more about CRDT [on Wikipedia](https://en.wikipedia.org/wiki/Conflict-free_replicated_data_type) +//! Learn more about CRDTs [on Wikipedia](https://en.wikipedia.org/wiki/Conflict-free_replicated_data_type) mod bool; #[allow(clippy::module_inception)] -- 2.43.4 From d8ab5bdc3e20759e5ba8a6844393757da3539372 Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Wed, 22 Dec 2021 18:50:08 +0100 Subject: [PATCH 11/19] New buckets for 0.6.0: fix model and migration --- src/api/error.rs | 37 ++++++++++++--------- src/api/s3_website.rs | 24 ++++++++++++-- src/garage/admin.rs | 6 ++-- src/model/bucket_alias_table.rs | 10 ++++-- src/model/bucket_table.rs | 17 +++++++--- src/model/key_table.rs | 9 +++--- src/model/lib.rs | 13 +++++--- src/model/migrate.rs | 38 +++++++++++++--------- src/model/object_table.rs | 7 ++-- src/model/version_table.rs | 57 ++++++++++++++++++--------------- src/table/schema.rs | 3 ++ src/util/crdt/lww.rs | 5 +-- src/util/crdt/lww_map.rs | 9 +++--- src/util/crdt/map.rs | 25 +++++++++++++++ src/util/error.rs | 10 +++--- 15 files changed, 174 insertions(+), 96 deletions(-) diff --git a/src/api/error.rs b/src/api/error.rs index 9bb8f8e2..828a2342 100644 --- a/src/api/error.rs +++ b/src/api/error.rs @@ -156,62 +156,67 @@ impl Error { /// Trait to map error to the Bad Request error code pub trait OkOrBadRequest { - type S2; - fn ok_or_bad_request(self, reason: &'static str) -> Self::S2; + type S; + fn ok_or_bad_request>(self, reason: M) -> Result; } impl OkOrBadRequest 
for Result where E: std::fmt::Display, { - type S2 = Result; - fn ok_or_bad_request(self, reason: &'static str) -> Result { + type S = T; + fn ok_or_bad_request>(self, reason: M) -> Result { match self { Ok(x) => Ok(x), - Err(e) => Err(Error::BadRequest(format!("{}: {}", reason, e))), + Err(e) => Err(Error::BadRequest(format!( + "{}: {}", + reason.as_ref(), + e.to_string() + ))), } } } impl OkOrBadRequest for Option { - type S2 = Result; - fn ok_or_bad_request(self, reason: &'static str) -> Result { + type S = T; + fn ok_or_bad_request>(self, reason: M) -> Result { match self { Some(x) => Ok(x), - None => Err(Error::BadRequest(reason.to_string())), + None => Err(Error::BadRequest(reason.as_ref().to_string())), } } } /// Trait to map an error to an Internal Error code pub trait OkOrInternalError { - type S2; - fn ok_or_internal_error(self, reason: &'static str) -> Self::S2; + type S; + fn ok_or_internal_error>(self, reason: M) -> Result; } impl OkOrInternalError for Result where E: std::fmt::Display, { - type S2 = Result; - fn ok_or_internal_error(self, reason: &'static str) -> Result { + type S = T; + fn ok_or_internal_error>(self, reason: M) -> Result { match self { Ok(x) => Ok(x), Err(e) => Err(Error::InternalError(GarageError::Message(format!( "{}: {}", - reason, e + reason.as_ref(), + e )))), } } } impl OkOrInternalError for Option { - type S2 = Result; - fn ok_or_internal_error(self, reason: &'static str) -> Result { + type S = T; + fn ok_or_internal_error>(self, reason: M) -> Result { match self { Some(x) => Ok(x), None => Err(Error::InternalError(GarageError::Message( - reason.to_string(), + reason.as_ref().to_string(), ))), } } diff --git a/src/api/s3_website.rs b/src/api/s3_website.rs index e76afbf4..1ea57577 100644 --- a/src/api/s3_website.rs +++ b/src/api/s3_website.rs @@ -3,12 +3,12 @@ use std::sync::Arc; use hyper::{Body, Request, Response, StatusCode}; use serde::{Deserialize, Serialize}; -use serde_bytes::ByteBuf; use crate::error::*; use 
crate::s3_xml::{xmlns_tag, IntValue, Value}; use crate::signature::verify_signed_content; +use garage_model::bucket_table::*; use garage_model::garage::Garage; use garage_table::*; use garage_util::crdt; @@ -58,7 +58,7 @@ pub async fn handle_put_website( if let crdt::Deletable::Present(param) = &mut bucket.state { param .website_config - .update(Some(ByteBuf::from(body.to_vec()))); + .update(Some(conf.into_garage_website_config()?)); garage.bucket_table.insert(&bucket).await?; } else { unreachable!(); @@ -168,6 +168,26 @@ impl WebsiteConfiguration { Ok(()) } + + pub fn into_garage_website_config(self) -> Result { + if let Some(rart) = self.redirect_all_requests_to { + Ok(WebsiteConfig::RedirectAll { + hostname: rart.hostname.0, + protocol: rart + .protocol + .map(|x| x.0) + .unwrap_or_else(|| "http".to_string()), + }) + } else { + Ok(WebsiteConfig::Website { + index_document: self + .index_document + .map(|x| x.suffix.0) + .unwrap_or_else(|| "index.html".to_string()), + error_document: self.error_document.map(|x| x.key.0), + }) + } + } } impl Key { diff --git a/src/garage/admin.rs b/src/garage/admin.rs index 9ea5c19e..49890189 100644 --- a/src/garage/admin.rs +++ b/src/garage/admin.rs @@ -4,7 +4,6 @@ use std::sync::Arc; use async_trait::async_trait; use serde::{Deserialize, Serialize}; -use serde_bytes::ByteBuf; use garage_util::crdt::*; use garage_util::data::*; @@ -538,7 +537,10 @@ impl AdminRpcHandler { } let website = if query.allow { - Some(ByteBuf::from(DEFAULT_WEBSITE_CONFIGURATION.to_vec())) + Some(WebsiteConfig::Website { + index_document: "index.html".into(), + error_document: None, + }) } else { None }; diff --git a/src/model/bucket_alias_table.rs b/src/model/bucket_alias_table.rs index 904a5255..caae76f1 100644 --- a/src/model/bucket_alias_table.rs +++ b/src/model/bucket_alias_table.rs @@ -1,8 +1,10 @@ use serde::{Deserialize, Serialize}; +use garage_util::data::*; +use garage_util::time::*; + use garage_table::crdt::*; use garage_table::*; -use 
garage_util::data::*; /// The bucket alias table holds the names given to buckets /// in the global namespace. @@ -23,15 +25,19 @@ impl AutoCrdt for AliasParams { impl BucketAlias { pub fn new(name: String, bucket_id: Uuid) -> Option { + Self::raw(name, now_msec(), bucket_id) + } + pub fn raw(name: String, ts: u64, bucket_id: Uuid) -> Option { if !is_valid_bucket_name(&name) { None } else { Some(BucketAlias { name, - state: crdt::Lww::new(crdt::Deletable::present(AliasParams { bucket_id })), + state: crdt::Lww::raw(ts, crdt::Deletable::present(AliasParams { bucket_id })), }) } } + pub fn is_deleted(&self) -> bool { self.state.get().is_deleted() } diff --git a/src/model/bucket_table.rs b/src/model/bucket_table.rs index 00e03899..8dcf6913 100644 --- a/src/model/bucket_table.rs +++ b/src/model/bucket_table.rs @@ -1,5 +1,4 @@ use serde::{Deserialize, Serialize}; -use serde_bytes::ByteBuf; use garage_table::crdt::Crdt; use garage_table::*; @@ -8,8 +7,6 @@ use garage_util::time::*; use crate::permission::BucketKeyPerm; -pub const DEFAULT_WEBSITE_CONFIGURATION: &[u8] = b""; // TODO (an XML WebsiteConfiguration document per the AWS spec) - /// A bucket is a collection of objects /// /// Its parameters are not directly accessible as: @@ -33,7 +30,7 @@ pub struct BucketParams { /// Whether this bucket is allowed for website access /// (under all of its global alias names), /// and if so, the website configuration XML document - pub website_config: crdt::Lww>, + pub website_config: crdt::Lww>, /// Map of aliases that are or have been given to this bucket /// in the global namespace /// (not authoritative: this is just used as an indication to @@ -45,6 +42,18 @@ pub struct BucketParams { pub local_aliases: crdt::LwwMap<(String, String), bool>, } +#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] +pub enum WebsiteConfig { + RedirectAll { + hostname: String, + protocol: String, + }, + Website { + index_document: String, + error_document: Option, + }, +} + impl 
BucketParams { /// Create an empty BucketParams with no authorized keys and no website accesss pub fn new() -> Self { diff --git a/src/model/key_table.rs b/src/model/key_table.rs index 3285e355..daea5473 100644 --- a/src/model/key_table.rs +++ b/src/model/key_table.rs @@ -6,6 +6,8 @@ use garage_util::data::*; use crate::permission::BucketKeyPerm; +use garage_model_050::key_table as old; + /// An api key #[derive(PartialEq, Clone, Debug, Serialize, Deserialize)] pub struct Key { @@ -173,11 +175,8 @@ impl TableSchema for KeyTable { } fn try_migrate(bytes: &[u8]) -> Option { - let old_k = - match rmp_serde::decode::from_read_ref::<_, garage_model_050::key_table::Key>(bytes) { - Ok(x) => x, - Err(_) => return None, - }; + let old_k = rmp_serde::decode::from_read_ref::<_, old::Key>(bytes).ok()?; + let state = if old_k.deleted.get() { crdt::Deletable::Deleted } else { diff --git a/src/model/lib.rs b/src/model/lib.rs index 3f6b5cd4..e7d7e98b 100644 --- a/src/model/lib.rs +++ b/src/model/lib.rs @@ -1,14 +1,17 @@ #[macro_use] extern crate log; -pub mod block; +pub mod permission; + pub mod block_ref_table; pub mod bucket_alias_table; -pub mod bucket_helper; pub mod bucket_table; -pub mod garage; pub mod key_table; -pub mod migrate; pub mod object_table; -pub mod permission; pub mod version_table; + +pub mod block; + +pub mod bucket_helper; +pub mod garage; +pub mod migrate; diff --git a/src/model/migrate.rs b/src/model/migrate.rs index e4469e64..6b20a01f 100644 --- a/src/model/migrate.rs +++ b/src/model/migrate.rs @@ -1,7 +1,5 @@ use std::sync::Arc; -use serde_bytes::ByteBuf; - use garage_table::util::EmptyKey; use garage_util::crdt::*; use garage_util::data::*; @@ -50,24 +48,32 @@ impl Migrate { hex::encode(&bucket_id.as_slice()[..16]) }; - let mut new_ak = Map::new(); - for (k, ts, perm) in old_bucket_p.authorized_keys.items().iter() { - new_ak.put( - k.to_string(), - BucketKeyPerm { - timestamp: *ts, - allow_read: perm.allow_read, - allow_write: perm.allow_write, - 
allow_owner: false, - }, - ); - } + let new_ak = old_bucket_p + .authorized_keys + .items() + .iter() + .map(|(k, ts, perm)| { + ( + k.to_string(), + BucketKeyPerm { + timestamp: *ts, + allow_read: perm.allow_read, + allow_write: perm.allow_write, + allow_owner: false, + }, + ) + }) + .collect::>(); let mut aliases = LwwMap::new(); aliases.update_in_place(new_name.clone(), true); + let alias_ts = aliases.get_timestamp(&new_name); let website = if *old_bucket_p.website.get() { - Some(ByteBuf::from(DEFAULT_WEBSITE_CONFIGURATION.to_vec())) + Some(WebsiteConfig::Website { + index_document: "index.html".into(), + error_document: None, + }) } else { None }; @@ -84,7 +90,7 @@ impl Migrate { }; self.garage.bucket_table.insert(&new_bucket).await?; - let new_alias = BucketAlias::new(new_name.clone(), new_bucket.id).unwrap(); + let new_alias = BucketAlias::raw(new_name.clone(), alias_ts, new_bucket.id).unwrap(); self.garage.bucket_alias_table.insert(&new_alias).await?; for (k, perm) in new_ak.items().iter() { diff --git a/src/model/object_table.rs b/src/model/object_table.rs index 45f0daf4..0c6c3a6d 100644 --- a/src/model/object_table.rs +++ b/src/model/object_table.rs @@ -259,11 +259,8 @@ impl TableSchema for ObjectTable { } fn try_migrate(bytes: &[u8]) -> Option { - let old_v = match rmp_serde::decode::from_read_ref::<_, old::Object>(bytes) { - Ok(x) => x, - Err(_) => return None, - }; - Some(migrate_object(old_v)) + let old_obj = rmp_serde::decode::from_read_ref::<_, old::Object>(bytes).ok()?; + Some(migrate_object(old_obj)) } } diff --git a/src/model/version_table.rs b/src/model/version_table.rs index 05cae831..e0b99770 100644 --- a/src/model/version_table.rs +++ b/src/model/version_table.rs @@ -10,6 +10,8 @@ use garage_table::*; use crate::block_ref_table::*; +use garage_model_050::version_table as old; + /// A version of an object #[derive(PartialEq, Clone, Debug, Serialize, Deserialize)] pub struct Version { @@ -149,35 +151,38 @@ impl TableSchema for VersionTable { } 
fn try_migrate(bytes: &[u8]) -> Option { - let old = - match rmp_serde::decode::from_read_ref::<_, garage_model_050::version_table::Version>( - bytes, - ) { - Ok(x) => x, - Err(_) => return None, - }; - let mut new_blocks = crdt::Map::new(); - for (k, v) in old.blocks.items().iter() { - new_blocks.put( - VersionBlockKey { - part_number: k.part_number, - offset: k.offset, - }, - VersionBlock { - hash: Hash::try_from(v.hash.as_slice()).unwrap(), - size: v.size, - }, - ); - } - let mut new_parts_etags = crdt::Map::new(); - for (k, v) in old.parts_etags.items().iter() { - new_parts_etags.put(*k, v.clone()); - } + let old = rmp_serde::decode::from_read_ref::<_, old::Version>(bytes).ok()?; + + let blocks = old + .blocks + .items() + .iter() + .map(|(k, v)| { + ( + VersionBlockKey { + part_number: k.part_number, + offset: k.offset, + }, + VersionBlock { + hash: Hash::try_from(v.hash.as_slice()).unwrap(), + size: v.size, + }, + ) + }) + .collect::>(); + + let parts_etags = old + .parts_etags + .items() + .iter() + .map(|(k, v)| (*k, v.clone())) + .collect::>(); + Some(Version { uuid: Hash::try_from(old.uuid.as_slice()).unwrap(), deleted: crdt::Bool::new(old.deleted.get()), - blocks: new_blocks, - parts_etags: new_parts_etags, + blocks, + parts_etags, bucket_id: blake2sum(old.bucket.as_bytes()), key: old.key, }) diff --git a/src/table/schema.rs b/src/table/schema.rs index cfe86fba..eba918a2 100644 --- a/src/table/schema.rs +++ b/src/table/schema.rs @@ -16,6 +16,9 @@ impl PartitionKey for String { } } +/// Values of type FixedBytes32 are assumed to be random, +/// either a hash or a random UUID. This means we can use +/// them directly as an index into the hash table. 
impl PartitionKey for FixedBytes32 { fn hash(&self) -> Hash { *self diff --git a/src/util/crdt/lww.rs b/src/util/crdt/lww.rs index 99bd8e7c..adb07711 100644 --- a/src/util/crdt/lww.rs +++ b/src/util/crdt/lww.rs @@ -57,10 +57,7 @@ where } } - /// Build a new CRDT from a previous non-compatible one - /// - /// Compared to new, the CRDT's timestamp is not set to now - /// but must be set to the previous, non-compatible, CRDT's timestamp. + /// Build a new LWW CRDT from its raw pieces: a timestamp and the value pub fn raw(ts: u64, value: T) -> Self { Self { ts, v: value } } diff --git a/src/util/crdt/lww_map.rs b/src/util/crdt/lww_map.rs index 1746c3cc..c155c3a8 100644 --- a/src/util/crdt/lww_map.rs +++ b/src/util/crdt/lww_map.rs @@ -38,10 +38,11 @@ where Self { vals: vec![] } } - /// Used to migrate from a map defined in an incompatible format. This produces - /// a map that contains a single item with the specified timestamp (copied from - /// the incompatible format). Do this as many times as you have items to migrate, - /// and put them all together using the CRDT merge operator. + /// This produces a map that contains a single item with the specified timestamp. + /// + /// Used to migrate from a map defined in an incompatible format. Do this as many + /// times as you have items to migrate, and put them all together using the + /// CRDT merge operator. pub fn raw_item(k: K, ts: u64, v: V) -> Self { Self { vals: vec![(k, ts, v)], diff --git a/src/util/crdt/map.rs b/src/util/crdt/map.rs index ad9a6e55..f9ed19b6 100644 --- a/src/util/crdt/map.rs +++ b/src/util/crdt/map.rs @@ -1,3 +1,5 @@ +use std::iter::{FromIterator, IntoIterator}; + use serde::{Deserialize, Serialize}; use crate::crdt::crdt::*; @@ -98,3 +100,26 @@ where Self::new() } } + +/// A crdt map can be created from an iterator of key-value pairs. +/// Note that all keys in the iterator must be distinct: +/// this function will throw a panic if it is not the case. 
+impl FromIterator<(K, V)> for Map +where + K: Clone + Ord, + V: Clone + Crdt, +{ + fn from_iter>(iter: T) -> Self { + let mut vals: Vec<(K, V)> = iter.into_iter().collect(); + vals.sort_by_cached_key(|tup| tup.0.clone()); + + // sanity check + for i in 1..vals.len() { + if vals[i - 1].0 == vals[i].0 { + panic!("Duplicate key in crdt::Map resulting from .from_iter() or .collect()"); + } + } + + Self { vals } + } +} diff --git a/src/util/error.rs b/src/util/error.rs index 08cf1302..ef5a76f2 100644 --- a/src/util/error.rs +++ b/src/util/error.rs @@ -119,17 +119,17 @@ where } } -/// Trait to map error to the Bad Request error code +/// Trait to map any error type to Error::Message pub trait OkOrMessage { - type S2; - fn ok_or_message>(self, message: M) -> Self::S2; + type S; + fn ok_or_message>(self, message: M) -> Result; } impl OkOrMessage for Result where E: std::fmt::Display, { - type S2 = Result; + type S = T; fn ok_or_message>(self, message: M) -> Result { match self { Ok(x) => Ok(x), @@ -139,7 +139,7 @@ where } impl OkOrMessage for Option { - type S2 = Result; + type S = T; fn ok_or_message>(self, message: M) -> Result { match self { Some(x) => Ok(x), -- 2.43.4 From beeef4758e5ec0d521179a799a3237c2c0368911 Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Mon, 3 Jan 2022 13:58:05 +0100 Subject: [PATCH 12/19] Some movement of helper code and refactoring of error handling --- Cargo.lock | 1 + src/api/error.rs | 10 +++ src/garage/admin.rs | 64 ++++++++++--------- src/garage/cli/cmd.rs | 14 ++-- src/garage/main.rs | 8 ++- src/model/Cargo.toml | 1 + src/model/block.rs | 8 +-- src/model/garage.rs | 6 +- .../{bucket_helper.rs => helper/bucket.rs} | 13 +--- src/model/helper/error.rs | 51 +++++++++++++++ src/model/helper/mod.rs | 2 + src/model/lib.rs | 2 +- src/rpc/system.rs | 2 +- src/table/gc.rs | 2 +- src/table/sync.rs | 7 +- src/table/table.rs | 2 +- src/util/error.rs | 10 ++- 17 files changed, 137 insertions(+), 66 deletions(-) rename src/model/{bucket_helper.rs => 
helper/bucket.rs} (91%) create mode 100644 src/model/helper/error.rs create mode 100644 src/model/helper/mod.rs diff --git a/Cargo.lock b/Cargo.lock index 40d2a29f..033d157f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -474,6 +474,7 @@ version = "0.6.0" dependencies = [ "arc-swap", "async-trait", + "err-derive 0.3.0", "futures", "futures-util", "garage_model 0.5.1", diff --git a/src/api/error.rs b/src/api/error.rs index 828a2342..d6d4a1d7 100644 --- a/src/api/error.rs +++ b/src/api/error.rs @@ -4,6 +4,7 @@ use err_derive::Error; use hyper::header::HeaderValue; use hyper::{HeaderMap, StatusCode}; +use garage_model::helper::error::Error as HelperError; use garage_util::error::Error as GarageError; use crate::s3_xml; @@ -83,6 +84,15 @@ impl From for Error { } } +impl From for Error { + fn from(err: HelperError) -> Self { + match err { + HelperError::Internal(i) => Self::InternalError(i), + HelperError::BadRequest(b) => Self::BadRequest(b), + } + } +} + impl Error { /// Get the HTTP status code that best represents the meaning of the error for the client pub fn http_status_code(&self) -> StatusCode { diff --git a/src/garage/admin.rs b/src/garage/admin.rs index 49890189..740114c6 100644 --- a/src/garage/admin.rs +++ b/src/garage/admin.rs @@ -7,7 +7,7 @@ use serde::{Deserialize, Serialize}; use garage_util::crdt::*; use garage_util::data::*; -use garage_util::error::*; +use garage_util::error::{Error as GarageError, OkOrMessage}; use garage_util::time::*; use garage_table::replication::*; @@ -18,6 +18,7 @@ use garage_rpc::*; use garage_model::bucket_alias_table::*; use garage_model::bucket_table::*; use garage_model::garage::Garage; +use garage_model::helper::error::{Error, OkOrBadRequest}; use garage_model::key_table::*; use garage_model::migrate::Migrate; use garage_model::permission::*; @@ -91,7 +92,7 @@ impl AdminRpcHandler { .bucket_helper() .resolve_global_bucket_name(&query.name) .await? 
- .ok_or_message("Bucket not found")?; + .ok_or_bad_request("Bucket not found")?; let bucket = self .garage @@ -137,7 +138,7 @@ impl AdminRpcHandler { let alias = match self.garage.bucket_alias_table.get(&EmptyKey, name).await? { Some(mut alias) => { if !alias.state.get().is_deleted() { - return Err(Error::BadRpc(format!("Bucket {} already exists", name))); + return Err(Error::BadRequest(format!("Bucket {} already exists", name))); } alias.state.update(Deletable::Present(AliasParams { bucket_id: bucket.id, @@ -145,7 +146,7 @@ impl AdminRpcHandler { alias } None => BucketAlias::new(name.clone(), bucket.id) - .ok_or_message(format!(INVALID_BUCKET_NAME_MESSAGE!(), name))?, + .ok_or_bad_request(format!(INVALID_BUCKET_NAME_MESSAGE!(), name))?, }; bucket.state.as_option_mut().unwrap().aliases.merge_raw( name, @@ -164,7 +165,7 @@ impl AdminRpcHandler { .get(&EmptyKey, &query.name) .await? .filter(|a| !a.is_deleted()) - .ok_or_message(format!("Bucket {} does not exist", query.name))?; + .ok_or_bad_request(format!("Bucket {} does not exist", query.name))?; let bucket_id = bucket_alias.state.get().as_option().unwrap().bucket_id; @@ -182,7 +183,7 @@ impl AdminRpcHandler { .filter(|(_, _, active)| *active) .any(|(name, _, _)| name != &query.name) { - return Err(Error::Message(format!("Bucket {} still has other global aliases. Use `bucket unalias` to delete them one by one.", query.name))); + return Err(Error::BadRequest(format!("Bucket {} still has other global aliases. Use `bucket unalias` to delete them one by one.", query.name))); } if bucket_state .local_aliases @@ -190,7 +191,7 @@ impl AdminRpcHandler { .iter() .any(|(_, _, active)| *active) { - return Err(Error::Message(format!("Bucket {} still has other local aliases. Use `bucket unalias` to delete them one by one.", query.name))); + return Err(Error::BadRequest(format!("Bucket {} still has other local aliases. 
Use `bucket unalias` to delete them one by one.", query.name))); } // Check bucket is empty @@ -200,11 +201,14 @@ impl AdminRpcHandler { .get_range(&bucket_id, None, Some(DeletedFilter::NotDeleted), 10) .await?; if !objects.is_empty() { - return Err(Error::BadRpc(format!("Bucket {} is not empty", query.name))); + return Err(Error::BadRequest(format!( + "Bucket {} is not empty", + query.name + ))); } if !query.yes { - return Err(Error::BadRpc( + return Err(Error::BadRequest( "Add --yes flag to really perform this operation".to_string(), )); } @@ -218,7 +222,7 @@ impl AdminRpcHandler { .await?; } } else { - return Err(Error::Message(format!("Key not found: {}", key_id))); + return Err(Error::BadRequest(format!("Key not found: {}", key_id))); } } // 2. delete bucket alias @@ -237,7 +241,7 @@ impl AdminRpcHandler { .bucket_helper() .resolve_global_bucket_name(&query.existing_bucket) .await? - .ok_or_message("Bucket not found")?; + .ok_or_bad_request("Bucket not found")?; let mut bucket = self .garage .bucket_helper() @@ -257,12 +261,12 @@ impl AdminRpcHandler { query.new_name, bucket_id, key.key_id ))); } else { - return Err(Error::Message(format!("Alias {} already exists and points to different bucket: {:?} in namespace of key {}", query.new_name, existing_alias, key.key_id))); + return Err(Error::BadRequest(format!("Alias {} already exists and points to different bucket: {:?} in namespace of key {}", query.new_name, existing_alias, key.key_id))); } } if !is_valid_bucket_name(&query.new_name) { - return Err(Error::Message(format!( + return Err(Error::BadRequest(format!( INVALID_BUCKET_NAME_MESSAGE!(), query.new_name ))); @@ -312,7 +316,7 @@ impl AdminRpcHandler { query.new_name, bucket_id ))); } else { - return Err(Error::Message(format!( + return Err(Error::BadRequest(format!( "Alias {} already exists and points to different bucket: {:?}", query.new_name, p.bucket_id ))); @@ -330,7 +334,7 @@ impl AdminRpcHandler { let alias = match alias { None => 
BucketAlias::new(query.new_name.clone(), bucket_id) - .ok_or_message(format!(INVALID_BUCKET_NAME_MESSAGE!(), query.new_name))?, + .ok_or_bad_request(format!(INVALID_BUCKET_NAME_MESSAGE!(), query.new_name))?, Some(mut a) => { a.state = Lww::raw(alias_ts, Deletable::present(AliasParams { bucket_id })); a @@ -360,7 +364,7 @@ impl AdminRpcHandler { .get(&query.name) .map(|a| a.into_option()) .flatten() - .ok_or_message("Bucket not found")?; + .ok_or_bad_request("Bucket not found")?; let mut bucket = self .garage .bucket_helper() @@ -379,7 +383,7 @@ impl AdminRpcHandler { .iter() .any(|((k, n), _, active)| *k == key.key_id && *n == query.name && *active); if !has_other_aliases { - return Err(Error::Message(format!("Bucket {} doesn't have other aliases, please delete it instead of just unaliasing.", query.name))); + return Err(Error::BadRequest(format!("Bucket {} doesn't have other aliases, please delete it instead of just unaliasing.", query.name))); } // Checks ok, remove alias @@ -410,7 +414,7 @@ impl AdminRpcHandler { .bucket_helper() .resolve_global_bucket_name(&query.name) .await? - .ok_or_message("Bucket not found")?; + .ok_or_bad_request("Bucket not found")?; let mut bucket = self .garage .bucket_helper() @@ -429,7 +433,7 @@ impl AdminRpcHandler { .iter() .any(|(_, _, active)| *active); if !has_other_aliases { - return Err(Error::Message(format!("Bucket {} doesn't have other aliases, please delete it instead of just unaliasing.", query.name))); + return Err(Error::BadRequest(format!("Bucket {} doesn't have other aliases, please delete it instead of just unaliasing.", query.name))); } let mut alias = self @@ -461,7 +465,7 @@ impl AdminRpcHandler { .bucket_helper() .resolve_global_bucket_name(&query.bucket) .await? - .ok_or_message("Bucket not found")?; + .ok_or_bad_request("Bucket not found")?; let bucket = self .garage .bucket_helper() @@ -491,7 +495,7 @@ impl AdminRpcHandler { .bucket_helper() .resolve_global_bucket_name(&query.bucket) .await? 
- .ok_or_message("Bucket not found")?; + .ok_or_bad_request("Bucket not found")?; let bucket = self .garage .bucket_helper() @@ -521,7 +525,7 @@ impl AdminRpcHandler { .bucket_helper() .resolve_global_bucket_name(&query.bucket) .await? - .ok_or_message("Bucket not found")?; + .ok_or_bad_request("Bucket not found")?; let mut bucket = self .garage @@ -531,7 +535,7 @@ impl AdminRpcHandler { let bucket_state = bucket.state.as_option_mut().unwrap(); if !(query.allow ^ query.deny) { - return Err(Error::Message( + return Err(Error::BadRequest( "You must specify exactly one flag, either --allow or --deny".to_string(), )); } @@ -606,7 +610,7 @@ impl AdminRpcHandler { async fn handle_delete_key(&self, query: &KeyDeleteOpt) -> Result { let mut key = self.get_existing_key(&query.key_pattern).await?; if !query.yes { - return Err(Error::BadRpc( + return Err(Error::BadRequest( "Add --yes flag to really perform this operation".to_string(), )); } @@ -659,7 +663,7 @@ impl AdminRpcHandler { async fn handle_import_key(&self, query: &KeyImportOpt) -> Result { let prev_key = self.garage.key_table.get(&EmptyKey, &query.key_id).await?; if prev_key.is_some() { - return Err(Error::Message(format!("Key {} already exists in data store. Even if it is deleted, we can't let you create a new key with the same ID. Sorry.", query.key_id))); + return Err(Error::BadRequest(format!("Key {} already exists in data store. Even if it is deleted, we can't let you create a new key with the same ID. 
Sorry.", query.key_id))); } let imported_key = Key::import(&query.key_id, &query.secret_key, &query.name); self.garage.key_table.insert(&imported_key).await?; @@ -682,7 +686,7 @@ impl AdminRpcHandler { .filter(|k| !k.state.is_deleted()) .collect::>(); if candidates.len() != 1 { - Err(Error::Message(format!( + Err(Error::BadRequest(format!( "{} matching keys", candidates.len() ))) @@ -760,7 +764,7 @@ impl AdminRpcHandler { async fn handle_migrate(self: &Arc, opt: MigrateOpt) -> Result { if !opt.yes { - return Err(Error::BadRpc( + return Err(Error::BadRequest( "Please provide the --yes flag to initiate migration operation.".to_string(), )); } @@ -776,7 +780,7 @@ impl AdminRpcHandler { async fn handle_launch_repair(self: &Arc, opt: RepairOpt) -> Result { if !opt.yes { - return Err(Error::BadRpc( + return Err(Error::BadRequest( "Please provide the --yes flag to initiate repair operations.".to_string(), )); } @@ -803,7 +807,7 @@ impl AdminRpcHandler { if failures.is_empty() { Ok(AdminRpc::Ok("Repair launched on all nodes".to_string())) } else { - Err(Error::Message(format!( + Err(Error::BadRequest(format!( "Could not launch repair on nodes: {:?} (launched successfully on other nodes)", failures ))) @@ -946,7 +950,7 @@ impl EndpointHandler for AdminRpcHandler { AdminRpc::Migrate(opt) => self.handle_migrate(opt.clone()).await, AdminRpc::LaunchRepair(opt) => self.handle_launch_repair(opt.clone()).await, AdminRpc::Stats(opt) => self.handle_stats(opt.clone()).await, - _ => Err(Error::BadRpc("Invalid RPC".to_string())), + m => Err(GarageError::unexpected_rpc_message(m).into()), } } } diff --git a/src/garage/cli/cmd.rs b/src/garage/cli/cmd.rs index 834261e4..cca7c401 100644 --- a/src/garage/cli/cmd.rs +++ b/src/garage/cli/cmd.rs @@ -6,6 +6,8 @@ use garage_rpc::layout::*; use garage_rpc::system::*; use garage_rpc::*; +use garage_model::helper::error::Error as HelperError; + use crate::admin::*; use crate::cli::*; @@ -14,14 +16,14 @@ pub async fn cli_command_dispatch( 
system_rpc_endpoint: &Endpoint, admin_rpc_endpoint: &Endpoint, rpc_host: NodeID, -) -> Result<(), Error> { +) -> Result<(), HelperError> { match cmd { - Command::Status => cmd_status(system_rpc_endpoint, rpc_host).await, + Command::Status => Ok(cmd_status(system_rpc_endpoint, rpc_host).await?), Command::Node(NodeOperation::Connect(connect_opt)) => { - cmd_connect(system_rpc_endpoint, rpc_host, connect_opt).await + Ok(cmd_connect(system_rpc_endpoint, rpc_host, connect_opt).await?) } Command::Layout(layout_opt) => { - cli_layout_command_dispatch(layout_opt, system_rpc_endpoint, rpc_host).await + Ok(cli_layout_command_dispatch(layout_opt, system_rpc_endpoint, rpc_host).await?) } Command::Bucket(bo) => { cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::BucketOperation(bo)).await @@ -149,7 +151,7 @@ pub async fn cmd_connect( println!("Success."); Ok(()) } - r => Err(Error::BadRpc(format!("Unexpected response: {:?}", r))), + m => Err(Error::unexpected_rpc_message(m)), } } @@ -157,7 +159,7 @@ pub async fn cmd_admin( rpc_cli: &Endpoint, rpc_host: NodeID, args: AdminRpc, -) -> Result<(), Error> { +) -> Result<(), HelperError> { match rpc_cli.call(&rpc_host, &args, PRIO_NORMAL).await?? 
{ AdminRpc::Ok(msg) => { println!("{}", msg); diff --git a/src/garage/main.rs b/src/garage/main.rs index 69cd16e7..60a13ac7 100644 --- a/src/garage/main.rs +++ b/src/garage/main.rs @@ -22,6 +22,8 @@ use garage_util::error::*; use garage_rpc::system::*; use garage_rpc::*; +use garage_model::helper::error::Error as HelperError; + use admin::*; use cli::*; @@ -136,5 +138,9 @@ async fn cli_command(opt: Opt) -> Result<(), Error> { let system_rpc_endpoint = netapp.endpoint::(SYSTEM_RPC_PATH.into()); let admin_rpc_endpoint = netapp.endpoint::(ADMIN_RPC_PATH.into()); - cli_command_dispatch(opt.cmd, &system_rpc_endpoint, &admin_rpc_endpoint, id).await + match cli_command_dispatch(opt.cmd, &system_rpc_endpoint, &admin_rpc_endpoint, id).await { + Err(HelperError::Internal(i)) => Err(i), + Err(HelperError::BadRequest(b)) => Err(Error::Message(format!("bad request: {}", b))), + Ok(x) => Ok(x), + } } diff --git a/src/model/Cargo.toml b/src/model/Cargo.toml index 03881f5d..14e49557 100644 --- a/src/model/Cargo.toml +++ b/src/model/Cargo.toml @@ -21,6 +21,7 @@ garage_model_050 = { package = "garage_model", version = "0.5.1" } async-trait = "0.1.7" arc-swap = "1.0" +err-derive = "0.3" hex = "0.4" log = "0.4" rand = "0.8" diff --git a/src/model/block.rs b/src/model/block.rs index 6df8e265..1173c7b3 100644 --- a/src/model/block.rs +++ b/src/model/block.rs @@ -594,10 +594,8 @@ impl BlockManager { need_nodes.push(*node); } } - _ => { - return Err(Error::Message( - "Unexpected response to NeedBlockQuery RPC".to_string(), - )); + m => { + return Err(Error::unexpected_rpc_message(m)); } } } @@ -730,7 +728,7 @@ impl EndpointHandler for BlockManager { BlockRpc::PutBlock { hash, data } => self.write_block(hash, data).await, BlockRpc::GetBlock(h) => self.read_block(h).await, BlockRpc::NeedBlockQuery(h) => self.need_block(h).await.map(BlockRpc::NeedBlockReply), - _ => Err(Error::BadRpc("Unexpected RPC message".to_string())), + m => Err(Error::unexpected_rpc_message(m)), } } } diff --git 
a/src/model/garage.rs b/src/model/garage.rs index 9db1843c..78b4433a 100644 --- a/src/model/garage.rs +++ b/src/model/garage.rs @@ -15,8 +15,8 @@ use garage_table::*; use crate::block::*; use crate::block_ref_table::*; use crate::bucket_alias_table::*; -use crate::bucket_helper::*; use crate::bucket_table::*; +use crate::helper; use crate::key_table::*; use crate::object_table::*; use crate::version_table::*; @@ -162,7 +162,7 @@ impl Garage { self.block_manager.garage.swap(None); } - pub fn bucket_helper(&self) -> BucketHelper { - BucketHelper(self) + pub fn bucket_helper(&self) -> helper::bucket::BucketHelper { + helper::bucket::BucketHelper(self) } } diff --git a/src/model/bucket_helper.rs b/src/model/helper/bucket.rs similarity index 91% rename from src/model/bucket_helper.rs rename to src/model/helper/bucket.rs index b55ebc4b..e89a723d 100644 --- a/src/model/bucket_helper.rs +++ b/src/model/helper/bucket.rs @@ -1,10 +1,9 @@ -use garage_util::data::*; -use garage_util::error::*; - use garage_table::util::EmptyKey; +use garage_util::data::*; use crate::bucket_table::Bucket; use crate::garage::Garage; +use crate::helper::error::*; pub struct BucketHelper<'a>(pub(crate) &'a Garage); @@ -52,12 +51,6 @@ impl<'a> BucketHelper<'a> { .get(&bucket_id, &EmptyKey) .await? 
.filter(|b| !b.is_deleted()) - .map(Ok) - .unwrap_or_else(|| { - Err(Error::BadRpc(format!( - "Bucket {:?} does not exist", - bucket_id - ))) - }) + .ok_or_bad_request(format!("Bucket {:?} does not exist", bucket_id)) } } diff --git a/src/model/helper/error.rs b/src/model/helper/error.rs new file mode 100644 index 00000000..b9b515f3 --- /dev/null +++ b/src/model/helper/error.rs @@ -0,0 +1,51 @@ +use err_derive::Error; +use serde::{Deserialize, Serialize}; + +use garage_util::error::Error as GarageError; + +#[derive(Debug, Error, Serialize, Deserialize)] +pub enum Error { + #[error(display = "Internal error: {}", _0)] + Internal(#[error(source)] GarageError), + + #[error(display = "Bad request: {}", _0)] + BadRequest(String), +} + +impl From for Error { + fn from(e: netapp::error::Error) -> Self { + Error::Internal(GarageError::Netapp(e)) + } +} + +pub trait OkOrBadRequest { + type S; + fn ok_or_bad_request>(self, reason: M) -> Result; +} + +impl OkOrBadRequest for Result +where + E: std::fmt::Display, +{ + type S = T; + fn ok_or_bad_request>(self, reason: M) -> Result { + match self { + Ok(x) => Ok(x), + Err(e) => Err(Error::BadRequest(format!( + "{}: {}", + reason.as_ref(), + e.to_string() + ))), + } + } +} + +impl OkOrBadRequest for Option { + type S = T; + fn ok_or_bad_request>(self, reason: M) -> Result { + match self { + Some(x) => Ok(x), + None => Err(Error::BadRequest(reason.as_ref().to_string())), + } + } +} diff --git a/src/model/helper/mod.rs b/src/model/helper/mod.rs new file mode 100644 index 00000000..2f4e8898 --- /dev/null +++ b/src/model/helper/mod.rs @@ -0,0 +1,2 @@ +pub mod bucket; +pub mod error; diff --git a/src/model/lib.rs b/src/model/lib.rs index e7d7e98b..9deaae9d 100644 --- a/src/model/lib.rs +++ b/src/model/lib.rs @@ -12,6 +12,6 @@ pub mod version_table; pub mod block; -pub mod bucket_helper; pub mod garage; +pub mod helper; pub mod migrate; diff --git a/src/rpc/system.rs b/src/rpc/system.rs index aa8947ea..6bca6e3e 100644 --- 
a/src/rpc/system.rs +++ b/src/rpc/system.rs @@ -576,7 +576,7 @@ impl EndpointHandler for System { self.clone().handle_advertise_cluster_layout(adv).await } SystemRpc::GetKnownNodes => Ok(self.handle_get_known_nodes()), - _ => Err(Error::BadRpc("Unexpected RPC message".to_string())), + m => Err(Error::unexpected_rpc_message(m)), } } } diff --git a/src/table/gc.rs b/src/table/gc.rs index 5cb8cb9b..8d0a5bef 100644 --- a/src/table/gc.rs +++ b/src/table/gc.rs @@ -315,7 +315,7 @@ where } Ok(GcRpc::Ok) } - _ => Err(Error::Message("Unexpected GC RPC".to_string())), + m => Err(Error::unexpected_rpc_message(m)), } } } diff --git a/src/table/sync.rs b/src/table/sync.rs index df9fb4d0..1df2b01d 100644 --- a/src/table/sync.rs +++ b/src/table/sync.rs @@ -514,10 +514,7 @@ where if let SyncRpc::Ok = rpc_resp { Ok(()) } else { - Err(Error::Message(format!( - "Unexpected response to RPC Update: {}", - debug_serialize(&rpc_resp) - ))) + Err(Error::unexpected_rpc_message(rpc_resp)) } } } @@ -545,7 +542,7 @@ where self.data.update_many(items)?; Ok(SyncRpc::Ok) } - _ => Err(Error::Message("Unexpected sync RPC".to_string())), + m => Err(Error::unexpected_rpc_message(m)), } } } diff --git a/src/table/table.rs b/src/table/table.rs index 396888c1..01789c11 100644 --- a/src/table/table.rs +++ b/src/table/table.rs @@ -311,7 +311,7 @@ where self.data.update_many(pairs)?; Ok(TableRpc::Ok) } - _ => Err(Error::BadRpc("Unexpected table RPC".to_string())), + m => Err(Error::unexpected_rpc_message(m)), } } } diff --git a/src/util/error.rs b/src/util/error.rs index ef5a76f2..bdb3a69b 100644 --- a/src/util/error.rs +++ b/src/util/error.rs @@ -59,8 +59,8 @@ pub enum Error { )] Quorum(usize, usize, usize, Vec), - #[error(display = "Bad RPC: {}", _0)] - BadRpc(String), + #[error(display = "Unexpected RPC message: {}", _0)] + UnexpectedRpcMessage(String), #[error(display = "Corrupt data: does not match hash {:?}", _0)] CorruptData(Hash), @@ -69,6 +69,12 @@ pub enum Error { Message(String), } +impl Error { 
+ pub fn unexpected_rpc_message(v: T) -> Self { + Self::UnexpectedRpcMessage(debug_serialize(&v)) + } +} + impl From> for Error { fn from(e: sled::transaction::TransactionError) -> Error { match e { -- 2.43.4 From 2140cd72054ac6e3a94cbe5931727159de20a97f Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Mon, 3 Jan 2022 15:06:19 +0100 Subject: [PATCH 13/19] Remove website redirects --- src/api/s3_website.rs | 18 +++++++++--------- src/garage/admin.rs | 2 +- src/model/bucket_table.rs | 12 +++--------- src/model/migrate.rs | 2 +- 4 files changed, 14 insertions(+), 20 deletions(-) diff --git a/src/api/s3_website.rs b/src/api/s3_website.rs index 1ea57577..8686b832 100644 --- a/src/api/s3_website.rs +++ b/src/api/s3_website.rs @@ -170,16 +170,16 @@ impl WebsiteConfiguration { } pub fn into_garage_website_config(self) -> Result { - if let Some(rart) = self.redirect_all_requests_to { - Ok(WebsiteConfig::RedirectAll { - hostname: rart.hostname.0, - protocol: rart - .protocol - .map(|x| x.0) - .unwrap_or_else(|| "http".to_string()), - }) + if self.redirect_all_requests_to.is_some() { + Err(Error::NotImplemented( + "S3 website redirects are not currently implemented in Garage.".into(), + )) + } else if self.routing_rules.map(|x| !x.is_empty()).unwrap_or(false) { + Err(Error::NotImplemented( + "S3 routing rules are not currently implemented in Garage.".into(), + )) } else { - Ok(WebsiteConfig::Website { + Ok(WebsiteConfig { index_document: self .index_document .map(|x| x.suffix.0) diff --git a/src/garage/admin.rs b/src/garage/admin.rs index 740114c6..9328f46e 100644 --- a/src/garage/admin.rs +++ b/src/garage/admin.rs @@ -541,7 +541,7 @@ impl AdminRpcHandler { } let website = if query.allow { - Some(WebsiteConfig::Website { + Some(WebsiteConfig { index_document: "index.html".into(), error_document: None, }) diff --git a/src/model/bucket_table.rs b/src/model/bucket_table.rs index 8dcf6913..fef29b62 100644 --- a/src/model/bucket_table.rs +++ b/src/model/bucket_table.rs @@ -43,15 
+43,9 @@ pub struct BucketParams { } #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] -pub enum WebsiteConfig { - RedirectAll { - hostname: String, - protocol: String, - }, - Website { - index_document: String, - error_document: Option, - }, +pub struct WebsiteConfig { + pub index_document: String, + pub error_document: Option, } impl BucketParams { diff --git a/src/model/migrate.rs b/src/model/migrate.rs index 6b20a01f..a5508c4d 100644 --- a/src/model/migrate.rs +++ b/src/model/migrate.rs @@ -70,7 +70,7 @@ impl Migrate { let alias_ts = aliases.get_timestamp(&new_name); let website = if *old_bucket_p.website.get() { - Some(WebsiteConfig::Website { + Some(WebsiteConfig { index_document: "index.html".into(), error_document: None, }) -- 2.43.4 From e59c23a69df116737c428ccbfbe4dfeff4d956d5 Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Mon, 3 Jan 2022 17:22:40 +0100 Subject: [PATCH 14/19] Refactor logic for setting/unsetting aliases --- src/garage/admin.rs | 482 +++++++++----------------------- src/model/bucket_alias_table.rs | 9 +- src/model/helper/bucket.rs | 386 ++++++++++++++++++++++++- src/model/migrate.rs | 91 +++--- src/model/permission.rs | 11 + 5 files changed, 568 insertions(+), 411 deletions(-) diff --git a/src/garage/admin.rs b/src/garage/admin.rs index 9328f46e..0c1e58f8 100644 --- a/src/garage/admin.rs +++ b/src/garage/admin.rs @@ -7,7 +7,7 @@ use serde::{Deserialize, Serialize}; use garage_util::crdt::*; use garage_util::data::*; -use garage_util::error::{Error as GarageError, OkOrMessage}; +use garage_util::error::Error as GarageError; use garage_util::time::*; use garage_table::replication::*; @@ -28,8 +28,6 @@ use crate::repair::Repair; pub const ADMIN_RPC_PATH: &str = "garage/admin_rpc.rs/Rpc"; -macro_rules! INVALID_BUCKET_NAME_MESSAGE { () => { "Invalid bucket name: {}. 
See AWS documentation for constraints on S3 bucket names:\nhttps://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html" }; } - #[derive(Debug, Serialize, Deserialize)] pub enum AdminRpc { BucketOperation(BucketOperation), @@ -134,47 +132,54 @@ impl AdminRpcHandler { #[allow(clippy::ptr_arg)] async fn handle_create_bucket(&self, name: &String) -> Result { - let mut bucket = Bucket::new(); - let alias = match self.garage.bucket_alias_table.get(&EmptyKey, name).await? { - Some(mut alias) => { - if !alias.state.get().is_deleted() { - return Err(Error::BadRequest(format!("Bucket {} already exists", name))); - } - alias.state.update(Deletable::Present(AliasParams { - bucket_id: bucket.id, - })); - alias + if !is_valid_bucket_name(name) { + return Err(Error::BadRequest(format!( + "{}: {}", + name, INVALID_BUCKET_NAME_MESSAGE + ))); + } + + if let Some(alias) = self.garage.bucket_alias_table.get(&EmptyKey, name).await? { + if !alias.state.get().is_deleted() { + return Err(Error::BadRequest(format!("Bucket {} already exists", name))); } - None => BucketAlias::new(name.clone(), bucket.id) - .ok_or_bad_request(format!(INVALID_BUCKET_NAME_MESSAGE!(), name))?, - }; - bucket.state.as_option_mut().unwrap().aliases.merge_raw( - name, - alias.state.timestamp(), - &true, - ); + } + + // ---- done checking, now commit ---- + + let bucket = Bucket::new(); self.garage.bucket_table.insert(&bucket).await?; - self.garage.bucket_alias_table.insert(&alias).await?; + + self.garage + .bucket_helper() + .set_global_bucket_alias(bucket.id, name) + .await?; + Ok(AdminRpc::Ok(format!("Bucket {} was created.", name))) } async fn handle_delete_bucket(&self, query: &DeleteBucketOpt) -> Result { - let mut bucket_alias = self + let helper = self.garage.bucket_helper(); + + let bucket_id = helper + .resolve_global_bucket_name(&query.name) + .await? 
+ .ok_or_bad_request("Bucket not found")?; + + // Get the alias, but keep in mind here the bucket name + // given in parameter can also be directly the bucket's ID. + // In that case bucket_alias will be None, and + // we can still delete the bucket if it has zero aliases + // (a condition which we try to prevent but that could still happen somehow). + // We just won't try to delete an alias entry because there isn't one. + let bucket_alias = self + .garage + .bucket_alias_table + .get(&EmptyKey, &query.name) - .await? - .filter(|a| !a.is_deleted()) - .ok_or_bad_request(format!("Bucket {} does not exist", query.name))?; - - let bucket_id = bucket_alias.state.get().as_option().unwrap().bucket_id; + .await?; // Check bucket doesn't have other aliases - let mut bucket = self - .garage - .bucket_helper() - .get_existing_bucket(bucket_id) - .await?; + let mut bucket = helper.get_existing_bucket(bucket_id).await?; let bucket_state = bucket.state.as_option().unwrap(); if bucket_state .aliases @@ -216,18 +221,18 @@ impl AdminRpcHandler { // --- done checking, now commit --- // 1. delete authorization from keys that had access for (key_id, _) in bucket.authorized_keys() { - if let Some(key) = self.garage.key_table.get(&EmptyKey, key_id).await? { - if !key.state.is_deleted() { - self.update_key_bucket(&key, bucket.id, false, false, false) - .await?; - } - } else { - return Err(Error::BadRequest(format!("Key not found: {}", key_id))); - } + helper + .set_bucket_key_permissions(bucket.id, key_id, BucketKeyPerm::no_permissions()) + .await?; } + // 2. delete bucket alias + if bucket_alias.is_some() { + helper + .unset_global_bucket_alias(bucket_id, &query.name) + .await?; + } + // 3. 
delete bucket bucket.state = Deletable::delete(); self.garage.bucket_table.insert(&bucket).await?; @@ -236,125 +241,39 @@ impl AdminRpcHandler { } async fn handle_alias_bucket(&self, query: &AliasBucketOpt) -> Result { - let bucket_id = self - .garage - .bucket_helper() + let helper = self.garage.bucket_helper(); + + let bucket_id = helper .resolve_global_bucket_name(&query.existing_bucket) .await? .ok_or_bad_request("Bucket not found")?; - let mut bucket = self - .garage - .bucket_helper() - .get_existing_bucket(bucket_id) - .await?; - if let Some(key_local) = &query.local { - let mut key = self.get_existing_key(key_local).await?; - let mut key_param = key.state.as_option_mut().unwrap(); - - if let Some(Deletable::Present(existing_alias)) = - key_param.local_aliases.get(&query.new_name) - { - if *existing_alias == bucket_id { - return Ok(AdminRpc::Ok(format!( - "Alias {} already points to bucket {:?} in namespace of key {}", - query.new_name, bucket_id, key.key_id - ))); - } else { - return Err(Error::BadRequest(format!("Alias {} already exists and points to different bucket: {:?} in namespace of key {}", query.new_name, existing_alias, key.key_id))); - } - } - - if !is_valid_bucket_name(&query.new_name) { - return Err(Error::BadRequest(format!( - INVALID_BUCKET_NAME_MESSAGE!(), - query.new_name - ))); - } - - // Checks ok, add alias - let mut bucket_p = bucket.state.as_option_mut().unwrap(); - let bucket_p_local_alias_key = (key.key_id.clone(), query.new_name.clone()); - - // Calculate the timestamp to assign to this aliasing in the two local_aliases maps - // (the one from key to bucket, and the reverse one stored in the bucket iself) - // so that merges on both maps in case of a concurrent operation resolve - // to the same alias being set - let alias_ts = increment_logical_clock_2( - key_param.local_aliases.get_timestamp(&query.new_name), - bucket_p - .local_aliases - .get_timestamp(&bucket_p_local_alias_key), - ); - - key_param.local_aliases = 
LwwMap::raw_item( - query.new_name.clone(), - alias_ts, - Deletable::present(bucket_id), - ); - self.garage.key_table.insert(&key).await?; - - bucket_p.local_aliases = LwwMap::raw_item(bucket_p_local_alias_key, alias_ts, true); - self.garage.bucket_table.insert(&bucket).await?; + if let Some(key_pattern) = &query.local { + let key = helper.get_existing_matching_key(key_pattern).await?; + helper + .set_local_bucket_alias(bucket_id, &key.key_id, &query.new_name) + .await?; Ok(AdminRpc::Ok(format!( - "Alias {} created to bucket {:?} in namespace of key {}", + "Alias {} now points to bucket {:?} in namespace of key {}", query.new_name, bucket_id, key.key_id ))) } else { - let alias = self - .garage - .bucket_alias_table - .get(&EmptyKey, &query.new_name) + helper + .set_global_bucket_alias(bucket_id, &query.new_name) .await?; - - if let Some(existing_alias) = alias.as_ref() { - if let Some(p) = existing_alias.state.get().as_option() { - if p.bucket_id == bucket_id { - return Ok(AdminRpc::Ok(format!( - "Alias {} already points to bucket {:?}", - query.new_name, bucket_id - ))); - } else { - return Err(Error::BadRequest(format!( - "Alias {} already exists and points to different bucket: {:?}", - query.new_name, p.bucket_id - ))); - } - } - } - - // Checks ok, add alias - let mut bucket_p = bucket.state.as_option_mut().unwrap(); - - let alias_ts = increment_logical_clock_2( - bucket_p.aliases.get_timestamp(&query.new_name), - alias.as_ref().map(|a| a.state.timestamp()).unwrap_or(0), - ); - - let alias = match alias { - None => BucketAlias::new(query.new_name.clone(), bucket_id) - .ok_or_bad_request(format!(INVALID_BUCKET_NAME_MESSAGE!(), query.new_name))?, - Some(mut a) => { - a.state = Lww::raw(alias_ts, Deletable::present(AliasParams { bucket_id })); - a - } - }; - self.garage.bucket_alias_table.insert(&alias).await?; - - bucket_p.aliases = LwwMap::raw_item(query.new_name.clone(), alias_ts, true); - self.garage.bucket_table.insert(&bucket).await?; - 
Ok(AdminRpc::Ok(format!( - "Alias {} created to bucket {:?}", + "Alias {} now points to bucket {:?}", query.new_name, bucket_id ))) } } async fn handle_unalias_bucket(&self, query: &UnaliasBucketOpt) -> Result { - if let Some(key_local) = &query.local { - let mut key = self.get_existing_key(key_local).await?; + let helper = self.garage.bucket_helper(); + + if let Some(key_pattern) = &query.local { + let key = helper.get_existing_matching_key(key_pattern).await?; let bucket_id = key .state @@ -365,122 +284,56 @@ impl AdminRpcHandler { .map(|a| a.into_option()) .flatten() .ok_or_bad_request("Bucket not found")?; - let mut bucket = self - .garage - .bucket_helper() - .get_existing_bucket(bucket_id) + + helper + .unset_local_bucket_alias(bucket_id, &key.key_id, &query.name) .await?; - let mut bucket_p = bucket.state.as_option_mut().unwrap(); - - let has_other_aliases = bucket_p - .aliases - .items() - .iter() - .any(|(_, _, active)| *active) - || bucket_p - .local_aliases - .items() - .iter() - .any(|((k, n), _, active)| *k == key.key_id && *n == query.name && *active); - if !has_other_aliases { - return Err(Error::BadRequest(format!("Bucket {} doesn't have other aliases, please delete it instead of just unaliasing.", query.name))); - } - - // Checks ok, remove alias - let mut key_param = key.state.as_option_mut().unwrap(); - let bucket_p_local_alias_key = (key.key_id.clone(), query.name.clone()); - - let alias_ts = increment_logical_clock_2( - key_param.local_aliases.get_timestamp(&query.name), - bucket_p - .local_aliases - .get_timestamp(&bucket_p_local_alias_key), - ); - - key_param.local_aliases = - LwwMap::raw_item(query.name.clone(), alias_ts, Deletable::delete()); - self.garage.key_table.insert(&key).await?; - - bucket_p.local_aliases = LwwMap::raw_item(bucket_p_local_alias_key, alias_ts, false); - self.garage.bucket_table.insert(&bucket).await?; Ok(AdminRpc::Ok(format!( - "Bucket alias {} deleted from namespace of key {}", - query.name, key.key_id + "Alias {} 
no longer points to bucket {:?} in namespace of key {}", + &query.name, bucket_id, key.key_id ))) } else { - let bucket_id = self - .garage - .bucket_helper() + let bucket_id = helper .resolve_global_bucket_name(&query.name) .await? .ok_or_bad_request("Bucket not found")?; - let mut bucket = self - .garage - .bucket_helper() - .get_existing_bucket(bucket_id) + + helper + .unset_global_bucket_alias(bucket_id, &query.name) .await?; - let mut bucket_state = bucket.state.as_option_mut().unwrap(); - let has_other_aliases = bucket_state - .aliases - .items() - .iter() - .any(|(name, _, active)| *name != query.name && *active) - || bucket_state - .local_aliases - .items() - .iter() - .any(|(_, _, active)| *active); - if !has_other_aliases { - return Err(Error::BadRequest(format!("Bucket {} doesn't have other aliases, please delete it instead of just unaliasing.", query.name))); - } - - let mut alias = self - .garage - .bucket_alias_table - .get(&EmptyKey, &query.name) - .await? - .ok_or_message("Internal error: alias not found")?; - - // Checks ok, remove alias - let alias_ts = increment_logical_clock_2( - alias.state.timestamp(), - bucket_state.aliases.get_timestamp(&query.name), - ); - - alias.state = Lww::raw(alias_ts, Deletable::delete()); - self.garage.bucket_alias_table.insert(&alias).await?; - - bucket_state.aliases = LwwMap::raw_item(query.name.clone(), alias_ts, false); - self.garage.bucket_table.insert(&bucket).await?; - - Ok(AdminRpc::Ok(format!("Bucket alias {} deleted", query.name))) + Ok(AdminRpc::Ok(format!( + "Alias {} no longer points to bucket {:?}", + &query.name, bucket_id + ))) } } async fn handle_bucket_allow(&self, query: &PermBucketOpt) -> Result { - let bucket_id = self - .garage - .bucket_helper() + let helper = self.garage.bucket_helper(); + + let bucket_id = helper .resolve_global_bucket_name(&query.bucket) .await? 
.ok_or_bad_request("Bucket not found")?; - let bucket = self - .garage - .bucket_helper() - .get_existing_bucket(bucket_id) - .await?; - let key = self.get_existing_key(&query.key_pattern).await?; + let key = helper.get_existing_matching_key(&query.key_pattern).await?; let allow_read = query.read || key.allow_read(&bucket_id); let allow_write = query.write || key.allow_write(&bucket_id); let allow_owner = query.owner || key.allow_owner(&bucket_id); - let new_perm = self - .update_key_bucket(&key, bucket_id, allow_read, allow_write, allow_owner) - .await?; - self.update_bucket_key(bucket, &key.key_id, new_perm) + helper + .set_bucket_key_permissions( + bucket_id, + &key.key_id, + BucketKeyPerm { + timestamp: now_msec(), + allow_read, + allow_write, + allow_owner, + }, + ) .await?; Ok(AdminRpc::Ok(format!( @@ -490,27 +343,29 @@ impl AdminRpcHandler { } async fn handle_bucket_deny(&self, query: &PermBucketOpt) -> Result { - let bucket_id = self - .garage - .bucket_helper() + let helper = self.garage.bucket_helper(); + + let bucket_id = helper .resolve_global_bucket_name(&query.bucket) .await? 
.ok_or_bad_request("Bucket not found")?; - let bucket = self - .garage - .bucket_helper() - .get_existing_bucket(bucket_id) - .await?; - let key = self.get_existing_key(&query.key_pattern).await?; + let key = helper.get_existing_matching_key(&query.key_pattern).await?; let allow_read = !query.read && key.allow_read(&bucket_id); let allow_write = !query.write && key.allow_write(&bucket_id); let allow_owner = !query.owner && key.allow_owner(&bucket_id); - let new_perm = self - .update_key_bucket(&key, bucket_id, allow_read, allow_write, allow_owner) - .await?; - self.update_bucket_key(bucket, &key.key_id, new_perm) + helper + .set_bucket_key_permissions( + bucket_id, + &key.key_id, + BucketKeyPerm { + timestamp: now_msec(), + allow_read, + allow_write, + allow_owner, + }, + ) .await?; Ok(AdminRpc::Ok(format!( @@ -590,7 +445,11 @@ impl AdminRpcHandler { } async fn handle_key_info(&self, query: &KeyOpt) -> Result { - let key = self.get_existing_key(&query.key_pattern).await?; + let key = self + .garage + .bucket_helper() + .get_existing_matching_key(&query.key_pattern) + .await?; self.key_info_result(key).await } @@ -601,54 +460,43 @@ impl AdminRpcHandler { } async fn handle_rename_key(&self, query: &KeyRenameOpt) -> Result { - let mut key = self.get_existing_key(&query.key_pattern).await?; + let mut key = self + .garage + .bucket_helper() + .get_existing_matching_key(&query.key_pattern) + .await?; key.name.update(query.new_name.clone()); self.garage.key_table.insert(&key).await?; self.key_info_result(key).await } async fn handle_delete_key(&self, query: &KeyDeleteOpt) -> Result { - let mut key = self.get_existing_key(&query.key_pattern).await?; + let helper = self.garage.bucket_helper(); + + let mut key = helper.get_existing_matching_key(&query.key_pattern).await?; + if !query.yes { return Err(Error::BadRequest( "Add --yes flag to really perform this operation".to_string(), )); } + let state = key.state.as_option_mut().unwrap(); // --- done checking, now commit --- // 
1. Delete local aliases for (alias, _, to) in state.local_aliases.items().iter() { if let Deletable::Present(bucket_id) = to { - if let Some(mut bucket) = self.garage.bucket_table.get(bucket_id, &EmptyKey).await? - { - if let Deletable::Present(bucket_state) = &mut bucket.state { - bucket_state.local_aliases = bucket_state - .local_aliases - .update_mutator((key.key_id.to_string(), alias.to_string()), false); - self.garage.bucket_table.insert(&bucket).await?; - } - } else { - // ignore - } + helper + .unset_local_bucket_alias(*bucket_id, &key.key_id, alias) + .await?; } } // 2. Delete authorized buckets - for (ab_id, auth) in state.authorized_buckets.items().iter() { - if let Some(bucket) = self.garage.bucket_table.get(ab_id, &EmptyKey).await? { - let new_perm = BucketKeyPerm { - timestamp: increment_logical_clock(auth.timestamp), - allow_read: false, - allow_write: false, - allow_owner: false, - }; - if !bucket.is_deleted() { - self.update_bucket_key(bucket, &key.key_id, new_perm) - .await?; - } - } else { - // ignore - } + for (ab_id, _auth) in state.authorized_buckets.items().iter() { + helper + .set_bucket_key_permissions(*ab_id, &key.key_id, BucketKeyPerm::no_permissions()) + .await?; } // 3. Actually delete key key.state = Deletable::delete(); @@ -671,30 +519,6 @@ impl AdminRpcHandler { self.key_info_result(imported_key).await } - async fn get_existing_key(&self, pattern: &str) -> Result { - let candidates = self - .garage - .key_table - .get_range( - &EmptyKey, - None, - Some(KeyFilter::Matches(pattern.to_string())), - 10, - ) - .await? 
- .into_iter() - .filter(|k| !k.state.is_deleted()) - .collect::>(); - if candidates.len() != 1 { - Err(Error::BadRequest(format!( - "{} matching keys", - candidates.len() - ))) - } else { - Ok(candidates.into_iter().next().unwrap()) - } - } - async fn key_info_result(&self, key: Key) -> Result { let mut relevant_buckets = HashMap::new(); @@ -714,54 +538,6 @@ impl AdminRpcHandler { Ok(AdminRpc::KeyInfo(key, relevant_buckets)) } - /// Update **key table** to inform of the new linked bucket - async fn update_key_bucket( - &self, - key: &Key, - bucket_id: Uuid, - allow_read: bool, - allow_write: bool, - allow_owner: bool, - ) -> Result { - let mut key = key.clone(); - let mut key_state = key.state.as_option_mut().unwrap(); - - let perm = key_state - .authorized_buckets - .get(&bucket_id) - .cloned() - .map(|old_perm| BucketKeyPerm { - timestamp: increment_logical_clock(old_perm.timestamp), - allow_read, - allow_write, - allow_owner, - }) - .unwrap_or(BucketKeyPerm { - timestamp: now_msec(), - allow_read, - allow_write, - allow_owner, - }); - - key_state.authorized_buckets = Map::put_mutator(bucket_id, perm); - - self.garage.key_table.insert(&key).await?; - Ok(perm) - } - - /// Update **bucket table** to inform of the new linked key - async fn update_bucket_key( - &self, - mut bucket: Bucket, - key_id: &str, - new_perm: BucketKeyPerm, - ) -> Result<(), Error> { - bucket.state.as_option_mut().unwrap().authorized_keys = - Map::put_mutator(key_id.to_string(), new_perm); - self.garage.bucket_table.insert(&bucket).await?; - Ok(()) - } - async fn handle_migrate(self: &Arc, opt: MigrateOpt) -> Result { if !opt.yes { return Err(Error::BadRequest( diff --git a/src/model/bucket_alias_table.rs b/src/model/bucket_alias_table.rs index caae76f1..45807178 100644 --- a/src/model/bucket_alias_table.rs +++ b/src/model/bucket_alias_table.rs @@ -1,7 +1,6 @@ use serde::{Deserialize, Serialize}; use garage_util::data::*; -use garage_util::time::*; use garage_table::crdt::*; use 
garage_table::*; @@ -24,10 +23,7 @@ impl AutoCrdt for AliasParams { } impl BucketAlias { - pub fn new(name: String, bucket_id: Uuid) -> Option { - Self::raw(name, now_msec(), bucket_id) - } - pub fn raw(name: String, ts: u64, bucket_id: Uuid) -> Option { + pub fn new(name: String, ts: u64, bucket_id: Uuid) -> Option { if !is_valid_bucket_name(&name) { None } else { @@ -101,3 +97,6 @@ pub fn is_valid_bucket_name(n: &str) -> bool { // Bucket names must not end with "-s3alias" && !n.ends_with("-s3alias") } + +/// Error message to return for invalid bucket names +pub const INVALID_BUCKET_NAME_MESSAGE: &str = "Invalid bucket name. See AWS documentation for constraints on S3 bucket names:\nhttps://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html"; diff --git a/src/model/helper/bucket.rs b/src/model/helper/bucket.rs index e89a723d..bb4b5b24 100644 --- a/src/model/helper/bucket.rs +++ b/src/model/helper/bucket.rs @@ -1,14 +1,20 @@ use garage_table::util::EmptyKey; +use garage_util::crdt::*; use garage_util::data::*; +use garage_util::error::{Error as GarageError, OkOrMessage}; +use garage_util::time::*; -use crate::bucket_table::Bucket; +use crate::bucket_alias_table::*; +use crate::bucket_table::*; use crate::garage::Garage; use crate::helper::error::*; +use crate::key_table::{Key, KeyFilter}; +use crate::permission::BucketKeyPerm; pub struct BucketHelper<'a>(pub(crate) &'a Garage); +#[allow(clippy::ptr_arg)] impl<'a> BucketHelper<'a> { - #[allow(clippy::ptr_arg)] pub async fn resolve_global_bucket_name( &self, bucket_name: &String, @@ -45,12 +51,386 @@ impl<'a> BucketHelper<'a> { } } + /// Returns a Bucket if it is present in bucket table, + /// even if it is in deleted state. Querying a non-existing + /// bucket ID returns an internal error. + pub async fn get_internal_bucket(&self, bucket_id: Uuid) -> Result { + Ok(self + .0 + .bucket_table + .get(&bucket_id, &EmptyKey) + .await? + .ok_or_message(format!("Bucket {:?} does not exist", bucket_id))?) 
+ } + + /// Returns a Bucket if it is present in bucket table, + /// only if it is in non-deleted state. + /// Querying a non-existing bucket ID or a deleted bucket + /// returns a bad request error. pub async fn get_existing_bucket(&self, bucket_id: Uuid) -> Result { self.0 .bucket_table .get(&bucket_id, &EmptyKey) .await? .filter(|b| !b.is_deleted()) - .ok_or_bad_request(format!("Bucket {:?} does not exist", bucket_id)) + .ok_or_bad_request(format!( + "Bucket {:?} does not exist or has been deleted", + bucket_id + )) + } + + /// Returns a Key if it is present in key table, + /// even if it is in deleted state. Querying a non-existing + /// key ID returns an internal error. + pub async fn get_internal_key(&self, key_id: &String) -> Result { + Ok(self + .0 + .key_table + .get(&EmptyKey, key_id) + .await? + .ok_or_message(format!("Key {} does not exist", key_id))?) + } + + /// Returns a Key if it is present in key table, + /// only if it is in non-deleted state. + /// Querying a non-existing key ID or a deleted key + /// returns a bad request error. + pub async fn get_existing_key(&self, key_id: &String) -> Result { + self.0 + .key_table + .get(&EmptyKey, key_id) + .await? + .filter(|b| !b.state.is_deleted()) + .ok_or_bad_request(format!("Key {} does not exist or has been deleted", key_id)) + } + + /// Returns a Key if it is present in key table, + /// looking it up by key ID or by a match on its name, + /// only if it is in non-deleted state. + /// Querying a non-existing key ID or a deleted key + /// returns a bad request error. + pub async fn get_existing_matching_key(&self, pattern: &str) -> Result { + let candidates = self + .0 + .key_table + .get_range( + &EmptyKey, + None, + Some(KeyFilter::Matches(pattern.to_string())), + 10, + ) + .await? 
+ .into_iter() + .filter(|k| !k.state.is_deleted()) + .collect::>(); + if candidates.len() != 1 { + Err(Error::BadRequest(format!( + "{} matching keys", + candidates.len() + ))) + } else { + Ok(candidates.into_iter().next().unwrap()) + } + } + + /// Sets a new alias for a bucket in global namespace. + /// This function fails if: + /// - alias name is not valid according to S3 spec + /// - bucket does not exist or is deleted + /// - alias already exists and points to another bucket + pub async fn set_global_bucket_alias( + &self, + bucket_id: Uuid, + alias_name: &String, + ) -> Result<(), Error> { + if !is_valid_bucket_name(alias_name) { + return Err(Error::BadRequest(format!( + "{}: {}", + alias_name, INVALID_BUCKET_NAME_MESSAGE + ))); + } + + let mut bucket = self.get_existing_bucket(bucket_id).await?; + + let alias = self.0.bucket_alias_table.get(&EmptyKey, alias_name).await?; + + if let Some(existing_alias) = alias.as_ref() { + if let Some(p) = existing_alias.state.get().as_option() { + if p.bucket_id != bucket_id { + return Err(Error::BadRequest(format!( + "Alias {} already exists and points to different bucket: {:?}", + alias_name, p.bucket_id + ))); + } + } + } + + // Checks ok, add alias + let mut bucket_p = bucket.state.as_option_mut().unwrap(); + + let alias_ts = increment_logical_clock_2( + bucket_p.aliases.get_timestamp(alias_name), + alias.as_ref().map(|a| a.state.timestamp()).unwrap_or(0), + ); + + // ---- timestamp-ensured causality barrier ---- + // writes are now done and all writes use timestamp alias_ts + + let alias = match alias { + None => BucketAlias::new(alias_name.clone(), alias_ts, bucket_id) + .ok_or_bad_request(format!("{}: {}", alias_name, INVALID_BUCKET_NAME_MESSAGE))?, + Some(mut a) => { + a.state = Lww::raw(alias_ts, Deletable::present(AliasParams { bucket_id })); + a + } + }; + self.0.bucket_alias_table.insert(&alias).await?; + + bucket_p.aliases = LwwMap::raw_item(alias_name.clone(), alias_ts, true); + 
self.0.bucket_table.insert(&bucket).await?; + + Ok(()) + } + + /// Unsets an alias for a bucket in global namespace. + /// This function fails if: + /// - bucket does not exist or is deleted + /// - alias does not exist or maps to another bucket (-> internal error) + /// - bucket has no other aliases (global or local) + pub async fn unset_global_bucket_alias( + &self, + bucket_id: Uuid, + alias_name: &String, + ) -> Result<(), Error> { + let mut bucket = self.get_existing_bucket(bucket_id).await?; + let mut bucket_state = bucket.state.as_option_mut().unwrap(); + + let mut alias = self + .0 + .bucket_alias_table + .get(&EmptyKey, alias_name) + .await? + .filter(|a| { + a.state + .get() + .as_option() + .map(|x| x.bucket_id == bucket_id) + .unwrap_or(false) + }) + .ok_or_message(format!( + "Internal error: alias not found or does not point to bucket {:?}", + bucket_id + ))?; + + let has_other_global_aliases = bucket_state + .aliases + .items() + .iter() + .any(|(name, _, active)| name != alias_name && *active); + let has_other_local_aliases = bucket_state + .local_aliases + .items() + .iter() + .any(|(_, _, active)| *active); + if !has_other_global_aliases && !has_other_local_aliases { + return Err(Error::BadRequest(format!("Bucket {} doesn't have other aliases, please delete it instead of just unaliasing.", alias_name))); + } + + // Checks ok, remove alias + let alias_ts = increment_logical_clock_2( + alias.state.timestamp(), + bucket_state.aliases.get_timestamp(alias_name), + ); + + // ---- timestamp-ensured causality barrier ---- + // writes are now done and all writes use timestamp alias_ts + + alias.state = Lww::raw(alias_ts, Deletable::delete()); + self.0.bucket_alias_table.insert(&alias).await?; + + bucket_state.aliases = LwwMap::raw_item(alias_name.clone(), alias_ts, false); + self.0.bucket_table.insert(&bucket).await?; + + Ok(()) + } + + /// Sets a new alias for a bucket in the local namespace of a key. 
+ /// This function fails if: + /// - alias name is not valid according to S3 spec + /// - bucket does not exist or is deleted + /// - key does not exist or is deleted + /// - alias already exists and points to another bucket + pub async fn set_local_bucket_alias( + &self, + bucket_id: Uuid, + key_id: &String, + alias_name: &String, + ) -> Result<(), Error> { + if !is_valid_bucket_name(alias_name) { + return Err(Error::BadRequest(format!( + "{}: {}", + alias_name, INVALID_BUCKET_NAME_MESSAGE + ))); + } + + let mut bucket = self.get_existing_bucket(bucket_id).await?; + let mut key = self.get_existing_key(key_id).await?; + + let mut key_param = key.state.as_option_mut().unwrap(); + + if let Some(Deletable::Present(existing_alias)) = key_param.local_aliases.get(alias_name) { + if *existing_alias != bucket_id { + return Err(Error::BadRequest(format!("Alias {} already exists in namespace of key {} and points to different bucket: {:?}", alias_name, key.key_id, existing_alias))); + } + } + + // Checks ok, add alias + let mut bucket_p = bucket.state.as_option_mut().unwrap(); + let bucket_p_local_alias_key = (key.key_id.clone(), alias_name.clone()); + + // Calculate the timestamp to assign to this aliasing in the two local_aliases maps + // (the one from key to bucket, and the reverse one stored in the bucket itself) + // so that merges on both maps in case of a concurrent operation resolve + // to the same alias being set + let alias_ts = increment_logical_clock_2( + key_param.local_aliases.get_timestamp(alias_name), + bucket_p + .local_aliases + .get_timestamp(&bucket_p_local_alias_key), + ); + + // ---- timestamp-ensured causality barrier ---- + // writes are now done and all writes use timestamp alias_ts + + key_param.local_aliases = + LwwMap::raw_item(alias_name.clone(), alias_ts, Deletable::present(bucket_id)); + self.0.key_table.insert(&key).await?; + + bucket_p.local_aliases = LwwMap::raw_item(bucket_p_local_alias_key, alias_ts, true); + 
self.0.bucket_table.insert(&bucket).await?; + + Ok(()) + } + + /// Unsets an alias for a bucket in the local namespace of a key. + /// This function fails if: + /// - bucket does not exist or is deleted + /// - key does not exist or is deleted + /// - alias does not exist or maps to another bucket (-> internal error) + /// - bucket has no other aliases (global or local) + pub async fn unset_local_bucket_alias( + &self, + bucket_id: Uuid, + key_id: &String, + alias_name: &String, + ) -> Result<(), Error> { + let mut bucket = self.get_existing_bucket(bucket_id).await?; + let mut key = self.get_existing_key(key_id).await?; + + let mut bucket_p = bucket.state.as_option_mut().unwrap(); + + if key + .state + .as_option() + .unwrap() + .local_aliases + .get(alias_name) + .map(|x| x.as_option()) + .flatten() != Some(&bucket_id) + { + return Err(GarageError::Message(format!( + "Bucket {:?} does not have alias {} in namespace of key {}", + bucket_id, alias_name, key_id + )) + .into()); + } + + let has_other_global_aliases = bucket_p + .aliases + .items() + .iter() + .any(|(_, _, active)| *active); + let has_other_local_aliases = bucket_p + .local_aliases + .items() + .iter() + .any(|((k, n), _, active)| *k == key.key_id && n == alias_name && *active); + if !has_other_global_aliases && !has_other_local_aliases { + return Err(Error::BadRequest(format!("Bucket {} doesn't have other aliases, please delete it instead of just unaliasing.", alias_name))); + } + + // Checks ok, remove alias + let mut key_param = key.state.as_option_mut().unwrap(); + let bucket_p_local_alias_key = (key.key_id.clone(), alias_name.clone()); + + let alias_ts = increment_logical_clock_2( + key_param.local_aliases.get_timestamp(alias_name), + bucket_p + .local_aliases + .get_timestamp(&bucket_p_local_alias_key), + ); + + // ---- timestamp-ensured causality barrier ---- + // writes are now done and all writes use timestamp alias_ts + + key_param.local_aliases = + LwwMap::raw_item(alias_name.clone(), 
alias_ts, Deletable::delete()); + self.0.key_table.insert(&key).await?; + + bucket_p.local_aliases = LwwMap::raw_item(bucket_p_local_alias_key, alias_ts, false); + self.0.bucket_table.insert(&bucket).await?; + + Ok(()) + } + + /// Sets permissions for a key on a bucket. + /// This function fails if: + /// - bucket or key cannot be found at all (its ok if they are in deleted state) + /// - bucket or key is in deleted state and we are trying to set permissions other than "deny + /// all" + pub async fn set_bucket_key_permissions( + &self, + bucket_id: Uuid, + key_id: &String, + mut perm: BucketKeyPerm, + ) -> Result<(), Error> { + let mut bucket = self.get_internal_bucket(bucket_id).await?; + let mut key = self.get_internal_key(key_id).await?; + + let allow_any = perm.allow_read || perm.allow_write || perm.allow_owner; + + if let Some(bstate) = bucket.state.as_option() { + if let Some(kp) = bstate.authorized_keys.get(key_id) { + perm.timestamp = increment_logical_clock_2(perm.timestamp, kp.timestamp); + } + } else if allow_any { + return Err(Error::BadRequest( + "Trying to give permissions on a deleted bucket".into(), + )); + } + + if let Some(kstate) = key.state.as_option() { + if let Some(bp) = kstate.authorized_buckets.get(&bucket_id) { + perm.timestamp = increment_logical_clock_2(perm.timestamp, bp.timestamp); + } + } else if allow_any { + return Err(Error::BadRequest( + "Trying to give permissions to a deleted key".into(), + )); + } + + // ---- timestamp-ensured causality barrier ---- + + if let Some(bstate) = bucket.state.as_option_mut() { + bstate.authorized_keys = Map::put_mutator(key_id.clone(), perm); + self.0.bucket_table.insert(&bucket).await?; + } + + if let Some(kstate) = key.state.as_option_mut() { + kstate.authorized_buckets = Map::put_mutator(bucket_id, perm); + self.0.key_table.insert(&key).await?; + } + + Ok(()) } } diff --git a/src/model/migrate.rs b/src/model/migrate.rs index a5508c4d..65140c4b 100644 --- a/src/model/migrate.rs +++ 
b/src/model/migrate.rs @@ -1,9 +1,8 @@ use std::sync::Arc; -use garage_table::util::EmptyKey; use garage_util::crdt::*; use garage_util::data::*; -use garage_util::error::*; +use garage_util::error::Error as GarageError; use garage_util::time::*; use garage_model_050::bucket_table as old_bucket; @@ -11,6 +10,7 @@ use garage_model_050::bucket_table as old_bucket; use crate::bucket_alias_table::*; use crate::bucket_table::*; use crate::garage::Garage; +use crate::helper::error::*; use crate::permission::*; pub struct Migrate { @@ -19,11 +19,16 @@ pub struct Migrate { impl Migrate { pub async fn migrate_buckets050(&self) -> Result<(), Error> { - let tree = self.garage.db.open_tree("bucket:table")?; + let tree = self + .garage + .db + .open_tree("bucket:table") + .map_err(GarageError::from)?; for res in tree.iter() { - let (_k, v) = res?; - let bucket = rmp_serde::decode::from_read_ref::<_, old_bucket::Bucket>(&v[..])?; + let (_k, v) = res.map_err(GarageError::from)?; + let bucket = rmp_serde::decode::from_read_ref::<_, old_bucket::Bucket>(&v[..]) + .map_err(GarageError::from)?; if let old_bucket::BucketState::Present(p) = bucket.state.get() { self.migrate_buckets050_do_bucket(&bucket, p).await?; @@ -48,27 +53,6 @@ impl Migrate { hex::encode(&bucket_id.as_slice()[..16]) }; - let new_ak = old_bucket_p - .authorized_keys - .items() - .iter() - .map(|(k, ts, perm)| { - ( - k.to_string(), - BucketKeyPerm { - timestamp: *ts, - allow_read: perm.allow_read, - allow_write: perm.allow_write, - allow_owner: false, - }, - ) - }) - .collect::>(); - - let mut aliases = LwwMap::new(); - aliases.update_in_place(new_name.clone(), true); - let alias_ts = aliases.get_timestamp(&new_name); - let website = if *old_bucket_p.website.get() { Some(WebsiteConfig { index_document: "index.html".into(), @@ -78,32 +62,39 @@ impl Migrate { None }; - let new_bucket = Bucket { - id: bucket_id, - state: Deletable::Present(BucketParams { - creation_date: now_msec(), - authorized_keys: new_ak.clone(), - 
website_config: Lww::new(website), - aliases, - local_aliases: LwwMap::new(), - }), - }; - self.garage.bucket_table.insert(&new_bucket).await?; + self.garage + .bucket_table + .insert(&Bucket { + id: bucket_id, + state: Deletable::Present(BucketParams { + creation_date: now_msec(), + authorized_keys: Map::new(), + website_config: Lww::new(website), + aliases: LwwMap::new(), + local_aliases: LwwMap::new(), + }), + }) + .await?; - let new_alias = BucketAlias::raw(new_name.clone(), alias_ts, new_bucket.id).unwrap(); - self.garage.bucket_alias_table.insert(&new_alias).await?; + self.garage + .bucket_helper() + .set_global_bucket_alias(bucket_id, &new_name) + .await?; - for (k, perm) in new_ak.items().iter() { - let mut key = self - .garage - .key_table - .get(&EmptyKey, k) - .await? - .ok_or_message(format!("Missing key: {}", k))?; - if let Some(p) = key.state.as_option_mut() { - p.authorized_buckets.put(new_bucket.id, *perm); - } - self.garage.key_table.insert(&key).await?; + for (k, ts, perm) in old_bucket_p.authorized_keys.items().iter() { + self.garage + .bucket_helper() + .set_bucket_key_permissions( + bucket_id, + k, + BucketKeyPerm { + timestamp: *ts, + allow_read: perm.allow_read, + allow_write: perm.allow_write, + allow_owner: false, + }, + ) + .await?; } Ok(()) diff --git a/src/model/permission.rs b/src/model/permission.rs index ebb24a32..b8f7dd71 100644 --- a/src/model/permission.rs +++ b/src/model/permission.rs @@ -20,6 +20,17 @@ pub struct BucketKeyPerm { pub allow_owner: bool, } +impl BucketKeyPerm { + pub fn no_permissions() -> Self { + Self { + timestamp: 0, + allow_read: false, + allow_write: false, + allow_owner: false, + } + } +} + impl Crdt for BucketKeyPerm { fn merge(&mut self, other: &Self) { match other.timestamp.cmp(&self.timestamp) { -- 2.43.4 From de37658b94d6ac54721e18316dd8dddf50589afb Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Mon, 3 Jan 2022 17:56:26 +0100 Subject: [PATCH 15/19] Hopefully fix Nix build --- Cargo.nix | 3 ++- 
default.nix | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Cargo.nix b/Cargo.nix index c6c5c050..53e93c34 100644 --- a/Cargo.nix +++ b/Cargo.nix @@ -253,7 +253,7 @@ in registry = "registry+https://github.com/rust-lang/crates.io-index"; src = fetchCratesIo { inherit name version; sha256 = "95059428f66df56b63431fdb4e1947ed2190586af5c5a8a8b71122bdf5a7f469"; }; dependencies = { - ${ if hostPlatform.config == "aarch64-apple-darwin" || hostPlatform.parsed.cpu.name == "aarch64" && hostPlatform.parsed.kernel.name == "linux" then "libc" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".libc."0.2.103" { inherit profileName; }; + ${ if hostPlatform.parsed.cpu.name == "aarch64" && hostPlatform.parsed.kernel.name == "linux" || hostPlatform.config == "aarch64-apple-darwin" then "libc" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".libc."0.2.103" { inherit profileName; }; }; }); @@ -716,6 +716,7 @@ in dependencies = { arc_swap = rustPackages."registry+https://github.com/rust-lang/crates.io-index".arc-swap."1.4.0" { inherit profileName; }; async_trait = buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".async-trait."0.1.51" { profileName = "__noProfile"; }; + err_derive = buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.0" { profileName = "__noProfile"; }; futures = rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.17" { inherit profileName; }; futures_util = rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.17" { inherit profileName; }; garage_model_050 = rustPackages."registry+https://github.com/rust-lang/crates.io-index".garage_model."0.5.1" { inherit profileName; }; diff --git a/default.nix b/default.nix index d44d5741..e0336174 100644 --- a/default.nix +++ b/default.nix @@ -56,7 +56,7 @@ in let */ ''^(src|tests)'' # fixed default ''.*\.(rs|toml)$'' # fixed 
default - ''^(crdt|replication|cli)'' # our crate submodules + ''^(crdt|replication|cli|helper)'' # our crate submodules ]; }; -- 2.43.4 From ba7f268b990cd17c5d20bf9e0eb6ff77d30fe845 Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Mon, 3 Jan 2022 18:03:12 +0100 Subject: [PATCH 16/19] Rename and change query filters --- src/model/helper/bucket.rs | 3 +-- src/model/key_table.rs | 9 +++++---- src/table/util.rs | 4 ++-- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/model/helper/bucket.rs b/src/model/helper/bucket.rs index bb4b5b24..8bc54b3f 100644 --- a/src/model/helper/bucket.rs +++ b/src/model/helper/bucket.rs @@ -116,12 +116,11 @@ impl<'a> BucketHelper<'a> { .get_range( &EmptyKey, None, - Some(KeyFilter::Matches(pattern.to_string())), + Some(KeyFilter::MatchesAndNotDeleted(pattern.to_string())), 10, ) .await? .into_iter() - .filter(|k| !k.state.is_deleted()) .collect::>(); if candidates.len() != 1 { Err(Error::BadRequest(format!( diff --git a/src/model/key_table.rs b/src/model/key_table.rs index daea5473..7afa0337 100644 --- a/src/model/key_table.rs +++ b/src/model/key_table.rs @@ -152,7 +152,7 @@ pub struct KeyTable; #[derive(Clone, Debug, Serialize, Deserialize)] pub enum KeyFilter { Deleted(DeletedFilter), - Matches(String), + MatchesAndNotDeleted(String), } impl TableSchema for KeyTable { @@ -166,10 +166,11 @@ impl TableSchema for KeyTable { fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool { match filter { KeyFilter::Deleted(df) => df.apply(entry.state.is_deleted()), - KeyFilter::Matches(pat) => { + KeyFilter::MatchesAndNotDeleted(pat) => { let pat = pat.to_lowercase(); - entry.key_id.to_lowercase().starts_with(&pat) - || entry.name.get().to_lowercase() == pat + !entry.state.is_deleted() + && (entry.key_id.to_lowercase().starts_with(&pat) + || entry.name.get().to_lowercase() == pat) } } } diff --git a/src/table/util.rs b/src/table/util.rs index 043a457c..2a5c3afe 100644 --- a/src/table/util.rs +++ b/src/table/util.rs @@ 
-19,7 +19,7 @@ impl PartitionKey for EmptyKey { #[derive(Clone, Copy, Debug, Serialize, Deserialize)] pub enum DeletedFilter { - All, + Any, Deleted, NotDeleted, } @@ -27,7 +27,7 @@ pub enum DeletedFilter { impl DeletedFilter { pub fn apply(&self, deleted: bool) -> bool { match self { - DeletedFilter::All => true, + DeletedFilter::Any => true, DeletedFilter::Deleted => deleted, DeletedFilter::NotDeleted => !deleted, } -- 2.43.4 From 1bcd6fabbdc0cd9dee88ba28daecb5339f2c13ec Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Mon, 3 Jan 2022 18:32:15 +0100 Subject: [PATCH 17/19] New buckets for 0.6.0: small changes - Fix bucket delete - fix merge of bucket creation date - Replace deletable with option in aliases Rationale: if two aliases point to conflicting bucket, resolving by making an arbitrary choice risks making data accessible when it shouldn't be. We'd rather resolve to deleting the alias until someone puts it back. --- src/api/api_server.rs | 4 +- src/api/s3_bucket.rs | 8 ++-- src/garage/admin.rs | 16 ++++--- src/garage/cli/cmd.rs | 4 +- src/garage/cli/util.rs | 4 +- src/model/bucket_alias_table.rs | 17 ++----- src/model/bucket_table.rs | 1 + src/model/helper/bucket.rs | 79 ++++++++++++++++++++++++--------- src/model/key_table.rs | 2 +- src/util/crdt/crdt.rs | 6 --- src/util/data.rs | 4 +- src/web/web_server.rs | 3 +- 12 files changed, 86 insertions(+), 62 deletions(-) diff --git a/src/api/api_server.rs b/src/api/api_server.rs index 42987e78..f5ebed37 100644 --- a/src/api/api_server.rs +++ b/src/api/api_server.rs @@ -7,7 +7,6 @@ use hyper::server::conn::AddrStream; use hyper::service::{make_service_fn, service_fn}; use hyper::{Body, Request, Response, Server}; -use garage_util::crdt; use garage_util::data::*; use garage_util::error::Error as GarageError; @@ -306,8 +305,7 @@ async fn resolve_bucket( .as_option() .ok_or_else(|| Error::Forbidden("Operation is not allowed for this key.".to_string()))?; - if let Some(crdt::Deletable::Present(bucket_id)) = 
api_key_params.local_aliases.get(bucket_name) - { + if let Some(Some(bucket_id)) = api_key_params.local_aliases.get(bucket_name) { Ok(*bucket_id) } else { Ok(garage diff --git a/src/api/s3_bucket.rs b/src/api/s3_bucket.rs index 785b89dd..24ec6b98 100644 --- a/src/api/s3_bucket.rs +++ b/src/api/s3_bucket.rs @@ -65,8 +65,8 @@ pub async fn handle_list_buckets(garage: &Garage, api_key: &Key) -> Result Result) { println!("\nKey-specific bucket aliases:"); let mut table = vec![]; for (alias_name, _, alias) in p.local_aliases.items().iter() { - if let Some(bucket_id) = alias.as_option() { + if let Some(bucket_id) = alias { table.push(format!( "\t{}\t{}\t{}", alias_name, @@ -55,7 +55,7 @@ pub fn print_key_info(key: &Key, relevant_buckets: &HashMap) { .local_aliases .items() .iter() - .filter(|(_, _, a)| a.as_option() == Some(bucket_id)) + .filter(|(_, _, a)| *a == Some(*bucket_id)) .map(|(a, _, _)| a.clone()) .collect::>() .join(", "); diff --git a/src/model/bucket_alias_table.rs b/src/model/bucket_alias_table.rs index 45807178..fce03d04 100644 --- a/src/model/bucket_alias_table.rs +++ b/src/model/bucket_alias_table.rs @@ -10,32 +10,23 @@ use garage_table::*; #[derive(PartialEq, Clone, Debug, Serialize, Deserialize)] pub struct BucketAlias { name: String, - pub state: crdt::Lww>, -} - -#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)] -pub struct AliasParams { - pub bucket_id: Uuid, -} - -impl AutoCrdt for AliasParams { - const WARN_IF_DIFFERENT: bool = true; + pub state: crdt::Lww>, } impl BucketAlias { - pub fn new(name: String, ts: u64, bucket_id: Uuid) -> Option { + pub fn new(name: String, ts: u64, bucket_id: Option) -> Option { if !is_valid_bucket_name(&name) { None } else { Some(BucketAlias { name, - state: crdt::Lww::raw(ts, crdt::Deletable::present(AliasParams { bucket_id })), + state: crdt::Lww::raw(ts, bucket_id), }) } } pub fn is_deleted(&self) -> bool { - self.state.get().is_deleted() + self.state.get().is_none() } pub fn 
name(&self) -> &str { &self.name diff --git a/src/model/bucket_table.rs b/src/model/bucket_table.rs index fef29b62..52c2316c 100644 --- a/src/model/bucket_table.rs +++ b/src/model/bucket_table.rs @@ -63,6 +63,7 @@ impl BucketParams { impl Crdt for BucketParams { fn merge(&mut self, o: &Self) { + self.creation_date = std::cmp::min(self.creation_date, o.creation_date); self.authorized_keys.merge(&o.authorized_keys); self.website_config.merge(&o.website_config); self.aliases.merge(&o.aliases); diff --git a/src/model/helper/bucket.rs b/src/model/helper/bucket.rs index 8bc54b3f..52cedb12 100644 --- a/src/model/helper/bucket.rs +++ b/src/model/helper/bucket.rs @@ -46,7 +46,7 @@ impl<'a> BucketHelper<'a> { .bucket_alias_table .get(&EmptyKey, bucket_name) .await? - .map(|x| x.state.get().as_option().map(|x| x.bucket_id)) + .map(|x| *x.state.get()) .flatten()) } } @@ -154,11 +154,11 @@ impl<'a> BucketHelper<'a> { let alias = self.0.bucket_alias_table.get(&EmptyKey, alias_name).await?; if let Some(existing_alias) = alias.as_ref() { - if let Some(p) = existing_alias.state.get().as_option() { - if p.bucket_id != bucket_id { + if let Some(p_bucket) = existing_alias.state.get() { + if *p_bucket != bucket_id { return Err(Error::BadRequest(format!( "Alias {} already exists and points to different bucket: {:?}", - alias_name, p.bucket_id + alias_name, p_bucket ))); } } @@ -176,10 +176,10 @@ impl<'a> BucketHelper<'a> { // writes are now done and all writes use timestamp alias_ts let alias = match alias { - None => BucketAlias::new(alias_name.clone(), alias_ts, bucket_id) + None => BucketAlias::new(alias_name.clone(), alias_ts, Some(bucket_id)) .ok_or_bad_request(format!("{}: {}", alias_name, INVALID_BUCKET_NAME_MESSAGE))?, Some(mut a) => { - a.state = Lww::raw(alias_ts, Deletable::present(AliasParams { bucket_id })); + a.state = Lww::raw(alias_ts, Some(bucket_id)); a } }; @@ -209,13 +209,7 @@ impl<'a> BucketHelper<'a> { .bucket_alias_table .get(&EmptyKey, alias_name) .await? 
- .filter(|a| { - a.state - .get() - .as_option() - .map(|x| x.bucket_id == bucket_id) - .unwrap_or(false) - }) + .filter(|a| a.state.get().map(|x| x == bucket_id).unwrap_or(false)) .ok_or_message(format!( "Internal error: alias not found or does not point to bucket {:?}", bucket_id @@ -244,7 +238,7 @@ impl<'a> BucketHelper<'a> { // ---- timestamp-ensured causality barrier ---- // writes are now done and all writes use timestamp alias_ts - alias.state = Lww::raw(alias_ts, Deletable::delete()); + alias.state = Lww::raw(alias_ts, None); self.0.bucket_alias_table.insert(&alias).await?; bucket_state.aliases = LwwMap::raw_item(alias_name.clone(), alias_ts, false); @@ -253,6 +247,51 @@ impl<'a> BucketHelper<'a> { Ok(()) } + /// Ensures a bucket does not have a certain global alias. + /// Contrarily to unset_global_bucket_alias, this does not + /// fail on any condition other than: + /// - bucket cannot be found (its fine if it is in deleted state) + /// - alias cannot be found (its fine if it points to nothing or + /// to another bucket) + pub async fn purge_global_bucket_alias( + &self, + bucket_id: Uuid, + alias_name: &String, + ) -> Result<(), Error> { + let mut bucket = self.get_internal_bucket(bucket_id).await?; + + let mut alias = self + .0 + .bucket_alias_table + .get(&EmptyKey, alias_name) + .await? 
+ .ok_or_message(format!("Alias {} not found", alias_name))?; + + // Checks ok, remove alias + let alias_ts = match bucket.state.as_option() { + Some(bucket_state) => increment_logical_clock_2( + alias.state.timestamp(), + bucket_state.aliases.get_timestamp(alias_name), + ), + None => increment_logical_clock(alias.state.timestamp()), + }; + + // ---- timestamp-ensured causality barrier ---- + // writes are now done and all writes use timestamp alias_ts + + if alias.state.get() == &Some(bucket_id) { + alias.state = Lww::raw(alias_ts, None); + self.0.bucket_alias_table.insert(&alias).await?; + } + + if let Some(mut bucket_state) = bucket.state.as_option_mut() { + bucket_state.aliases = LwwMap::raw_item(alias_name.clone(), alias_ts, false); + self.0.bucket_table.insert(&bucket).await?; + } + + Ok(()) + } + /// Sets a new alias for a bucket in the local namespace of a key. /// This function fails if: /// - alias name is not valid according to S3 spec @@ -277,7 +316,7 @@ impl<'a> BucketHelper<'a> { let mut key_param = key.state.as_option_mut().unwrap(); - if let Some(Deletable::Present(existing_alias)) = key_param.local_aliases.get(alias_name) { + if let Some(Some(existing_alias)) = key_param.local_aliases.get(alias_name) { if *existing_alias != bucket_id { return Err(Error::BadRequest(format!("Alias {} already exists in namespace of key {} and points to different bucket: {:?}", alias_name, key.key_id, existing_alias))); } @@ -301,8 +340,7 @@ impl<'a> BucketHelper<'a> { // ---- timestamp-ensured causality barrier ---- // writes are now done and all writes use timestamp alias_ts - key_param.local_aliases = - LwwMap::raw_item(alias_name.clone(), alias_ts, Deletable::present(bucket_id)); + key_param.local_aliases = LwwMap::raw_item(alias_name.clone(), alias_ts, Some(bucket_id)); self.0.key_table.insert(&key).await?; bucket_p.local_aliases = LwwMap::raw_item(bucket_p_local_alias_key, alias_ts, true); @@ -334,8 +372,8 @@ impl<'a> BucketHelper<'a> { .unwrap() .local_aliases 
.get(alias_name) - .map(|x| x.as_option()) - .flatten() != Some(&bucket_id) + .cloned() + .flatten() != Some(bucket_id) { return Err(GarageError::Message(format!( "Bucket {:?} does not have alias {} in namespace of key {}", @@ -372,8 +410,7 @@ impl<'a> BucketHelper<'a> { // ---- timestamp-ensured causality barrier ---- // writes are now done and all writes use timestamp alias_ts - key_param.local_aliases = - LwwMap::raw_item(alias_name.clone(), alias_ts, Deletable::delete()); + key_param.local_aliases = LwwMap::raw_item(alias_name.clone(), alias_ts, None); self.0.key_table.insert(&key).await?; bucket_p.local_aliases = LwwMap::raw_item(bucket_p_local_alias_key, alias_ts, false); diff --git a/src/model/key_table.rs b/src/model/key_table.rs index 7afa0337..c25f2da4 100644 --- a/src/model/key_table.rs +++ b/src/model/key_table.rs @@ -31,7 +31,7 @@ pub struct Key { pub struct KeyParams { pub allow_create_bucket: crdt::Lww, pub authorized_buckets: crdt::Map, - pub local_aliases: crdt::LwwMap>, + pub local_aliases: crdt::LwwMap>, } impl KeyParams { diff --git a/src/util/crdt/crdt.rs b/src/util/crdt/crdt.rs index 00bb2e3b..06876897 100644 --- a/src/util/crdt/crdt.rs +++ b/src/util/crdt/crdt.rs @@ -1,5 +1,3 @@ -use crate::data::*; - /// Definition of a CRDT - all CRDT Rust types implement this. /// /// A CRDT is defined as a merge operator that respects a certain set of axioms. 
@@ -87,7 +85,3 @@ impl AutoCrdt for String { impl AutoCrdt for bool { const WARN_IF_DIFFERENT: bool = true; } - -impl AutoCrdt for FixedBytes32 { - const WARN_IF_DIFFERENT: bool = true; -} diff --git a/src/util/data.rs b/src/util/data.rs index 6b8ee527..f0744307 100644 --- a/src/util/data.rs +++ b/src/util/data.rs @@ -5,7 +5,7 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer}; use std::fmt; /// An array of 32 bytes -#[derive(Default, PartialOrd, Ord, Clone, Hash, PartialEq, Copy)] +#[derive(Default, Clone, Hash, PartialEq, Eq, PartialOrd, Ord, Copy)] pub struct FixedBytes32([u8; 32]); impl From<[u8; 32]> for FixedBytes32 { @@ -20,8 +20,6 @@ impl std::convert::AsRef<[u8]> for FixedBytes32 { } } -impl Eq for FixedBytes32 {} - impl fmt::Debug for FixedBytes32 { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}…", hex::encode(&self.0[..8])) diff --git a/src/web/web_server.rs b/src/web/web_server.rs index cc6eed57..f13f289e 100644 --- a/src/web/web_server.rs +++ b/src/web/web_server.rs @@ -86,9 +86,8 @@ async fn serve_file(garage: Arc, req: Request) -> Result Date: Mon, 3 Jan 2022 19:06:04 +0100 Subject: [PATCH 18/19] New buckets for 0.6.0: make bucket id a SK and not a HK, CLI updates --- src/api/s3_bucket.rs | 20 +++++++---------- src/api/s3_website.rs | 4 ++-- src/garage/admin.rs | 10 ++++----- src/garage/cli/cmd.rs | 15 ++----------- src/garage/cli/util.rs | 44 ++++++++++++++++++++++++++++++++++++-- src/garage/main.rs | 4 ++-- src/model/bucket_table.rs | 28 ++++++++++++++++++------ src/model/helper/bucket.rs | 6 +++--- src/web/web_server.rs | 2 +- 9 files changed, 86 insertions(+), 47 deletions(-) diff --git a/src/api/s3_bucket.rs b/src/api/s3_bucket.rs index 24ec6b98..50aeb173 100644 --- a/src/api/s3_bucket.rs +++ b/src/api/s3_bucket.rs @@ -58,21 +58,17 @@ pub async fn handle_list_buckets(garage: &Garage, api_key: &Key) -> Result Result, Error> { let mut bucket = garage .bucket_table - .get(&bucket_id, &EmptyKey) + .get(&EmptyKey, 
&bucket_id) .await? .ok_or(Error::NotFound)?; @@ -48,7 +48,7 @@ pub async fn handle_put_website( let mut bucket = garage .bucket_table - .get(&bucket_id, &EmptyKey) + .get(&EmptyKey, &bucket_id) .await? .ok_or(Error::NotFound)?; diff --git a/src/garage/admin.rs b/src/garage/admin.rs index bca1bc5a..b1eb6915 100644 --- a/src/garage/admin.rs +++ b/src/garage/admin.rs @@ -38,7 +38,7 @@ pub enum AdminRpc { // Replies Ok(String), - BucketList(Vec), + BucketList(Vec), BucketInfo(Bucket, HashMap), KeyList(Vec<(String, String)>), KeyInfo(Key, HashMap), @@ -76,12 +76,12 @@ impl AdminRpcHandler { } async fn handle_list_buckets(&self) -> Result { - let bucket_aliases = self + let buckets = self .garage - .bucket_alias_table + .bucket_table .get_range(&EmptyKey, None, Some(DeletedFilter::NotDeleted), 10000) .await?; - Ok(AdminRpc::BucketList(bucket_aliases)) + Ok(AdminRpc::BucketList(buckets)) } async fn handle_bucket_info(&self, query: &BucketOpt) -> Result { @@ -536,7 +536,7 @@ impl AdminRpcHandler { .items() .iter() { - if let Some(b) = self.garage.bucket_table.get(id, &EmptyKey).await? { + if let Some(b) = self.garage.bucket_table.get(&EmptyKey, id).await? { relevant_buckets.insert(*id, b); } } diff --git a/src/garage/cli/cmd.rs b/src/garage/cli/cmd.rs index 515f2143..a90277a0 100644 --- a/src/garage/cli/cmd.rs +++ b/src/garage/cli/cmd.rs @@ -165,24 +165,13 @@ pub async fn cmd_admin( println!("{}", msg); } AdminRpc::BucketList(bl) => { - println!("List of buckets:"); - let mut table = vec![]; - for alias in bl { - if let Some(alias_bucket) = alias.state.get() { - table.push(format!("\t{}\t{:?}", alias.name(), alias_bucket)); - } - } - format_table(table); - println!("Buckets that don't have a global alias (i.e. 
that only exist in the namespace of an access key) are not shown."); + print_bucket_list(bl); } AdminRpc::BucketInfo(bucket, rk) => { print_bucket_info(&bucket, &rk); } AdminRpc::KeyList(kl) => { - println!("List of keys:"); - for key in kl { - println!("{}\t{}", key.0, key.1); - } + print_key_list(kl); } AdminRpc::KeyInfo(key, rb) => { print_key_info(&key, &rb); diff --git a/src/garage/cli/util.rs b/src/garage/cli/util.rs index 8d31a4c5..7a7d0e9b 100644 --- a/src/garage/cli/util.rs +++ b/src/garage/cli/util.rs @@ -7,6 +7,46 @@ use garage_util::error::*; use garage_model::bucket_table::*; use garage_model::key_table::*; +pub fn print_bucket_list(bl: Vec) { + println!("List of buckets:"); + + let mut table = vec![]; + for bucket in bl { + let aliases = bucket + .aliases() + .iter() + .filter(|(_, _, active)| *active) + .map(|(name, _, _)| name.to_string()) + .collect::>(); + let local_aliases_n = match bucket + .local_aliases() + .iter() + .filter(|(_, _, active)| *active) + .count() + { + 0 => "".into(), + 1 => "1 local alias".into(), + n => format!("{} local aliases", n), + }; + table.push(format!( + "\t{}\t{}\t{}", + aliases.join(","), + local_aliases_n, + hex::encode(bucket.id) + )); + } + format_table(table); +} + +pub fn print_key_list(kl: Vec<(String, String)>) { + println!("List of keys:"); + let mut table = vec![]; + for key in kl { + table.push(format!("\t{}\t{}", key.0, key.1)); + } + format_table(table); +} + pub fn print_key_info(key: &Key, relevant_buckets: &HashMap) { let bucket_global_aliases = |b: &Uuid| { if let Some(bucket) = relevant_buckets.get(b) { @@ -99,7 +139,7 @@ pub fn print_bucket_info(bucket: &Bucket, relevant_keys: &HashMap) .get(key_id) .map(|k| k.name.get().as_str()) .unwrap_or(""); - table.push(format!("\t{}\t{} ({})", alias, key_id, key_name)); + table.push(format!("\t{} ({})\t{}", key_id, key_name, alias)); } } format_table(table); @@ -115,7 +155,7 @@ pub fn print_bucket_info(bucket: &Bucket, relevant_keys: &HashMap) .map(|k| 
k.name.get().as_str()) .unwrap_or(""); table.push(format!( - "\t{}{}{}\t{} ({})", + "\t{}{}{}\t{}\t{}", rflag, wflag, oflag, k, key_name )); } diff --git a/src/garage/main.rs b/src/garage/main.rs index 60a13ac7..870455e1 100644 --- a/src/garage/main.rs +++ b/src/garage/main.rs @@ -139,8 +139,8 @@ async fn cli_command(opt: Opt) -> Result<(), Error> { let admin_rpc_endpoint = netapp.endpoint::(ADMIN_RPC_PATH.into()); match cli_command_dispatch(opt.cmd, &system_rpc_endpoint, &admin_rpc_endpoint, id).await { - Err(HelperError::Internal(i)) => Err(i), - Err(HelperError::BadRequest(b)) => Err(Error::Message(format!("bad request: {}", b))), + Err(HelperError::Internal(i)) => Err(Error::Message(format!("Internal error: {}", i))), + Err(HelperError::BadRequest(b)) => Err(Error::Message(b)), Ok(x) => Ok(x), } } diff --git a/src/model/bucket_table.rs b/src/model/bucket_table.rs index 52c2316c..d687e774 100644 --- a/src/model/bucket_table.rs +++ b/src/model/bucket_table.rs @@ -105,15 +105,29 @@ impl Bucket { crdt::Deletable::Present(state) => state.authorized_keys.items(), } } + + pub fn aliases(&self) -> &[(String, u64, bool)] { + match &self.state { + crdt::Deletable::Deleted => &[], + crdt::Deletable::Present(state) => state.aliases.items(), + } + } + + pub fn local_aliases(&self) -> &[((String, String), u64, bool)] { + match &self.state { + crdt::Deletable::Deleted => &[], + crdt::Deletable::Present(state) => state.local_aliases.items(), + } + } } -impl Entry for Bucket { - fn partition_key(&self) -> &Uuid { - &self.id - } - fn sort_key(&self) -> &EmptyKey { +impl Entry for Bucket { + fn partition_key(&self) -> &EmptyKey { &EmptyKey } + fn sort_key(&self) -> &Uuid { + &self.id + } } impl Crdt for Bucket { @@ -127,8 +141,8 @@ pub struct BucketTable; impl TableSchema for BucketTable { const TABLE_NAME: &'static str = "bucket_v2"; - type P = Uuid; - type S = EmptyKey; + type P = EmptyKey; + type S = Uuid; type E = Bucket; type Filter = DeletedFilter; diff --git 
a/src/model/helper/bucket.rs b/src/model/helper/bucket.rs index 52cedb12..6f171c8b 100644 --- a/src/model/helper/bucket.rs +++ b/src/model/helper/bucket.rs @@ -36,7 +36,7 @@ impl<'a> BucketHelper<'a> { Ok(self .0 .bucket_table - .get(&bucket_id, &EmptyKey) + .get(&EmptyKey, &bucket_id) .await? .filter(|x| !x.state.is_deleted()) .map(|_| bucket_id)) @@ -58,7 +58,7 @@ impl<'a> BucketHelper<'a> { Ok(self .0 .bucket_table - .get(&bucket_id, &EmptyKey) + .get(&EmptyKey, &bucket_id) .await? .ok_or_message(format!("Bucket {:?} does not exist", bucket_id))?) } @@ -70,7 +70,7 @@ impl<'a> BucketHelper<'a> { pub async fn get_existing_bucket(&self, bucket_id: Uuid) -> Result { self.0 .bucket_table - .get(&bucket_id, &EmptyKey) + .get(&EmptyKey, &bucket_id) .await? .filter(|b| !b.is_deleted()) .ok_or_bad_request(format!( diff --git a/src/web/web_server.rs b/src/web/web_server.rs index f13f289e..49e5f21b 100644 --- a/src/web/web_server.rs +++ b/src/web/web_server.rs @@ -93,7 +93,7 @@ async fn serve_file(garage: Arc, req: Request) -> Result Date: Tue, 4 Jan 2022 18:59:17 +0100 Subject: [PATCH 19/19] Small changes in key model and refactoring --- src/api/s3_bucket.rs | 8 +-- src/api/signature.rs | 3 +- src/garage/admin.rs | 21 +++++-- src/garage/cli/util.rs | 31 +++++----- src/model/bucket_table.rs | 29 ++++++---- src/model/key_table.rs | 118 +++++++++++++++++++------------------- src/model/permission.rs | 14 ++--- 7 files changed, 120 insertions(+), 104 deletions(-) diff --git a/src/api/s3_bucket.rs b/src/api/s3_bucket.rs index 50aeb173..27208ffa 100644 --- a/src/api/s3_bucket.rs +++ b/src/api/s3_bucket.rs @@ -38,8 +38,8 @@ pub fn handle_get_bucket_versioning() -> Result, Error> { } pub async fn handle_list_buckets(garage: &Garage, api_key: &Key) -> Result, Error> { - let key_state = api_key.state.as_option().ok_or_internal_error( - "Key should not be in deleted state at this point (internal error)", + let key_p = api_key.params().ok_or_internal_error( + "Key should not be in 
deleted state at this point (in handle_list_buckets)", )?; // Collect buckets user has access to @@ -74,7 +74,7 @@ pub async fn handle_list_buckets(garage: &Garage, api_key: &Key) -> Result Result>(); Ok(AdminRpc::KeyList(key_ids)) } @@ -454,7 +460,7 @@ impl AdminRpcHandler { } async fn handle_create_key(&self, query: &KeyNewOpt) -> Result { - let key = Key::new(query.name.clone()); + let key = Key::new(&query.name); self.garage.key_table.insert(&key).await?; self.key_info_result(key).await } @@ -465,7 +471,10 @@ impl AdminRpcHandler { .bucket_helper() .get_existing_matching_key(&query.key_pattern) .await?; - key.name.update(query.new_name.clone()); + key.params_mut() + .unwrap() + .name + .update(query.new_name.clone()); self.garage.key_table.insert(&key).await?; self.key_info_result(key).await } @@ -500,7 +509,7 @@ impl AdminRpcHandler { // 2. Remove permissions on all authorized buckets for (ab_id, _auth) in state.authorized_buckets.items().iter() { helper - .set_bucket_key_permissions(*ab_id, &key.key_id, BucketKeyPerm::no_permissions()) + .set_bucket_key_permissions(*ab_id, &key.key_id, BucketKeyPerm::NO_PERMISSIONS) .await?; } diff --git a/src/garage/cli/util.rs b/src/garage/cli/util.rs index 7a7d0e9b..365831c4 100644 --- a/src/garage/cli/util.rs +++ b/src/garage/cli/util.rs @@ -65,11 +65,11 @@ pub fn print_key_info(key: &Key, relevant_buckets: &HashMap) { "".to_string() }; - println!("Key name: {}", key.name.get()); - println!("Key ID: {}", key.key_id); - println!("Secret key: {}", key.secret_key); match &key.state { Deletable::Present(p) => { + println!("Key name: {}", p.name.get()); + println!("Key ID: {}", key.key_id); + println!("Secret key: {}", p.secret_key); println!("Can create buckets: {}", p.allow_create_bucket.get()); println!("\nKey-specific bucket aliases:"); let mut table = vec![]; @@ -112,12 +112,19 @@ pub fn print_key_info(key: &Key, relevant_buckets: &HashMap) { format_table(table); } Deletable::Deleted => { - println!("\nKey is deleted."); 
+ println!("Key {} is deleted.", key.key_id); } } } pub fn print_bucket_info(bucket: &Bucket, relevant_keys: &HashMap) { + let key_name = |k| { + relevant_keys + .get(k) + .map(|k| k.params().unwrap().name.get().as_str()) + .unwrap_or("") + }; + println!("Bucket: {}", hex::encode(bucket.id)); match &bucket.state { Deletable::Deleted => println!("Bucket is deleted."), @@ -135,11 +142,7 @@ pub fn print_bucket_info(bucket: &Bucket, relevant_keys: &HashMap) let mut table = vec![]; for ((key_id, alias), _, active) in p.local_aliases.items().iter() { if *active { - let key_name = relevant_keys - .get(key_id) - .map(|k| k.name.get().as_str()) - .unwrap_or(""); - table.push(format!("\t{} ({})\t{}", key_id, key_name, alias)); + table.push(format!("\t{} ({})\t{}", key_id, key_name(key_id), alias)); } } format_table(table); @@ -150,13 +153,13 @@ pub fn print_bucket_info(bucket: &Bucket, relevant_keys: &HashMap) let rflag = if perm.allow_read { "R" } else { " " }; let wflag = if perm.allow_write { "W" } else { " " }; let oflag = if perm.allow_owner { "O" } else { " " }; - let key_name = relevant_keys - .get(k) - .map(|k| k.name.get().as_str()) - .unwrap_or(""); table.push(format!( "\t{}{}{}\t{}\t{}", - rflag, wflag, oflag, k, key_name + rflag, + wflag, + oflag, + k, + key_name(k) )); } format_table(table); diff --git a/src/model/bucket_table.rs b/src/model/bucket_table.rs index d687e774..db7cec18 100644 --- a/src/model/bucket_table.rs +++ b/src/model/bucket_table.rs @@ -97,27 +97,32 @@ impl Bucket { self.state.is_deleted() } + /// Returns an option representing the parameters (None if in deleted state) + pub fn params(&self) -> Option<&BucketParams> { + self.state.as_option() + } + + /// Mutable version of `.params()` + pub fn params_mut(&mut self) -> Option<&mut BucketParams> { + self.state.as_option_mut() + } + /// Return the list of authorized keys, when each was updated, and the permission associated to /// the key pub fn authorized_keys(&self) -> &[(String, 
BucketKeyPerm)] { - match &self.state { - crdt::Deletable::Deleted => &[], - crdt::Deletable::Present(state) => state.authorized_keys.items(), - } + self.params() + .map(|s| s.authorized_keys.items()) + .unwrap_or(&[]) } pub fn aliases(&self) -> &[(String, u64, bool)] { - match &self.state { - crdt::Deletable::Deleted => &[], - crdt::Deletable::Present(state) => state.aliases.items(), - } + self.params().map(|s| s.aliases.items()).unwrap_or(&[]) } pub fn local_aliases(&self) -> &[((String, String), u64, bool)] { - match &self.state { - crdt::Deletable::Deleted => &[], - crdt::Deletable::Present(state) => state.local_aliases.items(), - } + self.params() + .map(|s| s.local_aliases.items()) + .unwrap_or(&[]) } } diff --git a/src/model/key_table.rs b/src/model/key_table.rs index c25f2da4..d5e30f3f 100644 --- a/src/model/key_table.rs +++ b/src/model/key_table.rs @@ -14,29 +14,37 @@ pub struct Key { /// The id of the key (immutable), used as partition key pub key_id: String, - /// The secret_key associated - pub secret_key: String, - - /// Name for the key - pub name: crdt::Lww, - - /// If the key is present: it gives some permissions, - /// a map of bucket IDs (uuids) to permissions. - /// Otherwise no permissions are granted to key + /// Internal state of the key pub state: crdt::Deletable, } /// Configuration for a key #[derive(PartialEq, Clone, Debug, Serialize, Deserialize)] pub struct KeyParams { + /// The secret_key associated (immutable) + pub secret_key: String, + + /// Name for the key + pub name: crdt::Lww, + + /// Flag to allow users having this key to create buckets pub allow_create_bucket: crdt::Lww, + + /// If the key is present: it gives some permissions, + /// a map of bucket IDs (uuids) to permissions. 
+ /// Otherwise no permissions are granted to key pub authorized_buckets: crdt::Map, + + /// A key can have a local view of buckets names it is + /// the only one to see, this is the namespace for these aliases pub local_aliases: crdt::LwwMap>, } impl KeyParams { - pub fn new() -> Self { + fn new(secret_key: &str, name: &str) -> Self { KeyParams { + secret_key: secret_key.to_string(), + name: crdt::Lww::new(name.to_string()), allow_create_bucket: crdt::Lww::new(false), authorized_buckets: crdt::Map::new(), local_aliases: crdt::LwwMap::new(), @@ -44,14 +52,9 @@ impl KeyParams { } } -impl Default for KeyParams { - fn default() -> Self { - Self::new() - } -} - impl Crdt for KeyParams { fn merge(&mut self, o: &Self) { + self.name.merge(&o.name); self.allow_create_bucket.merge(&o.allow_create_bucket); self.authorized_buckets.merge(&o.authorized_buckets); self.local_aliases.merge(&o.local_aliases); @@ -60,14 +63,12 @@ impl Crdt for KeyParams { impl Key { /// Initialize a new Key, generating a random identifier and associated secret key - pub fn new(name: String) -> Self { + pub fn new(name: &str) -> Self { let key_id = format!("GK{}", hex::encode(&rand::random::<[u8; 12]>()[..])); let secret_key = hex::encode(&rand::random::<[u8; 32]>()[..]); Self { key_id, - secret_key, - name: crdt::Lww::new(name), - state: crdt::Deletable::present(KeyParams::new()), + state: crdt::Deletable::present(KeyParams::new(&secret_key, name)), } } @@ -75,9 +76,7 @@ impl Key { pub fn import(key_id: &str, secret_key: &str, name: &str) -> Self { Self { key_id: key_id.to_string(), - secret_key: secret_key.to_string(), - name: crdt::Lww::new(name.to_string()), - state: crdt::Deletable::present(KeyParams::new()), + state: crdt::Deletable::present(KeyParams::new(secret_key, name)), } } @@ -85,49 +84,47 @@ impl Key { pub fn delete(key_id: String) -> Self { Self { key_id, - secret_key: "".into(), - name: crdt::Lww::new("".to_string()), state: crdt::Deletable::Deleted, } } + /// Returns true if this 
represents a deleted bucket + pub fn is_deleted(&self) -> bool { + self.state.is_deleted() + } + + /// Returns an option representing the params (None if in deleted state) + pub fn params(&self) -> Option<&KeyParams> { + self.state.as_option() + } + + /// Mutable version of `.state()` + pub fn params_mut(&mut self) -> Option<&mut KeyParams> { + self.state.as_option_mut() + } + + /// Get permissions for a bucket + pub fn bucket_permissions(&self, bucket: &Uuid) -> BucketKeyPerm { + self.params() + .map(|params| params.authorized_buckets.get(bucket)) + .flatten() + .cloned() + .unwrap_or(BucketKeyPerm::NO_PERMISSIONS) + } + /// Check if `Key` is allowed to read in bucket pub fn allow_read(&self, bucket: &Uuid) -> bool { - if let crdt::Deletable::Present(params) = &self.state { - params - .authorized_buckets - .get(bucket) - .map(|x| x.allow_read) - .unwrap_or(false) - } else { - false - } + self.bucket_permissions(bucket).allow_read } /// Check if `Key` is allowed to write in bucket pub fn allow_write(&self, bucket: &Uuid) -> bool { - if let crdt::Deletable::Present(params) = &self.state { - params - .authorized_buckets - .get(bucket) - .map(|x| x.allow_write) - .unwrap_or(false) - } else { - false - } + self.bucket_permissions(bucket).allow_write } /// Check if `Key` is owner of bucket pub fn allow_owner(&self, bucket: &Uuid) -> bool { - if let crdt::Deletable::Present(params) = &self.state { - params - .authorized_buckets - .get(bucket) - .map(|x| x.allow_owner) - .unwrap_or(false) - } else { - false - } + self.bucket_permissions(bucket).allow_owner } } @@ -142,7 +139,6 @@ impl Entry for Key { impl Crdt for Key { fn merge(&mut self, other: &Self) { - self.name.merge(&other.name); self.state.merge(&other.state); } } @@ -168,15 +164,20 @@ impl TableSchema for KeyTable { KeyFilter::Deleted(df) => df.apply(entry.state.is_deleted()), KeyFilter::MatchesAndNotDeleted(pat) => { let pat = pat.to_lowercase(); - !entry.state.is_deleted() - && 
(entry.key_id.to_lowercase().starts_with(&pat) - || entry.name.get().to_lowercase() == pat) + entry + .params() + .map(|p| { + entry.key_id.to_lowercase().starts_with(&pat) + || p.name.get().to_lowercase() == pat + }) + .unwrap_or(false) } } } fn try_migrate(bytes: &[u8]) -> Option { let old_k = rmp_serde::decode::from_read_ref::<_, old::Key>(bytes).ok()?; + let name = crdt::Lww::raw(old_k.name.timestamp(), old_k.name.get().clone()); let state = if old_k.deleted.get() { crdt::Deletable::Deleted @@ -185,16 +186,15 @@ impl TableSchema for KeyTable { // migration is performed in specific migration code in // garage/migrate.rs crdt::Deletable::Present(KeyParams { + secret_key: old_k.secret_key, + name, allow_create_bucket: crdt::Lww::new(false), authorized_buckets: crdt::Map::new(), local_aliases: crdt::LwwMap::new(), }) }; - let name = crdt::Lww::raw(old_k.name.timestamp(), old_k.name.get().clone()); Some(Key { key_id: old_k.key_id, - secret_key: old_k.secret_key, - name, state, }) } diff --git a/src/model/permission.rs b/src/model/permission.rs index b8f7dd71..67527ed0 100644 --- a/src/model/permission.rs +++ b/src/model/permission.rs @@ -21,14 +21,12 @@ pub struct BucketKeyPerm { } impl BucketKeyPerm { - pub fn no_permissions() -> Self { - Self { - timestamp: 0, - allow_read: false, - allow_write: false, - allow_owner: false, - } - } + pub const NO_PERMISSIONS: Self = Self { + timestamp: 0, + allow_read: false, + allow_write: false, + allow_owner: false, + }; } impl Crdt for BucketKeyPerm { -- 2.43.4