From edb13dea2fcb59869337ba7313e944b5f63179e4 Mon Sep 17 00:00:00 2001
From: Yureka
Date: Sun, 5 May 2024 11:20:24 +0200
Subject: [PATCH] refactor: make TableShardedReplication a thin wrapper around
 LayoutManager

---
 src/model/garage.rs              | 12 ++++--------
 src/table/replication/sharded.rs | 34 +++++++++++++++++++---------------
 2 files changed, 23 insertions(+), 23 deletions(-)

diff --git a/src/model/garage.rs b/src/model/garage.rs
index 273690db..d4428449 100644
--- a/src/model/garage.rs
+++ b/src/model/garage.rs
@@ -152,17 +152,13 @@ impl Garage {
 		let system = System::new(network_key, replication_factor, consistency_mode, &config)?;
 
 		let data_rep_param = TableShardedReplication {
-			system: system.clone(),
-			replication_factor: replication_factor.into(),
-			write_quorum: replication_factor.write_quorum(consistency_mode),
-			read_quorum: 1,
+			layout_manager: system.layout_manager.clone(),
+			consistency_mode: ConsistencyMode::Degraded, // Blocks are content addressed, so one read is sufficient
 		};
 
 		let meta_rep_param = TableShardedReplication {
-			system: system.clone(),
-			replication_factor: replication_factor.into(),
-			write_quorum: replication_factor.write_quorum(consistency_mode),
-			read_quorum: replication_factor.read_quorum(consistency_mode),
+			layout_manager: system.layout_manager.clone(),
+			consistency_mode,
 		};
 
 		let control_rep_param = TableFullReplication {
diff --git a/src/table/replication/sharded.rs b/src/table/replication/sharded.rs
index e0245949..bdfe94fd 100644
--- a/src/table/replication/sharded.rs
+++ b/src/table/replication/sharded.rs
@@ -1,9 +1,10 @@
 use std::sync::Arc;
 
 use garage_rpc::layout::*;
-use garage_rpc::system::System;
+use garage_rpc::replication_mode::ConsistencyMode;
 use garage_util::data::*;
 
+use garage_rpc::layout::manager::LayoutManager;
 use crate::replication::*;
 
 /// Sharded replication schema:
@@ -15,42 +16,45 @@
 #[derive(Clone)]
 pub struct TableShardedReplication {
 	/// The membership manager of this node
-	pub system: Arc<System>,
-	/// How many time each data should be replicated
-	pub replication_factor: usize,
-	/// How many nodes to contact for a read, should be at most `replication_factor`
-	pub read_quorum: usize,
-	/// How many nodes to contact for a write, should be at most `replication_factor`
-	pub write_quorum: usize,
+	pub layout_manager: Arc<LayoutManager>,
+	pub consistency_mode: ConsistencyMode,
 }
 
 impl TableReplication for TableShardedReplication {
 	type WriteSets = WriteLock<Vec<Vec<Uuid>>>;
 
 	fn storage_nodes(&self, hash: &Hash) -> Vec<Uuid> {
-		self.system.cluster_layout().storage_nodes_of(hash)
+		self.layout_manager.layout().storage_nodes_of(hash)
 	}
 
 	fn read_nodes(&self, hash: &Hash) -> Vec<Uuid> {
-		self.system.cluster_layout().read_nodes_of(hash)
+		self.layout_manager.layout().read_nodes_of(hash)
 	}
 	fn read_quorum(&self) -> usize {
-		self.read_quorum
+		self.layout_manager
+			.layout()
+			.current()
+			.replication_factor
+			.read_quorum(self.consistency_mode)
 	}
 
 	fn write_sets(&self, hash: &Hash) -> Self::WriteSets {
-		self.system.layout_manager.write_sets_of(hash)
+		self.layout_manager.write_sets_of(hash)
 	}
 	fn write_quorum(&self) -> usize {
-		self.write_quorum
+		self.layout_manager
+			.layout()
+			.current()
+			.replication_factor
+			.write_quorum(self.consistency_mode)
 	}
 
 	fn partition_of(&self, hash: &Hash) -> Partition {
-		self.system.cluster_layout().current().partition_of(hash)
+		self.layout_manager.layout().current().partition_of(hash)
 	}
 
 	fn sync_partitions(&self) -> SyncPartitions {
-		let layout = self.system.cluster_layout();
+		let layout = self.layout_manager.layout();
 		let layout_version = layout.ack_map_min();
 		let mut partitions = layout
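
Note (not part of the patch): the key effect of this refactor is that read and
write quorums are no longer computed once in Garage::new() and frozen into the
struct; they are re-derived on every call from the current layout's replication
factor plus the table's consistency mode, so a layout change takes effect for
all tables immediately. The sketch below illustrates that quorum arithmetic in
isolation. It is a standalone approximation: ReplicationFactor and
ConsistencyMode here are local stand-ins for the types in
garage_rpc::replication_mode, and the exact formulas are assumptions based on
majority quorums, not a verbatim copy of Garage's implementation.

// Standalone sketch; not part of the patch. Local stand-ins for
// garage_rpc::replication_mode::{ReplicationFactor, ConsistencyMode}.

#[allow(dead_code)]
#[derive(Clone, Copy)]
enum ConsistencyMode {
    Dangerous,  // neither reads nor writes wait for a quorum
    Degraded,   // quorum writes, single-node reads
    Consistent, // majority quorum for both reads and writes
}

#[derive(Clone, Copy)]
struct ReplicationFactor(usize);

impl ReplicationFactor {
    // Assumed formula: a majority read quorum only in Consistent mode.
    fn read_quorum(self, c: ConsistencyMode) -> usize {
        match c {
            ConsistencyMode::Dangerous | ConsistencyMode::Degraded => 1,
            ConsistencyMode::Consistent => self.0 / 2 + 1,
        }
    }

    // Assumed formula: a write quorum large enough that every write set
    // overlaps any Consistent-mode read quorum.
    fn write_quorum(self, c: ConsistencyMode) -> usize {
        match c {
            ConsistencyMode::Dangerous => 1,
            ConsistencyMode::Degraded | ConsistencyMode::Consistent => {
                self.0 - self.read_quorum(ConsistencyMode::Consistent) + 1
            }
        }
    }
}

fn main() {
    let rf = ReplicationFactor(3);
    // Data tables use Degraded in the patch: blocks are content-addressed,
    // so one read suffices, while writes still wait for a quorum.
    assert_eq!(rf.read_quorum(ConsistencyMode::Degraded), 1);
    assert_eq!(rf.write_quorum(ConsistencyMode::Degraded), 2);
    // Metadata tables follow the configured mode, e.g. Consistent:
    assert_eq!(rf.read_quorum(ConsistencyMode::Consistent), 2);
    assert_eq!(rf.write_quorum(ConsistencyMode::Consistent), 2);
}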