refactor: make TableShardedReplication a thin wrapper around LayoutManager
Some checks failed
ci/woodpecker/pr/debug Pipeline failed
parent 060ad0da32
commit edb13dea2f
2 changed files with 23 additions and 23 deletions
@@ -152,17 +152,13 @@ impl Garage {
 	let system = System::new(network_key, replication_factor, consistency_mode, &config)?;
 
 	let data_rep_param = TableShardedReplication {
-		system: system.clone(),
-		replication_factor: replication_factor.into(),
-		write_quorum: replication_factor.write_quorum(consistency_mode),
-		read_quorum: 1,
+		layout_manager: system.layout_manager.clone(),
+		consistency_mode: ConsistencyMode::Degraded, // Blocks are content addressed, so one read is sufficient
 	};
 
 	let meta_rep_param = TableShardedReplication {
-		system: system.clone(),
-		replication_factor: replication_factor.into(),
-		write_quorum: replication_factor.write_quorum(consistency_mode),
-		read_quorum: replication_factor.read_quorum(consistency_mode),
+		layout_manager: system.layout_manager.clone(),
+		consistency_mode,
 	};
 
 	let control_rep_param = TableFullReplication {
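Reviewer's note on the hunk above: the quorum sizes are no longer precomputed at construction time; both parameter structs now carry only a LayoutManager handle and a ConsistencyMode, and the quorums are derived on demand (see the trait impl further down). Below is a minimal sketch of the quorum arithmetic this relies on. The variants Consistent and Degraded appear in the diff, but the formulas are assumptions for illustration, not code copied from Garage's replication_mode module:

// Hedged sketch only: illustrative quorum arithmetic, not Garage's actual
// implementation. `Consistent` and `Degraded` come from the diff; the
// formulas below are assumed.
#[derive(Clone, Copy)]
pub enum ConsistencyMode {
    Consistent, // overlapping read/write majorities: r + w > n
    Degraded,   // stale reads tolerated, e.g. for content-addressed blocks
}

pub fn read_quorum(n: usize, mode: ConsistencyMode) -> usize {
    match mode {
        ConsistencyMode::Degraded => 1,           // one replica answer suffices
        ConsistencyMode::Consistent => n / 2 + 1, // majority read
    }
}

pub fn write_quorum(n: usize, _mode: ConsistencyMode) -> usize {
    n / 2 + 1 // majority write; with a majority read this gives r + w > n
}

With n = 3 this yields (r, w) = (2, 2) in Consistent mode and (1, 2) in Degraded mode; only the former guarantees that every read set intersects every write set, which is fine for block data since a content-addressed hash identifies its contents unambiguously.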
@@ -1,9 +1,10 @@
 use std::sync::Arc;
 
 use garage_rpc::layout::*;
-use garage_rpc::system::System;
+use garage_rpc::replication_mode::ConsistencyMode;
 use garage_util::data::*;
 
+use crate::replication::sharded::manager::LayoutManager;
 use crate::replication::*;
 
 /// Sharded replication schema:
@@ -15,42 +16,45 @@ use crate::replication::*;
 #[derive(Clone)]
 pub struct TableShardedReplication {
-	/// The membership manager of this node
-	pub system: Arc<System>,
-	/// How many time each data should be replicated
-	pub replication_factor: usize,
-	/// How many nodes to contact for a read, should be at most `replication_factor`
-	pub read_quorum: usize,
-	/// How many nodes to contact for a write, should be at most `replication_factor`
-	pub write_quorum: usize,
+	pub layout_manager: Arc<LayoutManager>,
+	pub consistency_mode: ConsistencyMode,
 }
 
 impl TableReplication for TableShardedReplication {
 	type WriteSets = WriteLock<Vec<Vec<Uuid>>>;
 
 	fn storage_nodes(&self, hash: &Hash) -> Vec<Uuid> {
-		self.system.cluster_layout().storage_nodes_of(hash)
+		self.layout_manager.layout().storage_nodes_of(hash)
 	}
 
 	fn read_nodes(&self, hash: &Hash) -> Vec<Uuid> {
-		self.system.cluster_layout().read_nodes_of(hash)
+		self.layout_manager.layout().read_nodes_of(hash)
 	}
 	fn read_quorum(&self) -> usize {
-		self.read_quorum
+		self.layout_manager
+			.layout()
+			.current()
+			.replication_factor
+			.read_quorum(self.consistency_mode)
 	}
 
 	fn write_sets(&self, hash: &Hash) -> Self::WriteSets {
-		self.system.layout_manager.write_sets_of(hash)
+		self.layout_manager.write_sets_of(hash)
 	}
 	fn write_quorum(&self) -> usize {
-		self.write_quorum
+		self.layout_manager
+			.layout()
+			.current()
+			.replication_factor
+			.write_quorum(self.consistency_mode)
 	}
 
 	fn partition_of(&self, hash: &Hash) -> Partition {
-		self.system.cluster_layout().current().partition_of(hash)
+		self.layout_manager.layout().current().partition_of(hash)
 	}
 
 	fn sync_partitions(&self) -> SyncPartitions {
-		let layout = self.system.cluster_layout();
+		let layout = self.layout_manager.layout();
 		let layout_version = layout.ack_map_min();
 
 		let mut partitions = layout
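Reviewer's note on the second file: read_quorum() and write_quorum() now walk self.layout_manager.layout().current().replication_factor on every call, so quorum sizes track the live layout rather than values frozen when Garage started. The WriteSets = WriteLock<Vec<Vec<Uuid>>> associated type also suggests writes target one node set per active layout version during a transition, which is why write_sets_of moves behind the same LayoutManager. A hedged usage sketch of the resulting behaviour follows; the helper and its caller are hypothetical, only the TableReplication method names come from the diff:

// Hypothetical caller sketch: only the `TableReplication` method names
// (`read_nodes`, `read_quorum`) come from the diff above; everything else
// is assumed for illustration.
fn pick_read_targets(rep: &TableShardedReplication, hash: &Hash) -> Vec<Uuid> {
    let nodes = rep.read_nodes(hash); // replicas listed by the *current* layout
    let quorum = rep.read_quorum();   // recomputed from that same layout
    // Contact just enough replicas to satisfy the quorum. Because both values
    // are derived from the same LayoutManager, they cannot drift apart the way
    // a stored `read_quorum: usize` field could after a layout change.
    nodes.into_iter().take(quorum).collect()
}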