refactor: make TableShardedReplication a thin wrapper around LayoutManager
Checks: ci/woodpecker/pr/debug (pipeline failed)
Parent: 060ad0da32
Commit: edb13dea2f
2 changed files with 23 additions and 23 deletions
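Summary: TableShardedReplication drops its Arc<System> handle and the three stored quorum parameters (replication_factor, read_quorum, write_quorum) in favor of an Arc<LayoutManager> and a ConsistencyMode. Read and write quorums are now derived from the current cluster layout on each call rather than fixed once when Garage starts.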
@@ -152,17 +152,13 @@ impl Garage {
 		let system = System::new(network_key, replication_factor, consistency_mode, &config)?;
 
 		let data_rep_param = TableShardedReplication {
-			system: system.clone(),
-			replication_factor: replication_factor.into(),
-			write_quorum: replication_factor.write_quorum(consistency_mode),
-			read_quorum: 1,
+			layout_manager: system.layout_manager.clone(),
+			consistency_mode: ConsistencyMode::Degraded, // Blocks are content addressed, so one read is sufficient
 		};
 
 		let meta_rep_param = TableShardedReplication {
-			system: system.clone(),
-			replication_factor: replication_factor.into(),
-			write_quorum: replication_factor.write_quorum(consistency_mode),
-			read_quorum: replication_factor.read_quorum(consistency_mode),
+			layout_manager: system.layout_manager.clone(),
+			consistency_mode,
 		};
 
 		let control_rep_param = TableFullReplication {
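Note on the data table above: the old code hard-coded read_quorum: 1 with the same justification the new code expresses through ConsistencyMode::Degraded. A minimal sketch of the intended equivalence, assuming ReplicationFactor is the newtype in garage_rpc::replication_mode that carries the quorum helpers called above (the constructor shown is an assumption, not part of this diff):

use garage_rpc::replication_mode::{ConsistencyMode, ReplicationFactor};

fn main() {
	// Hypothetical constructor; this diff only shows `replication_factor.into()`.
	let rf = ReplicationFactor::new(3).expect("replication factor must be >= 1");
	// Degraded mode: blocks are content addressed, so one successful read
	// can be verified against the hash; a single replica is enough.
	assert_eq!(rf.read_quorum(ConsistencyMode::Degraded), 1);
	// Writes keep a real quorum so a block survives the loss of one node.
	assert!(rf.write_quorum(ConsistencyMode::Degraded) > 1);
}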
@@ -1,9 +1,10 @@
 use std::sync::Arc;
 
 use garage_rpc::layout::*;
-use garage_rpc::system::System;
+use garage_rpc::replication_mode::ConsistencyMode;
 use garage_util::data::*;
 
+use crate::replication::sharded::manager::LayoutManager;
 use crate::replication::*;
 
 /// Sharded replication schema:
@@ -15,42 +16,45 @@ use crate::replication::*;
 #[derive(Clone)]
 pub struct TableShardedReplication {
 	/// The membership manager of this node
-	pub system: Arc<System>,
-	/// How many time each data should be replicated
-	pub replication_factor: usize,
-	/// How many nodes to contact for a read, should be at most `replication_factor`
-	pub read_quorum: usize,
-	/// How many nodes to contact for a write, should be at most `replication_factor`
-	pub write_quorum: usize,
+	pub layout_manager: Arc<LayoutManager>,
+	pub consistency_mode: ConsistencyMode,
 }
 
 impl TableReplication for TableShardedReplication {
 	type WriteSets = WriteLock<Vec<Vec<Uuid>>>;
 
 	fn storage_nodes(&self, hash: &Hash) -> Vec<Uuid> {
-		self.system.cluster_layout().storage_nodes_of(hash)
+		self.layout_manager.layout().storage_nodes_of(hash)
 	}
 
 	fn read_nodes(&self, hash: &Hash) -> Vec<Uuid> {
-		self.system.cluster_layout().read_nodes_of(hash)
+		self.layout_manager.layout().read_nodes_of(hash)
 	}
 	fn read_quorum(&self) -> usize {
-		self.read_quorum
+		self.layout_manager
+			.layout()
+			.current()
+			.replication_factor
+			.read_quorum(self.consistency_mode)
 	}
 
 	fn write_sets(&self, hash: &Hash) -> Self::WriteSets {
-		self.system.layout_manager.write_sets_of(hash)
+		self.layout_manager.write_sets_of(hash)
 	}
 	fn write_quorum(&self) -> usize {
-		self.write_quorum
+		self.layout_manager
+			.layout()
+			.current()
+			.replication_factor
+			.write_quorum(self.consistency_mode)
 	}
 
 	fn partition_of(&self, hash: &Hash) -> Partition {
-		self.system.cluster_layout().current().partition_of(hash)
+		self.layout_manager.layout().current().partition_of(hash)
 	}
 
 	fn sync_partitions(&self) -> SyncPartitions {
-		let layout = self.system.cluster_layout();
+		let layout = self.layout_manager.layout();
 		let layout_version = layout.ack_map_min();
 
 		let mut partitions = layout
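Design note on the trait impl above: read_quorum() and write_quorum() no longer return numbers frozen at construction; they re-derive the quorum from layout_manager.layout().current() on every call, so an applied layout change takes effect without rebuilding the table structs. A sketch of what a caller observes (quorum_snapshot is our name, not the codebase's):

use crate::replication::{TableReplication, TableShardedReplication};

// `TableReplication` must be in scope for the trait methods to resolve.
fn quorum_snapshot(rep: &TableShardedReplication) -> (usize, usize) {
	// Both values are recomputed from the current layout version each call,
	// so quorums track layout updates without rebuilding `rep`.
	(rep.read_quorum(), rep.write_quorum())
}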