Merge pull request 'refactor: remove max_write_errors and max_faults' (#760) from yuka/garage:remove-max-write-errors into next-0.10
All checks were successful
ci/woodpecker/push/debug Pipeline was successful
ci/woodpecker/pr/debug Pipeline was successful
ci/woodpecker/cron/release/3 Pipeline was successful
ci/woodpecker/cron/debug Pipeline was successful
ci/woodpecker/cron/release/2 Pipeline was successful
ci/woodpecker/cron/release/4 Pipeline was successful
ci/woodpecker/cron/release/1 Pipeline was successful
ci/woodpecker/cron/publish Pipeline was successful

Reviewed-on: #760
Alex committed 2024-03-05 21:56:17 +00:00
commit 603604cdfc
5 changed files with 5 additions and 19 deletions

@@ -247,7 +247,6 @@ impl Garage {
 	let control_rep_param = TableFullReplication {
 		system: system.clone(),
-		max_faults: replication_mode.control_write_max_faults(),
 	};

 	info!("Initialize block manager...");

@@ -21,13 +21,6 @@ impl ReplicationMode {
 		}
 	}
-
-	pub fn control_write_max_faults(&self) -> usize {
-		match self {
-			Self::None => 0,
-			_ => 1,
-		}
-	}

 	pub fn replication_factor(&self) -> usize {
 		match self {
 			Self::None => 1,

@@ -21,8 +21,6 @@ use crate::replication::*;
 pub struct TableFullReplication {
 	/// The membership manager of this node
 	pub system: Arc<System>,
-	/// Max number of faults allowed while replicating a record
-	pub max_faults: usize,
 }

 impl TableReplication for TableFullReplication {
@@ -45,15 +43,15 @@ impl TableReplication for TableFullReplication {
 	}
 	fn write_quorum(&self) -> usize {
 		let nmembers = self.system.cluster_layout().current().all_nodes().len();
-		if nmembers > self.max_faults {
-			nmembers - self.max_faults
+
+		let max_faults = if nmembers > 1 { 1 } else { 0 };
+
+		if nmembers > max_faults {
+			nmembers - max_faults
 		} else {
 			1
 		}
 	}
-	fn max_write_errors(&self) -> usize {
-		self.max_faults
-	}

 	fn partition_of(&self, _hash: &Hash) -> Partition {
 		0u16
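
For context: the removed control_write_max_faults() returned 0 for ReplicationMode::None and 1 for every other mode, and the new write_quorum() derives the same tolerance from the live cluster size instead. Below is a minimal standalone sketch of the resulting behavior; the free function and the sample sizes are ours, for illustration, not code from the patch.

// Sketch of the new TableFullReplication::write_quorum() logic, lifted into a
// free function so it can run standalone. Names here are illustrative only.
fn full_copy_write_quorum(nmembers: usize) -> usize {
	// Tolerate one faulty node as soon as the cluster has more than one member.
	let max_faults = if nmembers > 1 { 1 } else { 0 };
	if nmembers > max_faults {
		nmembers - max_faults
	} else {
		1
	}
}

fn main() {
	// 1 node -> quorum 1, 2 nodes -> 1, 3 nodes -> 2, 5 nodes -> 4
	for n in [1usize, 2, 3, 5] {
		println!("{} nodes -> write quorum {}", n, full_copy_write_quorum(n));
	}
}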

@@ -20,7 +20,6 @@ pub trait TableReplication: Send + Sync + 'static {
 	fn write_sets(&self, hash: &Hash) -> Self::WriteSets;
 	/// Responses needed to consider a write succesfull in each set
 	fn write_quorum(&self) -> usize;
-	fn max_write_errors(&self) -> usize;

 	// Accessing partitions, for Merkle tree & sync
 	/// Get partition for data with given hash

@@ -44,9 +44,6 @@ impl TableReplication for TableShardedReplication {
 	fn write_quorum(&self) -> usize {
 		self.write_quorum
 	}
-	fn max_write_errors(&self) -> usize {
-		self.replication_factor - self.write_quorum
-	}

 	fn partition_of(&self, hash: &Hash) -> Partition {
 		self.system.cluster_layout().current().partition_of(hash)
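
For the sharded case the removed helper was pure arithmetic over two fields that remain on the struct, so no information is lost. A quick worked example with illustrative values (not taken from the patch):

fn main() {
	// Illustrative values for a typical 3-way sharded setup.
	let replication_factor = 3;
	let write_quorum = 2;
	// The removed max_write_errors() was simply the slack between the two:
	println!("max_write_errors was {}", replication_factor - write_quorum); // prints 1
}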