Garage v1.0 #683
3 changed files with 11 additions and 3 deletions
@@ -41,6 +41,7 @@ pub struct LayoutHelper {
 	trackers_hash: Hash,
 	staging_hash: Hash,
+	is_check_ok: bool,
 
 	// ack lock: counts in-progress write operations for each
 	// layout version ; we don't increase the ack update tracker

@@ -107,6 +108,8 @@ impl LayoutHelper {
 			.entry(layout.current().version)
 			.or_insert(AtomicUsize::new(0));
 
+		let is_check_ok = layout.check().is_ok();
+
 		LayoutHelper {
 			replication_factor,
 			consistency_mode,

@@ -118,6 +121,7 @@ impl LayoutHelper {
 			trackers_hash,
 			staging_hash,
 			ack_lock,
+			is_check_ok,
 		}
 	}
 
@@ -153,6 +157,10 @@ impl LayoutHelper {
 		&self.inner().versions
 	}
 
+	pub fn is_check_ok(&self) -> bool {
+		self.is_check_ok
+	}
+
 	/// Return all nodes that have a role (gateway or storage)
 	/// in one of the currently active layout versions
 	pub fn all_nodes(&self) -> &[Uuid] {
@@ -160,12 +160,12 @@ impl LayoutManager {
 	fn merge_layout(&self, adv: &LayoutHistory) -> Option<LayoutHistory> {
 		let mut layout = self.layout.write().unwrap();
 		let prev_digest = layout.digest();
-		let prev_layout_check = layout.inner().check().is_ok();
+		let prev_layout_check = layout.is_check_ok();
 
 		if !prev_layout_check || adv.check().is_ok() {
 			if layout.update(|l| l.merge(adv)) {
 				layout.update_trackers(self.node_id);
-				if prev_layout_check && layout.inner().check().is_err() {
+				if prev_layout_check && !layout.is_check_ok() {
 					panic!("Merged two correct layouts and got an incorrect layout.");
 				}
 				assert!(layout.digest() != prev_digest);
@@ -634,7 +634,7 @@ impl System {
 			.filter(|p| p.is_up())
 			.count();
 
-		let not_configured = self.cluster_layout().inner().check().is_err();
+		let not_configured = !self.cluster_layout().is_check_ok();
 		let no_peers = n_connected < self.replication_factor.into();
 		let expected_n_nodes = self.cluster_layout().all_nodes().len();
 		let bad_peers = n_connected != expected_n_nodes;
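
Below is a minimal, self-contained sketch of the pattern this diff applies: the layout validity check is computed once when the helper is rebuilt and cached as a boolean, so call sites read is_check_ok() instead of re-running inner().check(). The Layout and Helper types here are hypothetical stand-ins for illustration, not Garage's actual code.

// Hypothetical stand-in for the real layout type; check() is assumed to be
// a comparatively expensive validation pass.
struct Layout {
    versions: Vec<u64>,
}

impl Layout {
    fn check(&self) -> Result<(), String> {
        if self.versions.is_empty() {
            Err("no layout version configured".to_string())
        } else {
            Ok(())
        }
    }
}

// Helper wrapping the layout; the check result is cached at construction.
struct Helper {
    inner: Layout,
    is_check_ok: bool,
}

impl Helper {
    fn new(inner: Layout) -> Self {
        let is_check_ok = inner.check().is_ok();
        Helper { inner, is_check_ok }
    }

    fn inner(&self) -> &Layout {
        &self.inner
    }

    // Cheap accessor used by call sites instead of self.inner().check().is_ok().
    fn is_check_ok(&self) -> bool {
        self.is_check_ok
    }
}

fn main() {
    let helper = Helper::new(Layout { versions: vec![1] });
    // Call sites test the cached flag rather than re-running the check.
    assert!(helper.is_check_ok());
    println!("layout has {} version(s)", helper.inner().versions.len());
}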