[next-0.10] cache layout check result

parent 01a0bd5410, commit 32f1786f9f

3 changed files with 11 additions and 3 deletions
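In short: LayoutHelper now computes check().is_ok() once when it is (re)built and stores the result in a new is_check_ok field; LayoutManager::merge_layout and System then read the cached flag through the new is_check_ok() accessor instead of re-running the full layout check.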
@@ -41,6 +41,7 @@ pub struct LayoutHelper {
     trackers_hash: Hash,
     staging_hash: Hash,
+    is_check_ok: bool,
 
     // ack lock: counts in-progress write operations for each
     // layout version ; we don't increase the ack update tracker
@@ -107,6 +108,8 @@ impl LayoutHelper {
             .entry(layout.current().version)
             .or_insert(AtomicUsize::new(0));
 
+        let is_check_ok = layout.check().is_ok();
+
         LayoutHelper {
             replication_factor,
             consistency_mode,
@@ -118,6 +121,7 @@ impl LayoutHelper {
             trackers_hash,
             staging_hash,
             ack_lock,
+            is_check_ok,
         }
     }
 
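For readers skimming the diff, here is a minimal, self-contained sketch of the pattern these hunks introduce. All type and field names below are simplified stand-ins, not the actual Garage definitions: the validity check runs once when the helper is built, and every mutation rebuilds the helper (as merge_layout below suggests via layout.update(...)), so the cached flag cannot go stale.

// Minimal sketch of the caching pattern, with stand-in types.

struct Layout {
    versions: Vec<u64>,
}

impl Layout {
    // Stand-in for LayoutHistory::check(): some non-trivial validation.
    fn check(&self) -> Result<(), String> {
        if self.versions.is_empty() {
            Err("no layout version".into())
        } else {
            Ok(())
        }
    }
}

struct Helper {
    layout: Layout,
    is_check_ok: bool, // cached result of layout.check().is_ok()
}

impl Helper {
    fn new(layout: Layout) -> Self {
        let is_check_ok = layout.check().is_ok(); // computed once
        Helper { layout, is_check_ok }
    }

    // Cheap accessor, mirroring LayoutHelper::is_check_ok().
    fn is_check_ok(&self) -> bool {
        self.is_check_ok
    }

    // Every mutation rebuilds the helper, so the cache stays fresh.
    fn update(self, f: impl FnOnce(Layout) -> Layout) -> Self {
        Helper::new(f(self.layout))
    }
}

fn main() {
    let h = Helper::new(Layout { versions: vec![1] });
    assert!(h.is_check_ok());
    let h = h.update(|mut l| {
        l.versions.push(2);
        l
    });
    assert!(h.is_check_ok());
}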
@@ -153,6 +157,10 @@ impl LayoutHelper {
         &self.inner().versions
     }
 
+    pub fn is_check_ok(&self) -> bool {
+        self.is_check_ok
+    }
+
     /// Return all nodes that have a role (gateway or storage)
     /// in one of the currently active layout versions
     pub fn all_nodes(&self) -> &[Uuid] {
@@ -160,12 +160,12 @@ impl LayoutManager
     fn merge_layout(&self, adv: &LayoutHistory) -> Option<LayoutHistory> {
         let mut layout = self.layout.write().unwrap();
         let prev_digest = layout.digest();
-        let prev_layout_check = layout.inner().check().is_ok();
+        let prev_layout_check = layout.is_check_ok();
 
         if !prev_layout_check || adv.check().is_ok() {
             if layout.update(|l| l.merge(adv)) {
                 layout.update_trackers(self.node_id);
-                if prev_layout_check && layout.inner().check().is_err() {
+                if prev_layout_check && !layout.is_check_ok() {
                     panic!("Merged two correct layouts and got an incorrect layout.");
                 }
                 assert!(layout.digest() != prev_digest);
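The merge guard is easier to see in isolation. A toy sketch of the decision logic (hypothetical free function; the two booleans stand in for the local and advertised check() results):

fn may_merge(local_ok: bool, advertised_ok: bool) -> bool {
    // Accept the advertised layout if our local copy is already broken
    // (nothing to lose) or if the advertised one passes validation.
    !local_ok || advertised_ok
}

fn main() {
    assert!(may_merge(false, false)); // local broken: accept anything
    assert!(may_merge(true, true)); // both valid: result must stay valid
    assert!(!may_merge(true, false)); // never corrupt a valid local copy
}

The subsequent panic! enforces the invariant that merging two valid layouts must yield a valid layout; with the cached field, both reads of the check result are cheap boolean loads instead of full layout validations.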
@@ -634,7 +634,7 @@ impl System
             .filter(|p| p.is_up())
             .count();
 
-        let not_configured = self.cluster_layout().inner().check().is_err();
+        let not_configured = !self.cluster_layout().is_check_ok();
         let no_peers = n_connected < self.replication_factor.into();
         let expected_n_nodes = self.cluster_layout().all_nodes().len();
         let bad_peers = n_connected != expected_n_nodes;
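A sketch of how the cached flag feeds this health computation, reduced to the four booleans visible above. Status, Health, and their variants are hypothetical names chosen for illustration, not Garage's actual types:

// Stand-in for the state System consults when reporting health.
struct Status {
    n_connected: usize,        // peers currently up
    layout_check_ok: bool,     // from cluster_layout().is_check_ok()
    replication_factor: usize,
    expected_n_nodes: usize,   // cluster_layout().all_nodes().len()
}

#[derive(Debug)]
enum Health {
    NotConfigured, // layout check fails: cluster not yet set up
    NoPeers,       // fewer peers up than the replication factor
    BadPeers,      // connected-peer count differs from the layout
    Healthy,
}

fn classify(s: &Status) -> Health {
    let not_configured = !s.layout_check_ok; // was inner().check().is_err()
    let no_peers = s.n_connected < s.replication_factor;
    let bad_peers = s.n_connected != s.expected_n_nodes;
    if not_configured {
        Health::NotConfigured
    } else if no_peers {
        Health::NoPeers
    } else if bad_peers {
        Health::BadPeers
    } else {
        Health::Healthy
    }
}

fn main() {
    let s = Status {
        n_connected: 3,
        layout_check_ok: true,
        replication_factor: 3,
        expected_n_nodes: 3,
    };
    println!("{:?}", classify(&s)); // Healthy
}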