diff --git a/src/model/block.rs b/src/model/block.rs
index 89685630..5f428fe1 100644
--- a/src/model/block.rs
+++ b/src/model/block.rs
@@ -250,7 +250,6 @@ impl BlockManager {
 	}
 
 	/// Decrement the number of time a block is used
-	// when counter reach 0, it seems not put to resync which I assume put it to gc?
 	pub fn block_decref(&self, hash: &Hash) -> Result<(), Error> {
 		let new_rc = self.rc.update_and_fetch(&hash, |old| {
 			let old_v = old.map(u64_from_be_bytes).unwrap_or(0);
diff --git a/src/model/bucket_table.rs b/src/model/bucket_table.rs
index 8198deb7..6a4b021d 100644
--- a/src/model/bucket_table.rs
+++ b/src/model/bucket_table.rs
@@ -57,7 +57,7 @@ impl CRDT for BucketParams {
 }
 
 impl BucketParams {
-	/// Create a new default `BucketParams`
+	/// Initializes a new instance of the BucketParams struct
 	pub fn new() -> Self {
 		BucketParams {
 			authorized_keys: crdt::LWWMap::new(),
@@ -75,7 +75,7 @@ impl Bucket {
 		}
 	}
 
-	/// Query if bucket is deleted
+	/// Returns true if this represents a deleted bucket
 	pub fn is_deleted(&self) -> bool {
 		*self.state.get() == BucketState::Deleted
 	}
@@ -113,10 +113,6 @@ impl TableSchema for BucketTable {
 	type E = Bucket;
 	type Filter = DeletedFilter;
 
-	fn updated(&self, _old: Option<Self::E>, _new: Option<Self::E>) {
-		// nothing to do when updated
-	}
-
 	fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool {
 		filter.apply(entry.is_deleted())
 	}
diff --git a/src/model/key_table.rs b/src/model/key_table.rs
index e1dcd7f4..578f8683 100644
--- a/src/model/key_table.rs
+++ b/src/model/key_table.rs
@@ -125,10 +125,6 @@ impl TableSchema for KeyTable {
 	type E = Key;
 	type Filter = KeyFilter;
 
-	fn updated(&self, _old: Option<Self::E>, _new: Option<Self::E>) {
-		// nothing to do when updated
-	}
-
 	fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool {
 		match filter {
 			KeyFilter::Deleted(df) => df.apply(entry.deleted.get()),
diff --git a/src/rpc/membership.rs b/src/rpc/membership.rs
index 4fce1a7b..5f7bbc96 100644
--- a/src/rpc/membership.rs
+++ b/src/rpc/membership.rs
@@ -417,8 +417,8 @@ impl System {
 			}
 		} else if let Some(id) = id_option {
 			if let Some(st) = status.nodes.get_mut(id) {
-				// TODO this might double-increment the value as the counter is already
-				// incremented for any kind of failure in rpc_client
+				// we need to increment the failure counter here, because the call was
+				// made using by_addr, so the counter was not auto-incremented
 				st.num_failures.fetch_add(1, Ordering::SeqCst);
 				if !st.is_up() {
 					warn!("Node {:?} seems to be down.", id);
diff --git a/src/rpc/ring.rs b/src/rpc/ring.rs
index 04f8b590..bffd7f1f 100644
--- a/src/rpc/ring.rs
+++ b/src/rpc/ring.rs
@@ -28,7 +28,7 @@ const PARTITION_MASK_U16: u16 = ((1 << PARTITION_BITS) - 1) << (16 - PARTITION_BITS);
 /// The maximum number of time an object might get replicated
 pub const MAX_REPLICATION: usize = 3;
 
-/// The versionned configurations of all nodes known in the network
+/// The user-defined configuration of the cluster's nodes
 #[derive(Clone, Debug, Serialize, Deserialize)]
 pub struct NetworkConfig {
 	/// Map of each node's id to it's configuration
diff --git a/src/table/schema.rs b/src/table/schema.rs
index c17ccc15..13517271 100644
--- a/src/table/schema.rs
+++ b/src/table/schema.rs
@@ -76,7 +76,7 @@ pub trait TableSchema: Send + Sync {
 	// as the update itself is an unchangeable fact that will never go back
 	// due to CRDT logic. Typically errors in propagation of info should be logged
 	// to stderr.
-	fn updated(&self, old: Option<Self::E>, new: Option<Self::E>);
+	fn updated(&self, _old: Option<Self::E>, _new: Option<Self::E>) {}
 
 	fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool;
 }
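
Since `updated` now carries a default empty body, table schemas with no update hook can simply omit the method, which is what the `BucketTable` and `KeyTable` hunks above do. A minimal, self-contained sketch of this default-method pattern; the names `MiniSchema` and `MiniTable` are invented for illustration and are not Garage's real `TableSchema` API, which has additional associated types:

// Sketch of the default-method pattern adopted in the schema.rs hunk.
trait MiniSchema {
	type E;

	// Provided method with an empty default body, mirroring the new
	// `updated` signature: implementors override it only when needed.
	fn updated(&self, _old: Option<Self::E>, _new: Option<Self::E>) {}

	fn matches_filter(entry: &Self::E) -> bool;
}

struct MiniTable;

impl MiniSchema for MiniTable {
	type E = u32;

	// `updated` is omitted here: the default no-op applies.
	fn matches_filter(entry: &Self::E) -> bool {
		*entry != 0
	}
}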