change some more comments and revert changes on TableSchema
All checks were successful
continuous-integration/drone/push Build is passing
parent 74373aebcf
commit 2812a027ea
6 changed files with 6 additions and 15 deletions
@@ -250,7 +250,6 @@ impl BlockManager {
     }

     /// Decrement the number of time a block is used
-    // when counter reach 0, it seems not put to resync which I assume put it to gc?
     pub fn block_decref(&self, hash: &Hash) -> Result<(), Error> {
         let new_rc = self.rc.update_and_fetch(&hash, |old| {
             let old_v = old.map(u64_from_be_bytes).unwrap_or(0);

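The hunk above shows only the first lines of the closure handed to self.rc.update_and_fetch (a sled-style read-modify-write over the reference-count tree). Below is a minimal sketch of what such a decrement closure can do, assuming the count is stored as a big-endian u64 and that returning None drops the key; the helper name and the exact zero handling are illustrative, not taken from this commit.

// Hypothetical helper mirroring the shape of the closure above: decode the
// big-endian u64 refcount, decrement it, and return None once it would reach
// zero so the backing tree deletes the entry.
fn decremented(old: Option<&[u8]>) -> Option<Vec<u8>> {
    // Treat a missing or malformed value as a count of zero.
    let old_v = match old {
        Some(b) if b.len() == 8 => {
            let mut buf = [0u8; 8];
            buf.copy_from_slice(b);
            u64::from_be_bytes(buf)
        }
        _ => 0,
    };
    if old_v > 1 {
        // Still referenced: write back the decremented big-endian counter.
        Some((old_v - 1).to_be_bytes().to_vec())
    } else {
        // Last reference gone: remove the entry entirely.
        None
    }
}

fn main() {
    let two = 2u64.to_be_bytes();
    let one = 1u64.to_be_bytes();
    assert_eq!(decremented(Some(&two[..])), Some(one.to_vec()));
    assert_eq!(decremented(Some(&one[..])), None);
    assert_eq!(decremented(None), None);
}
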
@@ -57,7 +57,7 @@ impl CRDT for BucketParams {
 }

 impl BucketParams {
-    /// Create a new default `BucketParams`
+    /// Initializes a new instance of the Bucket struct
     pub fn new() -> Self {
         BucketParams {
             authorized_keys: crdt::LWWMap::new(),

@@ -75,7 +75,7 @@ impl Bucket {
         }
     }

-    /// Query if bucket is deleted
+    /// Returns true if this represents a deleted bucket
     pub fn is_deleted(&self) -> bool {
         *self.state.get() == BucketState::Deleted
     }

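In the hunk above, is_deleted reads a CRDT register (self.state.get()) and compares it to BucketState::Deleted. The sketch below only illustrates the last-writer-wins semantics such a state field relies on; it is not Garage's crdt::LWW type.

// Illustrative last-writer-wins register: the write with the newest timestamp
// wins when two replicas merge. Garage's real crdt module is more involved.
#[derive(Clone)]
struct Lww<T> {
    ts: u64,
    value: T,
}

impl<T: Clone> Lww<T> {
    fn new(ts: u64, value: T) -> Self {
        Lww { ts, value }
    }

    fn get(&self) -> &T {
        &self.value
    }

    // Merge keeps whichever side was written last.
    fn merge(&mut self, other: &Lww<T>) {
        if other.ts > self.ts {
            *self = other.clone();
        }
    }
}

fn main() {
    let mut state = Lww::new(1, "Present");
    state.merge(&Lww::new(2, "Deleted"));
    assert_eq!(*state.get(), "Deleted");
}
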
@@ -113,10 +113,6 @@ impl TableSchema for BucketTable {
     type E = Bucket;
     type Filter = DeletedFilter;

-    fn updated(&self, _old: Option<Self::E>, _new: Option<Self::E>) {
-        // nothing to do when updated
-    }
-
     fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool {
         filter.apply(entry.is_deleted())
     }

@@ -125,10 +125,6 @@ impl TableSchema for KeyTable {
     type E = Key;
     type Filter = KeyFilter;

-    fn updated(&self, _old: Option<Self::E>, _new: Option<Self::E>) {
-        // nothing to do when updated
-    }
-
     fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool {
         match filter {
             KeyFilter::Deleted(df) => df.apply(entry.deleted.get()),

@@ -417,8 +417,8 @@ impl System {
             }
         } else if let Some(id) = id_option {
             if let Some(st) = status.nodes.get_mut(id) {
-                // TODO this might double-increment the value as the counter is already
-                // incremented for any kind of failure in rpc_client
+                // we need to increment failure counter as call was done using by_addr so the
+                // counter was not auto-incremented
                 st.num_failures.fetch_add(1, Ordering::SeqCst);
                 if !st.is_up() {
                     warn!("Node {:?} seems to be down.", id);

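The rewritten comment explains why the failure counter is bumped manually here: the call was made by address, so the usual per-node accounting in rpc_client did not run. Below is a small self-contained sketch of the fetch_add pattern shown above, with an assumed failure threshold for is_up; the threshold is not taken from this diff.

use std::sync::atomic::{AtomicUsize, Ordering};

// Illustrative per-node status with an atomic failure counter, in the spirit of
// the num_failures / is_up() calls in the hunk above. The threshold of 5 is an
// assumption for the example, not a value from the Garage source.
struct NodeStatus {
    num_failures: AtomicUsize,
}

impl NodeStatus {
    fn is_up(&self) -> bool {
        self.num_failures.load(Ordering::SeqCst) < 5
    }
}

fn main() {
    let st = NodeStatus {
        num_failures: AtomicUsize::new(0),
    };
    for _ in 0..5 {
        // Same pattern as in the diff: record one failed RPC.
        st.num_failures.fetch_add(1, Ordering::SeqCst);
    }
    assert!(!st.is_up());
}
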
@@ -28,7 +28,7 @@ const PARTITION_MASK_U16: u16 = ((1 << PARTITION_BITS) - 1) << (16 - PARTITION_BITS);
 /// The maximum number of time an object might get replicated
 pub const MAX_REPLICATION: usize = 3;

-/// The versionned configurations of all nodes known in the network
+/// The user-defined configuration of the cluster's nodes
 #[derive(Clone, Debug, Serialize, Deserialize)]
 pub struct NetworkConfig {
     /// Map of each node's id to it's configuration

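The hunk header quotes the PARTITION_MASK_U16 definition from the ring module. Below is a quick check of what that expression computes, assuming PARTITION_BITS is 8; the actual constant is defined elsewhere in the file and is not shown in this diff.

// Assumed value for the example; the real PARTITION_BITS lives outside this hunk.
const PARTITION_BITS: usize = 8;
// Expression copied from the hunk header above.
const PARTITION_MASK_U16: u16 = ((1 << PARTITION_BITS) - 1) << (16 - PARTITION_BITS);

fn main() {
    // With 8 partition bits, the mask keeps the top 8 bits of a 16-bit hash prefix.
    assert_eq!(PARTITION_MASK_U16, 0xFF00);
    // Mapping a 16-bit prefix to its partition index.
    let prefix: u16 = 0xABCD;
    let partition = (prefix & PARTITION_MASK_U16) >> (16 - PARTITION_BITS);
    assert_eq!(partition, 0xAB);
}
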
@@ -76,7 +76,7 @@ pub trait TableSchema: Send + Sync {
     // as the update itself is an unchangeable fact that will never go back
     // due to CRDT logic. Typically errors in propagation of info should be logged
     // to stderr.
-    fn updated(&self, old: Option<Self::E>, new: Option<Self::E>);
+    fn updated(&self, _old: Option<Self::E>, _new: Option<Self::E>) {}

     fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool;
 }

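This trait-side change is what lets the BucketTable and KeyTable hunks above drop their empty updated implementations: the method now carries a default no-op body instead of being required. A minimal, self-contained illustration of that pattern follows; the names here are placeholders, not Garage's types.

// A trait method with a default empty body: implementors that have nothing to
// do on update simply omit it, as BucketTable and KeyTable do after this commit.
trait Schema {
    type E;

    // Default no-op; overriding is optional.
    fn updated(&self, _old: Option<Self::E>, _new: Option<Self::E>) {}

    // Still required: every schema must say how filtering works.
    fn matches_filter(entry: &Self::E, filter: &bool) -> bool;
}

struct Demo;

impl Schema for Demo {
    type E = u32;

    // No `updated` override here; the default applies.
    fn matches_filter(entry: &u32, filter: &bool) -> bool {
        (*entry > 0) == *filter
    }
}

fn main() {
    let d = Demo;
    d.updated(None, Some(1)); // uses the default empty implementation
    assert!(Demo::matches_filter(&3, &true));
}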