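//! Background synchronization (anti-entropy) of table data between nodes.
//! Nodes compare hierarchical range checksums of the partitions they share
//! and exchange only the items that differ.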

use rand::Rng;
use std::collections::{BTreeMap, VecDeque};
use std::sync::{Arc, Mutex};
use std::time::{Duration, Instant};

use futures::future::join_all;
use futures::{pin_mut, select};
use futures_util::future::*;
use futures_util::stream::*;
use serde::{Deserialize, Serialize};
use serde_bytes::ByteBuf;
use tokio::sync::{mpsc, watch};

use garage_rpc::ring::Ring;
use garage_util::data::*;
use garage_util::error::Error;

use crate::*;

const MAX_DEPTH: usize = 16;
const SCAN_INTERVAL: Duration = Duration::from_secs(3600);
const CHECKSUM_CACHE_TIMEOUT: Duration = Duration::from_secs(1800);
const TABLE_SYNC_RPC_TIMEOUT: Duration = Duration::from_secs(30);
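
// State of the sync process for one table: the table itself, the todo list of
// partitions to synchronize, and one checksum cache per level of the range tree.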
pub struct TableSyncer<F: TableSchema, R: TableReplication> {
	table: Arc<Table<F, R>>,
	todo: Mutex<SyncTodo>,
	cache: Vec<Mutex<BTreeMap<SyncRange, RangeChecksumCache>>>,
}
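
// Messages exchanged between nodes during synchronization.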
#[derive(Serialize, Deserialize)]
pub(crate) enum SyncRPC {
	GetRootChecksumRange(Hash, Hash),
	RootChecksumRange(SyncRange),
	Checksums(Vec<RangeChecksum>),
	Difference(Vec<SyncRange>, Vec<Arc<ByteBuf>>),
}
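
// Queue of partitions remaining to be synchronized.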
struct SyncTodo {
	todo: Vec<TodoPartition>,
}

#[derive(Debug, Clone)]
struct TodoPartition {
	// A partition consists of all the hashes between `begin` (included) and `end` (excluded)
	begin: Hash,
	end: Hash,

	// Are we one of the nodes that store this partition?
	retain: bool,
}

// A SyncRange defines a query on the dataset stored by a node, in the following way:
// - all items whose key is >= `begin`
// - stopping at the first item whose key hash has at least `level` leading zero bytes (excluded),
//   unless the first item of the range itself has that many leading zero bytes
// - and stopping at `end` (excluded) if no such item is found
// The checksum itself does not store all of the items in the database, only the hashes of the "sub-ranges",
// i.e. of the ranges of level `level-1` that cover the same range
// (ranges of level 0 do not exist and their hash is simply the hash of the first item >= begin).
// See RangeChecksum for the struct that stores this information.
#[derive(Hash, PartialEq, Eq, Debug, Clone, Serialize, Deserialize)]
pub(crate) struct SyncRange {
	begin: Vec<u8>,
	end: Vec<u8>,
	level: usize,
}

impl std::cmp::PartialOrd for SyncRange {
	fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
		Some(self.cmp(other))
	}
}
impl std::cmp::Ord for SyncRange {
	fn cmp(&self, other: &Self) -> std::cmp::Ordering {
		self.begin
			.cmp(&other.begin)
			.then(self.level.cmp(&other.level))
			.then(self.end.cmp(&other.end))
	}
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) struct RangeChecksum {
	bounds: SyncRange,
	children: Vec<(SyncRange, Hash)>,
	found_limit: Option<Vec<u8>>,

	#[serde(skip, default = "std::time::Instant::now")]
	time: Instant,
}

#[derive(Debug, Clone)]
struct RangeChecksumCache {
	hash: Option<Hash>, // None if no children
	found_limit: Option<Vec<u8>>,
	time: Instant,
}

impl<F, R> TableSyncer<F, R>
where
	F: TableSchema + 'static,
	R: TableReplication + 'static,
{
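	// Create the syncer for a table and spawn its background workers:
	// a watcher task, a syncer task, and a delayed initial full scan.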
	pub(crate) async fn launch(table: Arc<Table<F, R>>) -> Arc<Self> {
		let todo = SyncTodo { todo: Vec::new() };
		let syncer = Arc::new(TableSyncer {
			table: table.clone(),
			todo: Mutex::new(todo),
			cache: (0..MAX_DEPTH)
				.map(|_| Mutex::new(BTreeMap::new()))
				.collect::<Vec<_>>(),
		});

		let (busy_tx, busy_rx) = mpsc::unbounded_channel();

		let s1 = syncer.clone();
		table
			.system
			.background
			.spawn_worker(
				format!("table sync watcher for {}", table.name),
				move |must_exit: watch::Receiver<bool>| s1.watcher_task(must_exit, busy_rx),
			)
			.await;

		let s2 = syncer.clone();
		table
			.system
			.background
			.spawn_worker(
				format!("table syncer for {}", table.name),
				move |must_exit: watch::Receiver<bool>| s2.syncer_task(must_exit, busy_tx),
			)
			.await;

		let s3 = syncer.clone();
		tokio::spawn(async move {
			tokio::time::delay_for(Duration::from_secs(20)).await;
			s3.add_full_scan().await;
		});

		syncer
	}
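
	// Watches for ring changes (enqueueing the affected partitions) and, when the
	// syncer has been idle for SCAN_INTERVAL, schedules a new full scan.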
	async fn watcher_task(
		self: Arc<Self>,
		mut must_exit: watch::Receiver<bool>,
		mut busy_rx: mpsc::UnboundedReceiver<bool>,
	) -> Result<(), Error> {
		let mut prev_ring: Arc<Ring> = self.table.system.ring.borrow().clone();
		let mut ring_recv: watch::Receiver<Arc<Ring>> = self.table.system.ring.clone();
		let mut nothing_to_do_since = Some(Instant::now());

		while !*must_exit.borrow() {
			let s_ring_recv = ring_recv.recv().fuse();
			let s_busy = busy_rx.recv().fuse();
			let s_must_exit = must_exit.recv().fuse();
			let s_timeout = tokio::time::delay_for(Duration::from_secs(1)).fuse();
			pin_mut!(s_ring_recv, s_busy, s_must_exit, s_timeout);

			select! {
				new_ring_r = s_ring_recv => {
					if let Some(new_ring) = new_ring_r {
						debug!("({}) Adding ring difference to syncer todo list", self.table.name);
						self.todo.lock().unwrap().add_ring_difference(&self.table, &prev_ring, &new_ring);
						prev_ring = new_ring;
					}
				}
				busy_opt = s_busy => {
					if let Some(busy) = busy_opt {
						if busy {
							nothing_to_do_since = None;
						} else {
							if nothing_to_do_since.is_none() {
								nothing_to_do_since = Some(Instant::now());
							}
						}
					}
				}
				must_exit_v = s_must_exit => {
					if must_exit_v.unwrap_or(false) {
						break;
					}
				}
				_ = s_timeout => {
					if nothing_to_do_since.map(|t| Instant::now() - t >= SCAN_INTERVAL).unwrap_or(false) {
						nothing_to_do_since = None;
						debug!("({}) Adding full scan to syncer todo list", self.table.name);
						self.add_full_scan().await;
					}
				}
			}
		}
		Ok(())
	}
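
	// Enqueue a full scan of all partitions of this table.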
	pub async fn add_full_scan(&self) {
		self.todo.lock().unwrap().add_full_scan(&self.table);
	}
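
	// Main sync loop: pops partitions from the todo list and syncs them one by one,
	// reporting its busy/idle state to the watcher task through `busy_tx`.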
	async fn syncer_task(
		self: Arc<Self>,
		mut must_exit: watch::Receiver<bool>,
		busy_tx: mpsc::UnboundedSender<bool>,
	) -> Result<(), Error> {
		while !*must_exit.borrow() {
			let task = self.todo.lock().unwrap().pop_task();
			if let Some(partition) = task {
				busy_tx.send(true)?;
				let res = self
					.clone()
					.sync_partition(&partition, &mut must_exit)
					.await;
				if let Err(e) = res {
					warn!(
						"({}) Error while syncing {:?}: {}",
						self.table.name, partition, e
					);
				}
			} else {
				busy_tx.send(false)?;
				tokio::time::delay_for(Duration::from_secs(1)).await;
			}
		}
		Ok(())
	}
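
	// Sync a single partition: if we are one of the nodes storing it, compare
	// checksums with the other replicas; otherwise offload our local copy to them.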
	async fn sync_partition(
		self: Arc<Self>,
		partition: &TodoPartition,
		must_exit: &mut watch::Receiver<bool>,
	) -> Result<(), Error> {
		if partition.retain {
			let my_id = self.table.system.id;
			let nodes = self
				.table
				.replication
				.write_nodes(&partition.begin, &self.table.system)
				.into_iter()
				.filter(|node| *node != my_id)
				.collect::<Vec<_>>();

			debug!(
				"({}) Preparing to sync {:?} with {:?}...",
				self.table.name, partition, nodes
			);
			let root_cks = self.root_checksum(&partition.begin, &partition.end, must_exit)?;

			let mut sync_futures = nodes
				.iter()
				.map(|node| {
					self.clone().do_sync_with(
						partition.clone(),
						root_cks.clone(),
						*node,
						must_exit.clone(),
					)
				})
				.collect::<FuturesUnordered<_>>();

			let mut n_errors = 0;
			while let Some(r) = sync_futures.next().await {
				if let Err(e) = r {
					n_errors += 1;
					warn!("({}) Sync error: {}", self.table.name, e);
				}
			}
			if n_errors > self.table.replication.max_write_errors() {
				return Err(Error::Message(format!(
					"Sync failed with too many errors (nodes that should have been synced: {:?}).",
					nodes
				)));
			}
		} else {
			self.offload_partition(&partition.begin, &partition.end, must_exit)
				.await?;
		}

		Ok(())
	}

	// Offload partition: this partition is not something we are storing,
	// so send it out to all other nodes that store it and delete items locally.
	// We don't bother checking if the remote nodes already have the items,
	// we just batch-send everything. Offloading isn't supposed to happen very often.
	// If any of the nodes that are supposed to store the items is unable to
	// save them, we interrupt the process.
	async fn offload_partition(
		self: &Arc<Self>,
		begin: &Hash,
		end: &Hash,
		must_exit: &mut watch::Receiver<bool>,
	) -> Result<(), Error> {
		let mut counter: usize = 0;

		while !*must_exit.borrow() {
			let mut items = Vec::new();

			for item in self.table.store.range(begin.to_vec()..end.to_vec()) {
				let (key, value) = item?;
				items.push((key.to_vec(), Arc::new(ByteBuf::from(value.as_ref()))));

				if items.len() >= 1024 {
					break;
				}
			}

			if items.len() > 0 {
				let nodes = self
					.table
					.replication
					.write_nodes(&begin, &self.table.system)
					.into_iter()
					.collect::<Vec<_>>();
				if nodes.contains(&self.table.system.id) {
					warn!("Interrupting offload as partitions seem to have changed");
					break;
				}

				counter += 1;
				debug!("Offloading items from {:?}..{:?} ({})", begin, end, counter);
				self.offload_items(&items, &nodes[..]).await?;
			} else {
				break;
			}
		}

		Ok(())
	}
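
	// Send a batch of items to the given nodes, then delete them from the local
	// store (unless they were modified in the meantime).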
	async fn offload_items(
		self: &Arc<Self>,
		items: &Vec<(Vec<u8>, Arc<ByteBuf>)>,
		nodes: &[UUID],
	) -> Result<(), Error> {
		let values = items.iter().map(|(_k, v)| v.clone()).collect::<Vec<_>>();
		let update_msg = Arc::new(TableRPC::<F>::Update(values));

		for res in join_all(nodes.iter().map(|to| {
			self.table
				.rpc_client
				.call_arc(*to, update_msg.clone(), TABLE_SYNC_RPC_TIMEOUT)
		}))
		.await
		{
			res?;
		}

		// All remote nodes have written those items, now we can delete them locally
		for (k, v) in items.iter() {
			self.table.delete_if_equal(&k[..], &v[..])?;
		}

		Ok(())
	}
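
	// Compute the root checksum of a partition: the smallest level at which a
	// single range covers the whole partition (i.e. no found_limit remains).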
	fn root_checksum(
		self: &Arc<Self>,
		begin: &Hash,
		end: &Hash,
		must_exit: &mut watch::Receiver<bool>,
	) -> Result<RangeChecksum, Error> {
		for i in 1..MAX_DEPTH {
			let rc = self.range_checksum(
				&SyncRange {
					begin: begin.to_vec(),
					end: end.to_vec(),
					level: i,
				},
				must_exit,
			)?;
			if rc.found_limit.is_none() {
				return Ok(rc);
			}
		}
		Err(Error::Message(format!(
			"Unable to compute root checksum (this should never happen)"
		)))
	}
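
	// Compute the checksum tree node for a range. At level 1 the children are the
	// hashes of individual items; at higher levels they are the cached hashes of
	// sub-ranges of level `level - 1`.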
	fn range_checksum(
		self: &Arc<Self>,
		range: &SyncRange,
		must_exit: &mut watch::Receiver<bool>,
	) -> Result<RangeChecksum, Error> {
		assert!(range.level != 0);
		trace!("Call range_checksum {:?}", range);

		if range.level == 1 {
			let mut children = vec![];
			for item in self
				.table
				.store
				.range(range.begin.clone()..range.end.clone())
			{
				let (key, value) = item?;
				let key_hash = blake2sum(&key[..]);
				if children.len() > 0
					&& key_hash.as_slice()[0..range.level]
						.iter()
						.all(|x| *x == 0u8)
				{
					trace!("range_checksum {:?} returning {} items", range, children.len());
					return Ok(RangeChecksum {
						bounds: range.clone(),
						children,
						found_limit: Some(key.to_vec()),
						time: Instant::now(),
					});
				}
				let item_range = SyncRange {
					begin: key.to_vec(),
					end: vec![],
					level: 0,
				};
				children.push((item_range, blake2sum(&value[..])));
			}
			trace!("range_checksum {:?} returning {} items", range, children.len());
			Ok(RangeChecksum {
				bounds: range.clone(),
				children,
				found_limit: None,
				time: Instant::now(),
			})
		} else {
			let mut children = vec![];
			let mut sub_range = SyncRange {
				begin: range.begin.clone(),
				end: range.end.clone(),
				level: range.level - 1,
			};
			let mut time = Instant::now();
			while !*must_exit.borrow() {
				let sub_ck = self.range_checksum_cached_hash(&sub_range, must_exit)?;

				if let Some(hash) = sub_ck.hash {
					children.push((sub_range.clone(), hash));
					if sub_ck.time < time {
						time = sub_ck.time;
					}
				}

				if sub_ck.found_limit.is_none() || sub_ck.hash.is_none() {
					trace!("range_checksum {:?} returning {} items", range, children.len());
					return Ok(RangeChecksum {
						bounds: range.clone(),
						children,
						found_limit: None,
						time,
					});
				}
				let found_limit = sub_ck.found_limit.unwrap();

				let actual_limit_hash = blake2sum(&found_limit[..]);
				if actual_limit_hash.as_slice()[0..range.level]
					.iter()
					.all(|x| *x == 0u8)
				{
					trace!("range_checksum {:?} returning {} items", range, children.len());
					return Ok(RangeChecksum {
						bounds: range.clone(),
						children,
						found_limit: Some(found_limit.clone()),
						time,
					});
				}

				sub_range.begin = found_limit;
			}
			trace!("range_checksum {:?} exiting due to must_exit", range);
			Err(Error::Message(format!("Exiting.")))
		}
	}
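
	// Return the cached hash of a range, recomputing it (and refreshing the cache
	// entry) if it is missing or older than CHECKSUM_CACHE_TIMEOUT.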
	fn range_checksum_cached_hash(
		self: &Arc<Self>,
		range: &SyncRange,
		must_exit: &mut watch::Receiver<bool>,
	) -> Result<RangeChecksumCache, Error> {
		{
			let mut cache = self.cache[range.level].lock().unwrap();
			if let Some(v) = cache.get(&range) {
				if Instant::now() - v.time < CHECKSUM_CACHE_TIMEOUT {
					return Ok(v.clone());
				}
			}
			cache.remove(&range);
		}

		let v = self.range_checksum(&range, must_exit)?;
		trace!(
			"({}) New checksum calculated for {}-{}/{}, {} children",
			self.table.name,
			hex::encode(&range.begin)
				.chars()
				.take(16)
				.collect::<String>(),
			hex::encode(&range.end).chars().take(16).collect::<String>(),
			range.level,
			v.children.len()
		);

		let hash = if v.children.len() > 0 {
			Some(blake2sum(&rmp_to_vec_all_named(&v)?[..]))
		} else {
			None
		};
		let cache_entry = RangeChecksumCache {
			hash,
			found_limit: v.found_limit,
			time: v.time,
		};

		let mut cache = self.cache[range.level].lock().unwrap();
		cache.insert(range.clone(), cache_entry.clone());
		Ok(cache_entry)
	}
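
	// Sync a partition with a single remote node: walk the checksum tree
	// breadth-first, querying the peer for the ranges that differ, and exchange
	// the items found to be missing or different on either side.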
	async fn do_sync_with(
		self: Arc<Self>,
		partition: TodoPartition,
		root_ck: RangeChecksum,
		who: UUID,
		mut must_exit: watch::Receiver<bool>,
	) -> Result<(), Error> {
		let mut todo = VecDeque::new();

		// If their root checksum has a higher level than ours, use it as the reference
		let root_cks_resp = self
			.table
			.rpc_client
			.call(
				who,
				TableRPC::<F>::SyncRPC(SyncRPC::GetRootChecksumRange(
					partition.begin.clone(),
					partition.end.clone(),
				)),
				TABLE_SYNC_RPC_TIMEOUT,
			)
			.await?;
		if let TableRPC::<F>::SyncRPC(SyncRPC::RootChecksumRange(range)) = root_cks_resp {
			if range.level > root_ck.bounds.level {
				let their_root_range_ck = self.range_checksum(&range, &mut must_exit)?;
				todo.push_back(their_root_range_ck);
			} else {
				todo.push_back(root_ck);
			}
		} else {
			return Err(Error::Message(format!(
				"Invalid response to GetRootChecksumRange RPC: {}",
				debug_serialize(root_cks_resp)
			)));
		}

		while !todo.is_empty() && !*must_exit.borrow() {
			let total_children = todo.iter().map(|x| x.children.len()).fold(0, |x, y| x + y);
			trace!(
				"({}) Sync with {:?}: {} ({}) remaining",
				self.table.name,
				who,
				todo.len(),
				total_children
			);

			let step_size = std::cmp::min(16, todo.len());
			let step = todo.drain(..step_size).collect::<Vec<_>>();

			let rpc_resp = self
				.table
				.rpc_client
				.call(
					who,
					TableRPC::<F>::SyncRPC(SyncRPC::Checksums(step)),
					TABLE_SYNC_RPC_TIMEOUT,
				)
				.await?;
			if let TableRPC::<F>::SyncRPC(SyncRPC::Difference(mut diff_ranges, diff_items)) =
				rpc_resp
			{
				if diff_ranges.len() > 0 || diff_items.len() > 0 {
					info!(
						"({}) Sync with {:?}: difference {} ranges, {} items",
						self.table.name,
						who,
						diff_ranges.len(),
						diff_items.len()
					);
				}
				let mut items_to_send = vec![];
				for differing in diff_ranges.drain(..) {
					if differing.level == 0 {
						items_to_send.push(differing.begin);
					} else {
						let checksum = self.range_checksum(&differing, &mut must_exit)?;
						todo.push_back(checksum);
					}
				}
				if diff_items.len() > 0 {
					self.table.handle_update(&diff_items[..]).await?;
				}
				if items_to_send.len() > 0 {
					self.send_items(who, items_to_send).await?;
				}
			} else {
				return Err(Error::Message(format!(
					"Unexpected response to sync RPC checksums: {}",
					debug_serialize(&rpc_resp)
				)));
			}
		}
		Ok(())
	}
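
	// Read the given items from the local store and push them to the remote node
	// with an Update RPC.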
	async fn send_items(&self, who: UUID, item_list: Vec<Vec<u8>>) -> Result<(), Error> {
		info!(
			"({}) Sending {} items to {:?}",
			self.table.name,
			item_list.len(),
			who
		);

		let mut values = vec![];
		for item in item_list.iter() {
			if let Some(v) = self.table.store.get(&item[..])? {
				values.push(Arc::new(ByteBuf::from(v.as_ref())));
			}
		}
		let rpc_resp = self
			.table
			.rpc_client
			.call(who, TableRPC::<F>::Update(values), TABLE_SYNC_RPC_TIMEOUT)
			.await?;
		if let TableRPC::<F>::Ok = rpc_resp {
			Ok(())
		} else {
			Err(Error::Message(format!(
				"Unexpected response to RPC Update: {}",
				debug_serialize(&rpc_resp)
			)))
		}
	}
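
	// Handle an incoming sync RPC from another node.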
	pub(crate) async fn handle_rpc(
		self: &Arc<Self>,
		message: &SyncRPC,
		mut must_exit: watch::Receiver<bool>,
	) -> Result<SyncRPC, Error> {
		match message {
			SyncRPC::GetRootChecksumRange(begin, end) => {
				let root_cks = self.root_checksum(&begin, &end, &mut must_exit)?;
				Ok(SyncRPC::RootChecksumRange(root_cks.bounds))
			}
			SyncRPC::Checksums(checksums) => {
				self.handle_checksums_rpc(&checksums[..], &mut must_exit)
					.await
			}
			_ => Err(Error::Message(format!("Unexpected sync RPC"))),
		}
	}
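
	// Answer a Checksums RPC: compare the checksums sent by the remote node with
	// our own, and reply with the ranges that differ plus the level-0 items we
	// hold that the remote node appears to be missing.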
	async fn handle_checksums_rpc(
		self: &Arc<Self>,
		checksums: &[RangeChecksum],
		must_exit: &mut watch::Receiver<bool>,
	) -> Result<SyncRPC, Error> {
		let mut ret_ranges = vec![];
		let mut ret_items = vec![];

		for their_ckr in checksums.iter() {
			let our_ckr = self.range_checksum(&their_ckr.bounds, must_exit)?;
			for (their_range, their_hash) in their_ckr.children.iter() {
				let differs = match our_ckr
					.children
					.binary_search_by(|(our_range, _)| our_range.cmp(&their_range))
				{
					Err(_) => {
						if their_range.level >= 1 {
							let cached_hash =
								self.range_checksum_cached_hash(&their_range, must_exit)?;
							cached_hash.hash.map(|h| h != *their_hash).unwrap_or(true)
						} else {
							true
						}
					}
					Ok(i) => our_ckr.children[i].1 != *their_hash,
				};
				if differs {
					ret_ranges.push(their_range.clone());
					if their_range.level == 0 {
						if let Some(item_bytes) =
							self.table.store.get(their_range.begin.as_slice())?
						{
							ret_items.push(Arc::new(ByteBuf::from(item_bytes.to_vec())));
						}
					}
				}
			}
			for (our_range, _hash) in our_ckr.children.iter() {
				if let Some(their_found_limit) = &their_ckr.found_limit {
					if our_range.begin.as_slice() > their_found_limit.as_slice() {
						break;
					}
				}

				let not_present = their_ckr
					.children
					.binary_search_by(|(their_range, _)| their_range.cmp(&our_range))
					.is_err();
				if not_present {
					if our_range.level > 0 {
						ret_ranges.push(our_range.clone());
					}
					if our_range.level == 0 {
						if let Some(item_bytes) =
							self.table.store.get(our_range.begin.as_slice())?
						{
							ret_items.push(Arc::new(ByteBuf::from(item_bytes.to_vec())));
						}
					}
				}
			}
		}
		let n_checksums = checksums
			.iter()
			.map(|x| x.children.len())
			.fold(0, |x, y| x + y);
		if ret_ranges.len() > 0 || ret_items.len() > 0 {
			trace!(
				"({}) Checksum comparison RPC: {} different + {} items for {} received",
				self.table.name,
				ret_ranges.len(),
				ret_items.len(),
				n_checksums
			);
		}
		Ok(SyncRPC::Difference(ret_ranges, ret_items))
	}
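
	// Invalidate the cached checksums of the ranges that contain `item_key`,
	// so that they get recomputed the next time they are needed.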
	pub(crate) fn invalidate(self: &Arc<Self>, item_key: &[u8]) {
		for i in 1..MAX_DEPTH {
			let needle = SyncRange {
				begin: item_key.to_vec(),
				end: vec![],
				level: i,
			};
			let mut cache = self.cache[i].lock().unwrap();
			if let Some(cache_entry) = cache.range(..=needle).rev().next() {
				if cache_entry.0.begin[..] <= *item_key && cache_entry.0.end[..] > *item_key {
					let index = cache_entry.0.clone();
					drop(cache_entry);
					cache.remove(&index);
				}
			}
		}
	}
}

impl SyncTodo {
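	// Rebuild the todo list with one entry per partition of the ring, skipping
	// partitions that we neither store nor hold any local data for.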
	fn add_full_scan<F: TableSchema, R: TableReplication>(&mut self, table: &Table<F, R>) {
		let my_id = table.system.id;

		self.todo.clear();

		let ring = table.system.ring.borrow().clone();
		let split_points = table.replication.split_points(&ring);

		for i in 0..split_points.len() - 1 {
			let begin = split_points[i];
			let end = split_points[i + 1];
			let nodes = table.replication.replication_nodes(&begin, &ring);

			let retain = nodes.contains(&my_id);
			if !retain {
				// Check if we have some data to send, otherwise skip
				if table.store.range(begin..end).next().is_none() {
					continue;
				}
			}

			self.todo.push(TodoPartition { begin, end, retain });
		}
	}
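
	// After a ring change, enqueue the partitions whose assignment to this node
	// changed, as well as those that were already pending.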
	fn add_ring_difference<F: TableSchema, R: TableReplication>(
		&mut self,
		table: &Table<F, R>,
		old_ring: &Ring,
		new_ring: &Ring,
	) {
		let my_id = table.system.id;

		// If we are the node that is entering or leaving the system,
		// initiate a full sync instead of an incremental sync
		if old_ring.config.members.contains_key(&my_id)
			!= new_ring.config.members.contains_key(&my_id)
		{
			self.add_full_scan(table);
			return;
		}

		let mut all_points = None
			.into_iter()
			.chain(table.replication.split_points(old_ring).drain(..))
			.chain(table.replication.split_points(new_ring).drain(..))
			.chain(self.todo.iter().map(|x| x.begin))
			.chain(self.todo.iter().map(|x| x.end))
			.collect::<Vec<_>>();
		all_points.sort();
		all_points.dedup();

		let mut old_todo = std::mem::replace(&mut self.todo, vec![]);
		old_todo.sort_by(|x, y| x.begin.cmp(&y.begin));
		let mut new_todo = vec![];

		for i in 0..all_points.len() - 1 {
			let begin = all_points[i];
			let end = all_points[i + 1];
			let was_ours = table
				.replication
				.replication_nodes(&begin, &old_ring)
				.contains(&my_id);
			let is_ours = table
				.replication
				.replication_nodes(&begin, &new_ring)
				.contains(&my_id);

			let was_todo = match old_todo.binary_search_by(|x| x.begin.cmp(&begin)) {
				Ok(_) => true,
				Err(j) => {
					(j > 0 && old_todo[j - 1].begin < end && begin < old_todo[j - 1].end)
						|| (j < old_todo.len()
							&& old_todo[j].begin < end && begin < old_todo[j].end)
				}
			};
			if was_todo || (is_ours && !was_ours) || (was_ours && !is_ours) {
				new_todo.push(TodoPartition {
					begin,
					end,
					retain: is_ours,
				});
			}
		}

		self.todo = new_todo;
	}
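
	// Pop a pending partition at random.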
	fn pop_task(&mut self) -> Option<TodoPartition> {
		if self.todo.is_empty() {
			return None;
		}

		let i = rand::thread_rng().gen_range::<usize, _, _>(0, self.todo.len());
		if i == self.todo.len() - 1 {
			self.todo.pop()
		} else {
			let replacement = self.todo.pop().unwrap();
			let ret = std::mem::replace(&mut self.todo[i], replacement);
			Some(ret)
		}
	}
}