//! Module that implements RPCs specific to K2V.
//! This is necessary for insertions into the K2V store,
//! as they have to be transmitted to one of the nodes responsible
//! for storing the entry to be processed (the API entry
//! node does not process the entry directly, as this would
//! mean the vector clock gets much larger than needed).
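//!
//! # Example (sketch)
//!
//! A minimal sketch (not compiled) of how an API node could forward an
//! insertion through this handler; it assumes a `K2VRpcHandler` built with
//! [`K2VRpcHandler::new`] and uses the `DvvsValue::Value(Vec<u8>)` variant
//! from the item table module:
//!
//! ```ignore
//! async fn forward_insert(handler: &K2VRpcHandler, bucket_id: Uuid) -> Result<(), Error> {
//!     // The handler hashes the partition, picks the nodes responsible for it,
//!     // and sends them an InsertItem RPC (quorum 1, interrupted after quorum).
//!     handler
//!         .insert(
//!             bucket_id,
//!             "mailbox:INBOX".to_string(), // partition key
//!             "msg-0001".to_string(),      // sort key
//!             None,                        // no causal context
//!             DvvsValue::Value(b"hello".to_vec()),
//!         )
//!         .await
//! }
//! ```
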
use std::collections::{BTreeMap, HashMap};
use std::convert::TryInto;
use std::sync::{Arc, Mutex, MutexGuard};
use std::time::{Duration, Instant};

use async_trait::async_trait;
use futures::stream::FuturesUnordered;
use futures::StreamExt;
use serde::{Deserialize, Serialize};
use tokio::select;

use garage_db as db;

use garage_util::crdt::*;
use garage_util::data::*;
use garage_util::error::*;
use garage_util::time::now_msec;

use garage_rpc::system::System;
use garage_rpc::*;

use garage_table::replication::{TableReplication, TableShardedReplication};
use garage_table::{PartitionKey, Table};

use crate::helper::error::Error as HelperError;
use crate::k2v::causality::*;
use crate::k2v::item_table::*;
use crate::k2v::seen::*;
use crate::k2v::sub::*;

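/// Once a quorum of responses has been received for a PollRange RPC, wait at
/// most this extra delay for the remaining nodes to answer before returning.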
const POLL_RANGE_EXTRA_DELAY: Duration = Duration::from_millis(200);

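/// Key under which this node's local timestamp is persisted in the
/// `k2v_local_timestamp` tree.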
const TIMESTAMP_KEY: &[u8] = b"timestamp";

/// RPC messages for K2V
#[derive(Debug, Serialize, Deserialize)]
enum K2VRpc {
	Ok,
	InsertItem(InsertedItem),
	InsertManyItems(Vec<InsertedItem>),
	PollItem {
		key: PollKey,
		causal_context: CausalContext,
		timeout_msec: u64,
	},
	PollRange {
		range: PollRange,
		seen_str: Option<String>,
		timeout_msec: u64,
	},
	PollItemResponse(Option<K2VItem>),
	PollRangeResponse(Uuid, Vec<K2VItem>),
}

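/// A single item to be inserted into the K2V item table, as forwarded from the
/// API node to a storage node responsible for its partition.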
#[derive(Debug, Serialize, Deserialize)]
struct InsertedItem {
	partition: K2VItemPartition,
	sort_key: String,
	causal_context: Option<CausalContext>,
	value: DvvsValue,
}

impl Rpc for K2VRpc {
	type Response = Result<K2VRpc, Error>;
}

/// The K2V RPC handler, which forwards K2V insertions to the nodes responsible
/// for storing them and answers long-polling (PollItem / PollRange) requests
pub struct K2VRpcHandler {
	system: Arc<System>,
	item_table: Arc<Table<K2VItemTable, TableShardedReplication>>,

	// Using a mutex on the local_timestamp_tree is not strictly necessary,
	// but it helps to avoid attempting several local inserts at the same time,
	// which would create transaction conflicts and force many useless retries.
	local_timestamp_tree: Mutex<db::Tree>,

	endpoint: Arc<Endpoint<K2VRpc, Self>>,
	subscriptions: Arc<SubscriptionManager>,
}

impl K2VRpcHandler {
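	/// Build the K2V RPC handler: open the `k2v_local_timestamp` tree in the
	/// database and register the handler on the `garage_model/k2v/Rpc` endpoint.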
	pub fn new(
		system: Arc<System>,
		db: &db::Db,
		item_table: Arc<Table<K2VItemTable, TableShardedReplication>>,
		subscriptions: Arc<SubscriptionManager>,
	) -> Arc<Self> {
		let local_timestamp_tree = db
			.open_tree("k2v_local_timestamp")
			.expect("Unable to open DB tree for k2v local timestamp");
		let endpoint = system.netapp.endpoint("garage_model/k2v/Rpc".to_string());

		let rpc_handler = Arc::new(Self {
			system,
			item_table,
			local_timestamp_tree: Mutex::new(local_timestamp_tree),
			endpoint,
			subscriptions,
		});
		rpc_handler.endpoint.set_handler(rpc_handler.clone());

		rpc_handler
	}

	// ---- public interface ----

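	/// Insert a single item: hash its partition, send an InsertItem RPC to the
	/// nodes responsible for storing it, and return as soon as one of them has
	/// acknowledged the write (quorum of 1).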
	pub async fn insert(
		&self,
		bucket_id: Uuid,
		partition_key: String,
		sort_key: String,
		causal_context: Option<CausalContext>,
		value: DvvsValue,
	) -> Result<(), Error> {
		let partition = K2VItemPartition {
			bucket_id,
			partition_key,
		};
		let mut who = self
			.item_table
			.data
			.replication
			.write_nodes(&partition.hash());
		who.sort();

		self.system
			.rpc_helper()
			.try_call_many(
				&self.endpoint,
				&who[..],
				K2VRpc::InsertItem(InsertedItem {
					partition,
					sort_key,
					causal_context,
					value,
				}),
				RequestStrategy::with_priority(PRIO_NORMAL)
					.with_quorum(1)
					.interrupt_after_quorum(true),
			)
			.await?;

		Ok(())
	}

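	/// Insert a batch of items: group them by the set of nodes responsible for
	/// their partition, then send one InsertManyItems RPC per group of nodes.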
	pub async fn insert_batch(
		&self,
		bucket_id: Uuid,
		items: Vec<(String, String, Option<CausalContext>, DvvsValue)>,
	) -> Result<(), Error> {
		let n_items = items.len();

		let mut call_list: HashMap<_, Vec<_>> = HashMap::new();

		for (partition_key, sort_key, causal_context, value) in items {
			let partition = K2VItemPartition {
				bucket_id,
				partition_key,
			};
			let mut who = self
				.item_table
				.data
				.replication
				.write_nodes(&partition.hash());
			who.sort();

			call_list.entry(who).or_default().push(InsertedItem {
				partition,
				sort_key,
				causal_context,
				value,
			});
		}

		debug!(
			"K2V insert_batch: {} requests to insert {} items",
			call_list.len(),
			n_items
		);
		let call_futures = call_list.into_iter().map(|(nodes, items)| async move {
			let resp = self
				.system
				.rpc_helper()
				.try_call_many(
					&self.endpoint,
					&nodes[..],
					K2VRpc::InsertManyItems(items),
					RequestStrategy::with_priority(PRIO_NORMAL)
						.with_quorum(1)
						.interrupt_after_quorum(true),
				)
				.await?;
			Ok::<_, Error>((nodes, resp))
		});

		let mut resps = call_futures.collect::<FuturesUnordered<_>>();
		while let Some(resp) = resps.next().await {
			resp?;
		}

		Ok(())
	}

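	/// Long-poll a single item: ask the nodes storing it to reply as soon as
	/// they see a version newer than the given causal context, wait for a read
	/// quorum of answers, and return the merged item, or `None` if the timeout
	/// expires without any newer version being seen.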
	pub async fn poll_item(
		&self,
		bucket_id: Uuid,
		partition_key: String,
		sort_key: String,
		causal_context: CausalContext,
		timeout_msec: u64,
	) -> Result<Option<K2VItem>, Error> {
		let poll_key = PollKey {
			partition: K2VItemPartition {
				bucket_id,
				partition_key,
			},
			sort_key,
		};
		let nodes = self
			.item_table
			.data
			.replication
			.write_nodes(&poll_key.partition.hash());

		let rpc = self.system.rpc_helper().try_call_many(
			&self.endpoint,
			&nodes[..],
			K2VRpc::PollItem {
				key: poll_key,
				causal_context,
				timeout_msec,
			},
			RequestStrategy::with_priority(PRIO_NORMAL)
				.with_quorum(self.item_table.data.replication.read_quorum())
				.without_timeout(),
		);
		let timeout_duration =
			Duration::from_millis(timeout_msec) + self.system.rpc_helper().rpc_timeout();
		let resps = select! {
			r = rpc => r?,
			_ = tokio::time::sleep(timeout_duration) => return Ok(None),
		};

		let mut resp: Option<K2VItem> = None;
		for v in resps {
			match v {
				K2VRpc::PollItemResponse(Some(x)) => {
					if let Some(y) = &mut resp {
						y.merge(&x);
					} else {
						resp = Some(x);
					}
				}
				K2VRpc::PollItemResponse(None) => (),
				v => return Err(Error::unexpected_rpc_message(v)),
			}
		}

		Ok(resp)
	}

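	/// Long-poll a range of items: send a PollRange RPC to the nodes storing
	/// the partition, gather at least a read quorum of responses, and return
	/// the merged set of new items together with an updated seen marker.
	/// Returns `None` if a seen marker was given and no new item appeared
	/// before the timeout.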
	pub async fn poll_range(
		&self,
		range: PollRange,
		seen_str: Option<String>,
		timeout_msec: u64,
	) -> Result<Option<(BTreeMap<String, K2VItem>, String)>, HelperError> {
		let has_seen_marker = seen_str.is_some();

		// Parse the seen marker; we will use it below. This is also the first
		// check of its validity, which returns a bad request error if it is invalid.
		let mut seen = seen_str
			.as_deref()
			.map(RangeSeenMarker::decode_helper)
			.transpose()?
			.unwrap_or_default();
		seen.restrict(&range);

		// Prepare the PollRange RPC to send to the storage nodes responsible for the partition
		let nodes = self
			.item_table
			.data
			.replication
			.write_nodes(&range.partition.hash());
		let quorum = self.item_table.data.replication.read_quorum();
		let msg = K2VRpc::PollRange {
			range,
			seen_str,
			timeout_msec,
		};

		// Send the request to all nodes, using FuturesUnordered to get the responses in any order
		let msg = msg.into_req().map_err(netapp::error::Error::from)?;
		let rs = RequestStrategy::with_priority(PRIO_NORMAL).without_timeout();
		let mut requests = nodes
			.iter()
			.map(|node| {
				self.system
					.rpc_helper()
					.call(&self.endpoint, *node, msg.clone(), rs)
			})
			.collect::<FuturesUnordered<_>>();

		// Fetch responses. This procedure stops fetching responses when any of the following
		// conditions arises:
		// - we have a response to all requests
		// - we have a response to a read quorum of requests (e.g. 2/3), and an extra delay
		//   has passed since the quorum was achieved
		// - a global RPC timeout expired
		// The extra delay after a quorum is received is useful if the last response is about
		// to arrive during this short interval: it allows us to take into account all the data
		// seen by that last node in the response we produce, and it likely helps reduce the
		// size of the seen marker that we return (because we then have information of the
		// kind: all items produced by that node until time ts have been returned, so we can
		// bump the entry in the global vector clock and possibly remove some item-specific
		// vector clocks).
		let mut deadline = Instant::now()
			+ Duration::from_millis(timeout_msec)
			+ self.system.rpc_helper().rpc_timeout();
		let mut resps = vec![];
		let mut errors = vec![];
		loop {
			select! {
				_ = tokio::time::sleep_until(deadline.into()) => {
					break;
				}
				res = requests.next() => match res {
					None => break,
					Some(Err(e)) => errors.push(e),
					Some(Ok(r)) => {
						resps.push(r);
						if resps.len() >= quorum {
							deadline = std::cmp::min(deadline, Instant::now() + POLL_RANGE_EXTRA_DELAY);
						}
					}
				}
			}
		}
		if errors.len() > nodes.len() - quorum {
			let errors = errors.iter().map(|e| format!("{}", e)).collect::<Vec<_>>();
			return Err(Error::Quorum(quorum, resps.len(), nodes.len(), errors).into());
		}

		// Take all returned items into account to produce the response.
		let mut new_items = BTreeMap::<String, K2VItem>::new();
		for v in resps {
			if let K2VRpc::PollRangeResponse(node, items) = v {
				seen.mark_seen_node_items(node, items.iter());
				for item in items.into_iter() {
					match new_items.get_mut(&item.sort_key) {
						Some(ent) => {
							ent.merge(&item);
						}
						None => {
							new_items.insert(item.sort_key.clone(), item);
						}
					}
				}
			} else {
				return Err(Error::unexpected_rpc_message(v).into());
			}
		}

		if new_items.is_empty() && has_seen_marker {
			Ok(None)
		} else {
			Ok(Some((new_items, seen.encode()?)))
		}
	}

	// ---- internal handlers ----

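	/// Node-side handler for an InsertItem RPC: apply the insertion to the
	/// local item table, then let the table propagate it to the other nodes.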
	async fn handle_insert(&self, item: &InsertedItem) -> Result<K2VRpc, Error> {
		let new = {
			let local_timestamp_tree = self.local_timestamp_tree.lock().unwrap();
			self.local_insert(&local_timestamp_tree, item)?
		};

		// Propagate to rest of network
		if let Some(updated) = new {
			self.item_table.insert(&updated).await?;
		}

		Ok(K2VRpc::Ok)
	}

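	/// Node-side handler for an InsertManyItems RPC: apply all insertions to
	/// the local item table under a single lock, then propagate the updated
	/// entries to the other nodes.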
	async fn handle_insert_many(&self, items: &[InsertedItem]) -> Result<K2VRpc, Error> {
		let mut updated_vec = vec![];

		{
			let local_timestamp_tree = self.local_timestamp_tree.lock().unwrap();
			for item in items {
				let new = self.local_insert(&local_timestamp_tree, item)?;

				if let Some(updated) = new {
					updated_vec.push(updated);
				}
			}
		}

		// Propagate to rest of network
		if !updated_vec.is_empty() {
			self.item_table.insert_many(&updated_vec).await?;
		}

		Ok(K2VRpc::Ok)
	}

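	/// Apply an insertion to the local item table: merge it into the existing
	/// entry (if any) using a local timestamp that never goes backwards, and
	/// persist the new timestamp in the local timestamp tree within the same
	/// transaction.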
	fn local_insert(
		&self,
		local_timestamp_tree: &MutexGuard<'_, db::Tree>,
		item: &InsertedItem,
	) -> Result<Option<K2VItem>, Error> {
		let now = now_msec();

		self.item_table
			.data
			.update_entry_with(&item.partition, &item.sort_key, |tx, ent| {
				let old_local_timestamp = tx
					.get(local_timestamp_tree, TIMESTAMP_KEY)?
					.and_then(|x| x.try_into().ok())
					.map(u64::from_be_bytes)
					.unwrap_or_default();

				let mut ent = ent.unwrap_or_else(|| {
					K2VItem::new(
						item.partition.bucket_id,
						item.partition.partition_key.clone(),
						item.sort_key.clone(),
					)
				});
				let new_local_timestamp = ent.update(
					self.system.id,
					&item.causal_context,
					item.value.clone(),
					std::cmp::max(old_local_timestamp, now),
				);

				tx.insert(
					local_timestamp_tree,
					TIMESTAMP_KEY,
					u64::to_be_bytes(new_local_timestamp),
				)?;

				Ok(ent)
			})
	}

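	/// Node-side handler for a PollItem RPC: subscribe to updates on the item,
	/// then return it as soon as its stored version (or a later update) is
	/// newer than the causal context provided by the client.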
	async fn handle_poll_item(&self, key: &PollKey, ct: &CausalContext) -> Result<K2VItem, Error> {
		let mut chan = self.subscriptions.subscribe_item(key);

		let mut value = self
			.item_table
			.data
			.read_entry(&key.partition, &key.sort_key)?
			.map(|bytes| self.item_table.data.decode_entry(&bytes[..]))
			.transpose()?
			.unwrap_or_else(|| {
				K2VItem::new(
					key.partition.bucket_id,
					key.partition.partition_key.clone(),
					key.sort_key.clone(),
				)
			});

		while !value.causal_context().is_newer_than(ct) {
			value = chan.recv().await?;
		}

		Ok(value)
	}

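	/// Node-side handler for a PollRange RPC: if a seen marker is provided,
	/// wait until at least one item in the range is newer than that marker;
	/// otherwise return the currently stored items of the range immediately.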
	async fn handle_poll_range(
		&self,
		range: &PollRange,
		seen_str: &Option<String>,
	) -> Result<Vec<K2VItem>, Error> {
		if let Some(seen_str) = seen_str {
			let seen = RangeSeenMarker::decode(seen_str).ok_or_message("Invalid seenMarker")?;

			// Subscribe now to all changes on that partition,
			// so that new items that are inserted while we are reading the range
			// will be seen in the loop below
			let mut chan = self.subscriptions.subscribe_partition(&range.partition);

			// Check for the presence of any new items already stored in the item table
			let mut new_items = self.poll_range_read_range(range, &seen)?;

			// If we found no new items, wait for a matching item to arrive
			// on the channel
			while new_items.is_empty() {
				let item = chan.recv().await?;
				if range.matches(&item) && seen.is_new_item(&item) {
					new_items.push(item);
				}
			}

			Ok(new_items)
		} else {
			// If no seen marker was specified, we do not poll for anything.
			// We return immediately with the set of known items (even if
			// it is empty), which will give the client an initial view of
			// the dataset and an initial seen marker for further
			// PollRange calls.
			self.poll_range_read_range(range, &RangeSeenMarker::default())
		}
	}

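	/// Scan the local item table for items of the given range that are not yet
	/// covered by the seen marker, stopping at the end of the partition or of
	/// the range.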
	fn poll_range_read_range(
		&self,
		range: &PollRange,
		seen: &RangeSeenMarker,
	) -> Result<Vec<K2VItem>, Error> {
		let mut new_items = vec![];

		let partition_hash = range.partition.hash();
		let first_key = match &range.start {
			None => partition_hash.to_vec(),
			Some(sk) => self.item_table.data.tree_key(&range.partition, sk),
		};
		for item in self.item_table.data.store.range(first_key..)? {
			let (key, value) = item?;
			if &key[..32] != partition_hash.as_slice() {
				break;
			}
			let item = self.item_table.data.decode_entry(&value)?;
			if !range.matches(&item) {
				break;
			}
			if seen.is_new_item(&item) {
				new_items.push(item);
			}
		}

		Ok(new_items)
	}
}

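// Dispatch incoming K2V RPC messages to the handlers above; poll requests are
// bounded by the client-provided timeout, after which an empty response is returned.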
#[async_trait]
impl EndpointHandler<K2VRpc> for K2VRpcHandler {
	async fn handle(self: &Arc<Self>, message: &K2VRpc, _from: NodeID) -> Result<K2VRpc, Error> {
		match message {
			K2VRpc::InsertItem(item) => self.handle_insert(item).await,
			K2VRpc::InsertManyItems(items) => self.handle_insert_many(&items[..]).await,
			K2VRpc::PollItem {
				key,
				causal_context,
				timeout_msec,
			} => {
				let delay = tokio::time::sleep(Duration::from_millis(*timeout_msec));
				select! {
					ret = self.handle_poll_item(key, causal_context) => ret.map(Some).map(K2VRpc::PollItemResponse),
					_ = delay => Ok(K2VRpc::PollItemResponse(None)),
				}
			}
			K2VRpc::PollRange {
				range,
				seen_str,
				timeout_msec,
			} => {
				let delay = tokio::time::sleep(Duration::from_millis(*timeout_msec));
				select! {
					ret = self.handle_poll_range(range, seen_str) => ret.map(|items| K2VRpc::PollRangeResponse(self.system.id, items)),
					_ = delay => Ok(K2VRpc::PollRangeResponse(self.system.id, vec![])),
				}
			}
			m => Err(Error::unexpected_rpc_message(m)),
		}
	}
}