2022-05-10 11:16:57 +00:00
|
|
|
use std::borrow::Borrow;
|
|
|
|
use std::collections::{BTreeMap, BTreeSet, HashMap};
|
2020-04-10 20:01:48 +00:00
|
|
|
use std::sync::Arc;
|
2020-04-08 20:00:41 +00:00
|
|
|
|
2021-10-14 09:50:12 +00:00
|
|
|
use async_trait::async_trait;
|
2020-04-11 21:53:32 +00:00
|
|
|
use futures::stream::*;
|
2020-04-11 17:43:29 +00:00
|
|
|
use serde::{Deserialize, Serialize};
|
|
|
|
use serde_bytes::ByteBuf;
|
|
|
|
|
2022-02-24 12:18:51 +00:00
|
|
|
use opentelemetry::{
|
|
|
|
trace::{FutureExt, TraceContextExt, Tracer},
|
|
|
|
Context,
|
|
|
|
};
|
|
|
|
|
2022-06-08 08:01:44 +00:00
|
|
|
use garage_db as db;
|
|
|
|
|
2022-12-14 15:08:05 +00:00
|
|
|
use garage_util::background::BackgroundRunner;
|
2020-04-24 10:10:01 +00:00
|
|
|
use garage_util::data::*;
|
|
|
|
use garage_util::error::Error;
|
2022-02-22 12:53:59 +00:00
|
|
|
use garage_util::metrics::RecordDuration;
|
2023-01-03 13:44:47 +00:00
|
|
|
use garage_util::migrate::Migrate;
|
2020-04-23 17:05:46 +00:00
|
|
|
|
2023-12-07 09:55:15 +00:00
|
|
|
use garage_rpc::rpc_helper::QuorumSetResultTracker;
|
2021-10-14 09:50:12 +00:00
|
|
|
use garage_rpc::system::System;
|
|
|
|
use garage_rpc::*;
|
2020-04-23 17:05:46 +00:00
|
|
|
|
2021-05-02 21:13:08 +00:00
|
|
|
use crate::crdt::Crdt;
|
2021-03-11 15:54:15 +00:00
|
|
|
use crate::data::*;
|
2021-03-12 20:52:19 +00:00
|
|
|
use crate::gc::*;
|
2021-03-16 10:43:58 +00:00
|
|
|
use crate::merkle::*;
|
2022-12-14 10:58:06 +00:00
|
|
|
use crate::queue::InsertQueueWorker;
|
2021-03-11 15:54:15 +00:00
|
|
|
use crate::replication::*;
|
2021-03-11 17:28:03 +00:00
|
|
|
use crate::schema::*;
|
|
|
|
use crate::sync::*;
|
2022-05-10 11:16:57 +00:00
|
|
|
use crate::util::*;
|
2020-04-08 20:00:41 +00:00
|
|
|
|
2023-01-03 14:08:37 +00:00
|
|
|
/// A replicated key/value table: bundles the local table data together with
/// the background machinery that keeps it consistent across nodes (Merkle
/// tree updater, syncer, GC) and the RPC endpoint used to serve requests.
pub struct Table<F: TableSchema, R: TableReplication> {
	/// Node-wide system handle (membership, layout manager, RPC helper)
	pub system: Arc<System>,
	/// Local storage, metrics and replication parameters for this table
	pub data: Arc<TableData<F, R>>,
	/// Maintains the Merkle tree used for anti-entropy synchronization
	pub merkle_updater: Arc<MerkleUpdater<F, R>>,
	/// Background synchronization of table content between nodes
	pub syncer: Arc<TableSyncer<F, R>>,
	// Garbage collection of table entries -- NOTE(review): exact GC
	// semantics are defined in crate::gc; confirm there
	gc: Arc<TableGc<F, R>>,
	/// RPC endpoint on which this table answers TableRpc messages
	endpoint: Arc<Endpoint<TableRpc<F>, Self>>,
}
|
|
|
|
|
2020-04-08 21:01:49 +00:00
|
|
|
/// RPC messages exchanged between nodes for table operations.
/// This enum is serialized on the wire: do not reorder or rename variants
/// without considering cross-version compatibility.
#[derive(Serialize, Deserialize)]
pub(crate) enum TableRpc<F: TableSchema> {
	/// Generic acknowledgement (e.g. the response to an `Update`)
	Ok,

	/// Read a single entry, identified by partition key and sort key
	ReadEntry(F::P, F::S),
	/// Response to `ReadEntry`: the serialized entry, if present
	ReadEntryResponse(Option<ByteBuf>),

	// Read range: read all keys in partition P, possibly starting at a certain sort key offset
	ReadRange {
		partition: F::P,
		begin_sort_key: Option<F::S>,
		filter: Option<F::Filter>,
		limit: usize,
		enumeration_order: EnumerationOrder,
	},

	/// Store the given serialized entries on the receiving node.
	/// Also used as the response to `ReadRange`, carrying the entries read.
	Update(Vec<Arc<ByteBuf>>),
}
|
|
|
|
|
2021-10-15 09:05:09 +00:00
|
|
|
/// Tie the `TableRpc` message type to its response type for the RPC
/// framework: every table RPC is answered by another `TableRpc` (or an error).
impl<F: TableSchema> Rpc for TableRpc<F> {
	type Response = Result<TableRpc<F>, Error>;
}
|
2020-04-18 17:21:34 +00:00
|
|
|
|
2023-01-03 14:08:37 +00:00
|
|
|
impl<F: TableSchema, R: TableReplication> Table<F, R> {
|
2020-04-17 15:09:57 +00:00
|
|
|
// =============== PUBLIC INTERFACE FUNCTIONS (new, insert, get, etc) ===============
|
|
|
|
|
2022-06-08 08:01:44 +00:00
|
|
|
pub fn new(instance: F, replication: R, system: Arc<System>, db: &db::Db) -> Arc<Self> {
|
2021-10-14 09:50:12 +00:00
|
|
|
let endpoint = system
|
|
|
|
.netapp
|
2021-12-14 11:34:01 +00:00
|
|
|
.endpoint(format!("garage_table/table.rs/Rpc:{}", F::TABLE_NAME));
|
2020-04-18 17:21:34 +00:00
|
|
|
|
2021-12-14 11:34:01 +00:00
|
|
|
let data = TableData::new(system.clone(), instance, replication, db);
|
2021-03-11 12:47:21 +00:00
|
|
|
|
2022-12-14 11:28:07 +00:00
|
|
|
let merkle_updater = MerkleUpdater::new(data.clone());
|
2020-04-18 17:21:34 +00:00
|
|
|
|
2022-12-14 11:28:07 +00:00
|
|
|
let syncer = TableSyncer::new(system.clone(), data.clone(), merkle_updater.clone());
|
|
|
|
let gc = TableGc::new(system.clone(), data.clone());
|
2020-04-18 17:21:34 +00:00
|
|
|
|
2023-11-09 15:32:31 +00:00
|
|
|
system.layout_manager.add_table(F::TABLE_NAME);
|
|
|
|
|
2021-03-12 14:07:23 +00:00
|
|
|
let table = Arc::new(Self {
|
2021-03-16 10:43:58 +00:00
|
|
|
system,
|
2021-03-12 14:07:23 +00:00
|
|
|
data,
|
2021-03-16 10:43:58 +00:00
|
|
|
merkle_updater,
|
2022-12-14 11:28:07 +00:00
|
|
|
gc,
|
2021-03-12 14:07:23 +00:00
|
|
|
syncer,
|
2021-10-14 09:50:12 +00:00
|
|
|
endpoint,
|
2021-03-12 14:07:23 +00:00
|
|
|
});
|
2021-03-11 15:54:15 +00:00
|
|
|
|
2021-10-14 09:50:12 +00:00
|
|
|
table.endpoint.set_handler(table.clone());
|
2021-03-11 12:47:21 +00:00
|
|
|
|
2020-04-16 12:50:49 +00:00
|
|
|
table
|
2020-04-08 20:00:41 +00:00
|
|
|
}
|
|
|
|
|
2022-12-14 11:51:16 +00:00
|
|
|
	/// Spawn this table's background workers on the given background runner:
	/// the Merkle tree updater, the syncer, the garbage collector, and the
	/// worker that flushes the local insert queue.
	pub fn spawn_workers(self: &Arc<Self>, bg: &BackgroundRunner) {
		self.merkle_updater.spawn_workers(bg);
		self.syncer.spawn_workers(bg);
		self.gc.spawn_workers(bg);
		bg.spawn_worker(InsertQueueWorker(self.clone()));
	}
|
|
|
|
|
2020-04-09 14:16:27 +00:00
|
|
|
pub async fn insert(&self, e: &F::E) -> Result<(), Error> {
|
2022-02-24 12:18:51 +00:00
|
|
|
let tracer = opentelemetry::global::tracer("garage_table");
|
|
|
|
let span = tracer.start(format!("{} insert", F::TABLE_NAME));
|
|
|
|
|
|
|
|
self.insert_internal(e)
|
|
|
|
.bound_record_duration(&self.data.metrics.put_request_duration)
|
|
|
|
.with_context(Context::current_with_span(span))
|
|
|
|
.await?;
|
|
|
|
|
|
|
|
self.data.metrics.put_request_counter.add(1);
|
|
|
|
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
|
|
|
async fn insert_internal(&self, e: &F::E) -> Result<(), Error> {
|
2020-04-09 14:16:27 +00:00
|
|
|
let hash = e.partition_key().hash();
|
2023-11-14 14:40:46 +00:00
|
|
|
let who = self.data.replication.write_sets(&hash);
|
2020-04-08 21:01:49 +00:00
|
|
|
|
2023-01-03 13:44:47 +00:00
|
|
|
let e_enc = Arc::new(ByteBuf::from(e.encode()?));
|
2021-05-02 21:13:08 +00:00
|
|
|
let rpc = TableRpc::<F>::Update(vec![e_enc]);
|
2020-04-10 20:01:48 +00:00
|
|
|
|
2021-10-14 09:50:12 +00:00
|
|
|
self.system
|
2023-11-09 11:55:36 +00:00
|
|
|
.rpc_helper()
|
2023-11-14 14:40:46 +00:00
|
|
|
.try_write_many_sets(
|
2021-10-14 09:50:12 +00:00
|
|
|
&self.endpoint,
|
2023-11-15 14:40:44 +00:00
|
|
|
who.as_ref(),
|
2020-04-19 11:22:28 +00:00
|
|
|
rpc,
|
2021-10-14 09:50:12 +00:00
|
|
|
RequestStrategy::with_priority(PRIO_NORMAL)
|
2022-09-19 18:12:19 +00:00
|
|
|
.with_quorum(self.data.replication.write_quorum()),
|
2020-04-19 11:22:28 +00:00
|
|
|
)
|
2020-04-10 20:01:48 +00:00
|
|
|
.await?;
|
2022-02-16 13:23:04 +00:00
|
|
|
|
2020-04-08 21:01:49 +00:00
|
|
|
Ok(())
|
|
|
|
}
|
2020-04-08 20:00:41 +00:00
|
|
|
|
2022-12-14 10:58:06 +00:00
|
|
|
	/// Insert item locally, within an existing database transaction: the
	/// entry is queued on this node (see `InsertQueueWorker` spawned in
	/// `spawn_workers`) rather than replicated synchronously.
	pub fn queue_insert(&self, tx: &mut db::Transaction, e: &F::E) -> db::TxResult<(), Error> {
		self.data.queue_insert(tx, e)
	}
|
|
|
|
|
2023-11-16 15:41:45 +00:00
|
|
|
pub async fn insert_many<I, IE>(self: &Arc<Self>, entries: I) -> Result<(), Error>
|
2022-05-10 11:16:57 +00:00
|
|
|
where
|
|
|
|
I: IntoIterator<Item = IE> + Send + Sync,
|
|
|
|
IE: Borrow<F::E> + Send + Sync,
|
|
|
|
{
|
2022-02-24 12:18:51 +00:00
|
|
|
let tracer = opentelemetry::global::tracer("garage_table");
|
2022-05-10 11:16:57 +00:00
|
|
|
let span = tracer.start(format!("{} insert_many", F::TABLE_NAME));
|
2022-02-24 12:18:51 +00:00
|
|
|
|
2022-02-22 12:53:59 +00:00
|
|
|
self.insert_many_internal(entries)
|
|
|
|
.bound_record_duration(&self.data.metrics.put_request_duration)
|
2022-02-24 12:18:51 +00:00
|
|
|
.with_context(Context::current_with_span(span))
|
2022-02-22 12:53:59 +00:00
|
|
|
.await?;
|
2022-02-24 12:18:51 +00:00
|
|
|
|
2022-02-22 12:53:59 +00:00
|
|
|
self.data.metrics.put_request_counter.add(1);
|
2022-02-24 12:18:51 +00:00
|
|
|
|
2022-02-22 12:53:59 +00:00
|
|
|
Ok(())
|
|
|
|
}
|
2022-02-16 13:23:04 +00:00
|
|
|
|
2023-11-16 15:41:45 +00:00
|
|
|
async fn insert_many_internal<I, IE>(self: &Arc<Self>, entries: I) -> Result<(), Error>
|
2022-05-10 11:16:57 +00:00
|
|
|
where
|
|
|
|
I: IntoIterator<Item = IE> + Send + Sync,
|
|
|
|
IE: Borrow<F::E> + Send + Sync,
|
|
|
|
{
|
2023-11-16 15:41:45 +00:00
|
|
|
// The different items will have to be stored on possibly different nodes.
|
|
|
|
// We will here batch all items into a single request for each concerned
|
|
|
|
// node, with all of the entries it must store within that request.
|
|
|
|
// Each entry has to be saved to a specific list of "write sets", i.e. a set
|
|
|
|
// of node within wich a quorum must be achieved. In normal operation, there
|
|
|
|
// is a single write set which corresponds to the quorum in the current
|
|
|
|
// cluster layout, but when the layout is updated, multiple write sets might
|
|
|
|
// have to be handled at once. Here, since we are sending many entries, we
|
|
|
|
// will have to handle many write sets in all cases. The algorihtm is thus
|
|
|
|
// to send one request to each node with all the items it must save,
|
|
|
|
// and keep track of the OK responses within each write set: if for all sets
|
|
|
|
// a quorum of nodes has answered OK, then the insert has succeeded and
|
|
|
|
// consistency properties (read-after-write) are preserved.
|
|
|
|
|
|
|
|
let quorum = self.data.replication.write_quorum();
|
|
|
|
|
|
|
|
// Serialize all entries and compute the write sets for each of them.
|
|
|
|
// In the case of sharded table replication, this also takes an "ack lock"
|
|
|
|
// to the layout manager to avoid ack'ing newer versions which are not
|
|
|
|
// taken into account by writes in progress (the ack can happen later, once
|
|
|
|
// all writes that didn't take the new layout into account are finished).
|
|
|
|
// These locks are released when entries_vec is dropped, i.e. when this
|
|
|
|
// function returns.
|
|
|
|
let mut entries_vec = Vec::new();
|
2022-05-10 11:16:57 +00:00
|
|
|
for entry in entries.into_iter() {
|
|
|
|
let entry = entry.borrow();
|
2020-04-11 17:43:29 +00:00
|
|
|
let hash = entry.partition_key().hash();
|
2023-12-07 09:55:15 +00:00
|
|
|
let mut write_sets = self.data.replication.write_sets(&hash);
|
|
|
|
for set in write_sets.as_mut().iter_mut() {
|
2023-12-08 13:54:11 +00:00
|
|
|
// Sort nodes in each write sets to merge write sets with same
|
|
|
|
// nodes but in possibly different orders
|
2023-12-07 09:55:15 +00:00
|
|
|
set.sort();
|
|
|
|
}
|
2023-01-03 13:44:47 +00:00
|
|
|
let e_enc = Arc::new(ByteBuf::from(entry.encode()?));
|
2023-11-16 15:41:45 +00:00
|
|
|
entries_vec.push((write_sets, e_enc));
|
|
|
|
}
|
|
|
|
|
|
|
|
// Compute a deduplicated list of all of the write sets,
|
|
|
|
// and compute an index from each node to the position of the sets in which
|
|
|
|
// it takes part, to optimize the detection of a quorum.
|
|
|
|
let mut write_sets = entries_vec
|
|
|
|
.iter()
|
|
|
|
.map(|(wss, _)| wss.as_ref().iter().map(|ws| ws.as_slice()))
|
|
|
|
.flatten()
|
|
|
|
.collect::<Vec<&[Uuid]>>();
|
|
|
|
write_sets.sort();
|
|
|
|
write_sets.dedup();
|
2023-12-07 09:55:15 +00:00
|
|
|
|
|
|
|
let mut result_tracker = QuorumSetResultTracker::new(&write_sets, quorum);
|
2023-11-16 15:41:45 +00:00
|
|
|
|
|
|
|
// Build a map of all nodes to the entries that must be sent to that node.
|
|
|
|
let mut call_list: HashMap<Uuid, Vec<_>> = HashMap::new();
|
|
|
|
for (write_sets, entry_enc) in entries_vec.iter() {
|
|
|
|
for write_set in write_sets.as_ref().iter() {
|
|
|
|
for node in write_set.iter() {
|
2023-12-08 13:54:11 +00:00
|
|
|
let node_entries = call_list.entry(*node).or_default();
|
|
|
|
match node_entries.last() {
|
|
|
|
Some(x) if Arc::ptr_eq(x, entry_enc) => {
|
|
|
|
// skip if entry already in list to send to this node
|
|
|
|
// (could happen if node is in several write sets for this entry)
|
|
|
|
}
|
|
|
|
_ => {
|
|
|
|
node_entries.push(entry_enc.clone());
|
|
|
|
}
|
|
|
|
}
|
2023-11-16 15:41:45 +00:00
|
|
|
}
|
2020-04-11 17:43:29 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-11-16 15:41:45 +00:00
|
|
|
// Build futures to actually perform each of the corresponding RPC calls
|
|
|
|
let call_futures = call_list.into_iter().map(|(node, entries)| {
|
|
|
|
let this = self.clone();
|
|
|
|
let tracer = opentelemetry::global::tracer("garage");
|
|
|
|
let span = tracer.start(format!("RPC to {:?}", node));
|
|
|
|
let fut = async move {
|
|
|
|
let rpc = TableRpc::<F>::Update(entries);
|
|
|
|
let resp = this
|
|
|
|
.system
|
|
|
|
.rpc_helper()
|
|
|
|
.call(
|
|
|
|
&this.endpoint,
|
|
|
|
node,
|
|
|
|
rpc,
|
|
|
|
RequestStrategy::with_priority(PRIO_NORMAL).with_quorum(quorum),
|
|
|
|
)
|
|
|
|
.await;
|
|
|
|
(node, resp)
|
|
|
|
};
|
|
|
|
fut.with_context(Context::current_with_span(span))
|
2020-04-11 21:53:32 +00:00
|
|
|
});
|
2023-11-16 15:41:45 +00:00
|
|
|
|
|
|
|
// Run all requests in parallel thanks to FuturesUnordered, and collect results.
|
2020-04-11 17:43:29 +00:00
|
|
|
let mut resps = call_futures.collect::<FuturesUnordered<_>>();
|
|
|
|
|
2023-11-16 15:41:45 +00:00
|
|
|
while let Some((node, resp)) = resps.next().await {
|
2023-12-07 09:55:15 +00:00
|
|
|
result_tracker.register_result(node, resp.map(|_| ()));
|
2023-11-16 15:41:45 +00:00
|
|
|
|
2023-12-07 09:55:15 +00:00
|
|
|
if result_tracker.all_quorums_ok() {
|
2023-11-16 15:41:45 +00:00
|
|
|
// Success
|
|
|
|
|
|
|
|
// Continue all other requests in background
|
|
|
|
tokio::spawn(async move {
|
|
|
|
resps.collect::<Vec<(Uuid, Result<_, _>)>>().await;
|
|
|
|
});
|
|
|
|
|
|
|
|
return Ok(());
|
|
|
|
}
|
|
|
|
|
2023-12-07 09:55:15 +00:00
|
|
|
if result_tracker.too_many_failures() {
|
2023-11-16 15:41:45 +00:00
|
|
|
// Too many errors in this set, we know we won't get a quorum
|
|
|
|
break;
|
2020-04-11 17:43:29 +00:00
|
|
|
}
|
|
|
|
}
|
2023-11-16 15:41:45 +00:00
|
|
|
|
|
|
|
// Failure, could not get quorum within at least one set
|
2023-12-07 09:55:15 +00:00
|
|
|
Err(result_tracker.quorum_error())
|
2020-04-11 17:43:29 +00:00
|
|
|
}
|
|
|
|
|
2020-04-16 12:50:49 +00:00
|
|
|
pub async fn get(
|
|
|
|
self: &Arc<Self>,
|
|
|
|
partition_key: &F::P,
|
|
|
|
sort_key: &F::S,
|
|
|
|
) -> Result<Option<F::E>, Error> {
|
2022-02-24 12:18:51 +00:00
|
|
|
let tracer = opentelemetry::global::tracer("garage_table");
|
|
|
|
let span = tracer.start(format!("{} get", F::TABLE_NAME));
|
|
|
|
|
2022-02-22 13:52:41 +00:00
|
|
|
let res = self
|
|
|
|
.get_internal(partition_key, sort_key)
|
2022-02-22 12:53:59 +00:00
|
|
|
.bound_record_duration(&self.data.metrics.get_request_duration)
|
2022-02-24 12:18:51 +00:00
|
|
|
.with_context(Context::current_with_span(span))
|
2022-02-22 12:53:59 +00:00
|
|
|
.await?;
|
2022-02-24 12:18:51 +00:00
|
|
|
|
2022-02-22 12:53:59 +00:00
|
|
|
self.data.metrics.get_request_counter.add(1);
|
2022-02-24 12:18:51 +00:00
|
|
|
|
2022-02-22 12:53:59 +00:00
|
|
|
Ok(res)
|
|
|
|
}
|
2022-02-16 13:23:04 +00:00
|
|
|
|
2022-02-22 12:53:59 +00:00
|
|
|
async fn get_internal(
|
|
|
|
self: &Arc<Self>,
|
|
|
|
partition_key: &F::P,
|
|
|
|
sort_key: &F::S,
|
|
|
|
) -> Result<Option<F::E>, Error> {
|
2020-04-09 14:16:27 +00:00
|
|
|
let hash = partition_key.hash();
|
2021-03-16 10:43:58 +00:00
|
|
|
let who = self.data.replication.read_nodes(&hash);
|
2020-04-08 20:00:41 +00:00
|
|
|
|
2021-05-02 21:13:08 +00:00
|
|
|
let rpc = TableRpc::<F>::ReadEntry(partition_key.clone(), sort_key.clone());
|
2020-04-10 20:01:48 +00:00
|
|
|
let resps = self
|
2021-10-14 09:50:12 +00:00
|
|
|
.system
|
2023-11-09 11:55:36 +00:00
|
|
|
.rpc_helper()
|
2020-04-19 11:22:28 +00:00
|
|
|
.try_call_many(
|
2021-10-14 09:50:12 +00:00
|
|
|
&self.endpoint,
|
2023-11-14 14:40:46 +00:00
|
|
|
&who,
|
2020-04-19 11:22:28 +00:00
|
|
|
rpc,
|
2021-10-14 09:50:12 +00:00
|
|
|
RequestStrategy::with_priority(PRIO_NORMAL)
|
2023-11-14 14:40:46 +00:00
|
|
|
.with_quorum(self.data.replication.read_quorum()),
|
2020-04-19 11:22:28 +00:00
|
|
|
)
|
2020-04-08 21:01:49 +00:00
|
|
|
.await?;
|
|
|
|
|
2020-04-08 21:47:34 +00:00
|
|
|
let mut ret = None;
|
2020-04-09 18:58:39 +00:00
|
|
|
let mut not_all_same = false;
|
2020-04-08 21:01:49 +00:00
|
|
|
for resp in resps {
|
2021-05-02 21:13:08 +00:00
|
|
|
if let TableRpc::ReadEntryResponse(value) = resp {
|
2020-04-11 17:43:29 +00:00
|
|
|
if let Some(v_bytes) = value {
|
2021-03-11 15:54:15 +00:00
|
|
|
let v = self.data.decode_entry(v_bytes.as_slice())?;
|
2020-04-08 21:47:34 +00:00
|
|
|
ret = match ret {
|
|
|
|
None => Some(v),
|
|
|
|
Some(mut x) => {
|
2020-04-09 21:45:07 +00:00
|
|
|
if x != v {
|
2020-04-09 18:58:39 +00:00
|
|
|
not_all_same = true;
|
2020-04-09 21:45:07 +00:00
|
|
|
x.merge(&v);
|
2020-04-09 18:58:39 +00:00
|
|
|
}
|
2020-04-08 21:47:34 +00:00
|
|
|
Some(x)
|
|
|
|
}
|
|
|
|
}
|
2020-04-08 21:01:49 +00:00
|
|
|
}
|
2020-04-08 21:47:34 +00:00
|
|
|
} else {
|
2021-04-23 19:42:52 +00:00
|
|
|
return Err(Error::Message("Invalid return value to read".to_string()));
|
2020-04-08 21:01:49 +00:00
|
|
|
}
|
|
|
|
}
|
2020-04-09 18:58:39 +00:00
|
|
|
if let Some(ret_entry) = &ret {
|
|
|
|
if not_all_same {
|
2020-04-17 15:09:57 +00:00
|
|
|
let self2 = self.clone();
|
|
|
|
let ent2 = ret_entry.clone();
|
2022-12-14 15:08:05 +00:00
|
|
|
tokio::spawn(async move {
|
|
|
|
if let Err(e) = self2.repair_on_read(&who[..], ent2).await {
|
|
|
|
warn!("Error doing repair on read: {}", e);
|
|
|
|
}
|
|
|
|
});
|
2020-04-09 18:58:39 +00:00
|
|
|
}
|
|
|
|
}
|
2022-02-16 13:23:04 +00:00
|
|
|
|
2020-04-08 21:47:34 +00:00
|
|
|
Ok(ret)
|
2020-04-08 21:01:49 +00:00
|
|
|
}
|
|
|
|
|
2020-04-17 15:09:57 +00:00
|
|
|
pub async fn get_range(
|
|
|
|
self: &Arc<Self>,
|
|
|
|
partition_key: &F::P,
|
2020-04-19 15:15:48 +00:00
|
|
|
begin_sort_key: Option<F::S>,
|
2020-04-17 15:09:57 +00:00
|
|
|
filter: Option<F::Filter>,
|
|
|
|
limit: usize,
|
2022-05-10 11:16:57 +00:00
|
|
|
enumeration_order: EnumerationOrder,
|
2020-04-17 15:09:57 +00:00
|
|
|
) -> Result<Vec<F::E>, Error> {
|
2022-02-24 12:18:51 +00:00
|
|
|
let tracer = opentelemetry::global::tracer("garage_table");
|
|
|
|
let span = tracer.start(format!("{} get_range", F::TABLE_NAME));
|
|
|
|
|
2022-02-22 13:52:41 +00:00
|
|
|
let res = self
|
2022-05-10 11:16:57 +00:00
|
|
|
.get_range_internal(
|
|
|
|
partition_key,
|
|
|
|
begin_sort_key,
|
|
|
|
filter,
|
|
|
|
limit,
|
|
|
|
enumeration_order,
|
|
|
|
)
|
2022-02-22 12:53:59 +00:00
|
|
|
.bound_record_duration(&self.data.metrics.get_request_duration)
|
2022-02-24 12:18:51 +00:00
|
|
|
.with_context(Context::current_with_span(span))
|
2022-02-22 12:53:59 +00:00
|
|
|
.await?;
|
2022-02-24 12:18:51 +00:00
|
|
|
|
2022-02-22 12:53:59 +00:00
|
|
|
self.data.metrics.get_request_counter.add(1);
|
2022-02-24 12:18:51 +00:00
|
|
|
|
2022-02-22 12:53:59 +00:00
|
|
|
Ok(res)
|
|
|
|
}
|
2022-02-16 13:23:04 +00:00
|
|
|
|
2022-02-22 12:53:59 +00:00
|
|
|
	/// Internal implementation of `get_range`: perform the quorum range
	/// read, merge the answers of all nodes entry by entry, trigger
	/// repair-on-read for entries on which replicas disagreed, and truncate
	/// the merged result to `limit` items.
	async fn get_range_internal(
		self: &Arc<Self>,
		partition_key: &F::P,
		begin_sort_key: Option<F::S>,
		filter: Option<F::Filter>,
		limit: usize,
		enumeration_order: EnumerationOrder,
	) -> Result<Vec<F::E>, Error> {
		let hash = partition_key.hash();
		let who = self.data.replication.read_nodes(&hash);

		let rpc = TableRpc::<F>::ReadRange {
			partition: partition_key.clone(),
			begin_sort_key,
			filter,
			limit,
			enumeration_order,
		};

		let resps = self
			.system
			.rpc_helper()
			.try_call_many(
				&self.endpoint,
				&who,
				rpc,
				RequestStrategy::with_priority(PRIO_NORMAL)
					.with_quorum(self.data.replication.read_quorum()),
			)
			.await?;

		// Merge all nodes' answers keyed by tree key; entries for which two
		// nodes returned different values are CRDT-merged and marked for
		// repair-on-read.
		let mut ret: BTreeMap<Vec<u8>, F::E> = BTreeMap::new();
		let mut to_repair = BTreeSet::new();
		for resp in resps {
			if let TableRpc::Update(entries) = resp {
				for entry_bytes in entries.iter() {
					let entry = self.data.decode_entry(entry_bytes.as_slice())?;
					let entry_key = self.data.tree_key(entry.partition_key(), entry.sort_key());
					match ret.get_mut(&entry_key) {
						Some(e) => {
							if *e != entry {
								e.merge(&entry);
								to_repair.insert(entry_key.clone());
							}
						}
						None => {
							ret.insert(entry_key, entry);
						}
					}
				}
			} else {
				return Err(Error::unexpected_rpc_message(resp));
			}
		}

		// Push merged values of divergent entries back to the nodes we read
		// from, in a background task.
		if !to_repair.is_empty() {
			let self2 = self.clone();
			let to_repair = to_repair
				.into_iter()
				.map(|k| ret.get(&k).unwrap().clone())
				.collect::<Vec<_>>();
			tokio::spawn(async move {
				for v in to_repair {
					if let Err(e) = self2.repair_on_read(&who[..], v).await {
						warn!("Error doing repair on read: {}", e);
					}
				}
			});
		}

		// At this point, the `ret` btreemap might contain more than `limit`
		// items, because nodes might have returned us each `limit` items
		// but for different keys. We have to take only the first `limit` items
		// in this map, in the specified enumeration order, for two reasons:
		// 1. To return to the user no more than the number of items that they requested
		// 2. To return only items for which we have a read quorum: we do not know
		//    that we have a read quorum for the items after the first `limit`
		//    of them
		let ret_vec = match enumeration_order {
			EnumerationOrder::Forward => ret
				.into_iter()
				.take(limit)
				.map(|(_k, v)| v)
				.collect::<Vec<_>>(),
			EnumerationOrder::Reverse => ret
				.into_iter()
				.rev()
				.take(limit)
				.map(|(_k, v)| v)
				.collect::<Vec<_>>(),
		};
		Ok(ret_vec)
	}
|
|
|
|
|
|
|
|
// =============== UTILITY FUNCTION FOR CLIENT OPERATIONS ===============
|
|
|
|
|
2021-10-15 09:05:09 +00:00
|
|
|
async fn repair_on_read(&self, who: &[Uuid], what: F::E) -> Result<(), Error> {
|
2023-01-03 13:44:47 +00:00
|
|
|
let what_enc = Arc::new(ByteBuf::from(what.encode()?));
|
2021-10-14 09:50:12 +00:00
|
|
|
self.system
|
2023-11-09 11:55:36 +00:00
|
|
|
.rpc_helper()
|
2020-04-18 17:21:34 +00:00
|
|
|
.try_call_many(
|
2021-10-14 09:50:12 +00:00
|
|
|
&self.endpoint,
|
2021-04-23 19:42:52 +00:00
|
|
|
who,
|
2021-05-02 21:13:08 +00:00
|
|
|
TableRpc::<F>::Update(vec![what_enc]),
|
2022-09-19 18:12:19 +00:00
|
|
|
RequestStrategy::with_priority(PRIO_NORMAL).with_quorum(who.len()),
|
2020-04-18 17:21:34 +00:00
|
|
|
)
|
2020-04-16 12:50:49 +00:00
|
|
|
.await?;
|
|
|
|
Ok(())
|
2020-04-11 17:43:29 +00:00
|
|
|
}
|
2021-10-15 09:05:09 +00:00
|
|
|
}
|
2020-04-11 17:43:29 +00:00
|
|
|
|
2021-10-15 09:05:09 +00:00
|
|
|
/// Server side of the table RPC protocol: answer `TableRpc` messages coming
/// from other nodes by operating on the local table data.
#[async_trait]
impl<F: TableSchema, R: TableReplication> EndpointHandler<TableRpc<F>> for Table<F, R> {
	async fn handle(
		self: &Arc<Self>,
		msg: &TableRpc<F>,
		_from: NodeID,
	) -> Result<TableRpc<F>, Error> {
		match msg {
			// Point read of a single entry from local storage
			TableRpc::ReadEntry(key, sort_key) => {
				let value = self.data.read_entry(key, sort_key)?;
				Ok(TableRpc::ReadEntryResponse(value))
			}
			// Range read; the response reuses the Update variant to carry
			// the serialized entries that were read
			TableRpc::ReadRange {
				partition,
				begin_sort_key,
				filter,
				limit,
				enumeration_order,
			} => {
				let values = self.data.read_range(
					partition,
					begin_sort_key,
					filter,
					*limit,
					*enumeration_order,
				)?;
				Ok(TableRpc::Update(values))
			}
			// Store the given entries locally
			TableRpc::Update(pairs) => {
				self.data.update_many(pairs)?;
				Ok(TableRpc::Ok)
			}
			// Any other message (e.g. Ok, ReadEntryResponse) is not a valid
			// request
			m => Err(Error::unexpected_rpc_message(m)),
		}
	}
}
|