//! Replicated table abstraction: entries stored locally in a sled tree,
//! with quorum reads and writes routed to replica nodes over the
//! cluster ring, plus hooks for background anti-entropy sync.

use std::collections::{BTreeMap, HashMap};
use std::sync::Arc;
use std::time::Duration;
2020-04-08 20:00:41 +00:00
use arc_swap::ArcSwapOption;
use async_trait::async_trait;
use futures::stream::*;
use serde::{Deserialize, Serialize};
use serde_bytes::ByteBuf;
2020-04-08 20:00:41 +00:00
use crate::data::*;
use crate::error::Error;
2020-04-08 20:00:41 +00:00
use crate::membership::System;
use crate::proto::*;
2020-04-08 20:00:41 +00:00
use crate::rpc_client::*;
2020-04-16 16:41:10 +00:00
use crate::table_sync::*;
2020-04-08 20:00:41 +00:00
2020-04-12 20:24:53 +00:00
pub struct Table<F: TableSchema> {
2020-04-08 21:01:49 +00:00
pub instance: F,
2020-04-08 20:00:41 +00:00
pub name: String,
pub system: Arc<System>,
pub store: sled::Tree,
2020-04-17 16:27:29 +00:00
pub syncer: ArcSwapOption<TableSyncer<F>>,
2020-04-08 20:00:41 +00:00
pub param: TableReplicationParams,
}
/// Parameters governing how a table's data is replicated and how many
/// replies are required for an operation to succeed.
#[derive(Clone)]
pub struct TableReplicationParams {
    /// Number of nodes storing each partition.
    pub replication_factor: usize,
    /// Minimum number of successful replies for a read.
    pub read_quorum: usize,
    /// Minimum number of successful replies for a write.
    pub write_quorum: usize,
    /// Timeout applied to individual table RPCs.
    pub timeout: Duration,
}
/// Type-erased handler for incoming table RPC payloads, so the RPC
/// dispatch layer does not need to know the table's schema type.
#[async_trait]
pub trait TableRpcHandler {
    /// Decode `rpc`, process it, and return the serialized response.
    async fn handle(&self, rpc: &[u8]) -> Result<Vec<u8>, Error>;
}
2020-04-12 20:24:53 +00:00
struct TableRpcHandlerAdapter<F: TableSchema> {
2020-04-08 20:00:41 +00:00
table: Arc<Table<F>>,
}
#[async_trait]
2020-04-12 20:24:53 +00:00
impl<F: TableSchema + 'static> TableRpcHandler for TableRpcHandlerAdapter<F> {
2020-04-08 20:00:41 +00:00
async fn handle(&self, rpc: &[u8]) -> Result<Vec<u8>, Error> {
let msg = rmp_serde::decode::from_read_ref::<_, TableRPC<F>>(rpc)?;
let rep = self.table.handle(msg).await?;
2020-04-09 16:43:53 +00:00
Ok(rmp_to_vec_all_named(&rep)?)
2020-04-08 20:00:41 +00:00
}
}
2020-04-08 21:01:49 +00:00
#[derive(Serialize, Deserialize)]
2020-04-12 20:24:53 +00:00
pub enum TableRPC<F: TableSchema> {
2020-04-08 21:01:49 +00:00
Ok,
2020-04-08 21:47:34 +00:00
2020-04-09 14:16:27 +00:00
ReadEntry(F::P, F::S),
ReadEntryResponse(Option<ByteBuf>),
2020-04-08 21:47:34 +00:00
ReadRange(F::P, F::S, Option<F::Filter>, usize),
Update(Vec<Arc<ByteBuf>>),
2020-04-16 16:41:10 +00:00
SyncRPC(SyncRPC),
2020-04-08 20:00:41 +00:00
}
2020-04-09 15:32:28 +00:00
pub trait PartitionKey {
2020-04-08 20:00:41 +00:00
fn hash(&self) -> Hash;
}
/// Key type that orders entries within a partition.
pub trait SortKey {
    /// Byte representation used for lexicographic ordering.
    fn sort_key(&self) -> &[u8];
}
/// A table entry: addressable by (partition key, sort key) and
/// mergeable with concurrent versions of itself.
pub trait Entry<P: PartitionKey, S: SortKey>:
    PartialEq + Clone + Serialize + for<'de> Deserialize<'de> + Send + Sync
{
    /// Partition key locating this entry on the ring.
    fn partition_key(&self) -> &P;
    /// Sort key ordering this entry within its partition.
    fn sort_key(&self) -> &S;
    /// Merge another (possibly concurrent) version into this one.
    fn merge(&mut self, other: &Self);
}
2020-04-09 14:16:27 +00:00
#[derive(Clone, Serialize, Deserialize)]
pub struct EmptySortKey;
impl SortKey for EmptySortKey {
fn sort_key(&self) -> &[u8] {
&[]
}
}
2020-04-09 15:32:28 +00:00
impl<T: AsRef<str>> PartitionKey for T {
2020-04-09 14:16:27 +00:00
fn hash(&self) -> Hash {
2020-04-09 15:32:28 +00:00
hash(self.as_ref().as_bytes())
2020-04-09 14:16:27 +00:00
}
}
2020-04-09 15:32:28 +00:00
impl<T: AsRef<str>> SortKey for T {
2020-04-09 14:16:27 +00:00
fn sort_key(&self) -> &[u8] {
2020-04-09 15:32:28 +00:00
self.as_ref().as_bytes()
2020-04-09 14:16:27 +00:00
}
}
2020-04-09 21:45:07 +00:00
impl PartitionKey for Hash {
fn hash(&self) -> Hash {
self.clone()
}
}
2020-04-10 20:55:01 +00:00
impl SortKey for Hash {
fn sort_key(&self) -> &[u8] {
self.as_slice()
}
}
2020-04-09 21:45:07 +00:00
2020-04-08 20:00:41 +00:00
#[async_trait]
2020-04-12 20:24:53 +00:00
pub trait TableSchema: Send + Sync {
2020-04-09 15:32:28 +00:00
type P: PartitionKey + Clone + PartialEq + Serialize + for<'de> Deserialize<'de> + Send + Sync;
type S: SortKey + Clone + Serialize + for<'de> Deserialize<'de> + Send + Sync;
2020-04-09 14:16:27 +00:00
type E: Entry<Self::P, Self::S>;
type Filter: Clone + Serialize + for<'de> Deserialize<'de> + Send + Sync;
2020-04-08 20:00:41 +00:00
2020-04-17 12:49:10 +00:00
async fn updated(&self, old: Option<Self::E>, new: Option<Self::E>);
fn matches_filter(_entry: &Self::E, _filter: &Self::Filter) -> bool {
true
}
2020-04-08 20:00:41 +00:00
}
2020-04-12 20:24:53 +00:00
impl<F: TableSchema + 'static> Table<F> {
// =============== PUBLIC INTERFACE FUNCTIONS (new, insert, get, etc) ===============
2020-04-16 12:50:49 +00:00
pub async fn new(
instance: F,
system: Arc<System>,
db: &sled::Db,
name: String,
param: TableReplicationParams,
2020-04-12 20:24:53 +00:00
) -> Arc<Self> {
let store = db.open_tree(&name).expect("Unable to open DB tree");
2020-04-16 12:50:49 +00:00
let table = Arc::new(Self {
2020-04-08 21:01:49 +00:00
instance,
2020-04-08 20:00:41 +00:00
name,
system,
store,
param,
2020-04-17 16:27:29 +00:00
syncer: ArcSwapOption::from(None),
2020-04-16 12:50:49 +00:00
});
2020-04-16 16:41:10 +00:00
let syncer = TableSyncer::launch(table.clone()).await;
2020-04-17 16:27:29 +00:00
table.syncer.swap(Some(syncer));
2020-04-16 12:50:49 +00:00
table
2020-04-08 20:00:41 +00:00
}
2020-04-09 14:16:27 +00:00
pub async fn insert(&self, e: &F::E) -> Result<(), Error> {
let hash = e.partition_key().hash();
let ring = self.system.ring.borrow().clone();
let who = ring.walk_ring(&hash, self.param.replication_factor);
2020-04-17 19:59:07 +00:00
//eprintln!("insert who: {:?}", who);
2020-04-08 21:01:49 +00:00
let e_enc = Arc::new(ByteBuf::from(rmp_to_vec_all_named(e)?));
let rpc = &TableRPC::<F>::Update(vec![e_enc]);
self.rpc_try_call_many(&who[..], &rpc, self.param.write_quorum)
.await?;
2020-04-08 21:01:49 +00:00
Ok(())
}
2020-04-08 20:00:41 +00:00
pub async fn insert_many(&self, entries: &[F::E]) -> Result<(), Error> {
let mut call_list = HashMap::new();
for entry in entries.iter() {
let hash = entry.partition_key().hash();
let ring = self.system.ring.borrow().clone();
let who = ring.walk_ring(&hash, self.param.replication_factor);
let e_enc = Arc::new(ByteBuf::from(rmp_to_vec_all_named(entry)?));
for node in who {
if !call_list.contains_key(&node) {
call_list.insert(node.clone(), vec![]);
}
call_list.get_mut(&node).unwrap().push(e_enc.clone());
}
}
let call_futures = call_list.drain().map(|(node, entries)| async move {
let rpc = TableRPC::<F>::Update(entries);
let rpc_bytes = rmp_to_vec_all_named(&rpc)?;
let message = Message::TableRPC(self.name.to_string(), rpc_bytes);
let resp = rpc_call(self.system.clone(), &node, &message, self.param.timeout).await?;
Ok::<_, Error>((node, resp))
});
let mut resps = call_futures.collect::<FuturesUnordered<_>>();
let mut errors = vec![];
while let Some(resp) = resps.next().await {
if let Err(e) = resp {
errors.push(e);
}
}
if errors.len() > self.param.replication_factor - self.param.write_quorum {
Err(Error::Message("Too many errors".into()))
} else {
Ok(())
}
}
2020-04-16 12:50:49 +00:00
pub async fn get(
self: &Arc<Self>,
partition_key: &F::P,
sort_key: &F::S,
) -> Result<Option<F::E>, Error> {
2020-04-09 14:16:27 +00:00
let hash = partition_key.hash();
let ring = self.system.ring.borrow().clone();
let who = ring.walk_ring(&hash, self.param.replication_factor);
2020-04-17 19:59:07 +00:00
//eprintln!("get who: {:?}", who);
2020-04-08 20:00:41 +00:00
2020-04-09 14:16:27 +00:00
let rpc = &TableRPC::<F>::ReadEntry(partition_key.clone(), sort_key.clone());
let resps = self
.rpc_try_call_many(&who[..], &rpc, self.param.read_quorum)
2020-04-08 21:01:49 +00:00
.await?;
2020-04-08 21:47:34 +00:00
let mut ret = None;
2020-04-09 18:58:39 +00:00
let mut not_all_same = false;
2020-04-08 21:01:49 +00:00
for resp in resps {
2020-04-08 21:47:34 +00:00
if let TableRPC::ReadEntryResponse(value) = resp {
if let Some(v_bytes) = value {
let v = rmp_serde::decode::from_read_ref::<_, F::E>(v_bytes.as_slice())?;
2020-04-08 21:47:34 +00:00
ret = match ret {
None => Some(v),
Some(mut x) => {
2020-04-09 21:45:07 +00:00
if x != v {
2020-04-09 18:58:39 +00:00
not_all_same = true;
2020-04-09 21:45:07 +00:00
x.merge(&v);
2020-04-09 18:58:39 +00:00
}
2020-04-08 21:47:34 +00:00
Some(x)
}
}
2020-04-08 21:01:49 +00:00
}
2020-04-08 21:47:34 +00:00
} else {
return Err(Error::Message(format!("Invalid return value to read")));
2020-04-08 21:01:49 +00:00
}
}
2020-04-09 18:58:39 +00:00
if let Some(ret_entry) = &ret {
if not_all_same {
let self2 = self.clone();
let ent2 = ret_entry.clone();
2020-04-16 12:50:49 +00:00
self.system
.background
.spawn(async move { self2.repair_on_read(&who[..], ent2).await });
2020-04-09 18:58:39 +00:00
}
}
2020-04-08 21:47:34 +00:00
Ok(ret)
2020-04-08 21:01:49 +00:00
}
/// Read a range of entries from a read quorum of replicas, merging the
/// answers; keys on which replicas disagreed are repaired in the
/// background with the merged value.
pub async fn get_range(
    self: &Arc<Self>,
    partition_key: &F::P,
    begin_sort_key: &F::S,
    filter: Option<F::Filter>,
    limit: usize,
) -> Result<Vec<F::E>, Error> {
    let hash = partition_key.hash();
    let ring = self.system.ring.borrow().clone();
    let who = ring.walk_ring(&hash, self.param.replication_factor);

    let rpc =
        &TableRPC::<F>::ReadRange(partition_key.clone(), begin_sort_key.clone(), filter, limit);
    let resps = self
        .rpc_try_call_many(&who[..], &rpc, self.param.read_quorum)
        .await?;

    // Merged result, keyed by tree key; values are Options so they can
    // be moved out at the end without cloning.
    let mut ret = BTreeMap::new();
    // Entries on which replicas disagreed, to be written back.
    let mut to_repair = BTreeMap::new();
    for resp in resps {
        // Range replies reuse the Update variant as entry carrier.
        if let TableRPC::Update(entries) = resp {
            for entry_bytes in entries.iter() {
                let entry =
                    rmp_serde::decode::from_read_ref::<_, F::E>(entry_bytes.as_slice())?;
                let entry_key = self.tree_key(entry.partition_key(), entry.sort_key());
                match ret.remove(&entry_key) {
                    None => {
                        ret.insert(entry_key, Some(entry));
                    }
                    Some(Some(mut prev)) => {
                        // Divergence detected: merge and remember the
                        // merged value for background repair.
                        let must_repair = prev != entry;
                        prev.merge(&entry);
                        if must_repair {
                            to_repair.insert(entry_key.clone(), Some(prev.clone()));
                        }
                        ret.insert(entry_key, Some(prev));
                    }
                    // Values are only ever inserted as Some above.
                    Some(None) => unreachable!(),
                }
            }
        }
    }
    if !to_repair.is_empty() {
        let self2 = self.clone();
        self.system.background.spawn(async move {
            for (_, v) in to_repair.iter_mut() {
                self2.repair_on_read(&who[..], v.take().unwrap()).await?;
            }
            Ok(())
        });
    }
    let ret_vec = ret
        .iter_mut()
        .take(limit)
        .map(|(_k, v)| v.take().unwrap())
        .collect::<Vec<_>>();
    Ok(ret_vec)
}
// =============== UTILITY FUNCTION FOR CLIENT OPERATIONS ===============
async fn repair_on_read(&self, who: &[UUID], what: F::E) -> Result<(), Error> {
2020-04-16 12:50:49 +00:00
let what_enc = Arc::new(ByteBuf::from(rmp_to_vec_all_named(&what)?));
self.rpc_try_call_many(&who[..], &TableRPC::<F>::Update(vec![what_enc]), who.len())
2020-04-16 12:50:49 +00:00
.await?;
Ok(())
}
async fn rpc_try_call_many(
&self,
who: &[UUID],
rpc: &TableRPC<F>,
quorum: usize,
) -> Result<Vec<TableRPC<F>>, Error> {
//eprintln!("Table RPC to {:?}: {}", who, serde_json::to_string(&rpc)?);
2020-04-09 16:43:53 +00:00
let rpc_bytes = rmp_to_vec_all_named(rpc)?;
2020-04-08 21:01:49 +00:00
let rpc_msg = Message::TableRPC(self.name.to_string(), rpc_bytes);
let resps = rpc_try_call_many(
self.system.clone(),
who,
rpc_msg,
quorum,
self.param.timeout,
)
.await?;
2020-04-08 21:01:49 +00:00
let mut resps_vals = vec![];
for resp in resps {
if let Message::TableRPC(tbl, rep_by) = &resp {
if *tbl == self.name {
resps_vals.push(rmp_serde::decode::from_read_ref(&rep_by)?);
continue;
}
}
return Err(Error::Message(format!(
"Invalid reply to TableRPC: {:?}",
resp
)));
2020-04-08 21:01:49 +00:00
}
//eprintln!(
// "Table RPC responses: {}",
// serde_json::to_string(&resps_vals)?
//);
2020-04-08 21:01:49 +00:00
Ok(resps_vals)
2020-04-08 20:00:41 +00:00
}
2020-04-16 17:28:02 +00:00
pub async fn rpc_call(&self, who: &UUID, rpc: &TableRPC<F>) -> Result<TableRPC<F>, Error> {
let rpc_bytes = rmp_to_vec_all_named(rpc)?;
let rpc_msg = Message::TableRPC(self.name.to_string(), rpc_bytes);
let resp = rpc_call(self.system.clone(), who, &rpc_msg, self.param.timeout).await?;
if let Message::TableRPC(tbl, rep_by) = &resp {
if *tbl == self.name {
return Ok(rmp_serde::decode::from_read_ref(&rep_by)?);
}
}
Err(Error::Message(format!(
"Invalid reply to TableRPC: {:?}",
resp
)))
2020-04-16 17:28:02 +00:00
}
// =============== HANDLERS FOR RPC OPERATIONS (SERVER SIDE) ==============

/// Wrap this table in a type-erased handler suitable for registration
/// with the RPC dispatch layer.
pub fn rpc_handler(self: Arc<Self>) -> Box<dyn TableRpcHandler + Send + Sync> {
    Box::new(TableRpcHandlerAdapter::<F> { table: self })
}
2020-04-16 17:28:02 +00:00
async fn handle(self: &Arc<Self>, msg: TableRPC<F>) -> Result<TableRPC<F>, Error> {
2020-04-08 21:01:49 +00:00
match msg {
2020-04-08 21:47:34 +00:00
TableRPC::ReadEntry(key, sort_key) => {
let value = self.handle_read_entry(&key, &sort_key)?;
Ok(TableRPC::ReadEntryResponse(value))
2020-04-08 21:01:49 +00:00
}
TableRPC::ReadRange(key, begin_sort_key, filter, limit) => {
let values = self.handle_read_range(&key, &begin_sort_key, &filter, limit)?;
Ok(TableRPC::Update(values))
}
2020-04-08 21:01:49 +00:00
TableRPC::Update(pairs) => {
2020-04-08 21:47:34 +00:00
self.handle_update(pairs).await?;
2020-04-08 21:01:49 +00:00
Ok(TableRPC::Ok)
}
TableRPC::SyncRPC(rpc) => {
2020-04-17 16:27:29 +00:00
let syncer = self.syncer.load_full().unwrap();
let response = syncer
.handle_rpc(&rpc, self.system.background.stop_signal.clone())
.await?;
Ok(TableRPC::SyncRPC(response))
2020-04-16 16:41:10 +00:00
}
_ => Err(Error::RPCError(format!("Unexpected table RPC"))),
2020-04-08 21:01:49 +00:00
}
}
fn handle_read_entry(&self, p: &F::P, s: &F::S) -> Result<Option<ByteBuf>, Error> {
2020-04-09 14:16:27 +00:00
let tree_key = self.tree_key(p, s);
2020-04-08 21:47:34 +00:00
if let Some(bytes) = self.store.get(&tree_key)? {
Ok(Some(ByteBuf::from(bytes.to_vec())))
2020-04-08 21:47:34 +00:00
} else {
Ok(None)
2020-04-08 21:01:49 +00:00
}
}
fn handle_read_range(
&self,
p: &F::P,
s: &F::S,
filter: &Option<F::Filter>,
limit: usize,
) -> Result<Vec<Arc<ByteBuf>>, Error> {
let partition_hash = p.hash();
let first_key = self.tree_key(p, s);
let mut ret = vec![];
for item in self.store.range(first_key..) {
let (key, value) = item?;
if &key[..32] != partition_hash.as_slice() {
break;
}
let keep = match filter {
None => true,
Some(f) => {
let entry = rmp_serde::decode::from_read_ref::<_, F::E>(value.as_ref())?;
F::matches_filter(&entry, f)
}
};
if keep {
ret.push(Arc::new(ByteBuf::from(value.as_ref())));
}
if ret.len() >= limit {
break;
}
}
Ok(ret)
}
pub async fn handle_update(
self: &Arc<Self>,
mut entries: Vec<Arc<ByteBuf>>,
) -> Result<(), Error> {
for update_bytes in entries.drain(..) {
let update = rmp_serde::decode::from_read_ref::<_, F::E>(update_bytes.as_slice())?;
2020-04-09 18:58:39 +00:00
let tree_key = self.tree_key(update.partition_key(), update.sort_key());
let (old_entry, new_entry) = self.store.transaction(|db| {
2020-04-09 21:45:07 +00:00
let (old_entry, new_entry) = match db.get(&tree_key)? {
2020-04-09 18:58:39 +00:00
Some(prev_bytes) => {
let old_entry = rmp_serde::decode::from_read_ref::<_, F::E>(&prev_bytes)
.map_err(Error::RMPDecode)
.map_err(sled::ConflictableTransactionError::Abort)?;
2020-04-09 21:45:07 +00:00
let mut new_entry = old_entry.clone();
new_entry.merge(&update);
(Some(old_entry), new_entry)
2020-04-09 18:58:39 +00:00
}
None => (None, update.clone()),
2020-04-09 18:58:39 +00:00
};
2020-04-08 21:01:49 +00:00
2020-04-09 18:58:39 +00:00
let new_bytes = rmp_to_vec_all_named(&new_entry)
.map_err(Error::RMPEncode)
.map_err(sled::ConflictableTransactionError::Abort)?;
db.insert(tree_key.clone(), new_bytes)?;
2020-04-17 12:49:10 +00:00
Ok((old_entry, Some(new_entry)))
2020-04-09 18:58:39 +00:00
})?;
2020-04-08 21:01:49 +00:00
2020-04-17 12:49:10 +00:00
if old_entry != new_entry {
2020-04-16 17:28:02 +00:00
self.instance.updated(old_entry, new_entry).await;
2020-04-17 16:27:29 +00:00
let syncer = self.syncer.load_full().unwrap();
2020-04-16 17:28:02 +00:00
self.system.background.spawn(syncer.invalidate(tree_key));
}
2020-04-08 21:01:49 +00:00
}
Ok(())
2020-04-08 20:00:41 +00:00
}
2020-04-09 14:16:27 +00:00
2020-04-16 16:41:10 +00:00
pub async fn delete_range(&self, begin: &Hash, end: &Hash) -> Result<(), Error> {
eprintln!("({}) Deleting range {:?} - {:?}", self.name, begin, end);
2020-04-17 12:49:10 +00:00
let mut count = 0;
while let Some((key, _value)) = self.store.get_lt(end.as_slice())? {
if key.as_ref() < begin.as_slice() {
break;
}
if let Some(old_val) = self.store.remove(&key)? {
let old_entry = rmp_serde::decode::from_read_ref::<_, F::E>(&old_val)?;
self.instance.updated(Some(old_entry), None).await;
count += 1;
}
}
eprintln!("({}) {} entries deleted", self.name, count);
2020-04-16 16:41:10 +00:00
Ok(())
}
2020-04-09 14:16:27 +00:00
fn tree_key(&self, p: &F::P, s: &F::S) -> Vec<u8> {
let mut ret = p.hash().to_vec();
ret.extend(s.sort_key());
ret
}
2020-04-08 20:00:41 +00:00
}