2020-04-09 15:32:28 +00:00
|
|
|
use async_trait::async_trait;
|
2020-04-10 20:01:48 +00:00
|
|
|
use serde::{Deserialize, Serialize};
|
|
|
|
use std::sync::Arc;
|
2020-04-09 15:32:28 +00:00
|
|
|
|
2020-04-24 10:10:01 +00:00
|
|
|
use garage_util::background::BackgroundRunner;
|
|
|
|
use garage_util::data::*;
|
|
|
|
use garage_util::error::Error;
|
2020-04-23 17:05:46 +00:00
|
|
|
|
2020-04-24 10:10:01 +00:00
|
|
|
use garage_table::table_sharded::*;
|
|
|
|
use garage_table::*;
|
2020-04-18 17:39:08 +00:00
|
|
|
|
2020-04-24 10:10:01 +00:00
|
|
|
use crate::version_table::*;
|
2020-04-09 15:32:28 +00:00
|
|
|
|
2020-04-09 21:45:07 +00:00
|
|
|
/// An object stored in a bucket, identified by (bucket, key).
///
/// The set of versions is kept sorted by `ObjectVersion::cmp_key()`
/// (timestamp, then UUID); see `Object::add_version` and the `merge`
/// implementation, which both rely on that ordering for binary search.
#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
pub struct Object {
    // Primary key (table partition key): the bucket holding the object
    pub bucket: String,

    // Sort key: the object's key (path) within the bucket
    pub key: String,

    // Data: versions of this object, kept sorted by cmp_key().
    // Private so that the sorted-and-unique invariant can only be
    // touched through add_version() / merge().
    versions: Vec<ObjectVersion>,
}
|
|
|
|
|
|
|
|
impl Object {
|
|
|
|
pub fn new(bucket: String, key: String, versions: Vec<ObjectVersion>) -> Self {
|
|
|
|
let mut ret = Self {
|
|
|
|
bucket,
|
|
|
|
key,
|
|
|
|
versions: vec![],
|
|
|
|
};
|
|
|
|
for v in versions {
|
|
|
|
ret.add_version(v)
|
|
|
|
.expect("Twice the same ObjectVersion in Object constructor");
|
|
|
|
}
|
|
|
|
ret
|
|
|
|
}
|
|
|
|
/// Adds a version if it wasn't already present
|
|
|
|
pub fn add_version(&mut self, new: ObjectVersion) -> Result<(), ()> {
|
|
|
|
match self
|
|
|
|
.versions
|
|
|
|
.binary_search_by(|v| v.cmp_key().cmp(&new.cmp_key()))
|
|
|
|
{
|
|
|
|
Err(i) => {
|
|
|
|
self.versions.insert(i, new);
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
Ok(_) => Err(()),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
pub fn versions(&self) -> &[ObjectVersion] {
|
|
|
|
&self.versions[..]
|
|
|
|
}
|
2020-04-09 15:32:28 +00:00
|
|
|
}
|
|
|
|
|
2020-04-09 21:45:07 +00:00
|
|
|
/// One version of an object.
#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
pub struct ObjectVersion {
    // Unique identifier of this version
    pub uuid: UUID,
    // Creation timestamp; together with uuid this forms the
    // ordering key used by Object (see cmp_key()).
    pub timestamp: u64,

    // MIME type of the stored data
    pub mime_type: String,
    // Total size of the version's data, in bytes
    pub size: u64,
    // Whether the upload of this version has finished.
    // merge() lets this flag go from false to true, never back.
    pub is_complete: bool,

    // Where/how the version's data is stored
    pub data: ObjectVersionData,
}
|
|
|
|
|
2020-04-09 21:45:07 +00:00
|
|
|
/// Location of an object version's data.
#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
pub enum ObjectVersionData {
    /// Tombstone: this version marks a deletion of the object
    DeleteMarker,
    /// Small payload stored inline in this table entry
    /// (serde_bytes gives a compact byte-string encoding)
    Inline(#[serde(with = "serde_bytes")] Vec<u8>),
    /// Larger payload stored in the block store; this is the hash
    /// of the first block (the Version entry describes the rest)
    FirstBlock(Hash),
}
|
|
|
|
|
2020-04-11 16:51:11 +00:00
|
|
|
impl ObjectVersion {
    /// Ordering key for versions inside an Object: by timestamp
    /// first, with the uuid as a deterministic tie-breaker.
    fn cmp_key(&self) -> (u64, &UUID) {
        (self.timestamp, &self.uuid)
    }
}
|
|
|
|
|
2020-04-09 15:32:28 +00:00
|
|
|
impl Entry<String, String> for Object {
    /// Partition key: the bucket name.
    fn partition_key(&self) -> &String {
        &self.bucket
    }

    /// Sort key: the object key inside the bucket.
    fn sort_key(&self) -> &String {
        &self.key
    }

    /// CRDT merge: take the union of both version sets, then drop all
    /// versions older than the most recent complete one (they are
    /// superseded and no longer need to be kept).
    fn merge(&mut self, other: &Self) {
        for other_v in other.versions.iter() {
            match self
                .versions
                .binary_search_by(|v| v.cmp_key().cmp(&other_v.cmp_key()))
            {
                Ok(i) => {
                    // Same (timestamp, uuid): merge the two copies
                    // monotonically — size only grows, is_complete
                    // only goes from false to true.
                    let v = &mut self.versions[i];
                    if other_v.size > v.size {
                        v.size = other_v.size;
                    }
                    if other_v.is_complete && !v.is_complete {
                        v.is_complete = true;
                    }
                }
                Err(i) => {
                    self.versions.insert(i, other_v.clone());
                }
            }
        }

        // Index of the last (newest) complete version, if any.
        // rposition replaces the previous enumerate/rev/filter/next
        // chain (clippy: filter_next) in a single call.
        let last_complete = self.versions.iter().rposition(|v| v.is_complete);

        if let Some(last_vi) = last_complete {
            // Truncate in place: drain(..last_vi) removes everything
            // before the last complete version without reallocating,
            // unlike the previous drain(last_vi..).collect() which
            // built a brand-new Vec.
            self.versions.drain(..last_vi);
        }
    }
}
|
|
|
|
|
2020-04-09 21:45:07 +00:00
|
|
|
/// Table schema state for the object table.
pub struct ObjectTable {
    // Background job runner; not used by the code visible in this
    // file — presumably used by callers holding this struct. TODO confirm.
    pub background: Arc<BackgroundRunner>,
    // Handle to the version table, used by updated() to propagate
    // deletion of versions that disappeared from an object
    pub version_table: Arc<Table<VersionTable, TableShardedReplication>>,
}
|
|
|
|
|
2020-04-09 15:32:28 +00:00
|
|
|
#[async_trait]
|
2020-04-12 20:24:53 +00:00
|
|
|
impl TableSchema for ObjectTable {
|
2020-04-09 15:32:28 +00:00
|
|
|
type P = String;
|
|
|
|
type S = String;
|
|
|
|
type E = Object;
|
2020-04-17 15:09:57 +00:00
|
|
|
type Filter = ();
|
2020-04-09 15:32:28 +00:00
|
|
|
|
2020-04-19 20:52:20 +00:00
|
|
|
async fn updated(&self, old: Option<Self::E>, new: Option<Self::E>) -> Result<(), Error> {
|
2020-04-12 11:03:55 +00:00
|
|
|
let version_table = self.version_table.clone();
|
2020-04-17 12:49:10 +00:00
|
|
|
if let (Some(old_v), Some(new_v)) = (old, new) {
|
2020-04-11 16:51:11 +00:00
|
|
|
// Propagate deletion of old versions
|
2020-04-19 20:52:20 +00:00
|
|
|
for v in old_v.versions.iter() {
|
|
|
|
if new_v
|
|
|
|
.versions
|
|
|
|
.binary_search_by(|nv| nv.cmp_key().cmp(&v.cmp_key()))
|
|
|
|
.is_err()
|
|
|
|
{
|
2020-04-23 18:16:33 +00:00
|
|
|
let deleted_version = Version::new(
|
|
|
|
v.uuid,
|
|
|
|
old_v.bucket.clone(),
|
|
|
|
old_v.key.clone(),
|
|
|
|
true,
|
|
|
|
vec![],
|
|
|
|
);
|
2020-04-19 20:52:20 +00:00
|
|
|
version_table.insert(&deleted_version).await?;
|
2020-04-11 16:51:11 +00:00
|
|
|
}
|
2020-04-19 20:52:20 +00:00
|
|
|
}
|
2020-04-17 12:49:10 +00:00
|
|
|
}
|
2020-04-19 20:52:20 +00:00
|
|
|
Ok(())
|
2020-04-09 15:32:28 +00:00
|
|
|
}
|
2020-04-17 15:09:57 +00:00
|
|
|
|
|
|
|
fn matches_filter(_entry: &Self::E, _filter: &Self::Filter) -> bool {
|
|
|
|
// TODO
|
|
|
|
true
|
|
|
|
}
|
2020-04-09 15:32:28 +00:00
|
|
|
}
|