Make table objects slightly more fool-proof; add key table

commit 44a1089d95
parent c9c6b0dbd4

7 changed files with 290 additions and 75 deletions
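The pattern this commit applies to Bucket, Key, Object and Version: the collection field (authorized_keys, authorized_buckets, versions, blocks) becomes private, and each type gains a constructor plus an add_* method that keeps the vector sorted and refuses duplicates, so callers can no longer build inconsistent records by hand. A minimal, self-contained sketch of that pattern (not one of the actual Garage types):

// Sketch only: the shape shared by the new constructors in this commit.
// The field stays private; new() funnels everything through add(), which
// keeps the vector sorted and reports duplicates as Err(()).
pub struct Container {
    items: Vec<u64>,
}

impl Container {
    pub fn new(items: Vec<u64>) -> Self {
        let mut ret = Container { items: vec![] };
        for i in items {
            ret.add(i).expect("duplicate item in constructor");
        }
        ret
    }

    /// Insert only if the value is not already present.
    pub fn add(&mut self, new: u64) -> Result<(), ()> {
        match self.items.binary_search(&new) {
            Err(i) => {
                self.items.insert(i, new);
                Ok(())
            }
            Ok(_) => Err(()),
        }
    }

    /// Read access goes through a slice, so callers cannot unsort the vector.
    pub fn items(&self) -> &[u64] {
        &self.items[..]
    }
}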
@@ -100,12 +100,7 @@ impl AdminRpcHandler {
         };
         self.garage
             .bucket_table
-            .insert(&Bucket {
-                name: query.name.clone(),
-                timestamp: new_time,
-                deleted: false,
-                authorized_keys: vec![],
-            })
+            .insert(&Bucket::new(query.name.clone(), new_time, false, vec![]))
             .await?;
         Ok(AdminRPC::Ok(format!("Bucket {} was created.", query.name)))
     }

@@ -143,12 +138,12 @@ impl AdminRpcHandler {
         }
         self.garage
             .bucket_table
-            .insert(&Bucket {
-                name: query.name.clone(),
-                timestamp: std::cmp::max(bucket.timestamp + 1, now_msec()),
-                deleted: true,
-                authorized_keys: vec![],
-            })
+            .insert(&Bucket::new(
+                query.name.clone(),
+                std::cmp::max(bucket.timestamp + 1, now_msec()),
+                true,
+                vec![],
+            ))
             .await?;
         Ok(AdminRPC::Ok(format!("Bucket {} was deleted.", query.name)))
     }

@@ -292,7 +287,7 @@ impl AdminRpcHandler {
             .get(&version.bucket, &version.key)
             .await?;
         let version_exists = match object {
-            Some(o) => o.versions.iter().any(|x| x.uuid == version.uuid),
+            Some(o) => o.versions().iter().any(|x| x.uuid == version.uuid),
             None => {
                 warn!(
                     "Repair versions: object for version {:?} not found",

@@ -305,13 +300,13 @@ impl AdminRpcHandler {
             info!("Repair versions: marking version as deleted: {:?}", version);
             self.garage
                 .version_table
-                .insert(&Version {
-                    uuid: version.uuid,
-                    deleted: true,
-                    blocks: vec![],
-                    bucket: version.bucket,
-                    key: version.key,
-                })
+                .insert(&Version::new(
+                    version.uuid,
+                    version.bucket,
+                    version.key,
+                    true,
+                    vec![],
+                ))
                 .await?;
         }
@@ -119,37 +119,29 @@ async fn handle_put(
         None => return Err(Error::BadRequest(format!("Empty body"))),
     };

-    let mut object = Object {
-        bucket: bucket.into(),
-        key: key.into(),
-        versions: Vec::new(),
-    };
-    object.versions.push(Box::new(ObjectVersion {
+    let mut object_version = ObjectVersion {
         uuid: version_uuid,
         timestamp: now_msec(),
         mime_type: mime_type.to_string(),
         size: first_block.len() as u64,
         is_complete: false,
         data: ObjectVersionData::DeleteMarker,
-    }));
+    };

     if first_block.len() < INLINE_THRESHOLD {
-        object.versions[0].data = ObjectVersionData::Inline(first_block);
-        object.versions[0].is_complete = true;
+        object_version.data = ObjectVersionData::Inline(first_block);
+        object_version.is_complete = true;
+
+        let object = Object::new(bucket.into(), key.into(), vec![object_version]);
         garage.object_table.insert(&object).await?;
         return Ok(version_uuid);
     }

-    let version = Version {
-        uuid: version_uuid,
-        deleted: false,
-        blocks: Vec::new(),
-        bucket: bucket.into(),
-        key: key.into(),
-    };
+    let version = Version::new(version_uuid, bucket.into(), key.into(), false, vec![]);

     let first_block_hash = hash(&first_block[..]);
-    object.versions[0].data = ObjectVersionData::FirstBlock(first_block_hash);
+    object_version.data = ObjectVersionData::FirstBlock(first_block_hash);
+    let object = Object::new(bucket.into(), key.into(), vec![object_version.clone()]);
     garage.object_table.insert(&object).await?;

     let mut next_offset = first_block.len();

@@ -175,9 +167,12 @@ async fn handle_put(

     // TODO: if at any step we have an error, we should undo everything we did

-    object.versions[0].is_complete = true;
-    object.versions[0].size = next_offset as u64;
+    object_version.is_complete = true;
+    object_version.size = next_offset as u64;
+
+    let object = Object::new(bucket.into(), key.into(), vec![object_version]);
     garage.object_table.insert(&object).await?;

     Ok(version_uuid)
 }

@@ -187,8 +182,9 @@ async fn put_block_meta(
     offset: u64,
     hash: Hash,
 ) -> Result<(), Error> {
+    // TODO: don't clone, restart from empty block list ??
     let mut version = version.clone();
-    version.blocks.push(VersionBlock { offset, hash: hash });
+    version.add_block(VersionBlock { offset, hash }).unwrap();

     let block_ref = BlockRef {
         block: hash,

@@ -250,7 +246,7 @@ async fn handle_delete(garage: Arc<Garage>, bucket: &str, key: &str) -> Result<U
         None => false,
         Some(o) => {
             let mut has_active_version = false;
-            for v in o.versions.iter() {
+            for v in o.versions().iter() {
                 if v.data != ObjectVersionData::DeleteMarker {
                     has_active_version = true;
                     break;

@@ -267,19 +263,18 @@ async fn handle_delete(garage: Arc<Garage>, bucket: &str, key: &str) -> Result<U

     let version_uuid = gen_uuid();

-    let mut object = Object {
-        bucket: bucket.into(),
-        key: key.into(),
-        versions: Vec::new(),
-    };
-    object.versions.push(Box::new(ObjectVersion {
-        uuid: version_uuid,
-        timestamp: now_msec(),
-        mime_type: "application/x-delete-marker".into(),
-        size: 0,
-        is_complete: true,
-        data: ObjectVersionData::DeleteMarker,
-    }));
+    let object = Object::new(
+        bucket.into(),
+        key.into(),
+        vec![ObjectVersion {
+            uuid: version_uuid,
+            timestamp: now_msec(),
+            mime_type: "application/x-delete-marker".into(),
+            size: 0,
+            is_complete: true,
+            data: ObjectVersionData::DeleteMarker,
+        }],
+    );

     garage.object_table.insert(&object).await?;
     return Ok(version_uuid);

@@ -290,7 +285,7 @@ async fn handle_get(
     bucket: &str,
     key: &str,
 ) -> Result<Response<BodyType>, Error> {
-    let mut object = match garage
+    let object = match garage
         .object_table
         .get(&bucket.to_string(), &key.to_string())
         .await?

@@ -300,8 +295,8 @@ async fn handle_get(
     };

     let last_v = match object
-        .versions
-        .drain(..)
+        .versions()
+        .iter()
         .rev()
         .filter(|v| v.is_complete)
         .next()

@@ -311,13 +306,13 @@ async fn handle_get(
     };

     let resp_builder = Response::builder()
-        .header("Content-Type", last_v.mime_type)
+        .header("Content-Type", last_v.mime_type.to_string())
         .status(StatusCode::OK);

-    match last_v.data {
+    match &last_v.data {
         ObjectVersionData::DeleteMarker => Err(Error::NotFound),
         ObjectVersionData::Inline(bytes) => {
-            let body: BodyType = Box::new(BytesBody::from(bytes));
+            let body: BodyType = Box::new(BytesBody::from(bytes.to_vec()));
             Ok(resp_builder.body(body)?)
         }
         ObjectVersionData::FirstBlock(first_block_hash) => {

@@ -331,7 +326,7 @@ async fn handle_get(
     };

     let mut blocks = version
-        .blocks
+        .blocks()
         .iter()
         .map(|vb| (vb.hash, None))
         .collect::<Vec<_>>();
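For readers of the handle_put hunks above: the object record is now written by building an ObjectVersion value and wrapping it in Object::new at each insertion point, instead of mutating object.versions[0] in place. A hedged sketch of the resulting sequence for a non-inline upload, using only names that appear in this diff (block upload and error handling elided):

// 1. Insert an incomplete version that points at the first block.
object_version.data = ObjectVersionData::FirstBlock(first_block_hash);
let object = Object::new(bucket.into(), key.into(), vec![object_version.clone()]);
garage.object_table.insert(&object).await?;

// ... put_block_meta / block uploads advance next_offset ...

// 2. Once all blocks are stored, insert the version again, now complete;
//    the object table is expected to merge the two records for this version.
object_version.is_complete = true;
object_version.size = next_offset as u64;
let object = Object::new(bucket.into(), key.into(), vec![object_version]);
garage.object_table.insert(&object).await?;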
@@ -15,7 +15,44 @@ pub struct Bucket {
     pub deleted: bool,

     // Authorized keys
-    pub authorized_keys: Vec<AllowedKey>,
+    authorized_keys: Vec<AllowedKey>,
+}
+
+impl Bucket {
+    pub fn new(
+        name: String,
+        timestamp: u64,
+        deleted: bool,
+        authorized_keys: Vec<AllowedKey>,
+    ) -> Self {
+        let mut ret = Bucket {
+            name,
+            timestamp,
+            deleted,
+            authorized_keys: vec![],
+        };
+        for key in authorized_keys {
+            ret.add_key(key)
+                .expect("Duplicate AllowedKey in Bucket constructor");
+        }
+        ret
+    }
+    /// Add a key only if it is not already present
+    pub fn add_key(&mut self, key: AllowedKey) -> Result<(), ()> {
+        match self
+            .authorized_keys
+            .binary_search_by(|k| k.access_key_id.cmp(&key.access_key_id))
+        {
+            Err(i) => {
+                self.authorized_keys.insert(i, key);
+                Ok(())
+            }
+            Ok(_) => Err(()),
+        }
+    }
+    pub fn authorized_keys(&self) -> &[AllowedKey] {
+        &self.authorized_keys[..]
+    }
 }

 #[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]

@@ -39,9 +76,10 @@ impl Entry<EmptyKey, String> for Bucket {
             *self = other.clone();
             return;
         }
-        if self.timestamp > other.timestamp {
+        if self.timestamp > other.timestamp || self.deleted {
             return;
         }

         for ak in other.authorized_keys.iter() {
             match self
                 .authorized_keys

@@ -50,9 +88,7 @@ impl Entry<EmptyKey, String> for Bucket {
                 Ok(i) => {
                     let our_ak = &mut self.authorized_keys[i];
                     if ak.timestamp > our_ak.timestamp {
-                        our_ak.timestamp = ak.timestamp;
-                        our_ak.allowed_read = ak.allowed_read;
-                        our_ak.allowed_write = ak.allowed_write;
+                        *our_ak = ak.clone();
                     }
                 }
                 Err(i) => {
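A usage sketch for the new Bucket API. Assumptions: the module is reachable as crate::store::bucket_table (per src/store/mod.rs), and AllowedKey carries the fields the merge code above touches (access_key_id, timestamp, allowed_read, allowed_write); the helper itself is hypothetical.

use crate::store::bucket_table::{AllowedKey, Bucket};

// Hypothetical helper, not part of the commit.
fn example_bucket(now_msec: u64) -> Bucket {
    // Positional constructor: (name, timestamp, deleted, authorized_keys).
    let mut bucket = Bucket::new("my-bucket".to_string(), now_msec, false, vec![]);

    // Field set assumed from the merge code in this hunk.
    let key = AllowedKey {
        access_key_id: "GK-example".to_string(),
        timestamp: now_msec,
        allowed_read: true,
        allowed_write: true,
    };

    // add_key keeps authorized_keys sorted by access_key_id and refuses duplicates.
    bucket.add_key(key.clone()).expect("first insertion succeeds");
    assert!(bucket.add_key(key).is_err());
    assert_eq!(bucket.authorized_keys().len(), 1);
    bucket
}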
src/store/key_table.rs (new file, 121 lines)

@@ -0,0 +1,121 @@
+use async_trait::async_trait;
+use serde::{Deserialize, Serialize};
+
+use crate::error::Error;
+use crate::table::*;
+
+#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
+pub struct Key {
+    // Primary key
+    pub access_key_id: String,
+
+    // Associated secret key (immutable)
+    pub secret_access_key: String,
+
+    // Deletion
+    pub deleted: bool,
+
+    // Authorized keys
+    authorized_buckets: Vec<AllowedBucket>,
+}
+
+impl Key {
+    pub fn new(buckets: Vec<AllowedBucket>) -> Self {
+        let access_key_id = format!("GK{}", hex::encode(&rand::random::<[u8; 12]>()[..]));
+        let secret_access_key = hex::encode(&rand::random::<[u8; 32]>()[..]);
+        let mut ret = Self {
+            access_key_id,
+            secret_access_key,
+            deleted: false,
+            authorized_buckets: vec![],
+        };
+        for b in buckets {
+            ret.add_bucket(b);
+        }
+        ret
+    }
+    pub fn delete(access_key_id: String, secret_access_key: String) -> Self {
+        Self {
+            access_key_id,
+            secret_access_key,
+            deleted: true,
+            authorized_buckets: vec![],
+        }
+    }
+    /// Add an authorized bucket, only if it wasn't there before
+    pub fn add_bucket(&mut self, new: AllowedBucket) -> Result<(), ()> {
+        match self
+            .authorized_buckets
+            .binary_search_by(|b| b.bucket.cmp(&new.bucket))
+        {
+            Err(i) => {
+                self.authorized_buckets.insert(i, new);
+                Ok(())
+            }
+            Ok(_) => Err(()),
+        }
+    }
+    pub fn authorized_buckets(&self) -> &[AllowedBucket] {
+        &self.authorized_buckets[..]
+    }
+}
+
+#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
+pub struct AllowedBucket {
+    pub bucket: String,
+    pub timestamp: u64,
+    pub allowed_read: bool,
+    pub allowed_write: bool,
+}
+
+impl Entry<EmptyKey, String> for Key {
+    fn partition_key(&self) -> &EmptyKey {
+        &EmptyKey
+    }
+    fn sort_key(&self) -> &String {
+        &self.access_key_id
+    }
+
+    fn merge(&mut self, other: &Self) {
+        if other.deleted {
+            self.deleted = true;
+            self.authorized_buckets.clear();
+            return;
+        }
+
+        for ab in other.authorized_buckets.iter() {
+            match self
+                .authorized_buckets
+                .binary_search_by(|our_ab| our_ab.bucket.cmp(&ab.bucket))
+            {
+                Ok(i) => {
+                    let our_ab = &mut self.authorized_buckets[i];
+                    if ab.timestamp > our_ab.timestamp {
+                        *our_ab = ab.clone();
+                    }
+                }
+                Err(i) => {
+                    self.authorized_buckets.insert(i, ab.clone());
+                }
+            }
+        }
+    }
+}
+
+pub struct KeyTable;
+
+#[async_trait]
+impl TableSchema for KeyTable {
+    type P = EmptyKey;
+    type S = String;
+    type E = Key;
+    type Filter = ();
+
+    async fn updated(&self, _old: Option<Self::E>, _new: Option<Self::E>) -> Result<(), Error> {
+        Ok(())
+    }
+
+    fn matches_filter(entry: &Self::E, _filter: &Self::Filter) -> bool {
+        !entry.deleted
+    }
+}
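A usage sketch for the new Key type, assuming the module path crate::store::key_table; the function is hypothetical and just exercises the API added above.

use crate::store::key_table::{AllowedBucket, Key};

// Hypothetical demo, not part of the commit.
fn example_key() -> Key {
    // Key::new draws a random access key id ("GK" + 24 hex chars) and a
    // 64-hex-char secret, and starts out non-deleted with no buckets.
    let mut key = Key::new(vec![]);

    // add_bucket keeps authorized_buckets sorted by bucket name and returns
    // Err(()) if the bucket is already listed.
    let grant = AllowedBucket {
        bucket: "my-bucket".to_string(),
        timestamp: 1,
        allowed_read: true,
        allowed_write: false,
    };
    key.add_bucket(grant).expect("not a duplicate");
    assert_eq!(key.authorized_buckets().len(), 1);

    // Deletion is expressed as a tombstone entry carrying the same identifiers;
    // merging it on another replica clears its authorized_buckets.
    let _tombstone = Key::delete(key.access_key_id.clone(), key.secret_access_key.clone());
    key
}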
@@ -1,5 +1,6 @@
 pub mod block;
 pub mod block_ref_table;
 pub mod bucket_table;
+pub mod key_table;
 pub mod object_table;
 pub mod version_table;
@@ -20,7 +20,38 @@ pub struct Object {
     pub key: String,

     // Data
-    pub versions: Vec<Box<ObjectVersion>>,
+    versions: Vec<ObjectVersion>,
+}
+
+impl Object {
+    pub fn new(bucket: String, key: String, versions: Vec<ObjectVersion>) -> Self {
+        let mut ret = Self {
+            bucket,
+            key,
+            versions: vec![],
+        };
+        for v in versions {
+            ret.add_version(v)
+                .expect("Twice the same ObjectVersion in Object constructor");
+        }
+        ret
+    }
+    /// Adds a version if it wasn't already present
+    pub fn add_version(&mut self, new: ObjectVersion) -> Result<(), ()> {
+        match self
+            .versions
+            .binary_search_by(|v| v.cmp_key().cmp(&new.cmp_key()))
+        {
+            Err(i) => {
+                self.versions.insert(i, new);
+                Ok(())
+            }
+            Ok(_) => Err(()),
+        }
+    }
+    pub fn versions(&self) -> &[ObjectVersion] {
+        &self.versions[..]
+    }
 }

 #[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]

@@ -113,13 +144,13 @@ impl TableSchema for ObjectTable {
                     .binary_search_by(|nv| nv.cmp_key().cmp(&v.cmp_key()))
                     .is_err()
                 {
-                    let deleted_version = Version {
-                        uuid: v.uuid,
-                        deleted: true,
-                        blocks: vec![],
-                        bucket: old_v.bucket.clone(),
-                        key: old_v.key.clone(),
-                    };
+                    let deleted_version = Version::new(
+                        v.uuid,
+                        old_v.bucket.clone(),
+                        old_v.key.clone(),
+                        true,
+                        vec![],
+                    );
                     version_table.insert(&deleted_version).await?;
                 }
             }
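A usage sketch for Object::new and add_version, assuming the module path crate::store::object_table and the ObjectVersion fields visible in the handler hunks; the UUID value is passed in by the caller and its import is elided.

use crate::store::object_table::{Object, ObjectVersion, ObjectVersionData};

// Hypothetical demo, not part of the commit.
fn example_object(uuid: UUID, now_msec: u64) -> Object {
    let v = ObjectVersion {
        uuid,
        timestamp: now_msec,
        mime_type: "text/plain".to_string(),
        size: 0,
        is_complete: true,
        data: ObjectVersionData::DeleteMarker,
    };

    // Object::new funnels every version through add_version, so passing the
    // same version twice would panic with the constructor's expect message.
    let mut object = Object::new("my-bucket".into(), "hello.txt".into(), vec![v.clone()]);

    // Adding a duplicate afterwards is rejected instead of silently growing
    // the versions vector.
    assert!(object.add_version(v).is_err());
    assert_eq!(object.versions().len(), 1);
    object
}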
@@ -18,7 +18,7 @@ pub struct Version {

     // Actual data: the blocks for this version
     pub deleted: bool,
-    pub blocks: Vec<VersionBlock>,
+    blocks: Vec<VersionBlock>,

     // Back link to bucket+key so that we can figure if
     // this was deleted later on

@@ -26,6 +26,42 @@ pub struct Version {
     pub key: String,
 }
+
+impl Version {
+    pub fn new(
+        uuid: UUID,
+        bucket: String,
+        key: String,
+        deleted: bool,
+        blocks: Vec<VersionBlock>,
+    ) -> Self {
+        let mut ret = Self {
+            uuid,
+            deleted,
+            blocks: vec![],
+            bucket,
+            key,
+        };
+        for b in blocks {
+            ret.add_block(b)
+                .expect("Twice the same VersionBlock in Version constructor");
+        }
+        ret
+    }
+    /// Adds a block if it wasn't already present
+    pub fn add_block(&mut self, new: VersionBlock) -> Result<(), ()> {
+        match self.blocks.binary_search_by(|b| b.offset.cmp(&new.offset)) {
+            Err(i) => {
+                self.blocks.insert(i, new);
+                Ok(())
+            }
+            Ok(_) => Err(()),
+        }
+    }
+    pub fn blocks(&self) -> &[VersionBlock] {
+        &self.blocks[..]
+    }
+}

 #[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
 pub struct VersionBlock {
     pub offset: u64,
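A usage sketch for Version::new and add_block, assuming the module path crate::store::version_table; the UUID and Hash values are supplied by the caller and their imports are elided. Note the argument order, (uuid, bucket, key, deleted, blocks), which is why the call sites above now pass bucket and key before the deleted flag.

use crate::store::version_table::{Version, VersionBlock};

// Hypothetical demo, not part of the commit.
fn example_version(uuid: UUID, block_hash: Hash) -> Version {
    let mut version = Version::new(uuid, "my-bucket".into(), "hello.txt".into(), false, vec![]);

    // add_block keeps blocks sorted by offset and rejects a second block at
    // the same offset; put_block_meta relies on offsets being unique and
    // simply unwraps the result.
    version
        .add_block(VersionBlock { offset: 0, hash: block_hash })
        .unwrap();
    assert!(version
        .add_block(VersionBlock { offset: 0, hash: block_hash })
        .is_err());
    assert_eq!(version.blocks().len(), 1);
    version
}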