forked from Deuxfleurs/garage

commit 44dba0e53c (parent 64a6eda0d2): cargo fmt

9 changed files with 209 additions and 186 deletions
@@ -39,16 +39,16 @@ pub async fn handle_copy(
 		Some(v) => v,
 		None => return Err(Error::NotFound),
 	};
 	let source_last_state = match &source_last_v.state {
 		ObjectVersionState::Complete(x) => x,
 		_ => unreachable!(),
 	};

 	let new_uuid = gen_uuid();
 	let dest_object_version = ObjectVersion {
 		uuid: new_uuid,
 		timestamp: now_msec(),
 		state: ObjectVersionState::Complete(source_last_state.clone()),
 	};

 	match &source_last_state {
@@ -28,12 +28,10 @@ async fn handle_delete_internal(
 		Some(o) => o,
 	};

-	let interesting_versions = object.versions().iter().filter(|v| {
-		match v.state {
-			ObjectVersionState::Aborted => false,
-			ObjectVersionState::Complete(ObjectVersionData::DeleteMarker) => false,
-			_ => true,
-		}
-	});
+	let interesting_versions = object.versions().iter().filter(|v| match v.state {
+		ObjectVersionState::Aborted => false,
+		ObjectVersionState::Complete(ObjectVersionData::DeleteMarker) => false,
+		_ => true,
+	});

 	let mut must_delete = None;
@@ -12,12 +12,19 @@ use garage_table::EmptyKey;
 use garage_model::garage::Garage;
 use garage_model::object_table::*;

-fn object_headers(version: &ObjectVersion, version_meta: &ObjectVersionMeta) -> http::response::Builder {
+fn object_headers(
+	version: &ObjectVersion,
+	version_meta: &ObjectVersionMeta,
+) -> http::response::Builder {
 	let date = UNIX_EPOCH + Duration::from_millis(version.timestamp);
 	let date_str = httpdate::fmt_http_date(date);

 	Response::builder()
-		.header("Content-Type", version_meta.headers.content_type.to_string())
+		.header(
+			"Content-Type",
+			version_meta.headers.content_type.to_string(),
+		)
+		// TODO: other headers
 		.header("Content-Length", format!("{}", version_meta.size))
 		.header("ETag", version_meta.etag.to_string())
 		.header("Last-Modified", date_str)
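object_headers deliberately returns an unfinished http::response::Builder so that each handler can attach its own status and body. A minimal, self-contained sketch of that pattern, assuming only the http crate (common_headers and the values used here are illustrative, not Garage code):

use http::Response;

// Attach the shared headers and hand back the unfinished Builder,
// in the same style as object_headers() in the hunk above.
fn common_headers(content_type: &str, len: u64) -> http::response::Builder {
    Response::builder()
        .header("Content-Type", content_type.to_string())
        .header("Content-Length", len.to_string())
}

fn main() -> Result<(), http::Error> {
    // Each call site finishes the chain with its own status and body.
    let resp: Response<Vec<u8>> = common_headers("text/plain", 5)
        .status(200)
        .body(b"hello".to_vec())?;
    println!("{} {:?}", resp.status(), resp.headers().get("Content-Type"));
    Ok(())
}

Keeping the return type a Builder keeps the shared header logic in one place while leaving status codes and bodies to the individual GET/HEAD handlers.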
@@ -48,11 +55,11 @@ pub async fn handle_head(
 		Some(v) => v,
 		None => return Err(Error::NotFound),
 	};
 	let version_meta = match &version.state {
 		ObjectVersionState::Complete(ObjectVersionData::Inline(meta, _)) => meta,
 		ObjectVersionState::Complete(ObjectVersionData::FirstBlock(meta, _)) => meta,
 		_ => unreachable!(),
 	};

 	let body: Body = Body::from(vec![]);
 	let response = object_headers(&version, version_meta)
@@ -87,15 +94,15 @@ pub async fn handle_get(
 		Some(v) => v,
 		None => return Err(Error::NotFound),
 	};
 	let last_v_data = match &last_v.state {
 		ObjectVersionState::Complete(x) => x,
 		_ => unreachable!(),
 	};
 	let last_v_meta = match last_v_data {
 		ObjectVersionData::DeleteMarker => return Err(Error::NotFound),
 		ObjectVersionData::Inline(meta, _) => meta,
 		ObjectVersionData::FirstBlock(meta, _) => meta,
 	};

 	let range = match req.headers().get("range") {
 		Some(range) => {
@@ -113,7 +120,15 @@ pub async fn handle_get(
 		None => None,
 	};
 	if let Some(range) = range {
-		return handle_get_range(garage, last_v, last_v_data, last_v_meta, range.start, range.start + range.length).await;
+		return handle_get_range(
+			garage,
+			last_v,
+			last_v_data,
+			last_v_meta,
+			range.start,
+			range.start + range.length,
+		)
+		.await;
 	}

 	let resp_builder = object_headers(&last_v, last_v_meta).status(StatusCode::OK);
@@ -167,8 +182,8 @@ pub async fn handle_get(
 pub async fn handle_get_range(
 	garage: Arc<Garage>,
 	version: &ObjectVersion,
 	version_data: &ObjectVersionData,
 	version_meta: &ObjectVersionMeta,
 	begin: u64,
 	end: u64,
 ) -> Result<Response<Body>, Error> {
@@ -74,11 +74,15 @@ pub async fn handle_list(
 		if let Some(pfx) = common_prefix {
 			result_common_prefixes.insert(pfx.to_string());
 		} else {
 			let size = match &version.state {
-				ObjectVersionState::Complete(ObjectVersionData::Inline(meta, _)) => meta.size,
-				ObjectVersionState::Complete(ObjectVersionData::FirstBlock(meta, _)) => meta.size,
-				_ => unreachable!(),
-			};
+				ObjectVersionState::Complete(ObjectVersionData::Inline(meta, _)) => {
+					meta.size
+				}
+				ObjectVersionState::Complete(ObjectVersionData::FirstBlock(meta, _)) => {
+					meta.size
+				}
+				_ => unreachable!(),
+			};
 			let info = match result_keys.get(&object.key) {
 				None => ListResultInfo {
 					last_modified: version.timestamp,
@@ -1,4 +1,4 @@
-use std::collections::{VecDeque, BTreeMap};
+use std::collections::{BTreeMap, VecDeque};
 use std::fmt::Write;
 use std::sync::Arc;
@@ -24,10 +24,10 @@ pub async fn handle_put(
 	key: &str,
 ) -> Result<Response<Body>, Error> {
 	let version_uuid = gen_uuid();
-	let headers = ObjectVersionHeaders{
+	let headers = ObjectVersionHeaders {
 		content_type: get_mime_type(&req)?,
 		other: BTreeMap::new(), // TODO
 	};

 	let body = req.into_body();
@@ -44,13 +44,14 @@ pub async fn handle_put(
 	};

 	if first_block.len() < INLINE_THRESHOLD {
 		object_version.state = ObjectVersionState::Complete(ObjectVersionData::Inline(
-			ObjectVersionMeta{
+			ObjectVersionMeta {
 				headers,
 				size: first_block.len() as u64,
 				etag: "".to_string(), // TODO
 			},
-			first_block));
+			first_block,
+		));

 		let object = Object::new(bucket.into(), key.into(), vec![object_version]);
 		garage.object_table.insert(&object).await?;
@@ -76,12 +77,13 @@ pub async fn handle_put(
 	// TODO: if at any step we have an error, we should undo everything we did

 	object_version.state = ObjectVersionState::Complete(ObjectVersionData::FirstBlock(
-		ObjectVersionMeta{
+		ObjectVersionMeta {
 			headers,
 			size: total_size,
 			etag: "".to_string(), // TODO
 		},
-		first_block_hash));
+		first_block_hash,
+	));

 	let object = Object::new(bucket.into(), key.into(), vec![object_version]);
 	garage.object_table.insert(&object).await?;
@@ -207,7 +209,7 @@ impl BodyChunker {
 pub fn put_response(version_uuid: UUID) -> Response<Body> {
 	Response::builder()
 		.header("x-amz-version-id", hex::encode(version_uuid))
 		// TODO ETag
 		.body(Body::from(vec![]))
 		.unwrap()
 }
@@ -219,10 +221,10 @@ pub async fn handle_create_multipart_upload(
 	key: &str,
 ) -> Result<Response<Body>, Error> {
 	let version_uuid = gen_uuid();
-	let headers = ObjectVersionHeaders{
+	let headers = ObjectVersionHeaders {
 		content_type: get_mime_type(&req)?,
 		other: BTreeMap::new(), // TODO
 	};

 	let object_version = ObjectVersion {
 		uuid: version_uuid,
@@ -286,9 +288,11 @@ pub async fn handle_put_part(
 		None => return Err(Error::BadRequest(format!("Object not found"))),
 		Some(x) => x,
 	};
-	if !object.versions().iter().any(|v| {
-		v.uuid == version_uuid && v.is_uploading()
-	}) {
+	if !object
+		.versions()
+		.iter()
+		.any(|v| v.uuid == version_uuid && v.is_uploading())
+	{
 		return Err(Error::BadRequest(format!(
 			"Multipart upload does not exist or is otherwise invalid"
 		)));
@@ -330,9 +334,10 @@ pub async fn handle_complete_multipart_upload(
 		None => return Err(Error::BadRequest(format!("Object not found"))),
 		Some(x) => x,
 	};
-	let object_version = object.versions().iter().find(|v| {
-		v.uuid == version_uuid && v.is_uploading()
-	});
+	let object_version = object
+		.versions()
+		.iter()
+		.find(|v| v.uuid == version_uuid && v.is_uploading());
 	let mut object_version = match object_version {
 		None => {
 			return Err(Error::BadRequest(format!(
@@ -348,10 +353,10 @@ pub async fn handle_complete_multipart_upload(
 	if version.blocks().len() == 0 {
 		return Err(Error::BadRequest(format!("No data was uploaded")));
 	}
 	let headers = match object_version.state {
 		ObjectVersionState::Uploading(headers) => headers.clone(),
 		_ => unreachable!(),
 	};

 	// TODO: check that all the parts that they pretend they gave us are indeed there
 	// TODO: check MD5 sum of all uploaded parts? but that would mean we have to store them somewhere...
@@ -361,15 +366,14 @@ pub async fn handle_complete_multipart_upload(
 		.iter()
 		.map(|x| x.size)
 		.fold(0, |x, y| x + y);
-	object_version.state = ObjectVersionState::Complete(
-		ObjectVersionData::FirstBlock(
-			ObjectVersionMeta{
-				headers,
-				size: total_size,
-				etag: "".to_string(),// TODO
-			},
-			version.blocks()[0].hash)
-		);
+	object_version.state = ObjectVersionState::Complete(ObjectVersionData::FirstBlock(
+		ObjectVersionMeta {
+			headers,
+			size: total_size,
+			etag: "".to_string(), // TODO
+		},
+		version.blocks()[0].hash,
+	));

 	let final_object = Object::new(bucket.clone(), key.clone(), vec![object_version]);
 	garage.object_table.insert(&final_object).await?;
@@ -411,9 +415,10 @@ pub async fn handle_abort_multipart_upload(
 		None => return Err(Error::BadRequest(format!("Object not found"))),
 		Some(x) => x,
 	};
-	let object_version = object.versions().iter().find(|v| {
-		v.uuid == version_uuid && v.is_uploading()
-	});
+	let object_version = object
+		.versions()
+		.iter()
+		.find(|v| v.uuid == version_uuid && v.is_uploading());
 	let mut object_version = match object_version {
 		None => {
 			return Err(Error::BadRequest(format!(
@@ -1,7 +1,7 @@
 use async_trait::async_trait;
 use serde::{Deserialize, Serialize};
-use std::sync::Arc;
 use std::collections::BTreeMap;
+use std::sync::Arc;

 use garage_util::background::BackgroundRunner;
 use garage_util::data::*;
@@ -75,23 +75,21 @@ pub enum ObjectVersionState {
 impl ObjectVersionState {
 	fn merge(&mut self, other: &Self) {
 		use ObjectVersionState::*;
 		match other {
 			Aborted => {
 				*self = Aborted;
 			}
-			Complete(b) => {
-				match self {
-					Aborted => {},
-					Complete(a) => {
-						a.merge(b);
-					}
-					Uploading(_) => {
-						*self = Complete(b.clone());
-					}
-				}
-			}
+			Complete(b) => match self {
+				Aborted => {}
+				Complete(a) => {
+					a.merge(b);
+				}
+				Uploading(_) => {
+					*self = Complete(b.clone());
+				}
+			},
 			Uploading(_) => {}
 		}
 	}
 }
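The reformatted merge above is the rule Garage applies when two replicas disagree about a version's state: Aborted wins over everything, a Complete value overrides an in-progress Uploading, and two Complete values are merged field by field. A self-contained sketch of the same precedence on a simplified enum (the types and payloads below are stand-ins, not Garage's):

// Simplified stand-in for ObjectVersionState, for illustration only.
#[derive(Clone, Debug, PartialEq)]
enum VersionState {
    Uploading,
    Complete(String), // the String stands in for ObjectVersionData
    Aborted,
}

impl VersionState {
    // Same precedence as the hunk above: Aborted > Complete > Uploading.
    fn merge(&mut self, other: &Self) {
        use VersionState::*;
        match other {
            Aborted => *self = Aborted,
            Complete(b) => match self {
                Aborted => {}
                Complete(_a) => {} // the real code merges the two payloads here
                Uploading => *self = Complete(b.clone()),
            },
            Uploading => {}
        }
    }
}

fn main() {
    let mut local = VersionState::Uploading;
    local.merge(&VersionState::Complete("v1".into()));
    assert_eq!(local, VersionState::Complete("v1".into()));

    // Aborted is absorbing: later Complete updates no longer change it.
    local.merge(&VersionState::Aborted);
    local.merge(&VersionState::Complete("v2".into()));
    assert_eq!(local, VersionState::Aborted);
}

Because Aborted is absorbing and Complete beats Uploading, the outcome does not depend on arrival order; only two differing Complete payloads are left unresolved, which is exactly what the warn! in the next hunk reports.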
@@ -104,47 +102,50 @@ pub enum ObjectVersionData {

 #[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
 pub struct ObjectVersionMeta {
 	pub headers: ObjectVersionHeaders,
 	pub size: u64,
 	pub etag: String,
 }

 #[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
 pub struct ObjectVersionHeaders {
 	pub content_type: String,
 	pub other: BTreeMap<String, String>,
 }

 impl ObjectVersionData {
 	fn merge(&mut self, b: &Self) {
 		if *self != *b {
-			warn!("Inconsistent object version data: {:?} (local) vs {:?} (remote)", self, b);
+			warn!(
+				"Inconsistent object version data: {:?} (local) vs {:?} (remote)",
+				self, b
+			);
 		}
 	}
 }

 impl ObjectVersion {
 	fn cmp_key(&self) -> (u64, UUID) {
 		(self.timestamp, self.uuid)
 	}
 	pub fn is_uploading(&self) -> bool {
 		match self.state {
 			ObjectVersionState::Uploading(_) => true,
 			_ => false,
 		}
 	}
 	pub fn is_complete(&self) -> bool {
 		match self.state {
 			ObjectVersionState::Complete(_) => true,
 			_ => false,
 		}
 	}
 	pub fn is_data(&self) -> bool {
 		match self.state {
 			ObjectVersionState::Complete(ObjectVersionData::DeleteMarker) => false,
 			ObjectVersionState::Complete(_) => true,
 			_ => false,
 		}
 	}
 }
@@ -163,7 +164,7 @@ impl Entry<String, String> for Object {
 			.binary_search_by(|v| v.cmp_key().cmp(&other_v.cmp_key()))
 		{
 			Ok(i) => {
 				self.versions[i].state.merge(&other_v.state);
 			}
 			Err(i) => {
 				self.versions.insert(i, other_v.clone());
@@ -231,50 +232,54 @@ impl TableSchema for ObjectTable {
 		entry.versions.iter().any(|v| v.is_data())
 	}

 	fn try_migrate(bytes: &[u8]) -> Option<Self::E> {
 		let old = match rmp_serde::decode::from_read_ref::<_, prev::Object>(bytes) {
 			Ok(x) => x,
 			Err(_) => return None,
 		};
-		let new_v = old.versions().iter()
-			.map(migrate_version)
-			.collect::<Vec<_>>();
+		let new_v = old
+			.versions()
+			.iter()
+			.map(migrate_version)
+			.collect::<Vec<_>>();
 		let new = Object::new(old.bucket.clone(), old.key.clone(), new_v);
 		Some(new)
 	}
 }

 fn migrate_version(old: &prev::ObjectVersion) -> ObjectVersion {
-	let headers = ObjectVersionHeaders{
+	let headers = ObjectVersionHeaders {
 		content_type: old.mime_type.clone(),
 		other: BTreeMap::new(),
 	};
-	let meta = ObjectVersionMeta{
+	let meta = ObjectVersionMeta {
 		headers: headers.clone(),
 		size: old.size,
 		etag: "".to_string(),
 	};
 	let state = match old.state {
 		prev::ObjectVersionState::Uploading => ObjectVersionState::Uploading(headers),
 		prev::ObjectVersionState::Aborted => ObjectVersionState::Aborted,
-		prev::ObjectVersionState::Complete => {
-			match &old.data {
-				prev::ObjectVersionData::Uploading => ObjectVersionState::Uploading(headers),
-				prev::ObjectVersionData::DeleteMarker => ObjectVersionState::Complete(ObjectVersionData::DeleteMarker),
-				prev::ObjectVersionData::Inline(x) => ObjectVersionState::Complete(ObjectVersionData::Inline(meta, x.clone())),
-				prev::ObjectVersionData::FirstBlock(h) => {
-					let mut hash = [0u8; 32];
-					hash.copy_from_slice(h.as_ref());
-					ObjectVersionState::Complete(ObjectVersionData::FirstBlock(meta, Hash::from(hash)))
-				}
-			}
-		}
+		prev::ObjectVersionState::Complete => match &old.data {
+			prev::ObjectVersionData::Uploading => ObjectVersionState::Uploading(headers),
+			prev::ObjectVersionData::DeleteMarker => {
+				ObjectVersionState::Complete(ObjectVersionData::DeleteMarker)
+			}
+			prev::ObjectVersionData::Inline(x) => {
+				ObjectVersionState::Complete(ObjectVersionData::Inline(meta, x.clone()))
+			}
+			prev::ObjectVersionData::FirstBlock(h) => {
+				let mut hash = [0u8; 32];
+				hash.copy_from_slice(h.as_ref());
+				ObjectVersionState::Complete(ObjectVersionData::FirstBlock(meta, Hash::from(hash)))
+			}
+		},
 	};
 	let mut uuid = [0u8; 32];
 	uuid.copy_from_slice(old.uuid.as_ref());
-	ObjectVersion{
+	ObjectVersion {
 		uuid: UUID::from(uuid),
 		timestamp: old.timestamp,
 		state,
 	}
 }
@@ -9,5 +9,5 @@ pub mod table_fullcopy;
 pub mod table_sharded;
 pub mod table_sync;

-pub use table::*;
 pub use schema::*;
+pub use table::*;
@@ -4,7 +4,6 @@ use serde::{Deserialize, Serialize};
 use garage_util::data::*;
 use garage_util::error::Error;

-
 pub trait PartitionKey {
 	fn hash(&self) -> Hash;
 }
@@ -64,11 +63,11 @@ pub trait TableSchema: Send + Sync {
 	type E: Entry<Self::P, Self::S>;
 	type Filter: Clone + Serialize + for<'de> Deserialize<'de> + Send + Sync;

 	// Action to take if not able to decode current version:
 	// try loading from an older version
 	fn try_migrate(_bytes: &[u8]) -> Option<Self::E> {
 		None
 	}

 	async fn updated(&self, old: Option<Self::E>, new: Option<Self::E>) -> Result<(), Error>;
 	fn matches_filter(_entry: &Self::E, _filter: &Self::Filter) -> bool {
@@ -14,8 +14,8 @@ use garage_rpc::membership::{Ring, System};
 use garage_rpc::rpc_client::*;
 use garage_rpc::rpc_server::*;

-use crate::table_sync::*;
 use crate::schema::*;
+use crate::table_sync::*;

 const TABLE_RPC_TIMEOUT: Duration = Duration::from_secs(10);
@@ -48,7 +48,6 @@ pub enum TableRPC<F: TableSchema> {

 impl<F: TableSchema> RpcMessage for TableRPC<F> {}

-
 pub trait TableReplication: Send + Sync {
 	// See examples in table_sharded.rs and table_fullcopy.rs
 	// To understand various replication methods
@@ -456,15 +455,13 @@
 		ret
 	}

 	fn decode_entry(bytes: &[u8]) -> Result<F::E, Error> {
 		match rmp_serde::decode::from_read_ref::<_, F::E>(bytes) {
 			Ok(x) => Ok(x),
-			Err(e) => {
-				match F::try_migrate(bytes) {
-					Some(x) => Ok(x),
-					None => Err(e.into()),
-				}
-			}
+			Err(e) => match F::try_migrate(bytes) {
+				Some(x) => Ok(x),
+				None => Err(e.into()),
+			},
 		}
 	}
 }
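decode_entry above is a decode-then-migrate fallback: deserialize with the current schema first and, only if that fails, ask the schema to migrate from an older on-disk format, returning the original decode error when neither works. A self-contained sketch of the same control flow, with plain functions and invented text formats standing in for rmp-serde and the TableSchema trait:

#[derive(Debug)]
struct Entry {
    key: String,
    value: u64,
}

#[derive(Debug)]
struct DecodeError(String);

// Stand-in for decoding the current schema ("key=value").
fn decode_current(bytes: &[u8]) -> Result<Entry, DecodeError> {
    let s = std::str::from_utf8(bytes).map_err(|e| DecodeError(e.to_string()))?;
    let (key, value) = s.split_once('=').ok_or_else(|| DecodeError("no '='".into()))?;
    let value = value.parse().map_err(|_| DecodeError("bad integer".into()))?;
    Ok(Entry { key: key.to_string(), value })
}

// Stand-in for TableSchema::try_migrate: accept an older format ("key:value").
fn try_migrate(bytes: &[u8]) -> Option<Entry> {
    let s = std::str::from_utf8(bytes).ok()?;
    let (key, value) = s.split_once(':')?;
    Some(Entry { key: key.to_string(), value: value.parse().ok()? })
}

// Same shape as the hunk above: current format first, migration as a
// fallback, and the original error if both attempts fail.
fn decode_entry(bytes: &[u8]) -> Result<Entry, DecodeError> {
    match decode_current(bytes) {
        Ok(x) => Ok(x),
        Err(e) => match try_migrate(bytes) {
            Some(x) => Ok(x),
            None => Err(e),
        },
    }
}

fn main() {
    println!("{:?}", decode_entry(b"a=1"));  // current format
    println!("{:?}", decode_entry(b"b:2"));  // old format, migrated
    println!("{:?}", decode_entry(b"oops")); // neither: original error
}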