forked from Deuxfleurs/garage

cargo fmt

commit 44dba0e53c (parent 64a6eda0d2)

9 changed files with 209 additions and 186 deletions

@@ -28,12 +28,10 @@ async fn handle_delete_internal(
 		Some(o) => o,
 	};
 
-	let interesting_versions = object.versions().iter().filter(|v| {
-		match v.state {
+	let interesting_versions = object.versions().iter().filter(|v| match v.state {
 		ObjectVersionState::Aborted => false,
 		ObjectVersionState::Complete(ObjectVersionData::DeleteMarker) => false,
 		_ => true,
-		}
 	});
 
 	let mut must_delete = None;

@@ -12,12 +12,19 @@ use garage_table::EmptyKey;
 use garage_model::garage::Garage;
 use garage_model::object_table::*;
 
-fn object_headers(version: &ObjectVersion, version_meta: &ObjectVersionMeta) -> http::response::Builder {
+fn object_headers(
+	version: &ObjectVersion,
+	version_meta: &ObjectVersionMeta,
+) -> http::response::Builder {
 	let date = UNIX_EPOCH + Duration::from_millis(version.timestamp);
 	let date_str = httpdate::fmt_http_date(date);
 
 	Response::builder()
-		.header("Content-Type", version_meta.headers.content_type.to_string())
+		.header(
+			"Content-Type",
+			version_meta.headers.content_type.to_string(),
+		)
 		// TODO: other headers
 		.header("Content-Length", format!("{}", version_meta.size))
 		.header("ETag", version_meta.etag.to_string())
 		.header("Last-Modified", date_str)

@@ -113,7 +120,15 @@ pub async fn handle_get(
 		None => None,
 	};
 	if let Some(range) = range {
-		return handle_get_range(garage, last_v, last_v_data, last_v_meta, range.start, range.start + range.length).await;
+		return handle_get_range(
+			garage,
+			last_v,
+			last_v_data,
+			last_v_meta,
+			range.start,
+			range.start + range.length,
+		)
+		.await;
 	}
 
 	let resp_builder = object_headers(&last_v, last_v_meta).status(StatusCode::OK);

@@ -75,8 +75,12 @@ pub async fn handle_list(
 			result_common_prefixes.insert(pfx.to_string());
 		} else {
 			let size = match &version.state {
-				ObjectVersionState::Complete(ObjectVersionData::Inline(meta, _)) => meta.size,
-				ObjectVersionState::Complete(ObjectVersionData::FirstBlock(meta, _)) => meta.size,
+				ObjectVersionState::Complete(ObjectVersionData::Inline(meta, _)) => {
+					meta.size
+				}
+				ObjectVersionState::Complete(ObjectVersionData::FirstBlock(meta, _)) => {
+					meta.size
+				}
 				_ => unreachable!(),
 			};
 			let info = match result_keys.get(&object.key) {

@@ -1,4 +1,4 @@
-use std::collections::{VecDeque, BTreeMap};
+use std::collections::{BTreeMap, VecDeque};
 use std::fmt::Write;
 use std::sync::Arc;
 

@@ -50,7 +50,8 @@ pub async fn handle_put(
 				size: first_block.len() as u64,
 				etag: "".to_string(), // TODO
 			},
-			first_block));
+			first_block,
+		));
 
 		let object = Object::new(bucket.into(), key.into(), vec![object_version]);
 		garage.object_table.insert(&object).await?;

@@ -81,7 +82,8 @@ pub async fn handle_put(
 			size: total_size,
 			etag: "".to_string(), // TODO
 		},
-		first_block_hash));
+		first_block_hash,
+	));
 
 	let object = Object::new(bucket.into(), key.into(), vec![object_version]);
 	garage.object_table.insert(&object).await?;

@@ -286,9 +288,11 @@ pub async fn handle_put_part(
 		None => return Err(Error::BadRequest(format!("Object not found"))),
 		Some(x) => x,
 	};
-	if !object.versions().iter().any(|v| {
-		v.uuid == version_uuid && v.is_uploading()
-	}) {
+	if !object
+		.versions()
+		.iter()
+		.any(|v| v.uuid == version_uuid && v.is_uploading())
+	{
 		return Err(Error::BadRequest(format!(
 			"Multipart upload does not exist or is otherwise invalid"
 		)));

@@ -330,9 +334,10 @@ pub async fn handle_complete_multipart_upload(
 		None => return Err(Error::BadRequest(format!("Object not found"))),
 		Some(x) => x,
 	};
-	let object_version = object.versions().iter().find(|v| {
-		v.uuid == version_uuid && v.is_uploading()
-	});
+	let object_version = object
+		.versions()
+		.iter()
+		.find(|v| v.uuid == version_uuid && v.is_uploading());
 	let mut object_version = match object_version {
 		None => {
 			return Err(Error::BadRequest(format!(

@@ -361,15 +366,14 @@ pub async fn handle_complete_multipart_upload(
 		.iter()
 		.map(|x| x.size)
 		.fold(0, |x, y| x + y);
-	object_version.state = ObjectVersionState::Complete(
-		ObjectVersionData::FirstBlock(
+	object_version.state = ObjectVersionState::Complete(ObjectVersionData::FirstBlock(
 		ObjectVersionMeta {
 			headers,
 			size: total_size,
 			etag: "".to_string(), // TODO
 		},
-		version.blocks()[0].hash)
-	);
+		version.blocks()[0].hash,
+	));
 
 	let final_object = Object::new(bucket.clone(), key.clone(), vec![object_version]);
 	garage.object_table.insert(&final_object).await?;

@@ -411,9 +415,10 @@ pub async fn handle_abort_multipart_upload(
 		None => return Err(Error::BadRequest(format!("Object not found"))),
 		Some(x) => x,
 	};
-	let object_version = object.versions().iter().find(|v| {
-		v.uuid == version_uuid && v.is_uploading()
-	});
+	let object_version = object
+		.versions()
+		.iter()
+		.find(|v| v.uuid == version_uuid && v.is_uploading());
 	let mut object_version = match object_version {
 		None => {
 			return Err(Error::BadRequest(format!(

@@ -1,7 +1,7 @@
 use async_trait::async_trait;
 use serde::{Deserialize, Serialize};
-use std::sync::Arc;
 use std::collections::BTreeMap;
+use std::sync::Arc;
 
 use garage_util::background::BackgroundRunner;
 use garage_util::data::*;

@@ -79,17 +79,15 @@ impl ObjectVersionState {
 			Aborted => {
 				*self = Aborted;
 			}
-			Complete(b) => {
-				match self {
-					Aborted => {},
+			Complete(b) => match self {
+				Aborted => {}
 				Complete(a) => {
 					a.merge(b);
 				}
 				Uploading(_) => {
 					*self = Complete(b.clone());
 				}
-				}
-			}
+			},
 			Uploading(_) => {}
 		}
 	}

@@ -118,7 +116,10 @@ pub struct ObjectVersionHeaders {
 impl ObjectVersionData {
 	fn merge(&mut self, b: &Self) {
 		if *self != *b {
-			warn!("Inconsistent object version data: {:?} (local) vs {:?} (remote)", self, b);
+			warn!(
+				"Inconsistent object version data: {:?} (local) vs {:?} (remote)",
+				self, b
+			);
 		}
 	}
 }

@@ -236,7 +237,9 @@ impl TableSchema for ObjectTable {
 			Ok(x) => x,
 			Err(_) => return None,
 		};
-		let new_v = old.versions().iter()
+		let new_v = old
+			.versions()
+			.iter()
 			.map(migrate_version)
 			.collect::<Vec<_>>();
 		let new = Object::new(old.bucket.clone(), old.key.clone(), new_v);

@@ -257,18 +260,20 @@ fn migrate_version(old: &prev::ObjectVersion) -> ObjectVersion {
 	let state = match old.state {
 		prev::ObjectVersionState::Uploading => ObjectVersionState::Uploading(headers),
 		prev::ObjectVersionState::Aborted => ObjectVersionState::Aborted,
-		prev::ObjectVersionState::Complete => {
-			match &old.data {
+		prev::ObjectVersionState::Complete => match &old.data {
 			prev::ObjectVersionData::Uploading => ObjectVersionState::Uploading(headers),
-			prev::ObjectVersionData::DeleteMarker => ObjectVersionState::Complete(ObjectVersionData::DeleteMarker),
-			prev::ObjectVersionData::Inline(x) => ObjectVersionState::Complete(ObjectVersionData::Inline(meta, x.clone())),
+			prev::ObjectVersionData::DeleteMarker => {
+				ObjectVersionState::Complete(ObjectVersionData::DeleteMarker)
+			}
+			prev::ObjectVersionData::Inline(x) => {
+				ObjectVersionState::Complete(ObjectVersionData::Inline(meta, x.clone()))
+			}
 			prev::ObjectVersionData::FirstBlock(h) => {
 				let mut hash = [0u8; 32];
 				hash.copy_from_slice(h.as_ref());
 				ObjectVersionState::Complete(ObjectVersionData::FirstBlock(meta, Hash::from(hash)))
 			}
-			}
-		}
+		},
 	};
 	let mut uuid = [0u8; 32];
 	uuid.copy_from_slice(old.uuid.as_ref());

@@ -9,5 +9,5 @@ pub mod table_fullcopy;
 pub mod table_sharded;
 pub mod table_sync;
 
-pub use table::*;
 pub use schema::*;
+pub use table::*;

@@ -4,7 +4,6 @@ use serde::{Deserialize, Serialize};
 use garage_util::data::*;
 use garage_util::error::Error;
 
-
 pub trait PartitionKey {
 	fn hash(&self) -> Hash;
 }

@@ -14,8 +14,8 @@ use garage_rpc::membership::{Ring, System};
 use garage_rpc::rpc_client::*;
 use garage_rpc::rpc_server::*;
 
-use crate::table_sync::*;
 use crate::schema::*;
+use crate::table_sync::*;
 
 const TABLE_RPC_TIMEOUT: Duration = Duration::from_secs(10);
 

@@ -48,7 +48,6 @@ pub enum TableRPC<F: TableSchema> {
 
 impl<F: TableSchema> RpcMessage for TableRPC<F> {}
 
-
 pub trait TableReplication: Send + Sync {
 	// See examples in table_sharded.rs and table_fullcopy.rs
 	// To understand various replication methods

@@ -459,12 +458,10 @@ where
 	fn decode_entry(bytes: &[u8]) -> Result<F::E, Error> {
 		match rmp_serde::decode::from_read_ref::<_, F::E>(bytes) {
 			Ok(x) => Ok(x),
-			Err(e) => {
-				match F::try_migrate(bytes) {
+			Err(e) => match F::try_migrate(bytes) {
 				Some(x) => Ok(x),
 				None => Err(e.into()),
-				}
-			}
+			},
 		}
 	}
 }