diff --git a/src/api/s3_put.rs b/src/api/s3_put.rs
index 7aec12ed..2c5e364f 100644
--- a/src/api/s3_put.rs
+++ b/src/api/s3_put.rs
@@ -411,7 +411,9 @@ pub async fn handle_put_part(
 	// Store part etag in version
 	let data_md5sum_hex = hex::encode(data_md5sum);
 	let mut version = version;
-	version.parts_etags.put(part_number, data_md5sum_hex.clone());
+	version
+		.parts_etags
+		.put(part_number, data_md5sum_hex.clone());
 	garage.version_table.insert(&version).await?;
 
 	let response = Response::builder()
@@ -495,11 +497,7 @@ pub async fn handle_complete_multipart_upload(
 	for (_, etag) in version.parts_etags.items().iter() {
 		etag_md5_hasher.update(etag.as_bytes());
 	}
-	let etag = format!(
-		"{}-{}",
-		hex::encode(etag_md5_hasher.finalize()),
-		num_parts
-	);
+	let etag = format!("{}-{}", hex::encode(etag_md5_hasher.finalize()), num_parts);
 
 	// Calculate total size of final object
 	let total_size = version
diff --git a/src/model/object_table.rs b/src/model/object_table.rs
index 3280e7b5..cd09f678 100644
--- a/src/model/object_table.rs
+++ b/src/model/object_table.rs
@@ -225,4 +225,3 @@ impl TableSchema for ObjectTable {
 		filter.apply(deleted)
 	}
 }
-
diff --git a/src/rpc/ring.rs b/src/rpc/ring.rs
index 85caafeb..0f86bbb2 100644
--- a/src/rpc/ring.rs
+++ b/src/rpc/ring.rs
@@ -172,7 +172,7 @@ impl Ring {
 
 	pub fn walk_ring(&self, from: &Hash, n: usize) -> Vec<UUID> {
 		if self.ring.len() != 1 << PARTITION_BITS {
-			warn!("Ring not yet ready, read/writes will be lost");
+			warn!("Ring not yet ready, read/writes will be lost!");
 			return vec![];
 		}
 
diff --git a/src/table/table_sharded.rs b/src/table/table_sharded.rs
index 098637dd..47bdfeaf 100644
--- a/src/table/table_sharded.rs
+++ b/src/table/table_sharded.rs
@@ -44,6 +44,7 @@ impl TableReplication for TableShardedReplication {
 	fn split_points(&self, ring: &Ring) -> Vec<Hash> {
 		let mut ret = vec![];
 
+		ret.push([0u8; 32].into());
 		for entry in ring.ring.iter() {
 			ret.push(entry.location);
 		}
diff --git a/src/table/table_sync.rs b/src/table/table_sync.rs
index 5fa6793b..c38b6bd5 100644
--- a/src/table/table_sync.rs
+++ b/src/table/table_sync.rs
@@ -18,10 +18,14 @@ use garage_util::error::Error;
 use crate::*;
 
 const MAX_DEPTH: usize = 16;
-const SCAN_INTERVAL: Duration = Duration::from_secs(3600);
-const CHECKSUM_CACHE_TIMEOUT: Duration = Duration::from_secs(1800);
 const TABLE_SYNC_RPC_TIMEOUT: Duration = Duration::from_secs(30);
 
+// Scan & sync every 12 hours
+const SCAN_INTERVAL: Duration = Duration::from_secs(12 * 60 * 60);
+
+// Expire cache after 30 minutes
+const CHECKSUM_CACHE_TIMEOUT: Duration = Duration::from_secs(30 * 60);
+
 pub struct TableSyncer<F: TableSchema, R: TableReplication> {
 	table: Arc<Table<F, R>>,
 	todo: Mutex<SyncTodo>,
@@ -797,6 +801,10 @@ impl SyncTodo {
 		for i in 0..split_points.len() - 1 {
 			let begin = split_points[i];
 			let end = split_points[i + 1];
+			if begin == end {
+				continue;
+			}
+
 			let nodes = table.replication.replication_nodes(&begin, &ring);
 
 			let retain = nodes.contains(&my_id);