WIP add content defined chunking #42

Closed
trinity-1686a wants to merge 42 commits from content-defined-chunking into master
4 changed files with 18 additions and 16 deletions
Showing only changes of commit 7fdaf7aef0 - Show all commits

View file

@@ -472,8 +472,8 @@ pub async fn handle_complete_multipart_upload(
     };

     // Check that the list of parts they gave us corresponds to the parts we have here
-    println!("Expected parts from request: {:?}", body_list_of_parts);
-    println!("Parts stored in version: {:?}", version.parts_etags.items());
+    debug!("Expected parts from request: {:?}", body_list_of_parts);
+    debug!("Parts stored in version: {:?}", version.parts_etags.items());
     let parts = version
         .parts_etags
         .items()
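Note on the hunk above: unlike println!, which writes unconditionally to stdout, debug! from the log crate goes through the logging facade and only emits when a logger backend is installed and the debug level is enabled for the module. A minimal standalone sketch of the difference, assuming env_logger as the backend (not Garage code):

    use log::debug;

    fn main() {
        // With env_logger, run with e.g. RUST_LOG=debug to see the trace;
        // under the default filter (error), the line below prints nothing.
        env_logger::init();
        debug!("Expected parts from request: {:?}", [1, 2, 3]);
    }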

View file

@@ -142,7 +142,7 @@ where
         if let Some((old_entry, new_entry)) = changed {
             self.instance.updated(old_entry, Some(new_entry));
-            //self.syncer.load_full().unwrap().invalidate(&tree_key[..]);
+            self.merkle_updater.todo_notify.notify();
         }
         Ok(())
@@ -163,7 +163,7 @@ where
         if removed {
             let old_entry = self.decode_entry(v)?;
             self.instance.updated(Some(old_entry), None);
-            //self.syncer.load_full().unwrap().invalidate(k);
+            self.merkle_updater.todo_notify.notify();
         }
         Ok(removed)
     }
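Note: both edits above replace a commented-out cache-invalidation hook with a wake-up of the Merkle updater's background task, so new writes are folded into the Merkle tree promptly instead of waiting for a poll. The todo_notify.notify() call matches the tokio 0.2-era API (tokio 1.x renamed it notify_one). A minimal sketch of the wake-up pattern, assuming tokio::sync::Notify and modern tokio names (the function names here are hypothetical):

    use std::sync::Arc;
    use tokio::sync::Notify;

    // Writer side: after recording a (key, value-hash) pair in the todo tree,
    // wake the background updater instead of letting it poll on a timer.
    fn after_write(todo_notify: &Notify) {
        // ... insert the pair into the sled todo tree here ...
        todo_notify.notify_one();
    }

    // Updater side: drain pending items, then park until the next notification.
    async fn updater_loop(todo_notify: Arc<Notify>) {
        loop {
            // ... iterate the todo tree and apply each entry to the Merkle tree ...
            todo_notify.notified().await;
        }
    }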

View file

@@ -4,7 +4,7 @@ use std::time::Duration;
 use futures::select;
 use futures_util::future::*;
-use log::{info, warn};
+use log::{debug, warn};
 use serde::{Deserialize, Serialize};
 use sled::transaction::{
     ConflictableTransactionError, ConflictableTransactionResult, TransactionalTree,
@@ -109,11 +109,11 @@ impl MerkleUpdater {
             match x {
                 Ok((key, valhash)) => {
                     if let Err(e) = self.update_item(&key[..], &valhash[..]) {
-                        warn!("Error while updating Merkle tree item: {}", e);
+                        warn!("({}) Error while updating Merkle tree item: {}", self.table_name, e);
                     }
                 }
                 Err(e) => {
-                    warn!("Error while iterating on Merkle todo tree: {}", e);
+                    warn!("({}) Error while iterating on Merkle todo tree: {}", self.table_name, e);
                     tokio::time::delay_for(Duration::from_secs(10)).await;
                 }
             }
@@ -152,8 +152,9 @@ impl MerkleUpdater {
             .is_ok();
         if !deleted {
-            info!(
-                "Item not deleted from Merkle todo because it changed: {:?}",
+            debug!(
+                "({}) Item not deleted from Merkle todo because it changed: {:?}",
+                self.table_name,
                 k
             );
         }
@@ -195,7 +196,7 @@ impl MerkleUpdater {
         if children.len() == 0 {
             // should not happen
-            warn!("Replacing intermediate node with empty node, should not happen.");
+            warn!("({}) Replacing intermediate node with empty node, should not happen.", self.table_name);
             Some(MerkleNode::Empty)
         } else if children.len() == 1 {
             // We now have a single node (case when the update deleted one of only two
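Note on the @@ -152,8 +152,9 @@ hunk: the .is_ok() feeding the deleted flag comes from a compare-and-swap delete on the sled todo tree: the entry is removed only if its value is still the hash the updater just processed, and if a writer raced in with a newer hash it stays queued for the next pass, which is why the message is demoted to a debug-level non-event. A minimal sketch of that idiom against sled's compare_and_swap API (the function name is hypothetical):

    // Remove `key` from the todo tree only if its value is still `expected`.
    // Returns false when a concurrent write changed the value in between.
    fn remove_if_unchanged(todo: &sled::Tree, key: &[u8], expected: &[u8]) -> sled::Result<bool> {
        let deleted = todo
            .compare_and_swap(key, Some(expected), None as Option<&[u8]>)?
            .is_ok();
        Ok(deleted)
    }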

View file

@@ -274,7 +274,7 @@ where
                 .into_iter()
                 .collect::<Vec<_>>();
             if nodes.contains(&self.aux.system.id) {
-                warn!("Interrupting offload as partitions seem to have changed");
+                warn!("({}) Interrupting offload as partitions seem to have changed", self.data.name);
                 break;
             }
             if nodes.len() < self.aux.replication.write_quorum(&self.aux.system) {
@@ -282,8 +282,9 @@
             }
             counter += 1;
-            debug!(
-                "Offloading {} items from {:?}..{:?} ({})",
+            info!(
+                "({}) Offloading {} items from {:?}..{:?} ({})",
+                self.data.name,
                 items.len(),
                 begin,
                 end,
@@ -325,7 +326,7 @@
         }
         if not_removed > 0 {
-            debug!("{} items not removed during offload because they changed in between (trying again...)", not_removed);
+            debug!("({}) {} items not removed during offload because they changed in between (trying again...)", self.data.name, not_removed);
         }
         Ok(())
@@ -448,11 +449,11 @@
                 // Just send that item directly
                 if let Some(val) = self.data.store.get(&ik[..])? {
                     if blake2sum(&val[..]) != ivhash {
-                        warn!("Hashes differ between stored value and Merkle tree, key: {:?} (if your server is very busy, don't worry, this happens when the Merkle tree can't be updated fast enough)", ik);
+                        warn!("({}) Hashes differ between stored value and Merkle tree, key: {:?} (if your server is very busy, don't worry, this happens when the Merkle tree can't be updated fast enough)", self.data.name, ik);
                     }
                     todo_items.push(val.to_vec());
                 } else {
-                    warn!("Item from Merkle tree not found in store: {:?} (if your server is very busy, don't worry, this happens when the Merkle tree can't be updated fast enough)", ik);
+                    warn!("({}) Item from Merkle tree not found in store: {:?} (if your server is very busy, don't worry, this happens when the Merkle tree can't be updated fast enough)", self.data.name, ik);
                 }
             }
             MerkleNode::Intermediate(l) => {
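Note on the @@ -448,11 +449,11 @@ hunk: before an item is sent during sync, the stored value is re-hashed and compared to the hash recorded in the Merkle leaf; a mismatch (or a missing item) usually just means the Merkle updater is lagging behind writes, hence the reassuring wording in both warnings. A sketch of one way to compute a 32-byte BLAKE2b checksum like the one this check relies on, assuming the blake2 crate (Garage's actual blake2sum helper lives in its util crate, and its exact construction is not shown in this diff):

    use blake2::digest::consts::U32;
    use blake2::{Blake2b, Digest};

    // BLAKE2b with a 32-byte output, matching the fixed-size hashes in the tree.
    type Blake2b256 = Blake2b<U32>;

    fn blake2sum(data: &[u8]) -> [u8; 32] {
        let mut hasher = Blake2b256::new();
        hasher.update(data);
        hasher.finalize().into()
    }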