Smaller batches for index counter propagation
This commit is contained in:
parent
0837b3dacd
commit
e7810e9cb3
2 changed files with 6 additions and 2 deletions
@@ -53,6 +53,7 @@ impl Worker for RepairWorker {
 		// This is mostly because the Rust bindings for SQLite assume a worst-case scenario
 		// where SQLite is not compiled in thread-safe mode, so we have to wrap everything
 		// in a mutex (see db/sqlite_adapter.rs and discussion in PR #322).
+		// TODO: maybe do this with tokio::task::spawn_blocking ?
 		let mut batch_of_hashes = vec![];
 		let start_bound = match self.next_start.as_ref() {
 			None => Bound::Unbounded,
@@ -429,7 +429,8 @@ impl<T: CountedItem> Worker for IndexPropagatorWorker<T> {
 			};

 			if !self.buf.is_empty() {
-				let entries = self.buf.iter().map(|(_k, v)| v);
+				let entries_k = self.buf.keys().take(100).cloned().collect::<Vec<_>>();
+				let entries = entries_k.iter().map(|k| self.buf.get(k).unwrap());
 				if let Err(e) = self.index_counter.table.insert_many(entries).await {
 					self.errors += 1;
 					if self.errors >= 2 && *must_exit.borrow() {
@@ -441,7 +442,9 @@ impl<T: CountedItem> Worker for IndexPropagatorWorker<T> {
 				// things to go back to normal
 				return Err(e);
-			} else {
-				self.buf.clear();
+			} else {
+				for k in entries_k {
+					self.buf.remove(&k);
+				}
 				self.errors = 0;
 			}
Loading…
Reference in a new issue