lifecycle worker: use queue_insert and process objects in batches
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/pr Build is passing

Alex 2023-08-31 11:19:26 +02:00
parent 1cfcc61de8
commit adbf5925de


@@ -152,6 +152,8 @@ impl Worker for LifecycleWorker {
 				pos,
 				last_bucket,
 			} => {
+				// Process a batch of 100 items before yielding to bg task scheduler
+				for _ in 0..100 {
 				let (object_bytes, next_pos) = match self
 					.garage
 					.object_table
@@ -188,6 +190,7 @@ impl Worker for LifecycleWorker {
 				} else {
 					*pos = next_pos;
 				}
+				}
 
 				Ok(WorkerState::Busy)
 			}
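The two hunks above put an upper bound on the work done per invocation: instead of looping until the whole scan finishes, the worker handles at most 100 objects and then returns WorkerState::Busy, so the background task scheduler can interleave other workers between batches. Below is a minimal, self-contained sketch of that batch-then-yield shape; the types are illustrative stand-ins, not Garage's actual Worker trait or scheduler.

// A minimal sketch of the batch-then-yield pattern (illustrative types only,
// not Garage's actual Worker trait or scheduler).
#[derive(Debug, PartialEq)]
enum WorkerState {
	Busy, // more work is queued: the scheduler should call work_step again soon
	Idle, // nothing left to do for now
}

struct Item(u64);

fn handle(item: Item) {
	// stand-in for processing one object
	println!("processed item {}", item.0);
}

// Handle at most 100 queued items, then return, so other background tasks
// get to run between batches instead of being starved by one long loop.
fn work_step(queue: &mut Vec<Item>) -> WorkerState {
	for _ in 0..100 {
		match queue.pop() {
			Some(item) => handle(item),
			None => return WorkerState::Idle,
		}
	}
	WorkerState::Busy
}

fn main() {
	let mut queue: Vec<Item> = (0..250u64).map(Item).collect();
	while work_step(&mut queue) == WorkerState::Busy {}
	assert!(queue.is_empty());
}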
@@ -260,6 +263,8 @@ async fn process_object(
 		return Ok(Skip::SkipBucket);
 	}
 
+	let db = garage.object_table.data.store.db();
+
 	for rule in lifecycle_policy.iter() {
 		if !rule.enabled {
 			continue;
@@ -310,7 +315,9 @@ async fn process_object(
 					"Lifecycle: expiring 1 object in bucket {:?}",
 					object.bucket_id
 				);
-				garage.object_table.insert(&deleted_object).await?;
+				db.transaction(|mut tx| {
+					garage.object_table.queue_insert(&mut tx, &deleted_object)
+				})?;
 				*objects_expired += 1;
 			}
 		}
@@ -343,7 +350,9 @@ async fn process_object(
 				);
 				let aborted_object =
 					Object::new(object.bucket_id, object.key.clone(), aborted_versions);
-				garage.object_table.insert(&aborted_object).await?;
+				db.transaction(|mut tx| {
+					garage.object_table.queue_insert(&mut tx, &aborted_object)
+				})?;
 				*mpu_aborted += n_aborted;
 			}
 		}
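The remaining hunks replace the awaited garage.object_table.insert(...) calls with queue_insert inside a synchronous db.transaction: expired objects and aborted multipart uploads are recorded in a local database transaction, and the queued entries can be applied later in the background instead of blocking the scan on a full table insert per object. A rough sketch of that defer-and-flush idea follows, with made-up types (PendingInserts and ObjectEntry are not Garage's API).

// Rough sketch of deferring writes instead of awaiting them inline.
// PendingInserts and ObjectEntry are illustrative, not Garage's types.
use std::sync::Mutex;

#[derive(Debug)]
struct ObjectEntry {
	bucket_id: u64,
	key: String,
}

#[derive(Default)]
struct PendingInserts {
	queue: Mutex<Vec<ObjectEntry>>,
}

impl PendingInserts {
	// Synchronous "queue_insert": record the write locally with no await,
	// so it can sit inside a local database transaction.
	fn queue_insert(&self, entry: ObjectEntry) {
		self.queue.lock().unwrap().push(entry);
	}

	// Called later by a background task, outside the per-object hot loop.
	fn flush(&self) -> usize {
		let mut queue = self.queue.lock().unwrap();
		let n = queue.len();
		queue.clear(); // stand-in for performing the real table inserts
		n
	}
}

fn main() {
	let pending = PendingInserts::default();
	pending.queue_insert(ObjectEntry { bucket_id: 1, key: "logs/2023-08-31".into() });
	pending.queue_insert(ObjectEntry { bucket_id: 2, key: "tmp/upload".into() });
	assert_eq!(pending.flush(), 2);
}

Combined with the 100-item batches above, the hot loop now does only local, synchronous work, and the actual table inserts can happen in bulk in the background.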