Merge pull request 'disable_scrub configuration option' (#774) from disable-scrub into main
All checks were successful
ci/woodpecker/push/debug Pipeline was successful

Reviewed-on: #774
Alex 2024-03-15 09:22:33 +00:00
commit a80ce6ab5a
5 changed files with 61 additions and 40 deletions

View file

@@ -19,7 +19,7 @@ connecting to. To run on all nodes, add the `-a` flag as follows:
 # Data block operations

-## Data store scrub
+## Data store scrub {#scrub}

 Scrubbing the data store means examining each individual data block to check that
 their content is correct, by verifying their hash. Any block found to be corrupted
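
As a reading aid for the `{#scrub}` anchor added above: a scrub re-reads each stored data block and checks that its content still hashes to the identity the block is stored under. Below is a minimal sketch of that per-block check; the SHA-256 stand-in (Garage uses its own block hash), the `hex` encoding, and the `verify_block` helper are illustrative assumptions, not Garage's actual scrub worker.

```rust
// Sketch only: re-hash one block file and compare against the hash it is known by.
// In the real scrub, a mismatch causes the block to be rebuilt from copies on other nodes.
use sha2::{Digest, Sha256}; // stand-in hash function, assumed for illustration
use std::{fs, path::Path};

/// Returns Ok(true) if the on-disk content still matches `expected_hex`.
fn verify_block(path: &Path, expected_hex: &str) -> std::io::Result<bool> {
    let data = fs::read(path)?;
    Ok(hex::encode(Sha256::digest(&data)) == expected_hex)
}
```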

View file

@@ -14,6 +14,7 @@ metadata_dir = "/var/lib/garage/meta"
 data_dir = "/var/lib/garage/data"
 metadata_fsync = true
 data_fsync = false
+disable_scrub = false

 db_engine = "lmdb"
@@ -87,6 +88,7 @@ Top-level configuration options:
 [`data_dir`](#data_dir),
 [`data_fsync`](#data_fsync),
 [`db_engine`](#db_engine),
+[`disable_scrub`](#disable_scrub),
 [`lmdb_map_size`](#lmdb_map_size),
 [`metadata_dir`](#metadata_dir),
 [`metadata_fsync`](#metadata_fsync),
@@ -344,6 +346,22 @@ at the cost of a moderate drop in write performance.
 Similarly to `metatada_fsync`, this is likely not necessary
 if geographical replication is used.

+#### `disable_scrub` {#disable_scrub}
+
+By default, Garage runs a scrub of the data directory approximately once per
+month, with a random delay to avoid all nodes running at the same time. When
+it scrubs the data directory, Garage will read all of the data files stored on
+disk to check their integrity, and will rebuild any data files that it finds
+corrupted, using the remaining valid copies stored on other nodes.
+See [this page](@/documentation/operations/durability-repair.md#scrub) for details.
+
+Set the `disable_scrub` configuration value to `true` if you don't need Garage
+to scrub the data directory, for instance if you are already scrubbing at the
+filesystem level. Note that in this case, if you find a corrupted data file,
+you should delete it from the data directory and then call `garage repair
+blocks` on the node to ensure that it re-obtains a copy from another node on
+the network.
+
 #### `block_size` {#block_size}

 Garage splits stored objects in consecutive chunks of size `block_size`
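
For reviewers who want to see the new option in context: a hedged sketch of a configuration excerpt with `disable_scrub` enabled, parsed through a throwaway struct so the example is checkable. `PartialConfig` and the `toml`/`serde` wiring are illustrative; only the keys themselves come from the excerpt above.

```rust
// Sketch: what the documented option looks like in a node's configuration file.
// `PartialConfig` is a hypothetical stand-in, not Garage's real Config struct;
// keys not declared on the struct are simply ignored by serde here.
use serde::Deserialize;

#[derive(Deserialize)]
struct PartialConfig {
    data_dir: String,
    #[serde(default)]
    disable_scrub: bool, // false when the key is absent
}

fn main() {
    let cfg: PartialConfig = toml::from_str(
        r#"
        metadata_dir = "/var/lib/garage/meta"
        data_dir = "/var/lib/garage/data"
        disable_scrub = true   # e.g. the filesystem already scrubs (ZFS, btrfs)
        "#,
    )
    .unwrap();
    assert!(cfg.disable_scrub);
}
```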

View file

@@ -22,7 +22,7 @@ use garage_net::stream::{read_stream_to_end, stream_asyncread, ByteStream};
 use garage_db as db;
 use garage_util::background::{vars, BackgroundRunner};
-use garage_util::config::DataDirEnum;
+use garage_util::config::Config;
 use garage_util::data::*;
 use garage_util::error::*;
 use garage_util::metrics::RecordDuration;
@@ -84,6 +84,7 @@ pub struct BlockManager {
     data_fsync: bool,
     compression_level: Option<i32>,
+    disable_scrub: bool,

     mutation_lock: Vec<Mutex<BlockManagerLocked>>,
@@ -119,9 +120,7 @@ struct BlockManagerLocked();
 impl BlockManager {
     pub fn new(
         db: &db::Db,
-        data_dir: DataDirEnum,
-        data_fsync: bool,
-        compression_level: Option<i32>,
+        config: &Config,
         replication: TableShardedReplication,
         system: Arc<System>,
     ) -> Result<Arc<Self>, Error> {
@@ -131,11 +130,13 @@ impl BlockManager {
         let data_layout = match data_layout_persister.load() {
             Ok(mut layout) => {
                 layout
-                    .update(&data_dir)
+                    .update(&config.data_dir)
                     .ok_or_message("invalid data_dir config")?;
                 layout
             }
-            Err(_) => DataLayout::initialize(&data_dir).ok_or_message("invalid data_dir config")?,
+            Err(_) => {
+                DataLayout::initialize(&config.data_dir).ok_or_message("invalid data_dir config")?
+            }
         };
         data_layout_persister
             .save(&data_layout)
@@ -154,7 +155,7 @@ impl BlockManager {
             .endpoint("garage_block/manager.rs/Rpc".to_string());

         let metrics = BlockManagerMetrics::new(
-            compression_level,
+            config.compression_level,
             rc.rc.clone(),
             resync.queue.clone(),
             resync.errors.clone(),
@@ -166,8 +167,9 @@ impl BlockManager {
             replication,
             data_layout: ArcSwap::new(Arc::new(data_layout)),
             data_layout_persister,
-            data_fsync,
-            compression_level,
+            data_fsync: config.data_fsync,
+            disable_scrub: config.disable_scrub,
+            compression_level: config.compression_level,
             mutation_lock: vec![(); MUTEX_COUNT]
                 .iter()
                 .map(|_| Mutex::new(BlockManagerLocked()))
@@ -194,6 +196,7 @@ impl BlockManager {
         }

         // Spawn scrub worker
+        if !self.disable_scrub {
             let (scrub_tx, scrub_rx) = mpsc::channel(1);
             self.tx_scrub_command.store(Some(Arc::new(scrub_tx)));
             bg.spawn_worker(ScrubWorker::new(
@@ -202,10 +205,12 @@ impl BlockManager {
                 self.scrub_persister.clone(),
             ));
         }
+    }

     pub fn register_bg_vars(&self, vars: &mut vars::BgVars) {
         self.resync.register_bg_vars(vars);
+        if !self.disable_scrub {
             vars.register_rw(
                 &self.scrub_persister,
                 "scrub-tranquility",
@@ -222,6 +227,7 @@ impl BlockManager {
                 p.get_with(|x| x.corruptions_detected)
             });
         }
+    }

     /// Ask nodes that might have a (possibly compressed) block for it
     /// Return it as a stream with a header
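
The recurring change in this file is that `BlockManager::new` now borrows the whole `Config` instead of receiving `data_dir`, `data_fsync`, and `compression_level` separately. A minimal sketch of that pattern, using hypothetical `Config`/`Manager` types rather than the real ones:

```rust
// Illustrative only: copy the needed settings out of a borrowed config struct.
// Adding a new option (like disable_scrub) then touches the struct and the copy,
// but not the constructor signature or its call sites.
struct Config {
    data_fsync: bool,
    compression_level: Option<i32>,
    disable_scrub: bool,
}

struct Manager {
    data_fsync: bool,
    compression_level: Option<i32>,
    disable_scrub: bool,
}

impl Manager {
    fn new(config: &Config) -> Self {
        Manager {
            data_fsync: config.data_fsync,
            compression_level: config.compression_level,
            disable_scrub: config.disable_scrub,
        }
    }
}
```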

View file

@@ -177,14 +177,7 @@ impl Garage {
         };

         info!("Initialize block manager...");
-        let block_manager = BlockManager::new(
-            &db,
-            config.data_dir.clone(),
-            config.data_fsync,
-            config.compression_level,
-            data_rep_param,
-            system.clone(),
-        )?;
+        let block_manager = BlockManager::new(&db, &config, data_rep_param, system.clone())?;
         block_manager.register_bg_vars(&mut bg_vars);

         // ---- admin tables ----

View file

@@ -23,6 +23,10 @@ pub struct Config {
     #[serde(default)]
     pub data_fsync: bool,

+    /// Disable automatic scrubbing of the data directory
+    #[serde(default)]
+    pub disable_scrub: bool,
+
     /// Size of data blocks to save to disk
     #[serde(
         deserialize_with = "deserialize_capacity",
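
A small sketch of why `#[serde(default)]` keeps existing deployments unchanged: a config file written before this change has no `disable_scrub` key, so the field deserializes to `bool::default()` (`false`) and scrubbing stays enabled. `MiniConfig` is an illustrative stand-in for the real `Config`.

```rust
use serde::Deserialize;

#[derive(Deserialize)]
struct MiniConfig {
    #[serde(default)]
    disable_scrub: bool,
}

fn main() {
    // Old config without the key: scrub remains enabled.
    let old: MiniConfig = toml::from_str("").unwrap();
    assert!(!old.disable_scrub);

    // Operator explicitly opts out.
    let new: MiniConfig = toml::from_str("disable_scrub = true").unwrap();
    assert!(new.disable_scrub);
}
```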