block: Ported to opentelemetry 0.18.

Jonathan Davies 2023-01-27 17:04:16 +00:00
parent 0a3d332966
commit 1e6e8db264
3 changed files with 69 additions and 78 deletions
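At a glance, the port below is mechanical: the bound instruments (`BoundCounter`, `BoundValueRecorder`) and callback-based `ValueObserver`s are replaced by plain `Counter`, `Histogram` and `ObservableGauge` instruments, and every recording call now takes an explicit `Context` plus an attribute slice. A minimal sketch of the 0.18-style calling convention, reusing instrument names from this diff (the recorded values and the `to` attribute value are illustrative, not taken from the commit):

```rust
use opentelemetry::{global, Context, KeyValue};

fn record_example_metrics() {
    let meter = global::meter("garage_model/block");

    // Counters: no more .bind(&[]); a Context and attributes go on every call.
    let bytes_read = meter
        .u64_counter("block.bytes_read")
        .with_description("Number of bytes read from disk")
        .init();
    bytes_read.add(&Context::current(), 4096, &[]);

    // Per-call attributes replace bound attribute sets.
    let resync_send_counter = meter
        .u64_counter("block.resync_send_counter")
        .with_description("Number of blocks sent to another node in resync operations")
        .init();
    resync_send_counter.add(
        &Context::current(),
        1,
        &[KeyValue::new("to", "example-node")],
    );

    // ValueRecorder is now Histogram.
    let block_read_duration = meter
        .f64_histogram("block.read_duration")
        .with_description("Duration of block read operations")
        .init();
    block_read_duration.record(&Context::current(), 0.042, &[]);

    // ValueObserver is now ObservableGauge; this commit observes it directly.
    let compression_level = meter
        .u64_observable_gauge("block.compression_level")
        .with_description("Garage compression level for node")
        .init();
    compression_level.observe(&Context::current(), 3, &[]);
}
```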


@@ -129,12 +129,17 @@ impl BlockManager {
             .netapp
             .endpoint("garage_block/manager.rs/Rpc".to_string());
 
-        let metrics = BlockManagerMetrics::new(
-            compression_level,
-            rc.rc.clone(),
-            resync.queue.clone(),
-            resync.errors.clone(),
-        );
+        let metrics =
+            BlockManagerMetrics::new(rc.rc.clone(), resync.queue.clone(), resync.errors.clone());
+
+        match compression_level {
+            Some(v) => metrics
+                .compression_level
+                .observe(&Context::current(), v as u64, &[]),
+            None => metrics
+                .compression_level
+                .observe(&Context::current(), 0_u64, &[]),
+        }
 
         let scrub_persister = PersisterShared::new(&system.metadata_dir, "scrub_info");

@@ -475,7 +480,9 @@ impl BlockManager {
         ))
         .await?;
 
-        self.metrics.bytes_written.add(write_size);
+        self.metrics
+            .bytes_written
+            .add(&Context::current(), write_size, &[]);
 
         Ok(())
     }

@@ -510,7 +517,7 @@ impl BlockManager {
         self.metrics
             .bytes_read
-            .add(data.inner_buffer().len() as u64);
+            .add(&Context::current(), data.inner_buffer().len() as u64, &[]);
 
         Ok(data)
     }

@@ -542,7 +549,9 @@ impl BlockManager {
         };
 
         if data.verify(*hash).is_err() {
-            self.metrics.corruption_counter.add(1);
+            self.metrics
+                .corruption_counter
+                .add(&Context::current(), 1, &[]);
 
             self.lock_mutate(hash)
                 .await

@@ -742,7 +751,7 @@ impl BlockManagerLocked {
                 path.set_extension("zst");
             }
             fs::remove_file(path).await?;
-            mgr.metrics.delete_counter.add(1);
+            mgr.metrics.delete_counter.add(&Context::current(), 1, &[]);
         }
         Ok(())
     }


@@ -5,82 +5,61 @@ use garage_db::counted_tree_hack::CountedTree;
 /// TableMetrics reference all counter used for metrics
 pub struct BlockManagerMetrics {
-    pub(crate) _compression_level: ValueObserver<u64>,
-    pub(crate) _rc_size: ValueObserver<u64>,
-    pub(crate) _resync_queue_len: ValueObserver<u64>,
-    pub(crate) _resync_errored_blocks: ValueObserver<u64>,
-    pub(crate) resync_counter: BoundCounter<u64>,
-    pub(crate) resync_error_counter: BoundCounter<u64>,
-    pub(crate) resync_duration: BoundValueRecorder<f64>,
+    pub(crate) compression_level: ObservableGauge<u64>,
+    pub(crate) _rc_size: ObservableGauge<u64>,
+    pub(crate) _resync_queue_len: ObservableGauge<u64>,
+    pub(crate) _resync_errored_blocks: ObservableGauge<u64>,
+    pub(crate) resync_counter: Counter<u64>,
+    pub(crate) resync_error_counter: Counter<u64>,
+    pub(crate) resync_duration: Histogram<f64>,
     pub(crate) resync_send_counter: Counter<u64>,
-    pub(crate) resync_recv_counter: BoundCounter<u64>,
-    pub(crate) bytes_read: BoundCounter<u64>,
-    pub(crate) block_read_duration: BoundValueRecorder<f64>,
-    pub(crate) bytes_written: BoundCounter<u64>,
-    pub(crate) block_write_duration: BoundValueRecorder<f64>,
-    pub(crate) delete_counter: BoundCounter<u64>,
-    pub(crate) corruption_counter: BoundCounter<u64>,
+    pub(crate) resync_recv_counter: Counter<u64>,
+    pub(crate) bytes_read: Counter<u64>,
+    pub(crate) block_read_duration: Histogram<f64>,
+    pub(crate) bytes_written: Counter<u64>,
+    pub(crate) block_write_duration: Histogram<f64>,
+    pub(crate) delete_counter: Counter<u64>,
+    pub(crate) corruption_counter: Counter<u64>,
 }
 
 impl BlockManagerMetrics {
-    pub fn new(
-        compression_level: Option<i32>,
-        rc_tree: db::Tree,
-        resync_queue: CountedTree,
-        resync_errors: CountedTree,
-    ) -> Self {
+    pub fn new(rc_tree: db::Tree, resync_queue: CountedTree, resync_errors: CountedTree) -> Self {
         let meter = global::meter("garage_model/block");
         Self {
-            _compression_level: meter
-                .u64_value_observer("block.compression_level", move |observer| {
-                    match compression_level {
-                        Some(v) => observer.observe(v as u64, &[]),
-                        None => observer.observe(0_u64, &[]),
-                    }
-                })
+            compression_level: meter
+                .u64_observable_gauge("block.compression_level")
                 .with_description("Garage compression level for node")
                 .init(),
             _rc_size: meter
-                .u64_value_observer("block.rc_size", move |observer| {
-                    if let Ok(Some(v)) = rc_tree.fast_len() {
-                        observer.observe(v as u64, &[])
-                    }
-                })
+                .u64_observable_gauge("block.rc_size")
                 .with_description("Number of blocks known to the reference counter")
                 .init(),
             _resync_queue_len: meter
-                .u64_value_observer("block.resync_queue_length", move |observer| {
-                    observer.observe(resync_queue.len() as u64, &[])
-                })
+                .u64_observable_gauge("block.resync_queue_length")
                 .with_description(
                     "Number of block hashes queued for local check and possible resync",
                 )
                 .init(),
             _resync_errored_blocks: meter
-                .u64_value_observer("block.resync_errored_blocks", move |observer| {
-                    observer.observe(resync_errors.len() as u64, &[])
-                })
+                .u64_observable_gauge("block.resync_errored_blocks")
                 .with_description("Number of block hashes whose last resync resulted in an error")
                 .init(),
             resync_counter: meter
                 .u64_counter("block.resync_counter")
                 .with_description("Number of calls to resync_block")
-                .init()
-                .bind(&[]),
+                .init(),
             resync_error_counter: meter
                 .u64_counter("block.resync_error_counter")
                 .with_description("Number of calls to resync_block that returned an error")
-                .init()
-                .bind(&[]),
+                .init(),
             resync_duration: meter
-                .f64_value_recorder("block.resync_duration")
+                .f64_histogram("block.resync_duration")
                 .with_description("Duration of resync_block operations")
-                .init()
-                .bind(&[]),
+                .init(),
             resync_send_counter: meter
                 .u64_counter("block.resync_send_counter")
                 .with_description("Number of blocks sent to another node in resync operations")

@@ -88,40 +67,33 @@ impl BlockManagerMetrics {
             resync_recv_counter: meter
                 .u64_counter("block.resync_recv_counter")
                 .with_description("Number of blocks received from other nodes in resync operations")
-                .init()
-                .bind(&[]),
+                .init(),
             bytes_read: meter
                 .u64_counter("block.bytes_read")
                 .with_description("Number of bytes read from disk")
-                .init()
-                .bind(&[]),
+                .init(),
             block_read_duration: meter
-                .f64_value_recorder("block.read_duration")
+                .f64_histogram("block.read_duration")
                 .with_description("Duration of block read operations")
-                .init()
-                .bind(&[]),
+                .init(),
             bytes_written: meter
                 .u64_counter("block.bytes_written")
                 .with_description("Number of bytes written to disk")
-                .init()
-                .bind(&[]),
+                .init(),
             block_write_duration: meter
-                .f64_value_recorder("block.write_duration")
+                .f64_histogram("block.write_duration")
                 .with_description("Duration of block write operations")
-                .init()
-                .bind(&[]),
+                .init(),
             delete_counter: meter
                 .u64_counter("block.delete_counter")
                 .with_description("Number of blocks deleted")
-                .init()
-                .bind(&[]),
+                .init(),
             corruption_counter: meter
                 .u64_counter("block.corruption_counter")
                 .with_description("Data corruptions detected on block reads")
-                .init()
-                .bind(&[]),
+                .init(),
         }
     }
 }
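In this hunk the `_rc_size`, `_resync_queue_len` and `_resync_errored_blocks` gauges lose their observer closures: they are still constructed, but nothing observes them here any more (only `compression_level` is observed, once, in `BlockManager::new` above). With the 0.18 API, a gauge that should be sampled at every collection is normally fed from a callback registered on the meter. A hedged sketch of that pattern follows; it assumes `Meter::register_callback` is available in this opentelemetry release (it is not used in this commit) and stands in a hypothetical `register_queue_length_gauge` helper with an atomic counter in place of Garage's `CountedTree`:

```rust
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;

use opentelemetry::{global, Context};

// Hypothetical helper: wires an observable gauge to a shared queue-length value.
fn register_queue_length_gauge(queue_len: Arc<AtomicU64>) {
    let meter = global::meter("garage_model/block");

    let resync_queue_len = meter
        .u64_observable_gauge("block.resync_queue_length")
        .with_description("Number of block hashes queued for local check and possible resync")
        .init();

    // Assumption: opentelemetry 0.18 exposes Meter::register_callback for
    // pull-style observation, so the gauge is sampled on every collection
    // rather than set once at startup.
    meter
        .register_callback(move |cx: &Context| {
            resync_queue_len.observe(cx, queue_len.load(Ordering::Relaxed), &[]);
        })
        .expect("failed to register metrics callback");
}
```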


@@ -302,10 +302,16 @@ impl BlockResyncManager {
             .bound_record_duration(&manager.metrics.resync_duration)
             .await;
 
-        manager.metrics.resync_counter.add(1);
+        manager
+            .metrics
+            .resync_counter
+            .add(&Context::current(), 1, &[]);
 
         if let Err(e) = &res {
-            manager.metrics.resync_error_counter.add(1);
+            manager
+                .metrics
+                .resync_error_counter
+                .add(&Context::current(), 1, &[]);
 
             error!("Error when resyncing {:?}: {}", hash, e);
             let err_counter = match self.errors.get(hash.as_slice())? {

@@ -413,10 +419,11 @@ impl BlockResyncManager {
         );
 
         for node in need_nodes.iter() {
-            manager
-                .metrics
-                .resync_send_counter
-                .add(1, &[KeyValue::new("to", format!("{:?}", node))]);
+            manager.metrics.resync_send_counter.add(
+                &Context::current(),
+                1,
+                &[KeyValue::new("to", format!("{:?}", node))],
+            );
         }
 
         let block = manager.read_block(hash).await?;

@@ -459,7 +466,10 @@ impl BlockResyncManager {
         let block_data = manager.rpc_get_raw_block(hash, None).await?;
 
-        manager.metrics.resync_recv_counter.add(1);
+        manager
+            .metrics
+            .resync_recv_counter
+            .add(&Context::current(), 1, &[]);
 
         manager.write_block(hash, &block_data).await?;
     }