[peer-metrics] refactor/simplify SystemMetrics

Alex 2024-02-20 11:35:18 +01:00
parent 0b9859befa
commit 00d479358d
Signed by untrusted user: lx
GPG key ID: 0E496D15096376BE
2 changed files with 41 additions and 88 deletions
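
In short: the SystemMetricsValues struct and its AtomicU64 fields go away. The local NodeStatus is now shared as an Arc<RwLock<NodeStatus>>, SystemMetrics::new() takes a handle to it, and the disk-space gauges read the status directly instead of being fed through an atomic cache by update_disk_usage(). A minimal sketch of the resulting pattern, using the same ValueObserver API as the file below (the trimmed-down NodeStatus here is hypothetical, reduced to the one field the gauge needs):

    use std::sync::{Arc, RwLock};
    use opentelemetry::{global, metrics::ValueObserver, KeyValue};

    // Hypothetical stand-in for garage_rpc's NodeStatus, reduced to one field.
    pub struct NodeStatus {
        pub data_disk_avail: Option<(u64, u64)>, // (available, total) in bytes
    }

    // The observer closure captures the shared status and reads it on every
    // metrics collection; nothing has to push values into an intermediate cache.
    fn data_disk_avail_gauge(status: Arc<RwLock<NodeStatus>>) -> ValueObserver<u64> {
        global::meter("garage_system")
            .u64_value_observer("garage_local_disk_avail", move |observer| {
                let st = status.read().unwrap();
                if let Some((avail, _total)) = st.data_disk_avail {
                    observer.observe(avail, &[KeyValue::new("volume", "data")]);
                }
            })
            .with_description("Garage available disk space on each node")
            .init()
    }

One behavioural detail carried over from the old code: when no measurement is available yet (previously a 0 in the atomic, now a None in NodeStatus), the gauge is simply not reported rather than emitting a zero.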


@@ -3,11 +3,9 @@ use std::collections::HashMap;
 use std::io::{Read, Write};
 use std::net::{IpAddr, SocketAddr};
 use std::path::{Path, PathBuf};
-use std::sync::atomic::Ordering;
 use std::sync::{Arc, RwLock};
 use std::time::{Duration, Instant};
 
-use arc_swap::ArcSwap;
 use async_trait::async_trait;
 use futures::join;
 use serde::{Deserialize, Serialize};
@@ -88,7 +86,7 @@ pub struct System {
     persist_cluster_layout: Persister<ClusterLayout>,
     persist_peer_list: Persister<PeerList>,
 
-    local_status: ArcSwap<NodeStatus>,
+    local_status: Arc<RwLock<NodeStatus>>,
     node_status: RwLock<HashMap<Uuid, (u64, NodeStatus)>>,
 
     pub netapp: Arc<NetApp>,
@@ -106,7 +104,7 @@ pub struct System {
     #[cfg(feature = "kubernetes-discovery")]
     kubernetes_discovery: Option<KubernetesDiscoveryConfig>,
 
-    metrics: SystemMetrics,
+    _metrics: SystemMetrics,
 
     replication_mode: ReplicationMode,
     replication_factor: usize,
@@ -280,10 +278,11 @@ impl System {
             }
         };
 
-        let metrics = SystemMetrics::new(replication_factor);
-
         let mut local_status = NodeStatus::initial(replication_factor, &cluster_layout);
-        local_status.update_disk_usage(&config.metadata_dir, &config.data_dir, &metrics);
+        local_status.update_disk_usage(&config.metadata_dir, &config.data_dir);
+        let local_status = Arc::new(RwLock::new(local_status));
+
+        let metrics = SystemMetrics::new(replication_factor, local_status.clone());
 
         let ring = Ring::new(cluster_layout, replication_factor);
         let (update_ring, ring) = watch::channel(Arc::new(ring));
@@ -357,7 +356,7 @@ impl System {
             id: netapp.id.into(),
             persist_cluster_layout,
             persist_peer_list,
-            local_status: ArcSwap::new(Arc::new(local_status)),
+            local_status,
             node_status: RwLock::new(HashMap::new()),
             netapp: netapp.clone(),
             peering: peering.clone(),
@@ -377,7 +376,7 @@ impl System {
             consul_discovery,
             #[cfg(feature = "kubernetes-discovery")]
             kubernetes_discovery: config.kubernetes_discovery.clone(),
-            metrics,
+            _metrics: metrics,
 
             ring,
             update_ring: Mutex::new(update_ring),
@@ -546,12 +545,9 @@ impl System {
             }
         };
 
+        let hostname = self.local_status.read().unwrap().hostname.clone();
         if let Err(e) = c
-            .publish_consul_service(
-                self.netapp.id,
-                &self.local_status.load_full().hostname,
-                rpc_public_addr,
-            )
+            .publish_consul_service(self.netapp.id, &hostname, rpc_public_addr)
             .await
         {
             error!("Error while publishing Consul service: {}", e);
@@ -573,13 +569,8 @@ impl System {
             }
         };
 
-        if let Err(e) = publish_kubernetes_node(
-            k,
-            self.netapp.id,
-            &self.local_status.load_full().hostname,
-            rpc_public_addr,
-        )
-        .await
+        let hostname = self.local_status.read().unwrap().hostname.clone();
+        if let Err(e) = publish_kubernetes_node(k, self.netapp.id, &hostname, rpc_public_addr).await
         {
             error!("Error while publishing node to Kubernetes: {}", e);
         }
@@ -596,15 +587,13 @@ impl System {
     }
 
     fn update_local_status(&self) {
-        let mut new_si: NodeStatus = self.local_status.load().as_ref().clone();
+        let mut local_status = self.local_status.write().unwrap();
 
         let ring = self.ring.borrow();
-        new_si.cluster_layout_version = ring.layout.version;
-        new_si.cluster_layout_staging_hash = ring.layout.staging_hash;
+        local_status.cluster_layout_version = ring.layout.version;
+        local_status.cluster_layout_staging_hash = ring.layout.staging_hash;
 
-        new_si.update_disk_usage(&self.metadata_dir, &self.data_dir, &self.metrics);
-
-        self.local_status.swap(Arc::new(new_si));
+        local_status.update_disk_usage(&self.metadata_dir, &self.data_dir);
     }
 
     // --- RPC HANDLERS ---
@@ -629,7 +618,7 @@ impl System {
         from: Uuid,
         info: &NodeStatus,
     ) -> Result<SystemRpc, Error> {
-        let local_info = self.local_status.load();
+        let local_info = self.local_status.read().unwrap();
 
         if local_info.replication_factor < info.replication_factor {
             error!("Some node have a higher replication factor ({}) than this one ({}). This is not supported and will lead to data corruption. Shutting down for safety.",
@@ -644,6 +633,8 @@ impl System {
             tokio::spawn(self.clone().pull_cluster_layout(from));
         }
 
+        drop(local_info);
+
         self.node_status
             .write()
             .unwrap()
@@ -708,7 +699,7 @@ impl System {
             let restart_at = Instant::now() + STATUS_EXCHANGE_INTERVAL;
 
             self.update_local_status();
-            let local_status: NodeStatus = self.local_status.load().as_ref().clone();
+            let local_status: NodeStatus = self.local_status.read().unwrap().clone();
             let _ = self
                 .rpc
                 .broadcast(
@@ -893,12 +884,7 @@ impl NodeStatus {
         }
     }
 
-    fn update_disk_usage(
-        &mut self,
-        meta_dir: &Path,
-        data_dir: &DataDirEnum,
-        metrics: &SystemMetrics,
-    ) {
+    fn update_disk_usage(&mut self, meta_dir: &Path, data_dir: &DataDirEnum) {
         use nix::sys::statvfs::statvfs;
         let mount_avail = |path: &Path| match statvfs(path) {
             Ok(x) => {
@@ -934,27 +920,6 @@ impl NodeStatus {
                 )
             })(),
         };
-
-        if let Some((avail, total)) = self.meta_disk_avail {
-            metrics
-                .values
-                .meta_disk_avail
-                .store(avail, Ordering::Relaxed);
-            metrics
-                .values
-                .meta_disk_total
-                .store(total, Ordering::Relaxed);
-        }
-        if let Some((avail, total)) = self.data_disk_avail {
-            metrics
-                .values
-                .data_disk_avail
-                .store(avail, Ordering::Relaxed);
-            metrics
-                .values
-                .data_disk_total
-                .store(total, Ordering::Relaxed);
-        }
     }
 }
 
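
Note the shape of the Consul and Kubernetes changes above: the hostname is cloned out of the lock before the async publish call, presumably so the std::sync::RwLock read guard is dropped at the end of that statement and is never held across an .await (a guard held across a suspension point makes the future non-Send and can block other threads for the duration of the network call). A minimal sketch of that pattern, with hypothetical stand-ins for the real types and publish function:

    use std::sync::{Arc, RwLock};

    // Hypothetical stand-ins; the real code uses NodeStatus together with
    // publish_consul_service / publish_kubernetes_node.
    struct NodeStatus {
        hostname: String,
    }

    async fn publish(_hostname: &str) {
        // network call
    }

    async fn advertise(status: Arc<RwLock<NodeStatus>>) {
        // The temporary read guard lives only for this one statement...
        let hostname = status.read().unwrap().hostname.clone();
        // ...so no lock is held across the await below.
        publish(&hostname).await;
    }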


@@ -1,31 +1,22 @@
-use std::sync::atomic::{AtomicU64, Ordering};
-use std::sync::Arc;
+use std::sync::{Arc, RwLock};
 
 use opentelemetry::{global, metrics::*, KeyValue};
 
+use crate::system::NodeStatus;
+
 /// TableMetrics reference all counter used for metrics
 pub struct SystemMetrics {
     pub(crate) _garage_build_info: ValueObserver<u64>,
     pub(crate) _replication_factor: ValueObserver<u64>,
     pub(crate) _disk_avail: ValueObserver<u64>,
     pub(crate) _disk_total: ValueObserver<u64>,
-    pub(crate) values: Arc<SystemMetricsValues>,
 }
 
-#[derive(Default)]
-pub struct SystemMetricsValues {
-    pub(crate) data_disk_total: AtomicU64,
-    pub(crate) data_disk_avail: AtomicU64,
-    pub(crate) meta_disk_total: AtomicU64,
-    pub(crate) meta_disk_avail: AtomicU64,
-}
-
 impl SystemMetrics {
-    pub fn new(replication_factor: usize) -> Self {
+    pub fn new(replication_factor: usize, local_status: Arc<RwLock<NodeStatus>>) -> Self {
         let meter = global::meter("garage_system");
-        let values = Arc::new(SystemMetricsValues::default());
-        let values1 = values.clone();
-        let values2 = values.clone();
+        let st1 = local_status.clone();
+        let st2 = local_status.clone();
         Self {
             _garage_build_info: meter
                 .u64_value_observer("garage_build_info", move |observer| {
@@ -47,31 +38,28 @@ impl SystemMetrics {
                 .init(),
             _disk_avail: meter
                 .u64_value_observer("garage_local_disk_avail", move |observer| {
-                    match values1.data_disk_avail.load(Ordering::Relaxed) {
-                        0 => (),
-                        x => observer.observe(x, &[KeyValue::new("volume", "data")]),
-                    };
-                    match values1.meta_disk_avail.load(Ordering::Relaxed) {
-                        0 => (),
-                        x => observer.observe(x, &[KeyValue::new("volume", "metadata")]),
-                    };
+                    let st = st1.read().unwrap();
+                    if let Some((avail, _total)) = st.data_disk_avail {
+                        observer.observe(avail, &[KeyValue::new("volume", "data")]);
+                    }
+                    if let Some((avail, _total)) = st.meta_disk_avail {
+                        observer.observe(avail, &[KeyValue::new("volume", "metadata")]);
+                    }
                 })
                 .with_description("Garage available disk space on each node")
                 .init(),
             _disk_total: meter
                 .u64_value_observer("garage_local_disk_total", move |observer| {
-                    match values2.data_disk_total.load(Ordering::Relaxed) {
-                        0 => (),
-                        x => observer.observe(x, &[KeyValue::new("volume", "data")]),
-                    };
-                    match values2.meta_disk_total.load(Ordering::Relaxed) {
-                        0 => (),
-                        x => observer.observe(x, &[KeyValue::new("volume", "metadata")]),
-                    };
+                    let st = st2.read().unwrap();
+                    if let Some((_avail, total)) = st.data_disk_avail {
+                        observer.observe(total, &[KeyValue::new("volume", "data")]);
+                    }
+                    if let Some((_avail, total)) = st.meta_disk_avail {
+                        observer.observe(total, &[KeyValue::new("volume", "metadata")]);
+                    }
                 })
                 .with_description("Garage total disk space on each node")
                 .init(),
-            values,
         }
     }
 }
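
A note on the field rename in the first file: System now stores the metrics handle as _metrics and never reads it again. With the observers pulling straight from NodeStatus, the value is presumably kept only so the registered ValueObservers stay alive for the lifetime of the System, and the leading underscore keeps the unused-field lint quiet.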