forked from Deuxfleurs/garage
[peer-metrics] refactor SystemMetrics to hold a reference to System
parent 3cdf69f079
commit 182a23cc12
3 changed files with 83 additions and 72 deletions
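The commit sets up a deliberate reference cycle: System now owns its SystemMetrics through an ArcSwapOption, while SystemMetrics holds an Arc<System> so that its observer callbacks can read live cluster state, and the new cleanup() method breaks that cycle at shutdown (see the first hunk below). The following is a minimal sketch of that ownership pattern with toy stand-in types, not the actual Garage structs; only the arc_swap::ArcSwapOption type is taken from the diff, and the arc_swap crate is the only external dependency assumed.

    // Sketch only: System and SystemMetrics here are toy stand-ins, not the Garage types.
    use std::sync::Arc;

    use arc_swap::ArcSwapOption;

    struct System {
        replication_factor: usize,
        // Optional so it can start out empty and be cleared again on shutdown.
        metrics: ArcSwapOption<SystemMetrics>,
    }

    struct SystemMetrics {
        // Back-reference that creates the Arc cycle.
        system: Arc<System>,
    }

    impl System {
        fn new(replication_factor: usize) -> Arc<Self> {
            // Phase 1: build the System with no metrics attached yet.
            let sys = Arc::new(System {
                replication_factor,
                metrics: ArcSwapOption::new(None),
            });
            // Phase 2: build the metrics from a clone of Arc<System>,
            // then store them back into the System, closing the cycle.
            let metrics = SystemMetrics { system: sys.clone() };
            sys.metrics.store(Some(Arc::new(metrics)));
            sys
        }

        fn cleanup(&self) {
            // Break the cycle so that both structs can actually be dropped.
            self.metrics.store(None);
        }
    }

    fn main() {
        let sys = System::new(3);
        let metrics = sys.metrics.load_full().expect("metrics are attached");
        assert_eq!(metrics.system.replication_factor, 3);
        drop(metrics);
        sys.cleanup();
        drop(sys); // last reference gone, everything is freed now
    }

Keeping the field optional rather than a plain Arc is what makes the explicit cleanup() useful: a cycle of plain Arcs would keep both structs alive forever.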
@@ -162,6 +162,7 @@ pub async fn run_server(config_file: PathBuf, secrets: Secrets) -> Result<(), Er
     info!("Netapp exited");

     // Drop all references so that stuff can terminate properly
+    garage.system.cleanup();
     drop(garage);

     // Await for all background tasks to end

@@ -6,6 +6,7 @@ use std::path::{Path, PathBuf};
 use std::sync::{Arc, RwLock};
 use std::time::{Duration, Instant};

+use arc_swap::ArcSwapOption;
 use async_trait::async_trait;
 use futures::join;
 use serde::{Deserialize, Serialize};
@@ -86,7 +87,7 @@ pub struct System {
     persist_cluster_layout: Persister<ClusterLayout>,
     persist_peer_list: Persister<PeerList>,

-    local_status: Arc<RwLock<NodeStatus>>,
+    pub(crate) local_status: RwLock<NodeStatus>,
     node_status: RwLock<HashMap<Uuid, (u64, NodeStatus)>>,

     pub netapp: Arc<NetApp>,
@@ -104,10 +105,10 @@ pub struct System {
     #[cfg(feature = "kubernetes-discovery")]
     kubernetes_discovery: Option<KubernetesDiscoveryConfig>,

-    metrics: SystemMetrics,
+    metrics: ArcSwapOption<SystemMetrics>,

     replication_mode: ReplicationMode,
-    replication_factor: usize,
+    pub(crate) replication_factor: usize,

     /// The ring
     pub ring: watch::Receiver<Arc<Ring>>,
@@ -280,9 +281,6 @@ impl System {

         let mut local_status = NodeStatus::initial(replication_factor, &cluster_layout);
         local_status.update_disk_usage(&config.metadata_dir, &config.data_dir);
-        let local_status = Arc::new(RwLock::new(local_status));
-
-        let metrics = SystemMetrics::new(replication_factor, local_status.clone());

         let ring = Ring::new(cluster_layout, replication_factor);
         let (update_ring, ring) = watch::channel(Arc::new(ring));
@@ -356,7 +354,7 @@ impl System {
             id: netapp.id.into(),
             persist_cluster_layout,
             persist_peer_list,
-            local_status,
+            local_status: RwLock::new(local_status),
             node_status: RwLock::new(HashMap::new()),
             netapp: netapp.clone(),
             peering: peering.clone(),
@@ -376,14 +374,19 @@ impl System {
             consul_discovery,
             #[cfg(feature = "kubernetes-discovery")]
             kubernetes_discovery: config.kubernetes_discovery.clone(),
-            metrics,
+            metrics: ArcSwapOption::new(None),

             ring,
             update_ring: Mutex::new(update_ring),
             metadata_dir: config.metadata_dir.clone(),
             data_dir: config.data_dir.clone(),
         });
+
         sys.system_endpoint.set_handler(sys.clone());
+
+        let metrics = SystemMetrics::new(sys.clone());
+        sys.metrics.store(Some(Arc::new(metrics)));
+
         Ok(sys)
     }

@@ -401,6 +404,11 @@ impl System {
         );
     }

+    pub fn cleanup(&self) {
+        // Break reference cycle
+        self.metrics.store(None);
+    }
+
     // ---- Administrative operations (directly available and
     // also available through RPC) ----

@@ -699,11 +707,7 @@ impl System {
             let restart_at = Instant::now() + STATUS_EXCHANGE_INTERVAL;

             // Update local node status that is exchanged.
-            // Status variables are exported into Prometheus in SystemMetrics,
-            // so we take the opportunity to also update here the health status
-            // that is reported in those metrics.
             self.update_local_status();
-            *self.metrics.health.write().unwrap() = Some(self.health());

             let local_status: NodeStatus = self.local_status.read().unwrap().clone();
             let _ = self

@@ -1,13 +1,12 @@
 use std::sync::{Arc, RwLock};
+use std::time::{Duration, Instant};

 use opentelemetry::{global, metrics::*, KeyValue};

-use crate::system::{ClusterHealth, ClusterHealthStatus, NodeStatus};
+use crate::system::{ClusterHealthStatus, System};

 /// TableMetrics reference all counter used for metrics
 pub struct SystemMetrics {
-    pub(crate) health: Arc<RwLock<Option<ClusterHealth>>>,
-
     // Static values
     pub(crate) _garage_build_info: ValueObserver<u64>,
     pub(crate) _replication_factor: ValueObserver<u64>,
@@ -29,12 +28,25 @@ pub struct SystemMetrics {
 }

 impl SystemMetrics {
-    pub fn new(replication_factor: usize, local_status: Arc<RwLock<NodeStatus>>) -> Self {
+    pub fn new(system: Arc<System>) -> Self {
         let meter = global::meter("garage_system");
-        let health = Arc::new(RwLock::new(None));
-        Self {
-            health: health.clone(),

+        let health_cache = RwLock::new((Instant::now(), system.health()));
+        let system2 = system.clone();
+        let get_health = Arc::new(move || {
+            {
+                let cache = health_cache.read().unwrap();
+                if cache.0 > Instant::now() - Duration::from_secs(1) {
+                    return cache.1;
+                }
+            }
+
+            let health = system2.health();
+            *health_cache.write().unwrap() = (Instant::now(), health);
+            health
+        });
+
+        Self {
             // Static values
             _garage_build_info: meter
                 .u64_value_observer("garage_build_info", move |observer| {
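The get_health closure introduced above is the heart of the refactor: instead of a health value being pushed into the metrics struct by the status exchange loop (see the removed line in the system.rs hunk further up), every observer now pulls from System::health() through a shared, time-bounded cache, so a burst of scraped gauges triggers at most one recomputation per second. Below is a standalone sketch of that caching pattern under simplified assumptions; Health and compute_health are illustrative names, not part of the commit.

    // Sketch of the cache-then-recompute closure; not the Garage implementation.
    use std::sync::{Arc, RwLock};
    use std::time::{Duration, Instant};

    #[derive(Clone, Copy)]
    struct Health {
        connected_nodes: u64,
    }

    // Stand-in for the comparatively expensive System::health() call.
    fn compute_health() -> Health {
        Health { connected_nodes: 3 }
    }

    fn main() {
        // The cache stores the time of the last computation together with its result.
        let health_cache = RwLock::new((Instant::now(), compute_health()));

        let get_health = Arc::new(move || {
            {
                // Fast path: reuse the cached value if it is less than one second old.
                let cache = health_cache.read().unwrap();
                if cache.0 > Instant::now() - Duration::from_secs(1) {
                    return cache.1;
                }
            } // the read lock is released here, before the write lock is taken

            let fresh = compute_health();
            *health_cache.write().unwrap() = (Instant::now(), fresh);
            fresh
        });

        // Each metric observer holds its own clone of the getter.
        let for_gauge_a = get_health.clone();
        let for_gauge_b = get_health.clone();
        println!("{}", for_gauge_a().connected_nodes);
        println!("{}", for_gauge_b().connected_nodes);
    }

Reading the cache inside its own block releases the read lock before the write lock is acquired, which avoids a self-deadlock on the RwLock.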
@@ -48,19 +60,22 @@ impl SystemMetrics {
                 })
                 .with_description("Garage build info")
                 .init(),
-            _replication_factor: meter
-                .u64_value_observer("garage_replication_factor", move |observer| {
-                    observer.observe(replication_factor as u64, &[])
-                })
-                .with_description("Garage replication factor setting")
-                .init(),
+            _replication_factor: {
+                let replication_factor = system.replication_factor;
+                meter
+                    .u64_value_observer("garage_replication_factor", move |observer| {
+                        observer.observe(replication_factor as u64, &[])
+                    })
+                    .with_description("Garage replication factor setting")
+                    .init()
+            },

             // Disk space values from System::local_status
             _disk_avail: {
-                let status = local_status.clone();
+                let system = system.clone();
                 meter
                     .u64_value_observer("garage_local_disk_avail", move |observer| {
-                        let st = status.read().unwrap();
+                        let st = system.local_status.read().unwrap();
                         if let Some((avail, _total)) = st.data_disk_avail {
                             observer.observe(avail, &[KeyValue::new("volume", "data")]);
                         }
@@ -72,10 +87,10 @@ impl SystemMetrics {
                     .init()
             },
             _disk_total: {
-                let status = local_status.clone();
+                let system = system.clone();
                 meter
                     .u64_value_observer("garage_local_disk_total", move |observer| {
-                        let st = status.read().unwrap();
+                        let st = system.local_status.read().unwrap();
                         if let Some((_avail, total)) = st.data_disk_avail {
                             observer.observe(total, &[KeyValue::new("volume", "data")]);
                         }
@@ -87,98 +102,90 @@ impl SystemMetrics {
                     .init()
             },

             // Health report from System::health()
             _cluster_healthy: {
-                let health = health.clone();
+                let get_health = get_health.clone();
                 meter
                     .u64_value_observer("cluster_healthy", move |observer| {
-                        if let Some(h) = health.read().unwrap().as_ref() {
-                            if h.status == ClusterHealthStatus::Healthy {
-                                observer.observe(1, &[]);
-                            } else {
-                                observer.observe(0, &[]);
-                            }
+                        let h = get_health();
+                        if h.status == ClusterHealthStatus::Healthy {
+                            observer.observe(1, &[]);
+                        } else {
+                            observer.observe(0, &[]);
                         }
                     })
                     .with_description("Whether all storage nodes are connected")
                     .init()
             },
             _cluster_available: {
-                let health = health.clone();
+                let get_health = get_health.clone();
                 meter.u64_value_observer("cluster_available", move |observer| {
-                    if let Some(h) = health.read().unwrap().as_ref() {
-                        if h.status != ClusterHealthStatus::Unavailable {
-                            observer.observe(1, &[]);
-                        } else {
-                            observer.observe(0, &[]);
-                        }
+                    let h = get_health();
+                    if h.status != ClusterHealthStatus::Unavailable {
+                        observer.observe(1, &[]);
+                    } else {
+                        observer.observe(0, &[]);
                     }
                 })
                 .with_description("Whether all requests can be served, even if some storage nodes are disconnected")
                 .init()
             },
             _known_nodes: {
-                let health = health.clone();
+                let get_health = get_health.clone();
                 meter
                     .u64_value_observer("cluster_known_nodes", move |observer| {
-                        if let Some(h) = health.read().unwrap().as_ref() {
-                            observer.observe(h.known_nodes as u64, &[]);
-                        }
+                        let h = get_health();
+                        observer.observe(h.known_nodes as u64, &[]);
                     })
                     .with_description("Number of nodes already seen once in the cluster")
                     .init()
             },
             _connected_nodes: {
-                let health = health.clone();
+                let get_health = get_health.clone();
                 meter
                     .u64_value_observer("cluster_connected_nodes", move |observer| {
-                        if let Some(h) = health.read().unwrap().as_ref() {
-                            observer.observe(h.connected_nodes as u64, &[]);
-                        }
+                        let h = get_health();
+                        observer.observe(h.connected_nodes as u64, &[]);
                     })
                     .with_description("Number of nodes currently connected")
                     .init()
             },
             _storage_nodes: {
-                let health = health.clone();
+                let get_health = get_health.clone();
                 meter
                     .u64_value_observer("cluster_storage_nodes", move |observer| {
-                        if let Some(h) = health.read().unwrap().as_ref() {
-                            observer.observe(h.storage_nodes as u64, &[]);
-                        }
+                        let h = get_health();
+                        observer.observe(h.storage_nodes as u64, &[]);
                     })
                     .with_description("Number of storage nodes declared in the current layout")
                     .init()
             },
             _storage_nodes_ok: {
-                let health = health.clone();
+                let get_health = get_health.clone();
                 meter
                     .u64_value_observer("cluster_storage_nodes_ok", move |observer| {
-                        if let Some(h) = health.read().unwrap().as_ref() {
-                            observer.observe(h.storage_nodes_ok as u64, &[]);
-                        }
+                        let h = get_health();
+                        observer.observe(h.storage_nodes_ok as u64, &[]);
                     })
                     .with_description("Number of storage nodes currently connected")
                     .init()
             },
             _partitions: {
-                let health = health.clone();
+                let get_health = get_health.clone();
                 meter
                     .u64_value_observer("cluster_partitions", move |observer| {
-                        if let Some(h) = health.read().unwrap().as_ref() {
-                            observer.observe(h.partitions as u64, &[]);
-                        }
+                        let h = get_health();
+                        observer.observe(h.partitions as u64, &[]);
                     })
                     .with_description("Number of partitions in the layout")
                     .init()
             },
             _partitions_quorum: {
-                let health = health.clone();
+                let get_health = get_health.clone();
                 meter
                     .u64_value_observer("cluster_partitions_quorum", move |observer| {
-                        if let Some(h) = health.read().unwrap().as_ref() {
-                            observer.observe(h.partitions_quorum as u64, &[]);
-                        }
+                        let h = get_health();
+                        observer.observe(h.partitions_quorum as u64, &[]);
                     })
                     .with_description(
                         "Number of partitions for which we have a quorum of connected nodes",
@@ -186,12 +193,11 @@ impl SystemMetrics {
                     .init()
             },
             _partitions_all_ok: {
-                let health = health.clone();
+                let get_health = get_health.clone();
                 meter
                     .u64_value_observer("cluster_partitions_all_ok", move |observer| {
-                        if let Some(h) = health.read().unwrap().as_ref() {
-                            observer.observe(h.partitions_all_ok as u64, &[]);
-                        }
+                        let h = get_health();
+                        observer.observe(h.partitions_all_ok as u64, &[]);
                     })
                     .with_description(
                         "Number of partitions for which all storage nodes are connected",
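All of these gauges share the same wiring: a callback-based value observer is registered on the global meter, and the returned handle is kept in a leading-underscore field of SystemMetrics only so that the registration stays alive as long as the struct does. The sketch below shows that wiring in isolation, assuming the opentelemetry 0.17-style API this file already uses (global::meter, u64_value_observer, with_description, init); the metric name and the AtomicU64 backing value are made up for the example.

    // Sketch only: mirrors the observer-registration pattern used in the diff,
    // not the actual Garage metrics.
    use std::sync::atomic::{AtomicU64, Ordering};
    use std::sync::Arc;

    use opentelemetry::{global, metrics::ValueObserver, KeyValue};

    struct ExampleMetrics {
        // Kept only so the observer stays registered; it is never read directly.
        _connected_nodes: ValueObserver<u64>,
    }

    fn make_metrics(connected: Arc<AtomicU64>) -> ExampleMetrics {
        let meter = global::meter("example_system");
        ExampleMetrics {
            _connected_nodes: meter
                .u64_value_observer("example_connected_nodes", move |observer| {
                    // Runs on every metrics collection and reads the shared state.
                    observer.observe(
                        connected.load(Ordering::Relaxed),
                        &[KeyValue::new("source", "example")],
                    )
                })
                .with_description("Example gauge backed by shared state")
                .init(),
        }
    }

    fn main() {
        let connected = Arc::new(AtomicU64::new(3));
        let _metrics = make_metrics(connected.clone());
        // Later updates are picked up the next time the callback is invoked
        // by whatever metrics exporter is installed.
        connected.store(5, Ordering::Relaxed);
    }

In the commit itself, the _cluster_* and _partitions* fields play the same role, each cloning the shared get_health getter instead of reading an atomic.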