[next-0.10] cluster node status metrics: report nodes of all active layout versions

Alex 2024-02-20 17:08:31 +01:00
parent 643d1aabd8
commit 75e591727d
Signed by: lx
GPG Key ID: 0E496D15096376BE
1 changed file with 34 additions and 34 deletions
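
Background for the diff below: while a layout change is in progress, Garage keeps several layout versions active at once, and data is still being drained from nodes of the older versions. Both metrics patched here previously iterated only over nodes holding a role in the current layout version; they now iterate over the nodes of all active versions, so nodes being drained keep reporting their status. A minimal sketch of the idea, using simplified stand-in types (NodeId, LayoutVersion and ClusterLayout as written here are illustrative, not the actual Garage API):

use std::collections::BTreeSet;

type NodeId = u64; // stand-in; Garage uses longer node identifiers

struct LayoutVersion {
	nodes: Vec<NodeId>,
}

struct ClusterLayout {
	// All versions that are still active, oldest to newest.
	versions: Vec<LayoutVersion>,
}

impl ClusterLayout {
	// Union of the nodes of every active version, analogous in spirit
	// to the all_nodes() helper used in the new code below.
	fn all_nodes(&self) -> BTreeSet<NodeId> {
		self.versions
			.iter()
			.flat_map(|v| v.nodes.iter().copied())
			.collect()
	}
}

fn main() {
	let layout = ClusterLayout {
		versions: vec![
			LayoutVersion { nodes: vec![1, 2, 3] }, // old version, still draining
			LayoutVersion { nodes: vec![2, 3, 4] }, // current version
		],
	};
	// Metrics should cover nodes 1 through 4, including node 1 (present
	// only in the old version) and node 4 (present only in the new one).
	assert_eq!(layout.all_nodes(), BTreeSet::from([1, 2, 3, 4]));
}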


@@ -216,12 +216,12 @@ impl SystemMetrics {
 .u64_value_observer("cluster_layout_node_connected", move |observer| {
 	let layout = system.cluster_layout();
 	let nodes = system.get_known_nodes();
-	for (id, _, config) in layout.current().roles.items().iter() {
-		if let Some(role) = &config.0 {
-			let mut kv = vec![
-				KeyValue::new("id", format!("{:?}", id)),
-				KeyValue::new("role_zone", role.zone.clone()),
-			];
+	for id in layout.all_nodes().iter() {
+		let mut kv = vec![KeyValue::new("id", format!("{:?}", id))];
+		if let Some(role) =
+			layout.current().roles.get(id).and_then(|r| r.0.as_ref())
+		{
+			kv.push(KeyValue::new("role_zone", role.zone.clone()));
 			match role.capacity {
 				Some(cap) => {
 					kv.push(KeyValue::new("role_capacity", cap as i64));
@@ -231,24 +231,24 @@ impl SystemMetrics {
 					kv.push(KeyValue::new("role_gateway", 1));
 				}
 			}
-
-			let value;
-			if let Some(node) = nodes.iter().find(|n| n.id == *id) {
-				value = if node.is_up { 1 } else { 0 };
-				// TODO: if we add address and hostname, and those change, we
-				// get duplicate metrics, due to bad otel aggregation :(
-				// Can probably be fixed when we upgrade opentelemetry
-				// kv.push(KeyValue::new("address", node.addr.to_string()));
-				// kv.push(KeyValue::new(
-				// 	"hostname",
-				// 	node.status.hostname.clone(),
-				// ));
-			} else {
-				value = 0;
-			}
-
-			observer.observe(value, &kv);
-		}
+		}
+
+		let value;
+		if let Some(node) = nodes.iter().find(|n| n.id == *id) {
+			// TODO: if we add address and hostname, and those change, we
+			// get duplicate metrics, due to bad otel aggregation :(
+			// Can probably be fixed when we upgrade opentelemetry
+			// kv.push(KeyValue::new("address", node.addr.to_string()));
+			// kv.push(KeyValue::new(
+			// 	"hostname",
+			// 	node.status.hostname.clone(),
+			// ));
+			value = if node.is_up { 1 } else { 0 };
+		} else {
+			value = 0;
+		}
+
+		observer.observe(value, &kv);
 	}
 })
 .with_description("Connection status for nodes in the cluster layout")
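
Note on the hunks above: the role labels (role_zone, role_capacity, role_gateway) are now attached only when the node still holds a role in the current layout version, while the connection status itself is reported for every node of every active version, including nodes with no current role. Restated as a self-contained sketch (KnownNode is a simplified stand-in for Garage's known-node status type):

struct KnownNode {
	id: u64,
	is_up: bool,
}

// Value reported for cluster_layout_node_connected: 1 if the node is
// currently connected, 0 if it is disconnected or not known at all.
fn connected_value(known: &[KnownNode], id: u64) -> u64 {
	match known.iter().find(|n| n.id == id) {
		Some(node) if node.is_up => 1,
		_ => 0,
	}
}

fn main() {
	let known = vec![
		KnownNode { id: 1, is_up: true },
		KnownNode { id: 2, is_up: false },
	];
	assert_eq!(connected_value(&known, 1), 1);
	assert_eq!(connected_value(&known, 2), 0);
	assert_eq!(connected_value(&known, 9), 0); // unknown node reads as down
}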
@@ -260,12 +260,12 @@ impl SystemMetrics {
 .u64_value_observer("cluster_layout_node_disconnected_time", move |observer| {
 	let layout = system.cluster_layout();
 	let nodes = system.get_known_nodes();
-	for (id, _, config) in layout.current().roles.items().iter() {
-		if let Some(role) = &config.0 {
-			let mut kv = vec![
-				KeyValue::new("id", format!("{:?}", id)),
-				KeyValue::new("role_zone", role.zone.clone()),
-			];
+	for id in layout.all_nodes().iter() {
+		let mut kv = vec![KeyValue::new("id", format!("{:?}", id))];
+		if let Some(role) =
+			layout.current().roles.get(id).and_then(|r| r.0.as_ref())
+		{
+			kv.push(KeyValue::new("role_zone", role.zone.clone()));
 			match role.capacity {
 				Some(cap) => {
 					kv.push(KeyValue::new("role_capacity", cap as i64));
@@ -275,19 +275,19 @@ impl SystemMetrics {
 					kv.push(KeyValue::new("role_gateway", 1));
 				}
 			}
-
-			if let Some(node) = nodes.iter().find(|n| n.id == *id) {
-				// TODO: see comment above
-				// kv.push(KeyValue::new("address", node.addr.to_string()));
-				// kv.push(KeyValue::new(
-				// 	"hostname",
-				// 	node.status.hostname.clone(),
-				// ));
-				if node.is_up {
-					observer.observe(0, &kv);
-				} else if let Some(secs) = node.last_seen_secs_ago {
-					observer.observe(secs, &kv);
-				}
-			}
+		}
+
+		if let Some(node) = nodes.iter().find(|n| n.id == *id) {
+			// TODO: see comment above
+			// kv.push(KeyValue::new("address", node.addr.to_string()));
+			// kv.push(KeyValue::new(
+			// 	"hostname",
+			// 	node.status.hostname.clone(),
+			// ));
+			if node.is_up {
+				observer.observe(0, &kv);
+			} else if let Some(secs) = node.last_seen_secs_ago {
+				observer.observe(secs, &kv);
+			}
 		}
 	}
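
The disconnected-time metric follows the same pattern: a node that is up reports 0, a node that is down reports how many seconds ago it was last seen, and a node with no last-seen timestamp (or absent from the known-nodes list) produces no data point at all. As a self-contained sketch, with KnownNode again a simplified stand-in for Garage's known-node status type:

struct KnownNode {
	is_up: bool,
	last_seen_secs_ago: Option<u64>,
}

// Value reported for cluster_layout_node_disconnected_time, or None when
// nothing should be observed for this node.
fn disconnected_secs(node: Option<&KnownNode>) -> Option<u64> {
	match node {
		Some(n) if n.is_up => Some(0),
		Some(n) => n.last_seen_secs_ago, // None here means: skip the data point
		None => None,
	}
}

fn main() {
	let up = KnownNode { is_up: true, last_seen_secs_ago: Some(12) };
	let down = KnownNode { is_up: false, last_seen_secs_ago: Some(340) };
	assert_eq!(disconnected_secs(Some(&up)), Some(0));
	assert_eq!(disconnected_secs(Some(&down)), Some(340));
	assert_eq!(disconnected_secs(None), None);
}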